class_table.h
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_CLASS_TABLE_H_
#define RUNTIME_VM_CLASS_TABLE_H_

#include <memory>
#include <tuple>
#include <utility>

#include "platform/allocation.h"
#include "platform/assert.h"
#include "platform/atomic.h"
#include "platform/utils.h"

#include "vm/bitfield.h"
#include "vm/class_id.h"
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/tagged_pointer.h"

namespace dart {

class Class;
class ClassTable;
class Isolate;
class IsolateGroup;
class JSONArray;
class JSONObject;
class JSONStream;
template <typename T>
class MallocGrowableArray;
class ObjectPointerVisitor;
class PersistentHandle;

// A 64-bit bitmap describing unboxed fields in a class.
//
// There is a bit for each word in an instance of the class.
//
// Words corresponding to set bits must be ignored by the GC because they
// don't contain pointers. All words beyond the first 64 words of an object
// are expected to contain pointers.
class UnboxedFieldBitmap {
 public:
  UnboxedFieldBitmap() : bitmap_(0) {}
  explicit UnboxedFieldBitmap(uint64_t bitmap) : bitmap_(bitmap) {}
  UnboxedFieldBitmap(const UnboxedFieldBitmap&) = default;
  UnboxedFieldBitmap& operator=(const UnboxedFieldBitmap&) = default;

  DART_FORCE_INLINE bool Get(intptr_t position) const {
    if (position >= Length()) return false;
    return Utils::TestBit(bitmap_, position);
  }
  DART_FORCE_INLINE void Set(intptr_t position) {
    ASSERT(position < Length());
    bitmap_ |= Utils::Bit<decltype(bitmap_)>(position);
  }
  DART_FORCE_INLINE void Clear(intptr_t position) {
    ASSERT(position < Length());
    bitmap_ &= ~Utils::Bit<decltype(bitmap_)>(position);
  }
  DART_FORCE_INLINE uint64_t Value() const { return bitmap_; }
  DART_FORCE_INLINE bool IsEmpty() const { return bitmap_ == 0; }
  DART_FORCE_INLINE void Reset() { bitmap_ = 0; }

  DART_FORCE_INLINE static constexpr intptr_t Length() {
    return sizeof(decltype(bitmap_)) * kBitsPerByte;
  }

 private:
  uint64_t bitmap_;
};
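
// Illustrative sketch (not part of the original header): how an unboxed-field
// bitmap might be used. The word indices are made up for the example; the
// real bitmaps are computed by the VM when classes are loaded and finalized.
//
//   UnboxedFieldBitmap bitmap;
//   bitmap.Set(1);  // word 1 of the instance holds raw (unboxed) data
//   bitmap.Set(2);  // word 2 as well; the GC must skip both words
//   ASSERT(bitmap.Get(1) && bitmap.Get(2));
//   ASSERT(!bitmap.Get(0));                             // word 0 holds a pointer
//   ASSERT(!bitmap.Get(UnboxedFieldBitmap::Length()));  // out of range => false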

// Allocator used to manage memory for ClassTable arrays and ClassTable
// objects themselves.
//
// This allocator provides delayed-free functionality: normally class tables
// can't be freed unless all mutator and helper threads are stopped, because
// some of these threads might be holding a pointer to a table which we
// want to free. Instead of stopping the world whenever we need to free
// a table (e.g. freeing the old table after growing), we delay freeing until
// an occasional GC, which will need to stop the world anyway.
class ClassTableAllocator : public ValueObject {
 public:
  ClassTableAllocator();
  ~ClassTableAllocator();

  // Allocate an array of T with |len| elements.
  //
  // Does *not* initialize the memory.
  template <class T>
  inline T* Alloc(intptr_t len) {
    return reinterpret_cast<T*>(dart::malloc(len * sizeof(T)));
  }

  // Allocate a zero initialized array of T with |len| elements.
  template <class T>
  inline T* AllocZeroInitialized(intptr_t len) {
    return reinterpret_cast<T*>(dart::calloc(len, sizeof(T)));
  }

  // Clone the given |array| with |size| elements.
  template <class T>
  inline T* Clone(T* array, intptr_t size) {
    if (array == nullptr) {
      ASSERT(size == 0);
      return nullptr;
    }
    auto result = Alloc<T>(size);
    memmove(result, array, size * sizeof(T));
    return result;
  }

  // Copy |size| elements from the given |array| into a new
  // array with space for |new_size| elements. Then |Free|
  // the original |array|.
  //
  // |new_size| is expected to be larger than |size|.
  template <class T>
  inline T* Realloc(T* array, intptr_t size, intptr_t new_size) {
    ASSERT(size < new_size);
    auto result = AllocZeroInitialized<T>(new_size);
    if (size != 0) {
      ASSERT(result != nullptr);
      memmove(result, array, size * sizeof(T));
    }
    Free(array);
    return result;
  }

  // Schedule deletion of the given ClassTable.
  void Free(ClassTable* table);

  // Schedule freeing of the given pointer.
  void Free(void* ptr);

  // Free all objects which were scheduled by |Free|. Expected to only be
  // called on |IsolateGroup| shutdown or when the world is stopped and no
  // thread can be using a stale class table pointer.
  void FreePending();

 private:
  typedef void (*Deleter)(void*);
  MallocGrowableArray<std::pair<void*, Deleter>>* pending_freed_;
};
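
// Illustrative sketch (not part of the original header) of the delayed-free
// pattern this allocator enables: a writer can publish a bigger array while
// readers may still hold the old pointer, and the old memory is reclaimed
// only once no thread can be reading it.
//
//   ClassTableAllocator allocator;
//   intptr_t* sizes = allocator.AllocZeroInitialized<intptr_t>(16);
//   // ... readers may be holding |sizes| without a lock ...
//   intptr_t* bigger = allocator.Realloc(sizes, 16, 32);  // schedules Free(sizes)
//   // ... publish |bigger|, keep running ...
//   allocator.FreePending();  // later, with the world stopped (e.g. during GC)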

// A table with the given |Columns| indexed by class id.
//
// Each column is a contiguous array of the given type. All columns have
// the same number of used elements (|num_cids()|) and the same capacity.
template <typename CidType, typename... Columns>
class CidIndexedTable {
 public:
  explicit CidIndexedTable(ClassTableAllocator* allocator)
      : allocator_(allocator) {}

  ~CidIndexedTable() {
    std::apply([&](auto&... column) { (allocator_->Free(column.load()), ...); },
               columns_);
  }

  CidIndexedTable(const CidIndexedTable& other) = delete;

  void SetNumCidsAndCapacity(intptr_t new_num_cids, intptr_t new_capacity) {
    columns_ = std::apply(
        [&](auto&... column) {
          return std::make_tuple(
              allocator_->Realloc(column.load(), num_cids_, new_capacity)...);
        },
        columns_);
    capacity_ = new_capacity;
    SetNumCids(new_num_cids);
  }

  void AllocateIndex(intptr_t index, bool* did_grow) {
    *did_grow = EnsureCapacity(index);
    SetNumCids(Utils::Maximum(num_cids_, index + 1));
  }

  intptr_t AddRow(bool* did_grow) {
    *did_grow = EnsureCapacity(num_cids_);
    intptr_t id = num_cids_;
    SetNumCids(num_cids_ + 1);
    return id;
  }

  void ShrinkTo(intptr_t new_num_cids) {
    ASSERT(new_num_cids <= num_cids_);
    num_cids_ = new_num_cids;
  }

  bool IsValidIndex(intptr_t index) const {
    return 0 <= index && index < num_cids_;
  }

  void CopyFrom(const CidIndexedTable& other) {
    ASSERT(allocator_ == other.allocator_);

    std::apply([&](auto&... column) { (allocator_->Free(column.load()), ...); },
               columns_);

    columns_ = std::apply(
        [&](auto&... column) {
          return std::make_tuple(
              allocator_->Clone(column.load(), other.num_cids_)...);
        },
        other.columns_);
    capacity_ = num_cids_ = other.num_cids_;
  }

  void Remap(intptr_t* old_to_new_cid) {
    CidIndexedTable clone(allocator_);
    clone.CopyFrom(*this);
    RemapAllColumns(clone, old_to_new_cid,
                    std::index_sequence_for<Columns...>{});
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  T* GetColumn() {
    return std::get<kColumnIndex>(columns_).load();
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  const T* GetColumn() const {
    return std::get<kColumnIndex>(columns_).load();
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  T& At(intptr_t index) {
    ASSERT(IsValidIndex(index));
    return GetColumn<kColumnIndex>()[index];
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  const T& At(intptr_t index) const {
    ASSERT(IsValidIndex(index));
    return GetColumn<kColumnIndex>()[index];
  }

  intptr_t num_cids() const { return num_cids_; }
  intptr_t capacity() const { return capacity_; }

 private:
  friend class ClassTable;

  // Wrapper around AcqRelAtomic<T*> which makes it assignable and copyable
  // so that we could put it inside an std::tuple.
  template <typename T>
  struct Ptr {
    Ptr() : ptr(nullptr) {}
    Ptr(T* ptr) : ptr(ptr) {}  // NOLINT

    Ptr(const Ptr& other) { ptr.store(other.ptr.load()); }

    Ptr& operator=(const Ptr& other) {
      ptr.store(other.load());
      return *this;
    }

    T* load() const { return ptr.load(); }

    AcqRelAtomic<T*> ptr = {nullptr};
  };

  void SetNumCids(intptr_t new_num_cids) {
    if (new_num_cids > kClassIdTagMax) {
      FATAL("Too many classes");
    }
    num_cids_ = new_num_cids;
  }

  bool EnsureCapacity(intptr_t index) {
    if (index >= capacity_) {
      SetNumCidsAndCapacity(num_cids_, index + kCapacityIncrement);
      return true;
    }
    return false;
  }

  template <intptr_t kColumnIndex>
  void RemapColumn(const CidIndexedTable& old, intptr_t* old_to_new_cid) {
    auto new_column = GetColumn<kColumnIndex>();
    auto old_column = old.GetColumn<kColumnIndex>();
    for (intptr_t i = 0; i < num_cids_; i++) {
      new_column[old_to_new_cid[i]] = old_column[i];
    }
  }

  template <std::size_t... Is>
  void RemapAllColumns(const CidIndexedTable& old,
                       intptr_t* old_to_new_cid,
                       std::index_sequence<Is...>) {
    (RemapColumn<Is>(old, old_to_new_cid), ...);
  }

  static constexpr intptr_t kCapacityIncrement = 256;

  ClassTableAllocator* allocator_;
  intptr_t num_cids_ = 0;
  intptr_t capacity_ = 0;
  std::tuple<Ptr<Columns>...> columns_;
};
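
// Illustrative sketch (not part of the original header): a table with two
// columns indexed by class id, assuming a ClassTableAllocator |allocator| is
// available. Column indices passed to |At| are positions in |Columns...|:
//
//   CidIndexedTable<ClassIdTagType, uint32_t, uint8_t> table(&allocator);
//   bool did_grow = false;
//   intptr_t cid = table.AddRow(&did_grow);  // grows all columns together
//   table.At<0>(cid) = 64;                   // first column (uint32_t)
//   table.At<1>(cid) = 1;                    // second column (uint8_t)
//   ASSERT(table.At<0>(cid) == 64);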

// Registry of all known classes.
//
// The GC will only use information about instance size and unboxed field maps
// to scan instances and will not access class objects themselves. This
// information is stored in separate columns of the |classes_| table.
//
// # Concurrency & atomicity
//
// This table is read concurrently without locking (e.g. by GC threads), so
// there are some invariants that need to be observed when working with it.
//
// * When the table is updated (e.g. when it is grown or a new class is
//   registered in it), there must be a release barrier after the update.
//   Such a barrier ensures that stores which populate the table are not
//   reordered past the store which exposes the grown table or exposes
//   a new class id;
// * Old versions of the table can only be freed when the world is stopped
//   (no mutator and no helper threads are running), to avoid freeing a table
//   which some other thread is reading from.
//
// Note that torn reads are not a concern (e.g. it is fine to use
// memmove to copy class table contents) as long as an appropriate
// barrier is issued before the copy of the table can be observed.
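//
// As an illustration only (a sketch, not the actual implementation), a writer
// growing and publishing one column could look like this, with |column_ptr|
// being an acquire/release atomic pointer such as AcqRelAtomic<T*>:
//
//   auto new_column = allocator->Realloc(old_column, num_cids, new_capacity);
//   new_column[new_cid] = value;   // populate the new storage first...
//   column_ptr.store(new_column);  // ...then publish it with release semantics
//
// A reader that performs an acquire load of |column_ptr| then observes fully
// initialized contents.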
//
// # Hot reload
//
// Each IsolateGroup contains two ClassTable fields: |class_table| and
// |heap_walk_class_table|. GC visitors use the second field to get the
// ClassTable instance which they will use for visiting pointers inside
// instances in the heap. Usually these two fields point to the same table,
// except when the IsolateGroup is in the middle of a reload.
//
// During a reload |class_table| points to a copy of the original
// table. Kernel loading modifies this table, while GC
// workers can continue using the original table, still available through
// |heap_walk_class_table|. If the hot reload succeeds, |heap_walk_class_table|
// is dropped and |class_table| becomes the source of truth. Otherwise,
// the original table is restored from |heap_walk_class_table|.
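//
// A sketch of that flow (illustrative pseudocode; the real logic lives in
// IsolateGroup and the reload machinery):
//
//   class_table = heap_walk_class_table->Clone();   // start of reload
//   // ... kernel loading mutates |class_table| ...
//   if (reload_succeeded) {
//     // drop |heap_walk_class_table|, keep |class_table|
//   } else {
//     // restore the original table from |heap_walk_class_table|
//   }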
//
// See IsolateGroup methods CloneClassTableForReload, RestoreOriginalClassTable,
// DropOriginalClassTable.
class ClassTable : public MallocAllocated {
 public:
  explicit ClassTable(ClassTableAllocator* allocator);

  ~ClassTable();

  ClassTable* Clone() const { return new ClassTable(*this); }

  ClassPtr At(intptr_t cid) const {
    if (IsTopLevelCid(cid)) {
      return top_level_classes_.At<kClassIndex>(IndexFromTopLevelCid(cid));
    }
    return classes_.At<kClassIndex>(cid);
  }

  int32_t SizeAt(intptr_t index) const {
    if (IsTopLevelCid(index)) {
      return 0;
    }
    return classes_.At<kSizeIndex>(index);
  }

  void SetAt(intptr_t index, ClassPtr raw_cls);
  void UpdateClassSize(intptr_t cid, ClassPtr raw_cls);

  bool IsValidIndex(intptr_t cid) const {
    if (IsTopLevelCid(cid)) {
      return top_level_classes_.IsValidIndex(IndexFromTopLevelCid(cid));
    }
    return classes_.IsValidIndex(cid);
  }

  bool HasValidClassAt(intptr_t cid) const { return At(cid) != nullptr; }

  UnboxedFieldBitmap GetUnboxedFieldsMapAt(intptr_t cid) const {
    return classes_.At<kUnboxedFieldBitmapIndex>(cid);
  }

  void SetUnboxedFieldsMapAt(intptr_t cid, UnboxedFieldBitmap map) {
    classes_.At<kUnboxedFieldBitmapIndex>(cid) = map;
  }

#if !defined(PRODUCT)
  bool ShouldTraceAllocationFor(intptr_t cid) {
    return !IsTopLevelCid(cid) &&
           (classes_.At<kAllocationTracingStateIndex>(cid) != kTracingDisabled);
  }

  void SetTraceAllocationFor(intptr_t cid, bool trace) {
    classes_.At<kAllocationTracingStateIndex>(cid) =
        trace ? kTraceAllocationBit : kTracingDisabled;
  }

  void SetCollectInstancesFor(intptr_t cid, bool trace) {
    auto& slot = classes_.At<kAllocationTracingStateIndex>(cid);
    if (trace) {
      slot |= kCollectInstancesBit;
    } else {
      slot &= ~kCollectInstancesBit;
    }
  }

  bool CollectInstancesFor(intptr_t cid) {
    auto& slot = classes_.At<kAllocationTracingStateIndex>(cid);
    return (slot & kCollectInstancesBit) != 0;
  }

  void UpdateCachedAllocationTracingStateTablePointer() {
    cached_allocation_tracing_state_table_.store(
        classes_.GetColumn<kAllocationTracingStateIndex>());
  }
#else
  void UpdateCachedAllocationTracingStateTablePointer() {}
#endif  // !defined(PRODUCT)

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  void PopulateUserVisibleNames();

  const char* UserVisibleNameFor(intptr_t cid) {
    if (!classes_.IsValidIndex(cid)) {
      return nullptr;
    }
    return classes_.At<kClassNameIndex>(cid);
  }

  void SetUserVisibleNameFor(intptr_t cid, const char* name) {
    ASSERT(classes_.At<kClassNameIndex>(cid) == nullptr);
    classes_.At<kClassNameIndex>(cid) = name;
  }
#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)

  intptr_t NumCids() const { return classes_.num_cids(); }
  intptr_t Capacity() const { return classes_.capacity(); }

  intptr_t NumTopLevelCids() const { return top_level_classes_.num_cids(); }

  void Register(const Class& cls);
  void AllocateIndex(intptr_t index);

  void RegisterTopLevel(const Class& cls);
  void UnregisterTopLevel(intptr_t index);

  void Remap(intptr_t* old_to_new_cids);

  void VisitObjectPointers(ObjectPointerVisitor* visitor);

  // If a snapshot reader has populated the class table, then the
  // sizes in the class table are not correct. Iterates through the
  // table, updating the sizes.
  void CopySizesFromClassObjects();

  void Validate();

  void Print();

#if defined(DART_PRECOMPILER)
  void PrintObjectLayout(const char* filename);
#endif

#ifndef PRODUCT
  // Describes the layout of heap stats for code generation. See
  // offset_extractor.cc.
  struct ArrayTraits {
    static intptr_t elements_start_offset() { return 0; }

    static constexpr intptr_t kElementSize = sizeof(uint8_t);
  };

  static intptr_t allocation_tracing_state_table_offset() {
    static_assert(sizeof(cached_allocation_tracing_state_table_) == kWordSize);
    return OFFSET_OF(ClassTable, cached_allocation_tracing_state_table_);
  }

  void AllocationProfilePrintJSON(JSONStream* stream, bool internal);

  void PrintToJSONObject(JSONObject* object);
#endif  // !PRODUCT

  // Deallocates table copies. Do not call during concurrent access to table.
  void FreeOldTables();

  static bool IsTopLevelCid(intptr_t cid) { return cid >= kTopLevelCidOffset; }

  static intptr_t IndexFromTopLevelCid(intptr_t cid) {
    ASSERT(IsTopLevelCid(cid));
    return cid - kTopLevelCidOffset;
  }

  static intptr_t CidFromTopLevelIndex(intptr_t index) {
    return kTopLevelCidOffset + index;
  }

 private:
  friend class ClassTableAllocator;
  friend class Dart;
  friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup* group,
                                                   const char* name,
                                                   char** error);
  friend class IsolateGroup;  // for table()
  static constexpr int kInitialCapacity = 512;

  static constexpr intptr_t kTopLevelCidOffset = kClassIdTagMax + 1;

  ClassTable(const ClassTable& original)
      : allocator_(original.allocator_),
        classes_(original.allocator_),
        top_level_classes_(original.allocator_) {
    classes_.CopyFrom(original.classes_);

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
    // Copying classes_ doesn't perform a deep copy. Ensure we duplicate
    // the class names to avoid double free crashes at shutdown.
    for (intptr_t cid = 1; cid < classes_.num_cids(); ++cid) {
      if (classes_.IsValidIndex(cid)) {
        const char* cls_name = classes_.At<kClassNameIndex>(cid);
        if (cls_name != nullptr) {
          classes_.At<kClassNameIndex>(cid) = Utils::StrDup(cls_name);
        }
      }
    }
#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)

    top_level_classes_.CopyFrom(original.top_level_classes_);
    UpdateCachedAllocationTracingStateTablePointer();
  }

  void AllocateTopLevelIndex(intptr_t index);

  ClassPtr* table() { return classes_.GetColumn<kClassIndex>(); }

  // Used to drop recently added classes.
  void SetNumCids(intptr_t num_cids, intptr_t num_tlc_cids) {
    classes_.ShrinkTo(num_cids);
    top_level_classes_.ShrinkTo(num_tlc_cids);
  }

  ClassTableAllocator* allocator_;

  // Unfortunately, the std::tuple used by CidIndexedTable does not have a
  // stable layout, so we can't refer to its elements from generated code.
  NOT_IN_PRODUCT(AcqRelAtomic<uint8_t*> cached_allocation_tracing_state_table_ =
                     {nullptr});

  enum {
    kClassIndex = 0,
    kSizeIndex,
    kUnboxedFieldBitmapIndex,
#if !defined(PRODUCT)
    kAllocationTracingStateIndex,
#endif
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
    kClassNameIndex,
#endif
  };

#if !defined(PRODUCT)
  CidIndexedTable<ClassIdTagType,
                  ClassPtr,
                  uint32_t,
                  UnboxedFieldBitmap,
                  uint8_t,
                  const char*>
      classes_;
#elif defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  CidIndexedTable<ClassIdTagType,
                  ClassPtr,
                  uint32_t,
                  UnboxedFieldBitmap,
                  const char*>
      classes_;
#else
  CidIndexedTable<ClassIdTagType, ClassPtr, uint32_t, UnboxedFieldBitmap>
      classes_;
#endif

#ifndef PRODUCT
  enum {
    kTracingDisabled = 0,
    kTraceAllocationBit = (1 << 0),
    kCollectInstancesBit = (1 << 1),
  };
#endif  // !PRODUCT

  CidIndexedTable<classid_t, ClassPtr> top_level_classes_;
};
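
// Illustrative sketch (not part of the original header): how a GC-style
// visitor could combine |SizeAt| and |GetUnboxedFieldsMapAt| to skip unboxed
// words while scanning an instance of class |cid|. The byte-to-word size
// conversion, |first_field_word| and |VisitPointerAt| are assumptions made
// purely for this example:
//
//   const intptr_t size_in_words = table->SizeAt(cid) / kWordSize;
//   const UnboxedFieldBitmap bitmap = table->GetUnboxedFieldsMapAt(cid);
//   for (intptr_t word = first_field_word; word < size_in_words; word++) {
//     if (bitmap.Get(word)) continue;  // unboxed data, not a pointer
//     VisitPointerAt(word);            // hypothetical pointer visit
//   }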

}  // namespace dart

#endif  // RUNTIME_VM_CLASS_TABLE_H_