#ifndef RUNTIME_VM_CLASS_TABLE_H_
#define RUNTIME_VM_CLASS_TABLE_H_
#include <limits>
#include <tuple>
#include <utility>

template <typename T>
class MallocGrowableArray;
class ObjectPointerVisitor;
class PersistentHandle;
// Bitmap of which words in an instance contain unboxed (untagged) values.
class UnboxedFieldBitmap {
 public:
  UnboxedFieldBitmap() : bitmap_(0) {}
  explicit UnboxedFieldBitmap(uint64_t bitmap) : bitmap_(bitmap) {}
  UnboxedFieldBitmap(const UnboxedFieldBitmap&) = default;
  UnboxedFieldBitmap& operator=(const UnboxedFieldBitmap&) = default;

  DART_FORCE_INLINE bool Get(intptr_t position) const {
    if (position >= Length()) return false;
    return Utils::TestBit(bitmap_, position);
  }
  DART_FORCE_INLINE void Set(intptr_t position) {
    bitmap_ |= Utils::Bit<decltype(bitmap_)>(position);
  }
  DART_FORCE_INLINE void Clear(intptr_t position) {
    bitmap_ &= ~Utils::Bit<decltype(bitmap_)>(position);
  }
  DART_FORCE_INLINE uint64_t Value() const { return bitmap_; }
  DART_FORCE_INLINE bool IsEmpty() const { return bitmap_ == 0; }
  DART_FORCE_INLINE void Reset() { bitmap_ = 0; }

  DART_FORCE_INLINE static constexpr intptr_t Length() {
    return sizeof(decltype(bitmap_)) * kBitsPerByte;
  }

 private:
  uint64_t bitmap_;
};
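// Illustrative sketch (hypothetical helper, not part of this header): Set() and
// Clear() above rely on a Utils::Bit-style function that builds a single-bit
// mask in the bitmap's own integer type. A standalone equivalent would be:
template <typename T>
constexpr T ExampleBitMask(intptr_t position) {
  // Widen 1 to T before shifting so positions >= 32 stay well defined for the
  // 64-bit bitmap used above; e.g. bitmap |= ExampleBitMask<uint64_t>(40).
  return static_cast<T>(1) << position;
}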
// Allocator that manages the memory backing ClassTable arrays (excerpt).
class ClassTableAllocator {
 public:
  // Clone the given |array| with |size| elements.
  template <typename T>
  T* Clone(T* array, intptr_t size) {
    if (array == nullptr) {
      // ...
    }
    // ...
  }

  // Copy |array| into a new zero-initialized array with |new_size| elements.
  template <typename T>
  T* Realloc(T* array, intptr_t size, intptr_t new_size) {
    // ...
    auto result = AllocZeroInitialized<T>(new_size);
    // ...
  }

  // Schedule freeing of the given pointer.
  void Free(void* ptr);

 private:
  typedef void (*Deleter)(void*);
  // ...
};
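// Illustrative sketch (hypothetical, not the VM's allocator; assumes <cstdlib>
// is available): an AllocZeroInitialized-style helper is typically a thin
// wrapper over calloc, which sizes the allocation in elements and guarantees
// zero-filled memory.
template <typename T>
T* ExampleAllocZeroInitialized(intptr_t len) {
  // std::calloc zero-fills the whole len * sizeof(T) allocation.
  return static_cast<T*>(std::calloc(len, sizeof(T)));
}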
// A table indexed by class id with one column per type in Columns. All columns
// are grown together, so every row has an entry in each column.
template <typename CidType, typename... Columns>
class CidIndexedTable {
 public:
  explicit CidIndexedTable(ClassTableAllocator* allocator)
      : allocator_(allocator) {}

  ~CidIndexedTable() {
    std::apply([&](auto&... column) { (allocator_->Free(column.load()), ...); },
               columns_);
  }

  CidIndexedTable(const CidIndexedTable& other) = delete;

  void SetNumCidsAndCapacity(intptr_t new_num_cids, intptr_t new_capacity) {
    columns_ = std::apply(
        [&](auto&... column) {
          return std::make_tuple(
              allocator_->Realloc(column.load(), num_cids_, new_capacity)...);
        },
        columns_);
    capacity_ = new_capacity;
    SetNumCids(new_num_cids);
  }

  void AllocateIndex(intptr_t index, bool* did_grow) {
    *did_grow = EnsureCapacity(index);
    SetNumCids(Utils::Maximum(num_cids_, index + 1));
  }

  intptr_t AddRow(bool* did_grow) {
    *did_grow = EnsureCapacity(num_cids_);
    intptr_t id = num_cids_;
    SetNumCids(num_cids_ + 1);
    return id;
  }

  void ShrinkTo(intptr_t new_num_cids) {
    ASSERT(new_num_cids <= num_cids_);
    num_cids_ = new_num_cids;
  }

  bool IsValidIndex(intptr_t index) const {
    return 0 <= index && index < num_cids_;
  }

  void CopyFrom(const CidIndexedTable& other) {
    ASSERT(allocator_ == other.allocator_);

    std::apply([&](auto&... column) { (allocator_->Free(column.load()), ...); },
               columns_);

    columns_ = std::apply(
        [&](auto&... column) {
          return std::make_tuple(
              allocator_->Clone(column.load(), other.num_cids_)...);
        },
        other.columns_);

    capacity_ = num_cids_ = other.num_cids_;
  }

  void Remap(intptr_t* old_to_new_cid) {
    CidIndexedTable clone(allocator_);
    clone.CopyFrom(*this);
    RemapAllColumns(clone, old_to_new_cid,
                    std::index_sequence_for<Columns...>{});
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  T* GetColumn() {
    return std::get<kColumnIndex>(columns_).load();
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  const T* GetColumn() const {
    return std::get<kColumnIndex>(columns_).load();
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  T& At(intptr_t index) {
    ASSERT(IsValidIndex(index));
    return GetColumn<kColumnIndex>()[index];
  }

  template <
      intptr_t kColumnIndex,
      typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
  const T& At(intptr_t index) const {
    ASSERT(IsValidIndex(index));
    return GetColumn<kColumnIndex>()[index];
  }

  intptr_t num_cids() const { return num_cids_; }
  intptr_t capacity() const { return capacity_; }

 private:
  // Wrapper around an acquire/release atomic pointer that is copyable and
  // assignable, so it can be stored in a std::tuple.
  template <typename T>
  struct Ptr {
    Ptr() : ptr(nullptr) {}
    Ptr(T* ptr) : ptr(ptr) {}

    Ptr(const Ptr& other) { ptr.store(other.ptr.load()); }

    Ptr& operator=(const Ptr& other) {
      ptr.store(other.load());
      return *this;
    }

    T* load() const { return ptr.load(); }

    AcqRelAtomic<T*> ptr = {nullptr};
  };

  void SetNumCids(intptr_t new_num_cids) {
    if (new_num_cids > std::numeric_limits<CidType>::max()) {
      FATAL("Too many classes");
    }
    num_cids_ = new_num_cids;
  }

  bool EnsureCapacity(intptr_t index) {
    if (index >= capacity_) {
      SetNumCidsAndCapacity(num_cids_, index + kCapacityIncrement);
      return true;
    }
    return false;
  }

  template <intptr_t kColumnIndex>
  void RemapColumn(const CidIndexedTable& old, intptr_t* old_to_new_cid) {
    auto new_column = GetColumn<kColumnIndex>();
    auto old_column = old.GetColumn<kColumnIndex>();
    for (intptr_t i = 0; i < num_cids_; i++) {
      new_column[old_to_new_cid[i]] = old_column[i];
    }
  }

  template <std::size_t... Is>
  void RemapAllColumns(const CidIndexedTable& old,
                       intptr_t* old_to_new_cid,
                       std::index_sequence<Is...>) {
    (RemapColumn<Is>(old, old_to_new_cid), ...);
  }

  static constexpr intptr_t kCapacityIncrement = 256;

  ClassTableAllocator* allocator_;
  intptr_t num_cids_ = 0;
  intptr_t capacity_ = 0;
  std::tuple<Ptr<Columns>...> columns_;
};
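// Illustrative usage sketch (hypothetical column layout; ClassTable below is
// the real client). Each row is keyed by a class id, and every template
// argument after the cid type becomes one parallel, independently typed column
// that grows together with the others:
//
//   ClassTableAllocator allocator;
//   CidIndexedTable<classid_t, ClassPtr, uint32_t> table(&allocator);
//   bool did_grow = false;
//   intptr_t cid = table.AddRow(&did_grow);  // extends every column at once
//   table.At<0>(cid) = cls_ptr;              // column 0: class pointer
//   table.At<1>(cid) = instance_size;        // column 1: instance size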
// ClassTable (excerpt): the registry that maps class ids to classes and to
// per-class metadata such as instance size, unboxed-field bitmaps, and
// allocation tracing state.
class ClassTable {
 public:
  explicit ClassTable(ClassTableAllocator* allocator);

  void Register(const Class& cls);
  void RegisterTopLevel(const Class& cls);
  void UnregisterTopLevel(intptr_t index);
  void AllocateIndex(intptr_t index);
  ClassTable* Clone() const;

  ClassPtr At(intptr_t cid) const {
    // ...
    return classes_.At<kClassIndex>(cid);
  }

  int32_t SizeAt(intptr_t index) const {
    // ...
    return classes_.At<kSizeIndex>(index);
  }

  void SetAt(intptr_t index, ClassPtr raw_cls);
  void UpdateClassSize(intptr_t cid, ClassPtr raw_cls);
  void CopySizesFromClassObjects();

  bool IsValidIndex(intptr_t cid) const {
    // ...
    return classes_.IsValidIndex(cid);
  }

  bool HasValidClassAt(intptr_t cid) const;

  UnboxedFieldBitmap GetUnboxedFieldsMapAt(intptr_t cid) const {
    // ...
    return classes_.At<kUnboxedFieldBitmapIndex>(cid);
  }

  void SetUnboxedFieldsMapAt(intptr_t cid, UnboxedFieldBitmap map) {
    // ...
    classes_.At<kUnboxedFieldBitmapIndex>(cid) = map;
  }

  bool ShouldTraceAllocationFor(intptr_t cid) {
    return /* ... */
        (classes_.At<kAllocationTracingStateIndex>(cid) != kTracingDisabled);
  }

  void SetTraceAllocationFor(intptr_t cid, bool trace) {
    // ...
    classes_.At<kAllocationTracingStateIndex>(cid) =
        trace ? kTraceAllocationBit : kTracingDisabled;
  }

  void SetCollectInstancesFor(intptr_t cid, bool trace) {
    auto& slot = classes_.At<kAllocationTracingStateIndex>(cid);
    if (trace) {
      slot |= kCollectInstancesBit;
    } else {
      slot &= ~kCollectInstancesBit;
    }
  }

  bool CollectInstancesFor(intptr_t cid) {
    auto& slot = classes_.At<kAllocationTracingStateIndex>(cid);
    return (slot & kCollectInstancesBit) != 0;
  }

  void UpdateCachedAllocationTracingStateTablePointer() {
    cached_allocation_tracing_state_table_.store(
        classes_.GetColumn<kAllocationTracingStateIndex>());
  }

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  const char* UserVisibleNameFor(intptr_t cid) {
    if (!classes_.IsValidIndex(cid)) {
      return nullptr;
    }
    return classes_.At<kClassNameIndex>(cid);
  }

  void SetUserVisibleNameFor(intptr_t cid, const char* name) {
    ASSERT(classes_.At<kClassNameIndex>(cid) == nullptr);
    classes_.At<kClassNameIndex>(cid) = name;
  }
#endif

  intptr_t NumCids() const { return classes_.num_cids(); }
  intptr_t Capacity() const { return classes_.capacity(); }
  intptr_t NumTopLevelCids() const;

  void VisitObjectPointers(ObjectPointerVisitor* visitor);
  void Remap(intptr_t* old_to_new_cids);

#if defined(DART_PRECOMPILER)
  void PrintObjectLayout(const char* filename);
#endif

  static intptr_t allocation_tracing_state_table_offset() {
    static_assert(sizeof(cached_allocation_tracing_state_table_) == kWordSize);
    return OFFSET_OF(ClassTable, cached_allocation_tracing_state_table_);
  }

  static bool IsTopLevelCid(intptr_t cid);

  static intptr_t IndexFromTopLevelCid(intptr_t cid) {
    return cid - kTopLevelCidOffset;
  }

  static intptr_t CidFromTopLevelIndex(intptr_t index) {
    return kTopLevelCidOffset + index;
  }

  static constexpr int kInitialCapacity = 512;
  static constexpr intptr_t kTopLevelCidOffset = kClassIdTagMax + 1;

 private:
  friend class ClassTableAllocator;

  // Copy constructor used when cloning the table; duplicates all columns and,
  // outside PRODUCT builds, the user-visible class name strings.
  ClassTable(const ClassTable& original)
      : allocator_(original.allocator_),
        classes_(original.allocator_),
        top_level_classes_(original.allocator_) {
    classes_.CopyFrom(original.classes_);
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
    for (intptr_t cid = 1; cid < classes_.num_cids(); ++cid) {
      if (classes_.IsValidIndex(cid)) {
        const char* cls_name = classes_.At<kClassNameIndex>(cid);
        if (cls_name != nullptr) {
          classes_.At<kClassNameIndex>(cid) = Utils::StrDup(cls_name);
        }
      }
    }
#endif
    top_level_classes_.CopyFrom(original.top_level_classes_);
  }

  void AllocateTopLevelIndex(intptr_t index);

  ClassPtr* table() { return classes_.GetColumn<kClassIndex>(); }

  void SetNumCids(intptr_t num_cids, intptr_t num_tlc_cids) {
    classes_.ShrinkTo(num_cids);
    top_level_classes_.ShrinkTo(num_tlc_cids);
  }

  NOT_IN_PRODUCT(AcqRelAtomic<uint8_t*> cached_allocation_tracing_state_table_ =
                     {nullptr});

  // Column indexes into |classes_|.
  enum {
    kClassIndex = 0,
    kSizeIndex,
    kUnboxedFieldBitmapIndex,
    kAllocationTracingStateIndex,
    // ... (kClassNameIndex in configurations that keep class names)
  };

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  // ... (|classes_| variants that also carry the class name column)
#elif defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  // ...
#endif
  CidIndexedTable<ClassIdTagType, ClassPtr, uint32_t, UnboxedFieldBitmap>
      classes_;

  // Per-cid allocation tracing state stored in kAllocationTracingStateIndex.
  enum {
    kTracingDisabled = 0,
    kTraceAllocationBit = (1 << 0),
    kCollectInstancesBit = (1 << 1),
  };

  ClassTableAllocator* allocator_;
  CidIndexedTable<classid_t, ClassPtr> top_level_classes_;
};
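// Illustrative usage sketch (hypothetical; in the VM, registration happens
// during class loading and finalization, so |cls| and |cid| are assumptions):
//
//   ClassTableAllocator allocator;
//   ClassTable table(&allocator);
//   table.Register(cls);                   // assigns the class its cid
//   ClassPtr raw = table.At(cid);          // cid-indexed lookup
//   if (ClassTable::IsTopLevelCid(cid)) {
//     // Top-level (library) classes live in |top_level_classes_| and are
//     // addressed via ClassTable::IndexFromTopLevelCid(cid).
//   }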