Flutter Engine
The Flutter Engine
Classes | Public Member Functions | List of all members
dart::Serializer Class Reference
Inheritance diagram for dart::Serializer:
dart::ThreadStackResource dart::StackResource

Classes

class  WritingObjectScope
 

Public Member Functions

 Serializer (Thread *thread, Snapshot::Kind kind, NonStreamingWriteStream *stream, ImageWriter *image_writer_, bool vm_, V8SnapshotProfileWriter *profile_writer=nullptr)
 
 ~Serializer ()
 
void AddBaseObject (ObjectPtr base_object, const char *type=nullptr, const char *name=nullptr)
 
intptr_t AssignRef (ObjectPtr object)
 
intptr_t AssignArtificialRef (ObjectPtr object=nullptr)
 
intptr_t GetCodeIndex (CodePtr code)
 
void Push (ObjectPtr object, intptr_t cid_override=kIllegalCid)
 
void PushWeak (ObjectPtr object)
 
void AddUntracedRef ()
 
void Trace (ObjectPtr object, intptr_t cid_override)
 
void UnexpectedObject (ObjectPtr object, const char *message)
 
SerializationCluster * NewClusterForClass (intptr_t cid, bool is_canonical)
 
void ReserveHeader ()
 
void FillHeader (Snapshot::Kind kind)
 
void WriteVersionAndFeatures (bool is_vm_snapshot)
 
ZoneGrowableArray< Object * > * Serialize (SerializationRoots *roots)
 
void PrintSnapshotSizes ()
 
NonStreamingWriteStream * stream ()
 
intptr_t bytes_written ()
 
intptr_t bytes_heap_allocated ()
 
template<typename T >
void Write (T value)
 
void WriteRefId (intptr_t value)
 
void WriteUnsigned (intptr_t value)
 
void WriteUnsigned64 (uint64_t value)
 
void WriteWordWith32BitWrites (uword value)
 
void WriteBytes (const void *addr, intptr_t len)
 
void Align (intptr_t alignment, intptr_t offset=0)
 
V8SnapshotProfileWriter::ObjectId GetProfileId (ObjectPtr object) const
 
V8SnapshotProfileWriter::ObjectId GetProfileId (intptr_t ref) const
 
void WriteRootRef (ObjectPtr object, const char *name=nullptr)
 
void AttributeReference (ObjectPtr object, const V8SnapshotProfileWriter::Reference &reference)
 
void AttributeElementRef (ObjectPtr object, intptr_t index)
 
void WriteElementRef (ObjectPtr object, intptr_t index)
 
void AttributePropertyRef (ObjectPtr object, const char *property)
 
void WritePropertyRef (ObjectPtr object, const char *property)
 
void WriteOffsetRef (ObjectPtr object, intptr_t offset)
 
template<typename T , typename... P>
void WriteFromTo (T obj, P &&... args)
 
template<typename T >
DART_NOINLINE void WriteRange (ObjectPtr obj, T from, T to)
 
template<typename T , typename... P>
void PushFromTo (T obj, P &&... args)
 
template<typename T >
DART_NOINLINE void PushRange (ObjectPtr obj, T from, T to)
 
void WriteTokenPosition (TokenPosition pos)
 
void WriteCid (intptr_t cid)
 
void PrepareInstructions (const CompressedStackMaps &canonical_smap)
 
void WriteInstructions (InstructionsPtr instr, uint32_t unchecked_offset, CodePtr code, bool deferred)
 
uint32_t GetDataOffset (ObjectPtr object) const
 
void TraceDataOffset (uint32_t offset)
 
intptr_t GetDataSize () const
 
void WriteDispatchTable (const Array &entries)
 
Heap * heap () const
 
Zone * zone () const
 
Snapshot::Kind kind () const
 
intptr_t next_ref_index () const
 
void DumpCombinedCodeStatistics ()
 
V8SnapshotProfileWriter * profile_writer () const
 
bool CreateArtificialNodeIfNeeded (ObjectPtr obj)
 
bool InCurrentLoadingUnitOrRoot (ObjectPtr obj)
 
void RecordDeferredCode (CodePtr ptr)
 
GrowableArray< LoadingUnitSerializationData * > * loading_units () const
 
void set_loading_units (GrowableArray< LoadingUnitSerializationData * > *units)
 
intptr_t current_loading_unit_id () const
 
void set_current_loading_unit_id (intptr_t id)
 
intptr_t RefId (ObjectPtr object) const
 
intptr_t UnsafeRefId (ObjectPtr object) const
 
bool IsReachable (ObjectPtr object) const
 
bool HasRef (ObjectPtr object) const
 
bool HasArtificialRef (ObjectPtr object) const
 
bool HasProfileNode (ObjectPtr object) const
 
bool IsWritten (ObjectPtr object) const
 
- Public Member Functions inherited from dart::ThreadStackResource
 ThreadStackResource (Thread *T)
 
 ~ThreadStackResource ()
 
Thread * thread () const
 
Isolate * isolate () const
 
IsolateGroup * isolate_group () const
 
- Public Member Functions inherited from dart::StackResource
 StackResource (ThreadState *thread)
 
virtual ~StackResource ()
 
ThreadState * thread () const
 

Additional Inherited Members

- Static Public Member Functions inherited from dart::StackResource
static void Unwind (ThreadState *thread)
 
static void UnwindAbove (ThreadState *thread, StackResource *new_top)
 

Detailed Description

Definition at line 306 of file app_snapshot.cc.

Constructor & Destructor Documentation

◆ Serializer()

dart::Serializer::Serializer ( Thread *  thread,
Snapshot::Kind  kind,
NonStreamingWriteStream *  stream,
ImageWriter *  image_writer_,
bool  vm_,
V8SnapshotProfileWriter *  profile_writer = nullptr 
)

Definition at line 7422 of file app_snapshot.cc.

7429 heap_(thread->isolate_group()->heap()),
7430 zone_(thread->zone()),
7431 kind_(kind),
7432 stream_(stream),
7433 image_writer_(image_writer),
7434 canonical_clusters_by_cid_(nullptr),
7435 clusters_by_cid_(nullptr),
7436 stack_(),
7437 num_cids_(0),
7438 num_tlc_cids_(0),
7439 num_base_objects_(0),
7440 num_written_objects_(0),
7441 next_ref_index_(kFirstReference),
7442 vm_(vm),
7443 profile_writer_(profile_writer)
7444#if defined(SNAPSHOT_BACKTRACE)
7445 ,
7446 current_parent_(Object::null()),
7447 parent_pairs_()
7448#endif
7449#if defined(DART_PRECOMPILER)
7450 ,
7451 deduped_instructions_sources_(zone_)
7452#endif
7453{
7454 num_cids_ = thread->isolate_group()->class_table()->NumCids();
7455 num_tlc_cids_ = thread->isolate_group()->class_table()->NumTopLevelCids();
7456 canonical_clusters_by_cid_ = new SerializationCluster*[num_cids_];
7457 for (intptr_t i = 0; i < num_cids_; i++) {
7458 canonical_clusters_by_cid_[i] = nullptr;
7459 }
7460 clusters_by_cid_ = new SerializationCluster*[num_cids_];
7461 for (intptr_t i = 0; i < num_cids_; i++) {
7462 clusters_by_cid_[i] = nullptr;
7463 }
7464 if (profile_writer_ != nullptr) {
7465 offsets_table_ = new (zone_) OffsetsTable(zone_);
7466 }
7467}
intptr_t NumTopLevelCids() const
Definition: class_table.h:450
intptr_t NumCids() const
Definition: class_table.h:447
Heap * heap() const
Definition: isolate.h:296
ClassTable * class_table() const
Definition: isolate.h:496
static ObjectPtr null()
Definition: object.h:433
NonStreamingWriteStream * stream()
Snapshot::Kind kind() const
V8SnapshotProfileWriter * profile_writer() const
Zone * zone() const
Definition: thread_state.h:37
IsolateGroup * isolate_group() const
Definition: thread.h:541
static constexpr intptr_t kFirstReference

◆ ~Serializer()

dart::Serializer::~Serializer ( )

Definition at line 7469 of file app_snapshot.cc.

7469 {
7470 delete[] canonical_clusters_by_cid_;
7471 delete[] clusters_by_cid_;
7472}

Member Function Documentation

◆ AddBaseObject()

void dart::Serializer::AddBaseObject ( ObjectPtr  base_object,
const char *  type = nullptr,
const char *  name = nullptr 
)

Definition at line 7474 of file app_snapshot.cc.

7476 {
7477 // Don't assign references to the discarded code.
7478 const bool is_discarded_code = base_object->IsHeapObject() &&
7479 base_object->IsCode() &&
7480 Code::IsDiscarded(Code::RawCast(base_object));
7481 if (!is_discarded_code) {
7482 AssignRef(base_object);
7483 }
7484 num_base_objects_++;
7485
7486 if ((profile_writer_ != nullptr) && (type != nullptr)) {
7487 const auto& profile_id = GetProfileId(base_object);
7488 profile_writer_->SetObjectTypeAndName(profile_id, type, name);
7489 profile_writer_->AddRoot(profile_id);
7490 }
7491}
GLenum type
static bool IsDiscarded(const CodePtr code)
Definition: object.h:6834
static ObjectPtr RawCast(ObjectPtr obj)
Definition: object.h:325
intptr_t AssignRef(ObjectPtr object)
V8SnapshotProfileWriter::ObjectId GetProfileId(ObjectPtr object) const
void SetObjectTypeAndName(const ObjectId &object_id, const char *type, const char *name)
void AddRoot(const ObjectId &object_id, const char *name=nullptr)
const char *const name

◆ AddUntracedRef()

void dart::Serializer::AddUntracedRef ( )
inline

Definition at line 328 of file app_snapshot.cc.

328{ num_written_objects_++; }

◆ Align()

void dart::Serializer::Align ( intptr_t  alignment,
intptr_t  offset = 0 
)
inline

Definition at line 424 of file app_snapshot.cc.

424 {
425 stream_->Align(alignment, offset);
426 }
intptr_t Align(intptr_t alignment, intptr_t offset=0)
Definition: datastream.h:341
SeparatedVector2 offset

◆ AssignArtificialRef()

intptr_t dart::Serializer::AssignArtificialRef ( ObjectPtr  object = nullptr)

Definition at line 7507 of file app_snapshot.cc.

7507 {
7508 const intptr_t ref = -(next_ref_index_++);
7510 if (object != nullptr) {
7511 ASSERT(!object.IsHeapObject() || !object.IsInstructions());
7512 ASSERT(heap_->GetObjectId(object) == kUnreachableReference);
7513 heap_->SetObjectId(object, ref);
7514 ASSERT(heap_->GetObjectId(object) == ref);
7515 }
7516 return ref;
7517}
intptr_t GetObjectId(ObjectPtr raw_obj) const
Definition: heap.h:197
void SetObjectId(ObjectPtr raw_obj, intptr_t object_id)
Definition: heap.h:193
#define ASSERT(E)
static constexpr intptr_t kUnreachableReference
static constexpr bool IsArtificialReference(intptr_t ref)

◆ AssignRef()

intptr_t dart::Serializer::AssignRef ( ObjectPtr  object)

Definition at line 7493 of file app_snapshot.cc.

7493 {
7494 ASSERT(IsAllocatedReference(next_ref_index_));
7495
7496 // The object id weak table holds image offsets for Instructions instead
7497 // of ref indices.
7498 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7499 heap_->SetObjectId(object, next_ref_index_);
7500 ASSERT(heap_->GetObjectId(object) == next_ref_index_);
7501
7502 objects_->Add(&Object::ZoneHandle(object));
7503
7504 return next_ref_index_++;
7505}
static Object & ZoneHandle()
Definition: object.h:419
static constexpr bool IsAllocatedReference(intptr_t ref)

◆ AttributeElementRef()

void dart::Serializer::AttributeElementRef ( ObjectPtr  object,
intptr_t  index 
)
inline

Definition at line 444 of file app_snapshot.cc.

444 {
445 AttributeReference(object,
447 }
void AttributeReference(ObjectPtr object, const V8SnapshotProfileWriter::Reference &reference)
static Reference Element(intptr_t offset)

◆ AttributePropertyRef()

void dart::Serializer::AttributePropertyRef ( ObjectPtr  object,
const char *  property 
)
inline

Definition at line 454 of file app_snapshot.cc.

454 {
455 AttributeReference(object,
457 }
static Reference Property(const char *name)

◆ AttributeReference()

void dart::Serializer::AttributeReference ( ObjectPtr  object,
const V8SnapshotProfileWriter::Reference reference 
)

Definition at line 7543 of file app_snapshot.cc.

7545 {
7546 if (profile_writer_ == nullptr) return;
7547 const auto& object_id = GetProfileId(object);
7548#if defined(DART_PRECOMPILER)
7549 if (object->IsHeapObject() && object->IsWeakSerializationReference()) {
7550 auto const wsr = WeakSerializationReference::RawCast(object);
7551 auto const target = wsr->untag()->target();
7552 const auto& target_id = GetProfileId(target);
7553 if (object_id != target_id) {
7554 const auto& replacement_id = GetProfileId(wsr->untag()->replacement());
7555 ASSERT(object_id == replacement_id);
7556 // The target of the WSR will be replaced in the snapshot, so write
7557 // attributions for both the dropped target and for the replacement.
7558 profile_writer_->AttributeDroppedReferenceTo(
7559 object_currently_writing_.id_, reference, target_id, replacement_id);
7560 return;
7561 }
7562 // The replacement isn't used for this WSR in the snapshot, as either the
7563 // target is strongly referenced or the WSR itself is unreachable, so fall
7564 // through to attributing a reference to the WSR (which shares the profile
7565 // ID of the target).
7566 }
7567#endif
7568 profile_writer_->AttributeReferenceTo(object_currently_writing_.id_,
7569 reference, object_id);
7570}
void AttributeReferenceTo(const ObjectId &from_object_id, const Reference &reference, const ObjectId &to_object_id)
uint32_t * target

◆ bytes_heap_allocated()

intptr_t dart::Serializer::bytes_heap_allocated ( )
inline

Definition at line 359 of file app_snapshot.cc.

359{ return bytes_heap_allocated_; }

◆ bytes_written()

intptr_t dart::Serializer::bytes_written ( )
inline

Definition at line 358 of file app_snapshot.cc.

358{ return stream_->bytes_written(); }
DART_FORCE_INLINE intptr_t bytes_written() const
Definition: datastream.h:338

◆ CreateArtificialNodeIfNeeded()

bool dart::Serializer::CreateArtificialNodeIfNeeded ( ObjectPtr  obj)

Definition at line 7632 of file app_snapshot.cc.

7632 {
7633 ASSERT(profile_writer() != nullptr);
7634
7635 // UnsafeRefId will do lazy reference allocation for WSRs.
7636 intptr_t id = UnsafeRefId(obj);
7638 if (id != kUnreachableReference) {
7639 return IsArtificialReference(id);
7640 }
7641 if (obj->IsHeapObject() && obj->IsWeakSerializationReference()) {
7642 auto const target =
7645 // Since the WSR is unreachable, we can replace its id with whatever the
7646 // ID of the target is, whether real or artificial.
7647 id = heap_->GetObjectId(target);
7648 heap_->SetObjectId(obj, id);
7649 return IsArtificialReference(id);
7650 }
7651
7652 const char* type = nullptr;
7653 const char* name = nullptr;
7654 GrowableArray<std::pair<ObjectPtr, V8SnapshotProfileWriter::Reference>> links;
7655 const classid_t cid = obj->GetClassIdMayBeSmi();
7656 switch (cid) {
7657 // For profiling static call target tables in AOT mode.
7658 case kSmiCid: {
7659 type = "Smi";
7660 break;
7661 }
7662 // For profiling per-code object pools in bare instructions mode.
7663 case kObjectPoolCid: {
7664 type = "ObjectPool";
7665 auto const pool = ObjectPool::RawCast(obj);
7666 for (intptr_t i = 0; i < pool->untag()->length_; i++) {
7667 uint8_t bits = pool->untag()->entry_bits()[i];
7669 ObjectPool::EntryType::kTaggedObject) {
7670 auto const elem = pool->untag()->data()[i].raw_obj_;
7671 // Elements should be reachable from the global object pool.
7672 ASSERT(HasRef(elem));
7674 }
7675 }
7676 break;
7677 }
7678 // For profiling static call target tables and the dispatch table in AOT.
7679 case kImmutableArrayCid:
7680 case kArrayCid: {
7681 type = "Array";
7682 auto const array = Array::RawCast(obj);
7683 for (intptr_t i = 0, n = Smi::Value(array->untag()->length()); i < n;
7684 i++) {
7685 ObjectPtr elem = array->untag()->element(i);
7687 }
7688 break;
7689 }
7690 // For profiling the dispatch table.
7691 case kCodeCid: {
7692 type = "Code";
7693 auto const code = Code::RawCast(obj);
7695 links.Add({code->untag()->owner(),
7697 break;
7698 }
7699 case kFunctionCid: {
7700 FunctionPtr func = static_cast<FunctionPtr>(obj);
7701 type = "Function";
7703 func);
7704 links.Add({func->untag()->owner(),
7706 ObjectPtr data = func->untag()->data();
7707 if (data->GetClassId() == kClosureDataCid) {
7708 links.Add(
7710 }
7711 break;
7712 }
7713 case kClosureDataCid: {
7714 auto data = static_cast<ClosureDataPtr>(obj);
7715 type = "ClosureData";
7716 links.Add(
7717 {data->untag()->parent_function(),
7719 break;
7720 }
7721 case kClassCid: {
7722 ClassPtr cls = static_cast<ClassPtr>(obj);
7723 type = "Class";
7724 name = String::ToCString(thread(), cls->untag()->name());
7725 links.Add({cls->untag()->library(),
7727 break;
7728 }
7729 case kPatchClassCid: {
7730 PatchClassPtr patch_cls = static_cast<PatchClassPtr>(obj);
7731 type = "PatchClass";
7732 links.Add(
7733 {patch_cls->untag()->wrapped_class(),
7735 break;
7736 }
7737 case kLibraryCid: {
7738 LibraryPtr lib = static_cast<LibraryPtr>(obj);
7739 type = "Library";
7740 name = String::ToCString(thread(), lib->untag()->url());
7741 break;
7742 }
7743 case kFunctionTypeCid: {
7744 type = "FunctionType";
7745 break;
7746 };
7747 case kRecordTypeCid: {
7748 type = "RecordType";
7749 break;
7750 };
7751 default:
7752 FATAL("Request to create artificial node for object with cid %d", cid);
7753 }
7754
7755 id = AssignArtificialRef(obj);
7756 Serializer::WritingObjectScope scope(this, type, obj, name);
7757 for (const auto& link : links) {
7759 AttributeReference(link.first, link.second);
7760 }
7761 return true;
7762}
AutoreleasePool pool
static constexpr T decode(S value)
Definition: bitfield.h:171
static const char * MakeDisambiguatedCodeName(Serializer *s, CodePtr c)
static const char * MakeDisambiguatedFunctionName(Serializer *s, FunctionPtr f)
UntaggedObject * untag() const
virtual const char * ToCString() const
Definition: object.h:366
bool HasRef(ObjectPtr object) const
intptr_t AssignArtificialRef(ObjectPtr object=nullptr)
intptr_t UnsafeRefId(ObjectPtr object) const
bool CreateArtificialNodeIfNeeded(ObjectPtr obj)
intptr_t Value() const
Definition: object.h:9990
#define FATAL(error)
def link(from_root, to_root)
Definition: dart_pkg.py:44
int32_t classid_t
Definition: globals.h:524
static constexpr intptr_t kUnallocatedReference
const intptr_t cid
static int8_t data[kExtLength]

◆ current_loading_unit_id()

intptr_t dart::Serializer::current_loading_unit_id ( ) const
inline

Definition at line 551 of file app_snapshot.cc.

551{ return current_loading_unit_id_; }

◆ DumpCombinedCodeStatistics()

void dart::Serializer::DumpCombinedCodeStatistics ( )

◆ FillHeader()

void dart::Serializer::FillHeader ( Snapshot::Kind  kind)
inline

Definition at line 345 of file app_snapshot.cc.

345 {
346 Snapshot* header = reinterpret_cast<Snapshot*>(stream_->buffer());
347 header->set_magic();
348 header->set_length(stream_->bytes_written());
349 header->set_kind(kind);
350 }
uint8_t * buffer() const
Definition: datastream.h:615
static const char header[]
Definition: skpbench.cpp:88

◆ GetCodeIndex()

intptr_t dart::Serializer::GetCodeIndex ( CodePtr  code)

◆ GetDataOffset()

uint32_t dart::Serializer::GetDataOffset ( ObjectPtr  object) const

Definition at line 8379 of file app_snapshot.cc.

8379 {
8380#if defined(SNAPSHOT_BACKTRACE)
8381 return image_writer_->GetDataOffsetFor(object, ParentOf(object));
8382#else
8383 return image_writer_->GetDataOffsetFor(object);
8384#endif
8385}
uint32_t GetDataOffsetFor(ObjectPtr raw_object)

◆ GetDataSize()

intptr_t dart::Serializer::GetDataSize ( ) const

Definition at line 8387 of file app_snapshot.cc.

8387 {
8388 if (image_writer_ == nullptr) {
8389 return 0;
8390 }
8391 return image_writer_->data_size();
8392}
intptr_t data_size() const

◆ GetProfileId() [1/2]

V8SnapshotProfileWriter::ObjectId dart::Serializer::GetProfileId ( intptr_t  ref) const

Definition at line 7534 of file app_snapshot.cc.

7535 {
7536 if (IsArtificialReference(heap_id)) {
7537 return {IdSpace::kArtificial, -heap_id};
7538 }
7539 ASSERT(IsAllocatedReference(heap_id));
7540 return {IdSpace::kSnapshot, heap_id};
7541}

◆ GetProfileId() [2/2]

V8SnapshotProfileWriter::ObjectId dart::Serializer::GetProfileId ( ObjectPtr  object) const

Definition at line 7527 of file app_snapshot.cc.

7528 {
7529 // Instructions are handled separately.
7530 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7531 return GetProfileId(UnsafeRefId(object));
7532}

◆ HasArtificialRef()

bool dart::Serializer::HasArtificialRef ( ObjectPtr  object) const
inline

Definition at line 574 of file app_snapshot.cc.

574 {
575 return IsArtificialReference(heap_->GetObjectId(object));
576 }

◆ HasProfileNode()

bool dart::Serializer::HasProfileNode ( ObjectPtr  object) const
inline

Definition at line 579 of file app_snapshot.cc.

579 {
580 ASSERT(profile_writer_ != nullptr);
581 return profile_writer_->HasId(GetProfileId(object));
582 }
bool HasId(const ObjectId &object_id)

◆ HasRef()

bool dart::Serializer::HasRef ( ObjectPtr  object) const
inline

Definition at line 570 of file app_snapshot.cc.

570 {
571 return IsAllocatedReference(heap_->GetObjectId(object));
572 }

◆ heap()

Heap * dart::Serializer::heap ( ) const
inline

Definition at line 528 of file app_snapshot.cc.

528{ return heap_; }

◆ InCurrentLoadingUnitOrRoot()

bool dart::Serializer::InCurrentLoadingUnitOrRoot ( ObjectPtr  obj)

Definition at line 8004 of file app_snapshot.cc.

8004 {
8005 if (loading_units_ == nullptr) return true;
8006
8007 intptr_t unit_id = heap_->GetLoadingUnit(obj);
8008 if (unit_id == WeakTable::kNoValue) {
8009 FATAL("Missing loading unit assignment: %s\n",
8010 Object::Handle(obj).ToCString());
8011 }
8012 return unit_id == LoadingUnit::kRootId || unit_id == current_loading_unit_id_;
8013}
intptr_t GetLoadingUnit(ObjectPtr raw_obj) const
Definition: heap.h:207
static constexpr intptr_t kRootId
Definition: object.h:7969
static Object & Handle()
Definition: object.h:407
static constexpr intptr_t kNoValue
Definition: weak_table.h:18

◆ IsReachable()

bool dart::Serializer::IsReachable ( ObjectPtr  object) const
inline

Definition at line 566 of file app_snapshot.cc.

566 {
567 return IsReachableReference(heap_->GetObjectId(object));
568 }
static constexpr bool IsReachableReference(intptr_t ref)

◆ IsWritten()

bool dart::Serializer::IsWritten ( ObjectPtr  object) const
inline

Definition at line 583 of file app_snapshot.cc.

583 {
584 return heap_->GetObjectId(object) > num_base_objects_;
585 }

◆ kind()

Snapshot::Kind dart::Serializer::kind ( ) const
inline

Definition at line 530 of file app_snapshot.cc.

530{ return kind_; }

◆ loading_units()

GrowableArray< LoadingUnitSerializationData * > * dart::Serializer::loading_units ( ) const
inline

Definition at line 545 of file app_snapshot.cc.

545 {
546 return loading_units_;
547 }

◆ NewClusterForClass()

SerializationCluster * dart::Serializer::NewClusterForClass ( intptr_t  cid,
bool  is_canonical 
)

Definition at line 7833 of file app_snapshot.cc.

7834 {
7835#if defined(DART_PRECOMPILED_RUNTIME)
7836 UNREACHABLE();
7837 return nullptr;
7838#else
7839 Zone* Z = zone_;
7840 if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
7841 Push(isolate_group()->class_table()->At(cid));
7842 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7843 }
7845 return new (Z) TypedDataViewSerializationCluster(cid);
7846 }
7848 return new (Z) ExternalTypedDataSerializationCluster(cid);
7849 }
7850 if (IsTypedDataClassId(cid)) {
7851 return new (Z) TypedDataSerializationCluster(cid);
7852 }
7853
7854#if !defined(DART_COMPRESSED_POINTERS)
7855 // Sometimes we write memory images for read-only objects that contain no
7856 // pointers. These can be mmapped directly, needing no relocation, and added
7857 // to the list of heap pages. This gives us lazy/demand paging from the OS.
7858 // We do not do this for snapshots without code to keep snapshots portable
7859 // between machines with different word sizes. We do not do this when we use
7860 // compressed pointers because we cannot always control the load address of
7861 // the memory image, and it might be outside the 4GB region addressable by
7862 // compressed pointers.
7863 if (Snapshot::IncludesCode(kind_)) {
7864 if (auto const type = ReadOnlyObjectType(cid)) {
7865 return new (Z) RODataSerializationCluster(Z, type, cid, is_canonical);
7866 }
7867 }
7868#endif
7869
7870 const bool cluster_represents_canonical_set =
7871 current_loading_unit_id_ <= LoadingUnit::kRootId && is_canonical;
7872
7873 switch (cid) {
7874 case kClassCid:
7875 return new (Z) ClassSerializationCluster(num_cids_ + num_tlc_cids_);
7876 case kTypeParametersCid:
7877 return new (Z) TypeParametersSerializationCluster();
7878 case kTypeArgumentsCid:
7879 return new (Z) TypeArgumentsSerializationCluster(
7880 is_canonical, cluster_represents_canonical_set);
7881 case kPatchClassCid:
7882 return new (Z) PatchClassSerializationCluster();
7883 case kFunctionCid:
7884 return new (Z) FunctionSerializationCluster();
7885 case kClosureDataCid:
7886 return new (Z) ClosureDataSerializationCluster();
7887 case kFfiTrampolineDataCid:
7888 return new (Z) FfiTrampolineDataSerializationCluster();
7889 case kFieldCid:
7890 return new (Z) FieldSerializationCluster();
7891 case kScriptCid:
7892 return new (Z) ScriptSerializationCluster();
7893 case kLibraryCid:
7894 return new (Z) LibrarySerializationCluster();
7895 case kNamespaceCid:
7896 return new (Z) NamespaceSerializationCluster();
7897 case kKernelProgramInfoCid:
7898 return new (Z) KernelProgramInfoSerializationCluster();
7899 case kCodeCid:
7900 return new (Z) CodeSerializationCluster(heap_);
7901 case kObjectPoolCid:
7902 return new (Z) ObjectPoolSerializationCluster();
7903 case kPcDescriptorsCid:
7904 return new (Z) PcDescriptorsSerializationCluster();
7905 case kCodeSourceMapCid:
7906 return new (Z) CodeSourceMapSerializationCluster();
7907 case kCompressedStackMapsCid:
7908 return new (Z) CompressedStackMapsSerializationCluster();
7909 case kExceptionHandlersCid:
7910 return new (Z) ExceptionHandlersSerializationCluster();
7911 case kContextCid:
7912 return new (Z) ContextSerializationCluster();
7913 case kContextScopeCid:
7914 return new (Z) ContextScopeSerializationCluster();
7915 case kUnlinkedCallCid:
7916 return new (Z) UnlinkedCallSerializationCluster();
7917 case kICDataCid:
7918 return new (Z) ICDataSerializationCluster();
7919 case kMegamorphicCacheCid:
7920 return new (Z) MegamorphicCacheSerializationCluster();
7921 case kSubtypeTestCacheCid:
7922 return new (Z) SubtypeTestCacheSerializationCluster();
7923 case kLoadingUnitCid:
7924 return new (Z) LoadingUnitSerializationCluster();
7925 case kLanguageErrorCid:
7926 return new (Z) LanguageErrorSerializationCluster();
7927 case kUnhandledExceptionCid:
7928 return new (Z) UnhandledExceptionSerializationCluster();
7929 case kLibraryPrefixCid:
7930 return new (Z) LibraryPrefixSerializationCluster();
7931 case kTypeCid:
7932 return new (Z) TypeSerializationCluster(is_canonical,
7933 cluster_represents_canonical_set);
7934 case kFunctionTypeCid:
7935 return new (Z) FunctionTypeSerializationCluster(
7936 is_canonical, cluster_represents_canonical_set);
7937 case kRecordTypeCid:
7938 return new (Z) RecordTypeSerializationCluster(
7939 is_canonical, cluster_represents_canonical_set);
7940 case kTypeParameterCid:
7941 return new (Z) TypeParameterSerializationCluster(
7942 is_canonical, cluster_represents_canonical_set);
7943 case kClosureCid:
7944 return new (Z) ClosureSerializationCluster(is_canonical);
7945 case kMintCid:
7946 return new (Z) MintSerializationCluster(is_canonical);
7947 case kDoubleCid:
7948 return new (Z) DoubleSerializationCluster(is_canonical);
7949 case kInt32x4Cid:
7950 case kFloat32x4Cid:
7951 case kFloat64x2Cid:
7952 return new (Z) Simd128SerializationCluster(cid, is_canonical);
7953 case kGrowableObjectArrayCid:
7954 return new (Z) GrowableObjectArraySerializationCluster();
7955 case kRecordCid:
7956 return new (Z) RecordSerializationCluster(is_canonical);
7957 case kStackTraceCid:
7958 return new (Z) StackTraceSerializationCluster();
7959 case kRegExpCid:
7960 return new (Z) RegExpSerializationCluster();
7961 case kWeakPropertyCid:
7962 return new (Z) WeakPropertySerializationCluster();
7963 case kMapCid:
7964 // We do not have mutable hash maps in snapshots.
7965 UNREACHABLE();
7966 case kConstMapCid:
7967 return new (Z) MapSerializationCluster(is_canonical, kConstMapCid);
7968 case kSetCid:
7969 // We do not have mutable hash sets in snapshots.
7970 UNREACHABLE();
7971 case kConstSetCid:
7972 return new (Z) SetSerializationCluster(is_canonical, kConstSetCid);
7973 case kArrayCid:
7974 return new (Z) ArraySerializationCluster(is_canonical, kArrayCid);
7975 case kImmutableArrayCid:
7976 return new (Z)
7977 ArraySerializationCluster(is_canonical, kImmutableArrayCid);
7978 case kWeakArrayCid:
7979 return new (Z) WeakArraySerializationCluster();
7980 case kStringCid:
7981 return new (Z) StringSerializationCluster(
7982 is_canonical, cluster_represents_canonical_set && !vm_);
7983#define CASE_FFI_CID(name) case kFfi##name##Cid:
7985#undef CASE_FFI_CID
7986 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7987 case kDeltaEncodedTypedDataCid:
7988 return new (Z) DeltaEncodedTypedDataSerializationCluster();
7989 case kWeakSerializationReferenceCid:
7990#if defined(DART_PRECOMPILER)
7991 ASSERT(kind_ == Snapshot::kFullAOT);
7992 return new (Z) WeakSerializationReferenceSerializationCluster();
7993#endif
7994 default:
7995 break;
7996 }
7997
7998 // The caller will check for nullptr and provide an error with more context
7999 // than is available here.
8000 return nullptr;
8001#endif // !DART_PRECOMPILED_RUNTIME
8002}
#define CASE_FFI_CID(name)
#define UNREACHABLE()
Definition: assert.h:248
#define Z
#define CLASS_LIST_FFI_TYPE_MARKER(V)
Definition: class_id.h:165
void Push(ObjectPtr object, intptr_t cid_override=kIllegalCid)
static bool IncludesCode(Kind kind)
Definition: snapshot.h:67
IsolateGroup * isolate_group() const
bool IsTypedDataViewClassId(intptr_t index)
Definition: class_id.h:439
bool IsTypedDataClassId(intptr_t index)
Definition: class_id.h:433
@ kNumPredefinedCids
Definition: class_id.h:257
bool IsExternalTypedDataClassId(intptr_t index)
Definition: class_id.h:447

◆ next_ref_index()

intptr_t dart::Serializer::next_ref_index ( ) const
inline

Definition at line 531 of file app_snapshot.cc.

531{ return next_ref_index_; }

◆ PrepareInstructions()

void dart::Serializer::PrepareInstructions ( const CompressedStackMaps canonical_smap)

Definition at line 8113 of file app_snapshot.cc.

8114 {
8115 if (!Snapshot::IncludesCode(kind())) return;
8116
8117 // Code objects that have identical/duplicate instructions must be adjacent in
8118 // the order that Code objects are written because the encoding of the
8119 // reference from the Code to the Instructions assumes monotonically
8120 // increasing offsets as part of a delta encoding. Also the code order table
8121 // that allows for mapping return addresses back to Code objects depends on
8122 // this sorting.
8123 if (code_cluster_ != nullptr) {
8124 CodeSerializationCluster::Sort(this, code_cluster_->objects());
8125 }
8126 if ((loading_units_ != nullptr) &&
8127 (current_loading_unit_id_ == LoadingUnit::kRootId)) {
8128 for (intptr_t i = LoadingUnit::kRootId + 1; i < loading_units_->length();
8129 i++) {
8130 auto unit_objects = loading_units_->At(i)->deferred_objects();
8131 CodeSerializationCluster::Sort(this, unit_objects);
8132 ASSERT(unit_objects->length() == 0 || code_cluster_ != nullptr);
8133 for (intptr_t j = 0; j < unit_objects->length(); j++) {
8134 code_cluster_->deferred_objects()->Add(unit_objects->At(j)->ptr());
8135 }
8136 }
8137 }
8138
8139#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8140 if (kind() == Snapshot::kFullAOT) {
8141 // Group the code objects whose instructions are not being deferred in this
8142 // snapshot unit in the order they will be written: first the code objects
 8143 // encountered for the first time in this unit being written by the
 8144 // CodeSerializationCluster, then code objects previously deferred whose
8145 // instructions are now written by UnitSerializationRoots. This order needs
8146 // to be known to finalize bare-instructions-mode's PC-relative calls.
8147 GrowableArray<CodePtr> code_objects;
8148 if (code_cluster_ != nullptr) {
8149 auto in = code_cluster_->objects();
8150 for (intptr_t i = 0; i < in->length(); i++) {
8151 code_objects.Add(in->At(i));
8152 }
8153 }
8154 if (loading_units_ != nullptr) {
8155 auto in =
8156 loading_units_->At(current_loading_unit_id_)->deferred_objects();
8157 for (intptr_t i = 0; i < in->length(); i++) {
8158 code_objects.Add(in->At(i)->ptr());
8159 }
8160 }
8161
8162 GrowableArray<ImageWriterCommand> writer_commands;
8163 RelocateCodeObjects(vm_, &code_objects, &writer_commands);
8164 image_writer_->PrepareForSerialization(&writer_commands);
8165
8166 if (code_objects.length() == 0) {
8167 return;
8168 }
8169
8170 // Build UntaggedInstructionsTable::Data object to be added to the
8171 // read-only data section of the snapshot. It contains:
8172 //
8173 // - a binary search table mapping an Instructions entry point to its
8174 // stack maps (by offset from the beginning of the Data object);
8175 // - followed by stack maps bytes;
8176 // - followed by canonical stack map entries.
8177 //
8178 struct StackMapInfo : public ZoneAllocated {
8179 CompressedStackMapsPtr map;
8180 intptr_t use_count;
8181 uint32_t offset;
8182 };
8183
8184 GrowableArray<StackMapInfo*> stack_maps;
8185 IntMap<StackMapInfo*> stack_maps_info;
8186
8187 // Build code_index_ (which maps Instructions object to the order in
8188 // which they appear in the code section in the end) and collect all
8189 // stack maps.
 8190 // We also find the first Instructions object which is going to have a
 8191 // Code object associated with it. This will allow us to reduce the binary
 8192 // search space when searching specifically for the code object in runtime.
8193 uint32_t total = 0;
8194 intptr_t not_discarded_count = 0;
8195 uint32_t first_entry_with_code = 0;
8196 for (auto& cmd : writer_commands) {
8198 RELEASE_ASSERT(code_objects[total] ==
8199 cmd.insert_instruction_of_code.code);
8200 ASSERT(!Code::IsDiscarded(cmd.insert_instruction_of_code.code) ||
8201 (not_discarded_count == 0));
8202 if (!Code::IsDiscarded(cmd.insert_instruction_of_code.code)) {
8203 if (not_discarded_count == 0) {
8204 first_entry_with_code = total;
8205 }
8206 not_discarded_count++;
8207 }
8208 total++;
8209
8210 // Update code_index_.
8211 {
8212 const intptr_t instr = static_cast<intptr_t>(
8213 cmd.insert_instruction_of_code.code->untag()->instructions_);
8214 ASSERT(!code_index_.HasKey(instr));
8215 code_index_.Insert(instr, total);
8216 }
8217
8218 // Collect stack maps.
8219 CompressedStackMapsPtr stack_map =
8220 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8221 const intptr_t key = static_cast<intptr_t>(stack_map);
8222
8223 if (stack_maps_info.HasKey(key)) {
8224 stack_maps_info.Lookup(key)->use_count++;
8225 } else {
8226 auto info = new StackMapInfo();
8227 info->map = stack_map;
8228 info->use_count = 1;
8229 stack_maps.Add(info);
8230 stack_maps_info.Insert(key, info);
8231 }
8232 }
8233 }
8234 ASSERT(static_cast<intptr_t>(total) == code_index_.Length());
8235 instructions_table_len_ = not_discarded_count;
8236
8237 // Sort stack maps by usage so that most commonly used stack maps are
8238 // together at the start of the Data object.
8239 stack_maps.Sort([](StackMapInfo* const* a, StackMapInfo* const* b) {
8240 if ((*a)->use_count < (*b)->use_count) return 1;
8241 if ((*a)->use_count > (*b)->use_count) return -1;
8242 return 0;
8243 });
8244
8245 // Build Data object.
8246 MallocWriteStream pc_mapping(4 * KB);
8247
8248 // Write the header out.
8249 {
8251 memset(&header, 0, sizeof(header));
8252 header.length = total;
8253 header.first_entry_with_code = first_entry_with_code;
8254 pc_mapping.WriteFixed<UntaggedInstructionsTable::Data>(header);
8255 }
8256
8257 // Reserve space for the binary search table.
8258 for (auto& cmd : writer_commands) {
8260 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>({0, 0});
8261 }
8262 }
8263
8264 // Now write collected stack maps after the binary search table.
8265 auto write_stack_map = [&](CompressedStackMapsPtr smap) {
8266 const auto flags_and_size = smap->untag()->payload()->flags_and_size();
8267 const auto payload_size =
8269 pc_mapping.WriteFixed<uint32_t>(flags_and_size);
8270 pc_mapping.WriteBytes(smap->untag()->payload()->data(), payload_size);
8271 };
8272
8273 for (auto sm : stack_maps) {
8274 sm->offset = pc_mapping.bytes_written();
8275 write_stack_map(sm->map);
8276 }
8277
8278 // Write canonical entries (if any).
8279 if (!canonical_stack_map_entries.IsNull()) {
8280 auto header = reinterpret_cast<UntaggedInstructionsTable::Data*>(
8281 pc_mapping.buffer());
8282 header->canonical_stack_map_entries_offset = pc_mapping.bytes_written();
8283 write_stack_map(canonical_stack_map_entries.ptr());
8284 }
8285 const auto total_bytes = pc_mapping.bytes_written();
8286
8287 // Now that we have offsets to all stack maps we can write binary
8288 // search table.
8289 pc_mapping.SetPosition(
8290 sizeof(UntaggedInstructionsTable::Data)); // Skip the header.
8291 for (auto& cmd : writer_commands) {
8293 CompressedStackMapsPtr smap =
8294 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8295 const auto offset =
8296 stack_maps_info.Lookup(static_cast<intptr_t>(smap))->offset;
8297 const auto entry = image_writer_->GetTextOffsetFor(
8298 Code::InstructionsOf(cmd.insert_instruction_of_code.code),
8299 cmd.insert_instruction_of_code.code);
8300
8301 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>(
8302 {static_cast<uint32_t>(entry), offset});
8303 }
8304 }
8305 // Restore position so that Steal does not truncate the buffer.
8306 pc_mapping.SetPosition(total_bytes);
8307
8308 intptr_t length = 0;
8309 uint8_t* bytes = pc_mapping.Steal(&length);
8310
8311 instructions_table_rodata_offset_ =
8312 image_writer_->AddBytesToData(bytes, length);
8313 // Attribute all bytes in this object to the root for simplicity.
8314 if (profile_writer_ != nullptr) {
8315 const auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8316 profile_writer_->AttributeReferenceTo(
8319 "<instructions-table-rodata>"),
8320 {offset_space, instructions_table_rodata_offset_});
8321 }
8322 }
8323#endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8324}
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
Definition: DM.cpp:213
#define RELEASE_ASSERT(cond)
Definition: assert.h:327
void Add(const T &value)
static void Sort(Serializer *s, GrowableArray< CodePtr > *codes)
GrowableArray< CodePtr > * objects()
GrowableArray< CodePtr > * deferred_objects()
static InstructionsPtr InstructionsOf(const CodePtr code)
Definition: object.h:6775
int32_t GetTextOffsetFor(InstructionsPtr instructions, CodePtr code)
void PrepareForSerialization(GrowableArray< ImageWriterCommand > *commands)
uint32_t AddBytesToData(uint8_t *bytes, intptr_t length)
static const ObjectId kArtificialRootId
static bool b
struct MyStruct a[10]
if(end==-1)
size_t length
constexpr intptr_t KB
Definition: globals.h:528
struct PathData * Data(SkPath *path)
Definition: path_ops.cc:52
SI auto map(std::index_sequence< I... >, Fn &&fn, const Args &... args) -> skvx::Vec< sizeof...(I), decltype(fn(args[0]...))>
Definition: SkVx.h:680
static DecodeResult decode(std::string path)
Definition: png_codec.cpp:124

◆ PrintSnapshotSizes()

void dart::Serializer::PrintSnapshotSizes ( )

Definition at line 8864 of file app_snapshot.cc.

8864 {
8865#if !defined(DART_PRECOMPILED_RUNTIME)
8866 if (FLAG_print_snapshot_sizes_verbose) {
8867 TextBuffer buffer(1024);
8868 // Header, using format sizes matching those below to ensure alignment.
8869 buffer.Printf("%25s", "Cluster");
8870 buffer.Printf(" %6s", "Objs");
8871 buffer.Printf(" %8s", "Size");
8872 buffer.Printf(" %8s", "Fraction");
8873 buffer.Printf(" %10s", "Cumulative");
8874 buffer.Printf(" %8s", "HeapSize");
8875 buffer.Printf(" %5s", "Cid");
8876 buffer.Printf(" %9s", "Canonical");
8877 buffer.AddString("\n");
8878 GrowableArray<SerializationCluster*> clusters_by_size;
8879 for (intptr_t cid = 1; cid < num_cids_; cid++) {
8880 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8881 clusters_by_size.Add(cluster);
8882 }
8883 if (auto const cluster = clusters_by_cid_[cid]) {
8884 clusters_by_size.Add(cluster);
8885 }
8886 }
8887 intptr_t text_size = 0;
8888 if (image_writer_ != nullptr) {
8889 auto const text_object_count = image_writer_->GetTextObjectCount();
8890 text_size = image_writer_->text_size();
8891 intptr_t trampoline_count, trampoline_size;
8892 image_writer_->GetTrampolineInfo(&trampoline_count, &trampoline_size);
8893 auto const instructions_count = text_object_count - trampoline_count;
8894 auto const instructions_size = text_size - trampoline_size;
8895 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8896 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Instructions"),
8897 instructions_count, instructions_size));
8898 if (trampoline_size > 0) {
8899 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8900 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Trampoline"),
8901 trampoline_count, trampoline_size));
8902 }
8903 }
8904 // The dispatch_table_size_ will be 0 if the snapshot did not include a
8905 // dispatch table (i.e., the VM snapshot). For a precompiled isolate
8906 // snapshot, we always serialize at least _one_ byte for the DispatchTable.
8907 if (dispatch_table_size_ > 0) {
8908 const auto& dispatch_table_entries = Array::Handle(
8909 zone_,
8910 isolate_group()->object_store()->dispatch_table_code_entries());
8911 auto const entry_count =
8912 dispatch_table_entries.IsNull() ? 0 : dispatch_table_entries.Length();
8913 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8914 "DispatchTable", entry_count, dispatch_table_size_));
8915 }
8916 if (instructions_table_len_ > 0) {
8917 const intptr_t memory_size =
8919 compiler::target::Array::InstanceSize(instructions_table_len_);
8920 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8921 "InstructionsTable", instructions_table_len_, 0, memory_size));
8922 }
8923 clusters_by_size.Sort(CompareClusters);
8924 double total_size =
8925 static_cast<double>(bytes_written() + GetDataSize() + text_size);
8926 double cumulative_fraction = 0.0;
8927 for (intptr_t i = 0; i < clusters_by_size.length(); i++) {
8928 SerializationCluster* cluster = clusters_by_size[i];
8929 double fraction = static_cast<double>(cluster->size()) / total_size;
8930 cumulative_fraction += fraction;
8931 buffer.Printf("%25s", cluster->name());
8932 buffer.Printf(" %6" Pd "", cluster->num_objects());
8933 buffer.Printf(" %8" Pd "", cluster->size());
8934 buffer.Printf(" %1.6lf", fraction);
8935 buffer.Printf(" %1.8lf", cumulative_fraction);
8936 buffer.Printf(" %8" Pd "", cluster->target_memory_size());
8937 if (cluster->cid() != -1) {
8938 buffer.Printf(" %5" Pd "", cluster->cid());
8939 } else {
8940 buffer.Printf(" %5s", "");
8941 }
8942 if (cluster->is_canonical()) {
8943 buffer.Printf(" %9s", "canonical");
8944 } else {
8945 buffer.Printf(" %9s", "");
8946 }
8947 buffer.AddString("\n");
8948 }
8949 OS::PrintErr("%s", buffer.buffer());
8950 }
8951#endif // !defined(DART_PRECOMPILED_RUNTIME)
8952}
static size_t total_size(SkSBlockAllocator< N > &pool)
intptr_t GetTextObjectCount() const
static const char * TagObjectTypeAsReadOnly(Zone *zone, const char *type)
intptr_t text_size() const
void GetTrampolineInfo(intptr_t *count, intptr_t *size) const
static void static void PrintErr(const char *format,...) PRINTF_ATTRIBUTE(1
intptr_t bytes_written()
intptr_t GetDataSize() const
static int CompareClusters(SerializationCluster *const *a, SerializationCluster *const *b)
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified vm service A custom Dart VM Service port The default is to pick a randomly available open port disable vm Disable the Dart VM Service The Dart VM Service is never available in release mode disable vm service Disable mDNS Dart VM Service publication Bind to the IPv6 localhost address for the Dart VM Service Ignored if vm service host is set endless trace buffer
Definition: switches.h:126
#define Pd
Definition: globals.h:408

◆ profile_writer()

V8SnapshotProfileWriter * dart::Serializer::profile_writer ( ) const
inline

Definition at line 535 of file app_snapshot.cc.

535{ return profile_writer_; }

◆ Push()

void dart::Serializer::Push ( ObjectPtr  object,
intptr_t  cid_override = kIllegalCid 
)

Definition at line 8395 of file app_snapshot.cc.

8395 {
8396 const bool is_code = object->IsHeapObject() && object->IsCode();
8397 if (is_code && !Snapshot::IncludesCode(kind_)) {
8398 return; // Do not trace, will write null.
8399 }
8400
8401 intptr_t id = heap_->GetObjectId(object);
8402 if (id == kUnreachableReference) {
8403 // When discovering the transitive closure of objects reachable from the
8404 // roots we do not trace references, e.g. inside [RawCode], to
8405 // [RawInstructions], since [RawInstructions] doesn't contain any references
8406 // and the serialization code uses an [ImageWriter] for those.
8407 if (object->IsHeapObject() && object->IsInstructions()) {
8408 UnexpectedObject(object,
8409 "Instructions should only be reachable from Code");
8410 }
8411
8412 heap_->SetObjectId(object, kUnallocatedReference);
8413 ASSERT(IsReachableReference(heap_->GetObjectId(object)));
8414 stack_.Add({object, cid_override});
8415 if (!(is_code && Code::IsDiscarded(Code::RawCast(object)))) {
8416 num_written_objects_++;
8417 }
8418#if defined(SNAPSHOT_BACKTRACE)
8419 parent_pairs_.Add(&Object::Handle(zone_, object));
8420 parent_pairs_.Add(&Object::Handle(zone_, current_parent_));
8421#endif
8422 }
8423}
void UnexpectedObject(ObjectPtr object, const char *message)

◆ PushFromTo()

template<typename T , typename... P>
void dart::Serializer::PushFromTo ( T  obj,
P &&...  args 
)
inline

Definition at line 494 of file app_snapshot.cc.

494 {
495 auto* from = obj->untag()->from();
496 auto* to = obj->untag()->to_snapshot(kind(), args...);
497 PushRange(obj, from, to);
498 }
DART_NOINLINE void PushRange(ObjectPtr obj, T from, T to)
G_BEGIN_DECLS G_MODULE_EXPORT FlValue * args

◆ PushRange()

template<typename T >
DART_NOINLINE void dart::Serializer::PushRange ( ObjectPtr  obj,
T  from,
T  to 
)
inline

Definition at line 501 of file app_snapshot.cc.

501 {
502 for (auto* p = from; p <= to; p++) {
503 Push(p->Decompress(obj->heap_base()));
504 }
505 }

◆ PushWeak()

void dart::Serializer::PushWeak ( ObjectPtr  object)

Definition at line 8425 of file app_snapshot.cc.

8425 {
8426 // The GC considers immediate objects to always be alive. This doesn't happen
8427 // automatically in the serializer because the serializer does not have
8428 // immediate objects: it handles Smis as ref indices like all other objects.
8429 // This visit causes the serializer to reproduce the GC's semantics for
8430 // weakness, which in particular allows the templates in hash_table.h to work
8431 // with weak arrays because the metadata Smis always survive.
8432 if (!object->IsHeapObject() || vm_) {
8433 Push(object);
8434 }
8435}

◆ RecordDeferredCode()

void dart::Serializer::RecordDeferredCode ( CodePtr  ptr)

Definition at line 8015 of file app_snapshot.cc.

8015 {
8016 const intptr_t unit_id = heap_->GetLoadingUnit(code);
8017 ASSERT(unit_id != WeakTable::kNoValue && unit_id != LoadingUnit::kRootId);
8018 (*loading_units_)[unit_id]->AddDeferredObject(code);
8019}

◆ RefId()

intptr_t dart::Serializer::RefId ( ObjectPtr  object) const

Definition at line 7765 of file app_snapshot.cc.

7765 {
7766 auto const id = UnsafeRefId(object);
7767 if (IsAllocatedReference(id)) {
7768 return id;
7769 }
7772 auto& handle = thread()->ObjectHandle();
7773 handle = object;
7774 FATAL("Reference to unreachable object %s", handle.ToCString());
7775}
#define REUSABLE_OBJECT_HANDLESCOPE(thread)
const uintptr_t id

◆ ReserveHeader()

void dart::Serializer::ReserveHeader ( )
inline

Definition at line 340 of file app_snapshot.cc.

340 {
341 // Make room for recording snapshot buffer size.
343 }
DART_FORCE_INLINE void SetPosition(intptr_t value)
Definition: datastream.h:618
static constexpr intptr_t kHeaderSize
Definition: snapshot.h:43

◆ Serialize()

ZoneGrowableArray< Object * > * dart::Serializer::Serialize ( SerializationRoots roots)

Definition at line 8557 of file app_snapshot.cc.

8557 {
8558 // While object_currently_writing_ is initialized to the artificial root, we
8559 // set up a scope to ensure proper flushing to the profile.
8560 Serializer::WritingObjectScope scope(
8562 roots->AddBaseObjects(this);
8563
8564 NoSafepointScope no_safepoint;
8565
8566 roots->PushRoots(this);
8567
8568 // Resolving WeakSerializationReferences and WeakProperties may cause new
8569 // objects to be pushed on the stack, and handling the changes to the stack
8570 // may cause the targets of WeakSerializationReferences and keys of
8571 // WeakProperties to become reachable, so we do this as a fixed point
8572 // computation. Note that reachability is computed monotonically (an object
8573 // can change from not reachable to reachable, but never the reverse), which
8574 // is technically a conservative approximation for WSRs, but doing a strict
8575 // analysis that allows non-monotonic reachability may not halt.
8576 //
8577 // To see this, take a WSR whose replacement causes the target of another WSR
8578 // to become reachable, which then causes the target of the first WSR to
8579 // become reachable, but the only way to reach the target is through the
8580 // target of the second WSR, which was only reachable via the replacement
 8581 // of the first.
8582 //
8583 // In practice, this case doesn't come up as replacements tend to be either
8584 // null, smis, or singleton objects that do not contain WSRs currently.
8585 while (stack_.length() > 0) {
8586 // Strong references.
8587 while (stack_.length() > 0) {
8588 StackEntry entry = stack_.RemoveLast();
8589 Trace(entry.obj, entry.cid_override);
8590 }
8591
8592 // Ephemeron references.
8593#if defined(DART_PRECOMPILER)
8594 if (auto const cluster = CID_CLUSTER(WeakSerializationReference)) {
8595 cluster->RetraceEphemerons(this);
8596 }
8597#endif
8598 if (auto const cluster = CID_CLUSTER(WeakProperty)) {
8599 cluster->RetraceEphemerons(this);
8600 }
8601 }
8602
8603#if defined(DART_PRECOMPILER)
8604 auto const wsr_cluster = CID_CLUSTER(WeakSerializationReference);
8605 if (wsr_cluster != nullptr) {
8606 // Now that we have computed the reachability fixpoint, we remove the
8607 // count of now-reachable WSRs as they are not actually serialized.
8608 num_written_objects_ -= wsr_cluster->Count(this);
8609 // We don't need to write this cluster, so remove it from consideration.
8610 clusters_by_cid_[kWeakSerializationReferenceCid] = nullptr;
8611 }
8612 ASSERT(clusters_by_cid_[kWeakSerializationReferenceCid] == nullptr);
8613#endif
8614
8615 code_cluster_ = CID_CLUSTER(Code);
8616
8617 GrowableArray<SerializationCluster*> clusters;
8618 // The order that PostLoad runs matters for some classes because of
8619 // assumptions during canonicalization, read filling, or post-load filling of
8620 // some classes about what has already been read and/or canonicalized.
8621 // Explicitly add these clusters first, then add the rest ordered by class id.
8622#define ADD_CANONICAL_NEXT(cid) \
8623 if (auto const cluster = canonical_clusters_by_cid_[cid]) { \
8624 clusters.Add(cluster); \
8625 canonical_clusters_by_cid_[cid] = nullptr; \
8626 }
8627#define ADD_NON_CANONICAL_NEXT(cid) \
8628 if (auto const cluster = clusters_by_cid_[cid]) { \
8629 clusters.Add(cluster); \
8630 clusters_by_cid_[cid] = nullptr; \
8631 }
8632 ADD_CANONICAL_NEXT(kOneByteStringCid)
8633 ADD_CANONICAL_NEXT(kTwoByteStringCid)
8634 ADD_CANONICAL_NEXT(kStringCid)
8635 ADD_CANONICAL_NEXT(kMintCid)
8636 ADD_CANONICAL_NEXT(kDoubleCid)
8637 ADD_CANONICAL_NEXT(kTypeParameterCid)
8638 ADD_CANONICAL_NEXT(kTypeCid)
8639 ADD_CANONICAL_NEXT(kTypeArgumentsCid)
8640 // Code cluster should be deserialized before Function as
8641 // FunctionDeserializationCluster::ReadFill uses instructions table
8642 // which is filled in CodeDeserializationCluster::ReadFill.
8643 // Code cluster should also precede ObjectPool as its ReadFill uses
8644 // entry points of stubs.
8645 ADD_NON_CANONICAL_NEXT(kCodeCid)
8646 // The function cluster should be deserialized before any closures, as
8647 // PostLoad for closures caches the entry point found in the function.
8648 ADD_NON_CANONICAL_NEXT(kFunctionCid)
8649 ADD_CANONICAL_NEXT(kClosureCid)
8650#undef ADD_CANONICAL_NEXT
8651#undef ADD_NON_CANONICAL_NEXT
8652 const intptr_t out_of_order_clusters = clusters.length();
8653 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8654 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8655 clusters.Add(cluster);
8656 }
8657 }
8658 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8659 if (auto const cluster = clusters_by_cid_[cid]) {
8660 clusters.Add(clusters_by_cid_[cid]);
8661 }
8662 }
8663 // Put back any taken out temporarily to avoid re-adding them during the loop.
8664 for (intptr_t i = 0; i < out_of_order_clusters; i++) {
8665 const auto& cluster = clusters.At(i);
8666 const intptr_t cid = cluster->cid();
8667 auto const cid_clusters =
8668 cluster->is_canonical() ? canonical_clusters_by_cid_ : clusters_by_cid_;
8669 ASSERT(cid_clusters[cid] == nullptr);
8670 cid_clusters[cid] = cluster;
8671 }
8672
8673 PrepareInstructions(roots->canonicalized_stack_map_entries());
8674
8675 intptr_t num_objects = num_base_objects_ + num_written_objects_;
8676#if defined(ARCH_IS_64_BIT)
8677 if (!Utils::IsInt(32, num_objects)) {
8678 FATAL("Ref overflow");
8679 }
8680#endif
8681
8682 WriteUnsigned(num_base_objects_);
8683 WriteUnsigned(num_objects);
8684 WriteUnsigned(clusters.length());
8685 ASSERT((instructions_table_len_ == 0) || FLAG_precompiled_mode);
8686 WriteUnsigned(instructions_table_len_);
8687 WriteUnsigned(instructions_table_rodata_offset_);
8688
8689 for (SerializationCluster* cluster : clusters) {
8690 cluster->WriteAndMeasureAlloc(this);
8691 bytes_heap_allocated_ += cluster->target_memory_size();
8692#if defined(DEBUG)
8693 Write<int32_t>(next_ref_index_);
8694#endif
8695 }
8696
8697 // We should have assigned a ref to every object we pushed.
8698 ASSERT((next_ref_index_ - 1) == num_objects);
8699 // And recorded them all in [objects_].
8700 ASSERT(objects_->length() == num_objects);
8701
8702#if defined(DART_PRECOMPILER)
8703 if (profile_writer_ != nullptr && wsr_cluster != nullptr) {
8704 // Post-WriteAlloc, we eagerly create artificial nodes for any unreachable
8705 // targets in reachable WSRs if writing a v8 snapshot profile, since they
8706 // will be used in AttributeReference().
8707 //
8708 // Unreachable WSRs may also need artificial nodes, as they may be members
8709 // of other unreachable objects that have artificial nodes in the profile,
8710 // but they are instead lazily handled in CreateArtificialNodeIfNeeded().
8711 wsr_cluster->CreateArtificialTargetNodesIfNeeded(this);
8712 }
8713#endif
8714
8715 for (SerializationCluster* cluster : clusters) {
8716 cluster->WriteAndMeasureFill(this);
8717#if defined(DEBUG)
8718 Write<int32_t>(kSectionMarker);
8719#endif
8720 }
8721
8722 roots->WriteRoots(this);
8723
8724#if defined(DEBUG)
8725 Write<int32_t>(kSectionMarker);
8726#endif
8727
8729
8731
8732 return objects_;
8733}
#define CID_CLUSTER(Type)
#define ADD_CANONICAL_NEXT(cid)
#define ADD_NON_CANONICAL_NEXT(cid)
intptr_t length() const
void ResetObjectIdTable()
Definition: heap.cc:899
Heap * heap() const
void Trace(ObjectPtr object, intptr_t cid_override)
void WriteUnsigned(intptr_t value)
void PrepareInstructions(const CompressedStackMaps &canonical_smap)
static bool IsInt(intptr_t N, T value)
Definition: utils.h:313

◆ set_current_loading_unit_id()

void dart::Serializer::set_current_loading_unit_id ( intptr_t  id)
inline

Definition at line 552 of file app_snapshot.cc.

552 {
553 current_loading_unit_id_ = id;
554 }

◆ set_loading_units()

void dart::Serializer::set_loading_units ( GrowableArray< LoadingUnitSerializationData * > *  units)
inline

Definition at line 548 of file app_snapshot.cc.

548 {
549 loading_units_ = units;
550 }

◆ stream()

NonStreamingWriteStream * dart::Serializer::stream ( )
inline

Definition at line 357 of file app_snapshot.cc.

357{ return stream_; }

◆ Trace()

void dart::Serializer::Trace ( ObjectPtr  object,
intptr_t  cid_override 
)

Definition at line 8437 of file app_snapshot.cc.

8437 {
8438 intptr_t cid;
8439 bool is_canonical;
8440 if (!object->IsHeapObject()) {
8441 // Smis are merged into the Mint cluster because Smis for the writer might
8442 // become Mints for the reader and vice versa.
8443 cid = kMintCid;
8444 is_canonical = true;
8445 } else {
8446 cid = object->GetClassId();
8447 is_canonical = object->untag()->IsCanonical();
8448 }
8449 if (cid_override != kIllegalCid) {
8450 cid = cid_override;
8451 } else if (IsStringClassId(cid)) {
8452 cid = kStringCid;
8453 }
8454
8455 SerializationCluster** cluster_ref =
8456 is_canonical ? &canonical_clusters_by_cid_[cid] : &clusters_by_cid_[cid];
8457 if (*cluster_ref == nullptr) {
8458 *cluster_ref = NewClusterForClass(cid, is_canonical);
8459 if (*cluster_ref == nullptr) {
8460 UnexpectedObject(object, "No serialization cluster defined");
8461 }
8462 }
8463 SerializationCluster* cluster = *cluster_ref;
8464 ASSERT(cluster != nullptr);
8465 if (cluster->is_canonical() != is_canonical) {
8466 FATAL("cluster for %s (cid %" Pd ") %s as canonical, but %s",
8467 cluster->name(), cid,
8468 cluster->is_canonical() ? "marked" : "not marked",
8469 is_canonical ? "should be" : "should not be");
8470 }
8471
8472#if defined(SNAPSHOT_BACKTRACE)
8473 current_parent_ = object;
8474#endif
8475
8476 cluster->Trace(this, object);
8477
8478#if defined(SNAPSHOT_BACKTRACE)
8479 current_parent_ = Object::null();
8480#endif
8481}
SerializationCluster * NewClusterForClass(intptr_t cid, bool is_canonical)
@ kIllegalCid
Definition: class_id.h:214
bool IsStringClassId(intptr_t index)
Definition: class_id.h:350

◆ TraceDataOffset()

void dart::Serializer::TraceDataOffset ( uint32_t  offset)

Definition at line 8366 of file app_snapshot.cc.

8366 {
8367 if (profile_writer_ == nullptr) return;
8368 // ROData cannot be roots.
8369 ASSERT(object_currently_writing_.id_ !=
8371 auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8372 // TODO(sjindel): Give this edge a more appropriate type than element
8373 // (internal, maybe?).
8374 profile_writer_->AttributeReferenceTo(
8375 object_currently_writing_.id_,
8376 V8SnapshotProfileWriter::Reference::Element(0), {offset_space, offset});
8377}

◆ UnexpectedObject()

void dart::Serializer::UnexpectedObject ( ObjectPtr  object,
const char *  message 
)

Definition at line 8483 of file app_snapshot.cc.

8483 {
8484 // Exit the no safepoint scope so we can allocate while printing.
8485 while (thread()->no_safepoint_scope_depth() > 0) {
8487 }
8488 Object& object = Object::Handle(raw_object);
8489 OS::PrintErr("Unexpected object (%s, %s): 0x%" Px " %s\n", message,
8490 Snapshot::KindToCString(kind_), static_cast<uword>(object.ptr()),
8491 object.ToCString());
8492#if defined(SNAPSHOT_BACKTRACE)
8493 while (!object.IsNull()) {
8494 object = ParentOf(object);
8495 OS::PrintErr("referenced by 0x%" Px " %s\n",
8496 static_cast<uword>(object.ptr()), object.ToCString());
8497 }
8498#endif
8499 OS::Abort();
8500}
static DART_NORETURN void Abort()
static const char * KindToCString(Kind kind)
Definition: snapshot.cc:12
void DecrementNoSafepointScopeDepth()
Definition: thread.h:733
Win32Message message
DART_EXPORT bool IsNull(Dart_Handle object)
uintptr_t uword
Definition: globals.h:501
#define Px
Definition: globals.h:410

◆ UnsafeRefId()

intptr_t dart::Serializer::UnsafeRefId ( ObjectPtr  object) const

Definition at line 7777 of file app_snapshot.cc.

7777 {
7778 // The object id weak table holds image offsets for Instructions instead
7779 // of ref indices.
7780 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7781 if (!Snapshot::IncludesCode(kind_) &&
7782 object->GetClassIdMayBeSmi() == kCodeCid) {
7783 return RefId(Object::null());
7784 }
7785 auto id = heap_->GetObjectId(object);
7786 if (id != kUnallocatedReference) {
7787 return id;
7788 }
7789 // This is the only case where we may still see unallocated references after
7790 // WriteAlloc is finished.
7791 if (object->IsWeakSerializationReference()) {
7792 // Lazily set the object ID of the WSR to the object which will replace
7793 // it in the snapshot.
7794 auto const wsr = static_cast<WeakSerializationReferencePtr>(object);
7795 // Either the target or the replacement must be allocated, since the
7796 // WSR is reachable.
7797 id = HasRef(wsr->untag()->target()) ? RefId(wsr->untag()->target())
7798 : RefId(wsr->untag()->replacement());
7799 heap_->SetObjectId(wsr, id);
7800 return id;
7801 }
7803 auto& handle = thread()->ObjectHandle();
7804 handle = object;
7805 FATAL("Reference for object %s is unallocated", handle.ToCString());
7806}
intptr_t RefId(ObjectPtr object) const
raw_obj untag() -> num_entries()) VARIABLE_COMPRESSED_VISITOR(Array, Smi::Value(raw_obj->untag() ->length())) VARIABLE_COMPRESSED_VISITOR(TypedData, TypedData::ElementSizeInBytes(raw_obj->GetClassId()) *Smi::Value(raw_obj->untag() ->length())) VARIABLE_COMPRESSED_VISITOR(Record, RecordShape(raw_obj->untag() ->shape()).num_fields()) VARIABLE_NULL_VISITOR(CompressedStackMaps, CompressedStackMaps::PayloadSizeOf(raw_obj)) VARIABLE_NULL_VISITOR(OneByteString, Smi::Value(raw_obj->untag() ->length())) VARIABLE_NULL_VISITOR(TwoByteString, Smi::Value(raw_obj->untag() ->length())) intptr_t UntaggedField::VisitFieldPointers(FieldPtr raw_obj, ObjectPointerVisitor *visitor)
Definition: raw_object.cc:558

◆ Write()

template<typename T >
void dart::Serializer::Write ( T  value)
inline

Definition at line 410 of file app_snapshot.cc.

410 {
411 BaseWriteStream::Raw<sizeof(T), T>::Write(stream_, value);
412 }
void Write(T value)
uint8_t value
#define T
Definition: precompiler.cc:65

◆ WriteBytes()

void dart::Serializer::WriteBytes ( const void *  addr,
intptr_t  len 
)
inline

Definition at line 421 of file app_snapshot.cc.

421 {
422 stream_->WriteBytes(addr, len);
423 }
void WriteBytes(const void *addr, intptr_t len)
Definition: datastream.h:424

◆ WriteCid()

void dart::Serializer::WriteCid ( intptr_t  cid)
inline

Definition at line 509 of file app_snapshot.cc.

509 {
511 Write<int32_t>(cid);
512 }
COMPILE_ASSERT(kUnreachableReference==WeakTable::kNoValue)

◆ WriteDispatchTable()

void dart::Serializer::WriteDispatchTable ( const Array entries)

Definition at line 8759 of file app_snapshot.cc.

8759 {
8760#if defined(DART_PRECOMPILER)
8761 if (kind() != Snapshot::kFullAOT) return;
8762
8763 // Create an artificial node to which the bytes should be attributed. We
8764 // don't attribute them to entries.ptr(), as we don't want to attribute the
8765 // bytes for printing out a length of 0 to Object::null() when the dispatch
8766 // table is empty.
8767 const intptr_t profile_ref = AssignArtificialRef();
8768 const auto& dispatch_table_profile_id = GetProfileId(profile_ref);
8769 if (profile_writer_ != nullptr) {
8770 profile_writer_->SetObjectTypeAndName(dispatch_table_profile_id,
8771 "DispatchTable", "dispatch_table");
8772 profile_writer_->AddRoot(dispatch_table_profile_id);
8773 }
8774 WritingObjectScope scope(this, dispatch_table_profile_id);
8775 if (profile_writer_ != nullptr) {
8776 // We'll write the Array object as a property of the artificial dispatch
8777 // table node, so Code objects otherwise unreferenced will have it as an
8778 // ancestor.
8779 CreateArtificialNodeIfNeeded(entries.ptr());
8780 AttributePropertyRef(entries.ptr(), "<code entries>");
8781 }
8782
8783 const intptr_t bytes_before = bytes_written();
8784 const intptr_t table_length = entries.IsNull() ? 0 : entries.Length();
8785
8786 ASSERT(table_length <= compiler::target::kWordMax);
8787 WriteUnsigned(table_length);
8788 if (table_length == 0) {
8789 dispatch_table_size_ = bytes_written() - bytes_before;
8790 return;
8791 }
8792
8793 ASSERT(code_cluster_ != nullptr);
8794 // If instructions can be deduped, the code order table in the deserializer
8795 // may not contain all Code objects in the snapshot. Thus, we write the ID
8796 // for the first code object here so we can retrieve it during deserialization
8797 // and calculate the snapshot ID for Code objects from the cluster index.
8798 //
8799 // We could just use the snapshot reference ID of the Code object itself
8800 // instead of the cluster index and avoid this. However, since entries are
8801 // SLEB128 encoded, the size delta for serializing the first ID once is less
8802 // than the size delta of serializing the ID plus kIndexBase for each entry,
8803 // even when Code objects are allocated before all other non-base objects.
8804 //
8805 // We could also map Code objects to the first Code object in the cluster with
8806 // the same entry point and serialize that ID instead, but that loses
8807 // information about which Code object was originally referenced.
8808 WriteUnsigned(code_cluster_->first_ref());
8809
8810 CodePtr previous_code = nullptr;
8811 CodePtr recent[kDispatchTableRecentCount] = {nullptr};
8812 intptr_t recent_index = 0;
8813 intptr_t repeat_count = 0;
8814 for (intptr_t i = 0; i < table_length; i++) {
8815 auto const code = Code::RawCast(entries.At(i));
8816 // First, see if we're repeating the previous entry (invalid, recent, or
8817 // encoded).
8818 if (code == previous_code) {
8819 if (++repeat_count == kDispatchTableMaxRepeat) {
8820 Write(kDispatchTableMaxRepeat);
8821 repeat_count = 0;
8822 }
8823 continue;
8824 }
8825 // Emit any outstanding repeat count before handling the new code value.
8826 if (repeat_count > 0) {
8827 Write(repeat_count);
8828 repeat_count = 0;
8829 }
8830 previous_code = code;
8831 // The invalid entry can be repeated, but is never part of the recent list
8832 // since it already encodes to a single byte.
8833 if (code == Code::null()) {
8834 Write(0);
8835 continue;
8836 }
8837 // Check against the recent entries, and write an encoded reference to
8838 // the recent entry if found.
8839 intptr_t found_index = 0;
8840 for (; found_index < kDispatchTableRecentCount; found_index++) {
8841 if (recent[found_index] == code) break;
8842 }
8843 if (found_index < kDispatchTableRecentCount) {
8844 Write(~found_index);
8845 continue;
8846 }
8847 // We have a non-repeated, non-recent entry, so encode the reference ID of
8848 // the code object and emit that.
8849 auto const code_index = GetCodeIndex(code);
8850 // Use the index in the code cluster, not in the snapshot.
8851 auto const encoded = kDispatchTableIndexBase + code_index;
8853 Write(encoded);
8854 recent[recent_index] = code;
8855 recent_index = (recent_index + 1) & kDispatchTableRecentMask;
8856 }
8857 if (repeat_count > 0) {
8858 Write(repeat_count);
8859 }
8860 dispatch_table_size_ = bytes_written() - bytes_before;
8861#endif // defined(DART_PRECOMPILER)
8862}
intptr_t GetCodeIndex(CodePtr code)
void AttributePropertyRef(ObjectPtr object, const char *property)
constexpr word kWordMax
Definition: runtime_api.h:295

◆ WriteElementRef()

void dart::Serializer::WriteElementRef ( ObjectPtr  object,
intptr_t  index 
)
inline

Definition at line 449 of file app_snapshot.cc.

449 {
450 AttributeElementRef(object, index);
451 WriteRefId(RefId(object));
452 }
void WriteRefId(intptr_t value)
void AttributeElementRef(ObjectPtr object, intptr_t index)

◆ WriteFromTo()

template<typename T , typename... P>
void dart::Serializer::WriteFromTo ( T  obj,
P &&...  args 
)
inline

Definition at line 478 of file app_snapshot.cc.

478 {
479 auto* from = obj->untag()->from();
480 auto* to = obj->untag()->to_snapshot(kind(), args...);
481 WriteRange(obj, from, to);
482 }
DART_NOINLINE void WriteRange(ObjectPtr obj, T from, T to)

◆ WriteInstructions()

void dart::Serializer::WriteInstructions ( InstructionsPtr  instr,
uint32_t  unchecked_offset,
CodePtr  code,
bool  deferred 
)

Definition at line 8326 of file app_snapshot.cc.

8329 {
8330 ASSERT(code != Code::null());
8331
8333 if (deferred) {
8334 return;
8335 }
8336
8337 const intptr_t offset = image_writer_->GetTextOffsetFor(instr, code);
8338#if defined(DART_PRECOMPILER)
8339 if (profile_writer_ != nullptr) {
8340 ASSERT(object_currently_writing_.id_ !=
8342 const auto offset_space = vm_ ? IdSpace::kVmText : IdSpace::kIsolateText;
8343 profile_writer_->AttributeReferenceTo(
8344 object_currently_writing_.id_,
8346 {offset_space, offset});
8347 }
8348
8349 if (Code::IsDiscarded(code)) {
8350 // Discarded Code objects are not supported in the vm isolate snapshot.
8351 ASSERT(!vm_);
8352 return;
8353 }
8354
8355 if (FLAG_precompiled_mode) {
8356 const uint32_t payload_info =
8357 (unchecked_offset << 1) | (Code::HasMonomorphicEntry(code) ? 0x1 : 0x0);
8358 WriteUnsigned(payload_info);
8359 return;
8360 }
8361#endif
8362 Write<uint32_t>(offset);
8363 WriteUnsigned(unchecked_offset);
8364}
bool HasMonomorphicEntry() const
Definition: object.h:6839
bool InCurrentLoadingUnitOrRoot(ObjectPtr obj)

◆ WriteOffsetRef()

void dart::Serializer::WriteOffsetRef ( ObjectPtr  object,
intptr_t  offset 
)
inline

Definition at line 464 of file app_snapshot.cc.

464 {
465 intptr_t id = RefId(object);
466 WriteRefId(id);
467 if (profile_writer_ != nullptr) {
468 if (auto const property = offsets_table_->FieldNameForOffset(
469 object_currently_writing_.cid_, offset)) {
470 AttributePropertyRef(object, property);
471 } else {
473 }
474 }
475 }
const char * FieldNameForOffset(intptr_t cid, intptr_t offset)

◆ WritePropertyRef()

void dart::Serializer::WritePropertyRef ( ObjectPtr  object,
const char *  property 
)
inline

Definition at line 459 of file app_snapshot.cc.

459 {
460 AttributePropertyRef(object, property);
461 WriteRefId(RefId(object));
462 }

◆ WriteRange()

template<typename T >
DART_NOINLINE void dart::Serializer::WriteRange ( ObjectPtr  obj,
T  from,
T  to 
)
inline

Definition at line 485 of file app_snapshot.cc.

485 {
486 for (auto* p = from; p <= to; p++) {
488 p->Decompress(obj->heap_base()),
489 reinterpret_cast<uword>(p) - reinterpret_cast<uword>(obj->untag()));
490 }
491 }
void WriteOffsetRef(ObjectPtr object, intptr_t offset)

◆ WriteRefId()

void dart::Serializer::WriteRefId ( intptr_t  value)
inline

Definition at line 413 of file app_snapshot.cc.

413{ stream_->WriteRefId(value); }
void WriteRefId(intptr_t value)
Definition: datastream.h:409

◆ WriteRootRef()

void dart::Serializer::WriteRootRef ( ObjectPtr  object,
const char *  name = nullptr 
)
inline

Definition at line 431 of file app_snapshot.cc.

431 {
432 intptr_t id = RefId(object);
433 WriteRefId(id);
434 if (profile_writer_ != nullptr) {
435 profile_writer_->AddRoot(GetProfileId(object), name);
436 }
437 }

◆ WriteTokenPosition()

void dart::Serializer::WriteTokenPosition ( TokenPosition  pos)
inline

Definition at line 507 of file app_snapshot.cc.

507{ Write(pos.Serialize()); }
SkPoint pos

◆ WriteUnsigned()

void dart::Serializer::WriteUnsigned ( intptr_t  value)
inline

Definition at line 414 of file app_snapshot.cc.

414{ stream_->WriteUnsigned(value); }
void WriteUnsigned(T value)
Definition: datastream.h:400

◆ WriteUnsigned64()

void dart::Serializer::WriteUnsigned64 ( uint64_t  value)
inline

Definition at line 415 of file app_snapshot.cc.

415{ stream_->WriteUnsigned(value); }

◆ WriteVersionAndFeatures()

void dart::Serializer::WriteVersionAndFeatures ( bool  is_vm_snapshot)

Definition at line 8522 of file app_snapshot.cc.

8522 {
8523 const char* expected_version = Version::SnapshotString();
8524 ASSERT(expected_version != nullptr);
8525 const intptr_t version_len = strlen(expected_version);
8526 WriteBytes(reinterpret_cast<const uint8_t*>(expected_version), version_len);
8527
8528 char* expected_features =
8529 Dart::FeaturesString(IsolateGroup::Current(), is_vm_snapshot, kind_);
8530 ASSERT(expected_features != nullptr);
8531 const intptr_t features_len = strlen(expected_features);
8532 WriteBytes(reinterpret_cast<const uint8_t*>(expected_features),
8533 features_len + 1);
8534 free(expected_features);
8535}
static char * FeaturesString(IsolateGroup *isolate_group, bool is_vm_snapshot, Snapshot::Kind kind)
Definition: dart.cc:1004
static IsolateGroup * Current()
Definition: isolate.h:539
void WriteBytes(const void *addr, intptr_t len)
static const char * SnapshotString()

◆ WriteWordWith32BitWrites()

void dart::Serializer::WriteWordWith32BitWrites ( uword  value)
inline

Definition at line 417 of file app_snapshot.cc.

417 {
419 }
void WriteWordWith32BitWrites(uword value)
Definition: datastream.h:389

◆ zone()

Zone * dart::Serializer::zone ( ) const
inline

Definition at line 529 of file app_snapshot.cc.

529{ return zone_; }

The documentation for this class was generated from the following file: