Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
Classes | Public Member Functions | List of all members
dart::Serializer Class Reference
Inheritance diagram for dart::Serializer:
dart::ThreadStackResource dart::StackResource

Classes

class  WritingObjectScope
 

Public Member Functions

 Serializer (Thread *thread, Snapshot::Kind kind, NonStreamingWriteStream *stream, ImageWriter *image_writer_, bool vm_, V8SnapshotProfileWriter *profile_writer=nullptr)
 
 ~Serializer ()
 
void AddBaseObject (ObjectPtr base_object, const char *type=nullptr, const char *name=nullptr)
 
intptr_t AssignRef (ObjectPtr object)
 
intptr_t AssignArtificialRef (ObjectPtr object=nullptr)
 
intptr_t GetCodeIndex (CodePtr code)
 
void Push (ObjectPtr object, intptr_t cid_override=kIllegalCid)
 
void PushWeak (ObjectPtr object)
 
void AddUntracedRef ()
 
void Trace (ObjectPtr object, intptr_t cid_override)
 
void UnexpectedObject (ObjectPtr object, const char *message)
 
SerializationCluster * NewClusterForClass (intptr_t cid, bool is_canonical)
 
void ReserveHeader ()
 
void FillHeader (Snapshot::Kind kind)
 
void WriteVersionAndFeatures (bool is_vm_snapshot)
 
ZoneGrowableArray< Object * > * Serialize (SerializationRoots *roots)
 
void PrintSnapshotSizes ()
 
NonStreamingWriteStream * stream ()
 
intptr_t bytes_written ()
 
intptr_t bytes_heap_allocated ()
 
template<typename T >
void Write (T value)
 
void WriteRefId (intptr_t value)
 
void WriteUnsigned (intptr_t value)
 
void WriteUnsigned64 (uint64_t value)
 
void WriteWordWith32BitWrites (uword value)
 
void WriteBytes (const void *addr, intptr_t len)
 
void Align (intptr_t alignment, intptr_t offset=0)
 
V8SnapshotProfileWriter::ObjectId GetProfileId (ObjectPtr object) const
 
V8SnapshotProfileWriter::ObjectId GetProfileId (intptr_t ref) const
 
void WriteRootRef (ObjectPtr object, const char *name=nullptr)
 
void AttributeReference (ObjectPtr object, const V8SnapshotProfileWriter::Reference &reference)
 
void AttributeElementRef (ObjectPtr object, intptr_t index)
 
void WriteElementRef (ObjectPtr object, intptr_t index)
 
void AttributePropertyRef (ObjectPtr object, const char *property)
 
void WritePropertyRef (ObjectPtr object, const char *property)
 
void WriteOffsetRef (ObjectPtr object, intptr_t offset)
 
template<typename T , typename... P>
void WriteFromTo (T obj, P &&... args)
 
template<typename T >
DART_NOINLINE void WriteRange (ObjectPtr obj, T from, T to)
 
template<typename T , typename... P>
void PushFromTo (T obj, P &&... args)
 
template<typename T >
DART_NOINLINE void PushRange (ObjectPtr obj, T from, T to)
 
void WriteTokenPosition (TokenPosition pos)
 
void WriteCid (intptr_t cid)
 
void PrepareInstructions (const CompressedStackMaps &canonical_smap)
 
void WriteInstructions (InstructionsPtr instr, uint32_t unchecked_offset, CodePtr code, bool deferred)
 
uint32_t GetDataOffset (ObjectPtr object) const
 
void TraceDataOffset (uint32_t offset)
 
intptr_t GetDataSize () const
 
void WriteDispatchTable (const Array &entries)
 
Heap * heap () const
 
Zone * zone () const
 
Snapshot::Kind kind () const
 
intptr_t next_ref_index () const
 
void DumpCombinedCodeStatistics ()
 
V8SnapshotProfileWriter * profile_writer () const
 
bool CreateArtificialNodeIfNeeded (ObjectPtr obj)
 
bool InCurrentLoadingUnitOrRoot (ObjectPtr obj)
 
void RecordDeferredCode (CodePtr ptr)
 
GrowableArray< LoadingUnitSerializationData * > * loading_units () const
 
void set_loading_units (GrowableArray< LoadingUnitSerializationData * > *units)
 
intptr_t current_loading_unit_id () const
 
void set_current_loading_unit_id (intptr_t id)
 
intptr_t RefId (ObjectPtr object) const
 
intptr_t UnsafeRefId (ObjectPtr object) const
 
bool IsReachable (ObjectPtr object) const
 
bool HasRef (ObjectPtr object) const
 
bool HasArtificialRef (ObjectPtr object) const
 
bool HasProfileNode (ObjectPtr object) const
 
bool IsWritten (ObjectPtr object) const
 
- Public Member Functions inherited from dart::ThreadStackResource
 ThreadStackResource (Thread *T)
 
 ~ThreadStackResource ()
 
Thread * thread () const
 
Isolate * isolate () const
 
IsolateGroup * isolate_group () const
 
- Public Member Functions inherited from dart::StackResource
 StackResource (ThreadState *thread)
 
virtual ~StackResource ()
 
ThreadState * thread () const
 

Additional Inherited Members

- Static Public Member Functions inherited from dart::StackResource
static void Unwind (ThreadState *thread)
 
static void UnwindAbove (ThreadState *thread, StackResource *new_top)
 

Detailed Description

Definition at line 306 of file app_snapshot.cc.

Constructor & Destructor Documentation

◆ Serializer()

dart::Serializer::Serializer ( Thread * thread,
Snapshot::Kind  kind,
NonStreamingWriteStream * stream,
ImageWriter * image_writer_,
bool  vm_,
V8SnapshotProfileWriter * profile_writer = nullptr 
)

Definition at line 7390 of file app_snapshot.cc.

7397 heap_(thread->isolate_group()->heap()),
7398 zone_(thread->zone()),
7399 kind_(kind),
7400 stream_(stream),
7401 image_writer_(image_writer),
7402 canonical_clusters_by_cid_(nullptr),
7403 clusters_by_cid_(nullptr),
7404 stack_(),
7405 num_cids_(0),
7406 num_tlc_cids_(0),
7407 num_base_objects_(0),
7408 num_written_objects_(0),
7409 next_ref_index_(kFirstReference),
7410 vm_(vm),
7411 profile_writer_(profile_writer)
7412#if defined(SNAPSHOT_BACKTRACE)
7413 ,
7414 current_parent_(Object::null()),
7415 parent_pairs_()
7416#endif
7417#if defined(DART_PRECOMPILER)
7418 ,
7419 deduped_instructions_sources_(zone_)
7420#endif
7421{
7422 num_cids_ = thread->isolate_group()->class_table()->NumCids();
7423 num_tlc_cids_ = thread->isolate_group()->class_table()->NumTopLevelCids();
7424 canonical_clusters_by_cid_ = new SerializationCluster*[num_cids_];
7425 for (intptr_t i = 0; i < num_cids_; i++) {
7426 canonical_clusters_by_cid_[i] = nullptr;
7427 }
7428 clusters_by_cid_ = new SerializationCluster*[num_cids_];
7429 for (intptr_t i = 0; i < num_cids_; i++) {
7430 clusters_by_cid_[i] = nullptr;
7431 }
7432 if (profile_writer_ != nullptr) {
7433 offsets_table_ = new (zone_) OffsetsTable(zone_);
7434 }
7435}
intptr_t NumTopLevelCids() const
intptr_t NumCids() const
Heap * heap() const
Definition isolate.h:295
ClassTable * class_table() const
Definition isolate.h:491
static ObjectPtr null()
Definition object.h:433
NonStreamingWriteStream * stream()
Snapshot::Kind kind() const
V8SnapshotProfileWriter * profile_writer() const
Zone * zone() const
IsolateGroup * isolate_group() const
Definition thread.h:540
static constexpr intptr_t kFirstReference

◆ ~Serializer()

dart::Serializer::~Serializer ( )

Definition at line 7437 of file app_snapshot.cc.

7437 {
7438 delete[] canonical_clusters_by_cid_;
7439 delete[] clusters_by_cid_;
7440}

Member Function Documentation

◆ AddBaseObject()

void dart::Serializer::AddBaseObject ( ObjectPtr  base_object,
const char *  type = nullptr,
const char *  name = nullptr 
)

Definition at line 7442 of file app_snapshot.cc.

7444 {
7445 // Don't assign references to the discarded code.
7446 const bool is_discarded_code = base_object->IsHeapObject() &&
7447 base_object->IsCode() &&
7448 Code::IsDiscarded(Code::RawCast(base_object));
7449 if (!is_discarded_code) {
7450 AssignRef(base_object);
7451 }
7452 num_base_objects_++;
7453
7454 if ((profile_writer_ != nullptr) && (type != nullptr)) {
7455 const auto& profile_id = GetProfileId(base_object);
7456 profile_writer_->SetObjectTypeAndName(profile_id, type, name);
7457 profile_writer_->AddRoot(profile_id);
7458 }
7459}
static bool IsDiscarded(const CodePtr code)
Definition object.h:6807
static ObjectPtr RawCast(ObjectPtr obj)
Definition object.h:325
intptr_t AssignRef(ObjectPtr object)
V8SnapshotProfileWriter::ObjectId GetProfileId(ObjectPtr object) const
void SetObjectTypeAndName(const ObjectId &object_id, const char *type, const char *name)
void AddRoot(const ObjectId &object_id, const char *name=nullptr)
const char *const name

◆ AddUntracedRef()

void dart::Serializer::AddUntracedRef ( )
inline

Definition at line 328 of file app_snapshot.cc.

328{ num_written_objects_++; }

◆ Align()

void dart::Serializer::Align ( intptr_t  alignment,
intptr_t  offset = 0 
)
inline

Definition at line 424 of file app_snapshot.cc.

424 {
425 stream_->Align(alignment, offset);
426 }
intptr_t Align(intptr_t alignment, intptr_t offset=0)
Definition datastream.h:341
Point offset

◆ AssignArtificialRef()

intptr_t dart::Serializer::AssignArtificialRef ( ObjectPtr  object = nullptr)

Definition at line 7475 of file app_snapshot.cc.

7475 {
7476 const intptr_t ref = -(next_ref_index_++);
7478 if (object != nullptr) {
7479 ASSERT(!object.IsHeapObject() || !object.IsInstructions());
7480 ASSERT(heap_->GetObjectId(object) == kUnreachableReference);
7481 heap_->SetObjectId(object, ref);
7482 ASSERT(heap_->GetObjectId(object) == ref);
7483 }
7484 return ref;
7485}
intptr_t GetObjectId(ObjectPtr raw_obj) const
Definition heap.h:197
void SetObjectId(ObjectPtr raw_obj, intptr_t object_id)
Definition heap.h:193
#define ASSERT(E)
static constexpr intptr_t kUnreachableReference
static constexpr bool IsArtificialReference(intptr_t ref)

◆ AssignRef()

intptr_t dart::Serializer::AssignRef ( ObjectPtr  object)

Definition at line 7461 of file app_snapshot.cc.

7461 {
7462 ASSERT(IsAllocatedReference(next_ref_index_));
7463
7464 // The object id weak table holds image offsets for Instructions instead
7465 // of ref indices.
7466 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7467 heap_->SetObjectId(object, next_ref_index_);
7468 ASSERT(heap_->GetObjectId(object) == next_ref_index_);
7469
7470 objects_->Add(&Object::ZoneHandle(object));
7471
7472 return next_ref_index_++;
7473}
static Object & ZoneHandle()
Definition object.h:419
static constexpr bool IsAllocatedReference(intptr_t ref)

◆ AttributeElementRef()

void dart::Serializer::AttributeElementRef ( ObjectPtr  object,
intptr_t  index 
)
inline

Definition at line 444 of file app_snapshot.cc.

444 {
445 AttributeReference(object,
447 }
void AttributeReference(ObjectPtr object, const V8SnapshotProfileWriter::Reference &reference)
static Reference Element(intptr_t offset)

◆ AttributePropertyRef()

void dart::Serializer::AttributePropertyRef ( ObjectPtr  object,
const char *  property 
)
inline

Definition at line 454 of file app_snapshot.cc.

454 {
455 AttributeReference(object,
457 }
static Reference Property(const char *name)

◆ AttributeReference()

void dart::Serializer::AttributeReference ( ObjectPtr  object,
const V8SnapshotProfileWriter::Reference &  reference 
)

Definition at line 7511 of file app_snapshot.cc.

7513 {
7514 if (profile_writer_ == nullptr) return;
7515 const auto& object_id = GetProfileId(object);
7516#if defined(DART_PRECOMPILER)
7517 if (object->IsHeapObject() && object->IsWeakSerializationReference()) {
7518 auto const wsr = WeakSerializationReference::RawCast(object);
7519 auto const target = wsr->untag()->target();
7520 const auto& target_id = GetProfileId(target);
7521 if (object_id != target_id) {
7522 const auto& replacement_id = GetProfileId(wsr->untag()->replacement());
7523 ASSERT(object_id == replacement_id);
7524 // The target of the WSR will be replaced in the snapshot, so write
7525 // attributions for both the dropped target and for the replacement.
7526 profile_writer_->AttributeDroppedReferenceTo(
7527 object_currently_writing_.id_, reference, target_id, replacement_id);
7528 return;
7529 }
7530 // The replacement isn't used for this WSR in the snapshot, as either the
7531 // target is strongly referenced or the WSR itself is unreachable, so fall
7532 // through to attributing a reference to the WSR (which shares the profile
7533 // ID of the target).
7534 }
7535#endif
7536 profile_writer_->AttributeReferenceTo(object_currently_writing_.id_,
7537 reference, object_id);
7538}
void AttributeReferenceTo(const ObjectId &from_object_id, const Reference &reference, const ObjectId &to_object_id)
uint32_t * target

◆ bytes_heap_allocated()

intptr_t dart::Serializer::bytes_heap_allocated ( )
inline

Definition at line 359 of file app_snapshot.cc.

359{ return bytes_heap_allocated_; }

◆ bytes_written()

intptr_t dart::Serializer::bytes_written ( )
inline

Definition at line 358 of file app_snapshot.cc.

358{ return stream_->bytes_written(); }
DART_FORCE_INLINE intptr_t bytes_written() const
Definition datastream.h:338

◆ CreateArtificialNodeIfNeeded()

bool dart::Serializer::CreateArtificialNodeIfNeeded ( ObjectPtr  obj)

Definition at line 7600 of file app_snapshot.cc.

7600 {
7601 ASSERT(profile_writer() != nullptr);
7602
7603 // UnsafeRefId will do lazy reference allocation for WSRs.
7604 intptr_t id = UnsafeRefId(obj);
7606 if (id != kUnreachableReference) {
7607 return IsArtificialReference(id);
7608 }
7609 if (obj->IsHeapObject() && obj->IsWeakSerializationReference()) {
7610 auto const target =
7613 // Since the WSR is unreachable, we can replace its id with whatever the
7614 // ID of the target is, whether real or artificial.
7615 id = heap_->GetObjectId(target);
7616 heap_->SetObjectId(obj, id);
7617 return IsArtificialReference(id);
7618 }
7619
7620 const char* type = nullptr;
7621 const char* name = nullptr;
7622 GrowableArray<std::pair<ObjectPtr, V8SnapshotProfileWriter::Reference>> links;
7623 const classid_t cid = obj->GetClassIdMayBeSmi();
7624 switch (cid) {
7625 // For profiling static call target tables in AOT mode.
7626 case kSmiCid: {
7627 type = "Smi";
7628 break;
7629 }
7630 // For profiling per-code object pools in bare instructions mode.
7631 case kObjectPoolCid: {
7632 type = "ObjectPool";
7633 auto const pool = ObjectPool::RawCast(obj);
7634 for (intptr_t i = 0; i < pool->untag()->length_; i++) {
7635 uint8_t bits = pool->untag()->entry_bits()[i];
7636 if (ObjectPool::TypeBits::decode(bits) ==
7637 ObjectPool::EntryType::kTaggedObject) {
7638 auto const elem = pool->untag()->data()[i].raw_obj_;
7639 // Elements should be reachable from the global object pool.
7640 ASSERT(HasRef(elem));
7641 links.Add({elem, V8SnapshotProfileWriter::Reference::Element(i)});
7642 }
7643 }
7644 break;
7645 }
7646 // For profiling static call target tables and the dispatch table in AOT.
7647 case kImmutableArrayCid:
7648 case kArrayCid: {
7649 type = "Array";
7650 auto const array = Array::RawCast(obj);
7651 for (intptr_t i = 0, n = Smi::Value(array->untag()->length()); i < n;
7652 i++) {
7653 ObjectPtr elem = array->untag()->element(i);
7654 links.Add({elem, V8SnapshotProfileWriter::Reference::Element(i)});
7655 }
7656 break;
7657 }
7658 // For profiling the dispatch table.
7659 case kCodeCid: {
7660 type = "Code";
7661 auto const code = Code::RawCast(obj);
7663 links.Add({code->untag()->owner(),
7665 break;
7666 }
7667 case kFunctionCid: {
7668 FunctionPtr func = static_cast<FunctionPtr>(obj);
7669 type = "Function";
7671 func);
7672 links.Add({func->untag()->owner(),
7674 ObjectPtr data = func->untag()->data();
7675 if (data->GetClassId() == kClosureDataCid) {
7676 links.Add(
7678 }
7679 break;
7680 }
7681 case kClosureDataCid: {
7682 auto data = static_cast<ClosureDataPtr>(obj);
7683 type = "ClosureData";
7684 links.Add(
7685 {data->untag()->parent_function(),
7687 break;
7688 }
7689 case kClassCid: {
7690 ClassPtr cls = static_cast<ClassPtr>(obj);
7691 type = "Class";
7692 name = String::ToCString(thread(), cls->untag()->name());
7693 links.Add({cls->untag()->library(),
7695 break;
7696 }
7697 case kPatchClassCid: {
7698 PatchClassPtr patch_cls = static_cast<PatchClassPtr>(obj);
7699 type = "PatchClass";
7700 links.Add(
7701 {patch_cls->untag()->wrapped_class(),
7703 break;
7704 }
7705 case kLibraryCid: {
7706 LibraryPtr lib = static_cast<LibraryPtr>(obj);
7707 type = "Library";
7708 name = String::ToCString(thread(), lib->untag()->url());
7709 break;
7710 }
7711 case kFunctionTypeCid: {
7712 type = "FunctionType";
7713 break;
7714 };
7715 case kRecordTypeCid: {
7716 type = "RecordType";
7717 break;
7718 };
7719 default:
7720 FATAL("Request to create artificial node for object with cid %d", cid);
7721 }
7722
7723 id = AssignArtificialRef(obj);
7724 Serializer::WritingObjectScope scope(this, type, obj, name);
7725 for (const auto& link : links) {
7727 AttributeReference(link.first, link.second);
7728 }
7729 return true;
7730}
AutoreleasePool pool
static constexpr T decode(S value)
Definition bitfield.h:173
static const char * MakeDisambiguatedCodeName(Serializer *s, CodePtr c)
static const char * MakeDisambiguatedFunctionName(Serializer *s, FunctionPtr f)
UntaggedObject * untag() const
virtual const char * ToCString() const
Definition object.h:366
bool HasRef(ObjectPtr object) const
intptr_t AssignArtificialRef(ObjectPtr object=nullptr)
intptr_t UnsafeRefId(ObjectPtr object) const
bool CreateArtificialNodeIfNeeded(ObjectPtr obj)
intptr_t Value() const
Definition object.h:9969
#define FATAL(error)
link(from_root, to_root)
Definition dart_pkg.py:44
int32_t classid_t
Definition globals.h:524
static constexpr intptr_t kUnallocatedReference
const intptr_t cid
static int8_t data[kExtLength]

◆ current_loading_unit_id()

intptr_t dart::Serializer::current_loading_unit_id ( ) const
inline

Definition at line 551 of file app_snapshot.cc.

551{ return current_loading_unit_id_; }

◆ DumpCombinedCodeStatistics()

void dart::Serializer::DumpCombinedCodeStatistics ( )

◆ FillHeader()

void dart::Serializer::FillHeader ( Snapshot::Kind  kind)
inline

Definition at line 345 of file app_snapshot.cc.

345 {
346 Snapshot* header = reinterpret_cast<Snapshot*>(stream_->buffer());
347 header->set_magic();
348 header->set_length(stream_->bytes_written());
349 header->set_kind(kind);
350 }
static const char header[]
Definition skpbench.cpp:88

◆ GetCodeIndex()

intptr_t dart::Serializer::GetCodeIndex ( CodePtr  code)

◆ GetDataOffset()

uint32_t dart::Serializer::GetDataOffset ( ObjectPtr  object) const

Definition at line 8347 of file app_snapshot.cc.

8347 {
8348#if defined(SNAPSHOT_BACKTRACE)
8349 return image_writer_->GetDataOffsetFor(object, ParentOf(object));
8350#else
8351 return image_writer_->GetDataOffsetFor(object);
8352#endif
8353}
uint32_t GetDataOffsetFor(ObjectPtr raw_object)

◆ GetDataSize()

intptr_t dart::Serializer::GetDataSize ( ) const

Definition at line 8355 of file app_snapshot.cc.

8355 {
8356 if (image_writer_ == nullptr) {
8357 return 0;
8358 }
8359 return image_writer_->data_size();
8360}
intptr_t data_size() const

◆ GetProfileId() [1/2]

V8SnapshotProfileWriter::ObjectId dart::Serializer::GetProfileId ( intptr_t  ref) const

Definition at line 7502 of file app_snapshot.cc.

7503 {
7504 if (IsArtificialReference(heap_id)) {
7505 return {IdSpace::kArtificial, -heap_id};
7506 }
7507 ASSERT(IsAllocatedReference(heap_id));
7508 return {IdSpace::kSnapshot, heap_id};
7509}

◆ GetProfileId() [2/2]

V8SnapshotProfileWriter::ObjectId dart::Serializer::GetProfileId ( ObjectPtr  object) const

Definition at line 7495 of file app_snapshot.cc.

7496 {
7497 // Instructions are handled separately.
7498 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7499 return GetProfileId(UnsafeRefId(object));
7500}

◆ HasArtificialRef()

bool dart::Serializer::HasArtificialRef ( ObjectPtr  object) const
inline

Definition at line 574 of file app_snapshot.cc.

574 {
575 return IsArtificialReference(heap_->GetObjectId(object));
576 }

◆ HasProfileNode()

bool dart::Serializer::HasProfileNode ( ObjectPtr  object) const
inline

Definition at line 579 of file app_snapshot.cc.

579 {
580 ASSERT(profile_writer_ != nullptr);
581 return profile_writer_->HasId(GetProfileId(object));
582 }
bool HasId(const ObjectId &object_id)

◆ HasRef()

bool dart::Serializer::HasRef ( ObjectPtr  object) const
inline

Definition at line 570 of file app_snapshot.cc.

570 {
571 return IsAllocatedReference(heap_->GetObjectId(object));
572 }

◆ heap()

Heap * dart::Serializer::heap ( ) const
inline

Definition at line 528 of file app_snapshot.cc.

528{ return heap_; }

◆ InCurrentLoadingUnitOrRoot()

bool dart::Serializer::InCurrentLoadingUnitOrRoot ( ObjectPtr  obj)

Definition at line 7972 of file app_snapshot.cc.

7972 {
7973 if (loading_units_ == nullptr) return true;
7974
7975 intptr_t unit_id = heap_->GetLoadingUnit(obj);
7976 if (unit_id == WeakTable::kNoValue) {
7977 FATAL("Missing loading unit assignment: %s\n",
7978 Object::Handle(obj).ToCString());
7979 }
7980 return unit_id == LoadingUnit::kRootId || unit_id == current_loading_unit_id_;
7981}
intptr_t GetLoadingUnit(ObjectPtr raw_obj) const
Definition heap.h:207
static constexpr intptr_t kRootId
Definition object.h:7940
static Object & Handle()
Definition object.h:407
static constexpr intptr_t kNoValue
Definition weak_table.h:18

◆ IsReachable()

bool dart::Serializer::IsReachable ( ObjectPtr  object) const
inline

Definition at line 566 of file app_snapshot.cc.

566 {
567 return IsReachableReference(heap_->GetObjectId(object));
568 }
static constexpr bool IsReachableReference(intptr_t ref)

◆ IsWritten()

bool dart::Serializer::IsWritten ( ObjectPtr  object) const
inline

Definition at line 583 of file app_snapshot.cc.

583 {
584 return heap_->GetObjectId(object) > num_base_objects_;
585 }

◆ kind()

Snapshot::Kind dart::Serializer::kind ( ) const
inline

Definition at line 530 of file app_snapshot.cc.

530{ return kind_; }

◆ loading_units()

GrowableArray< LoadingUnitSerializationData * > * dart::Serializer::loading_units ( ) const
inline

Definition at line 545 of file app_snapshot.cc.

545 {
546 return loading_units_;
547 }

◆ NewClusterForClass()

SerializationCluster * dart::Serializer::NewClusterForClass ( intptr_t  cid,
bool  is_canonical 
)

Definition at line 7801 of file app_snapshot.cc.

7802 {
7803#if defined(DART_PRECOMPILED_RUNTIME)
7804 UNREACHABLE();
7805 return nullptr;
7806#else
7807 Zone* Z = zone_;
7808 if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
7809 Push(isolate_group()->class_table()->At(cid));
7810 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7811 }
7813 return new (Z) TypedDataViewSerializationCluster(cid);
7814 }
7816 return new (Z) ExternalTypedDataSerializationCluster(cid);
7817 }
7818 if (IsTypedDataClassId(cid)) {
7819 return new (Z) TypedDataSerializationCluster(cid);
7820 }
7821
7822#if !defined(DART_COMPRESSED_POINTERS)
7823 // Sometimes we write memory images for read-only objects that contain no
7824 // pointers. These can be mmapped directly, needing no relocation, and added
7825 // to the list of heap pages. This gives us lazy/demand paging from the OS.
7826 // We do not do this for snapshots without code to keep snapshots portable
7827 // between machines with different word sizes. We do not do this when we use
7828 // compressed pointers because we cannot always control the load address of
7829 // the memory image, and it might be outside the 4GB region addressable by
7830 // compressed pointers.
7831 if (Snapshot::IncludesCode(kind_)) {
7832 if (auto const type = ReadOnlyObjectType(cid)) {
7833 return new (Z) RODataSerializationCluster(Z, type, cid, is_canonical);
7834 }
7835 }
7836#endif
7837
7838 const bool cluster_represents_canonical_set =
7839 current_loading_unit_id_ <= LoadingUnit::kRootId && is_canonical;
7840
7841 switch (cid) {
7842 case kClassCid:
7843 return new (Z) ClassSerializationCluster(num_cids_ + num_tlc_cids_);
7844 case kTypeParametersCid:
7845 return new (Z) TypeParametersSerializationCluster();
7846 case kTypeArgumentsCid:
7847 return new (Z) TypeArgumentsSerializationCluster(
7848 is_canonical, cluster_represents_canonical_set);
7849 case kPatchClassCid:
7850 return new (Z) PatchClassSerializationCluster();
7851 case kFunctionCid:
7852 return new (Z) FunctionSerializationCluster();
7853 case kClosureDataCid:
7854 return new (Z) ClosureDataSerializationCluster();
7855 case kFfiTrampolineDataCid:
7856 return new (Z) FfiTrampolineDataSerializationCluster();
7857 case kFieldCid:
7858 return new (Z) FieldSerializationCluster();
7859 case kScriptCid:
7860 return new (Z) ScriptSerializationCluster();
7861 case kLibraryCid:
7862 return new (Z) LibrarySerializationCluster();
7863 case kNamespaceCid:
7864 return new (Z) NamespaceSerializationCluster();
7865 case kKernelProgramInfoCid:
7866 return new (Z) KernelProgramInfoSerializationCluster();
7867 case kCodeCid:
7868 return new (Z) CodeSerializationCluster(heap_);
7869 case kObjectPoolCid:
7870 return new (Z) ObjectPoolSerializationCluster();
7871 case kPcDescriptorsCid:
7872 return new (Z) PcDescriptorsSerializationCluster();
7873 case kCodeSourceMapCid:
7874 return new (Z) CodeSourceMapSerializationCluster();
7875 case kCompressedStackMapsCid:
7876 return new (Z) CompressedStackMapsSerializationCluster();
7877 case kExceptionHandlersCid:
7878 return new (Z) ExceptionHandlersSerializationCluster();
7879 case kContextCid:
7880 return new (Z) ContextSerializationCluster();
7881 case kContextScopeCid:
7882 return new (Z) ContextScopeSerializationCluster();
7883 case kUnlinkedCallCid:
7884 return new (Z) UnlinkedCallSerializationCluster();
7885 case kICDataCid:
7886 return new (Z) ICDataSerializationCluster();
7887 case kMegamorphicCacheCid:
7888 return new (Z) MegamorphicCacheSerializationCluster();
7889 case kSubtypeTestCacheCid:
7890 return new (Z) SubtypeTestCacheSerializationCluster();
7891 case kLoadingUnitCid:
7892 return new (Z) LoadingUnitSerializationCluster();
7893 case kLanguageErrorCid:
7894 return new (Z) LanguageErrorSerializationCluster();
7895 case kUnhandledExceptionCid:
7896 return new (Z) UnhandledExceptionSerializationCluster();
7897 case kLibraryPrefixCid:
7898 return new (Z) LibraryPrefixSerializationCluster();
7899 case kTypeCid:
7900 return new (Z) TypeSerializationCluster(is_canonical,
7901 cluster_represents_canonical_set);
7902 case kFunctionTypeCid:
7903 return new (Z) FunctionTypeSerializationCluster(
7904 is_canonical, cluster_represents_canonical_set);
7905 case kRecordTypeCid:
7906 return new (Z) RecordTypeSerializationCluster(
7907 is_canonical, cluster_represents_canonical_set);
7908 case kTypeParameterCid:
7909 return new (Z) TypeParameterSerializationCluster(
7910 is_canonical, cluster_represents_canonical_set);
7911 case kClosureCid:
7912 return new (Z) ClosureSerializationCluster(is_canonical);
7913 case kMintCid:
7914 return new (Z) MintSerializationCluster(is_canonical);
7915 case kDoubleCid:
7916 return new (Z) DoubleSerializationCluster(is_canonical);
7917 case kInt32x4Cid:
7918 case kFloat32x4Cid:
7919 case kFloat64x2Cid:
7920 return new (Z) Simd128SerializationCluster(cid, is_canonical);
7921 case kGrowableObjectArrayCid:
7922 return new (Z) GrowableObjectArraySerializationCluster();
7923 case kRecordCid:
7924 return new (Z) RecordSerializationCluster(is_canonical);
7925 case kStackTraceCid:
7926 return new (Z) StackTraceSerializationCluster();
7927 case kRegExpCid:
7928 return new (Z) RegExpSerializationCluster();
7929 case kWeakPropertyCid:
7930 return new (Z) WeakPropertySerializationCluster();
7931 case kMapCid:
7932 // We do not have mutable hash maps in snapshots.
7933 UNREACHABLE();
7934 case kConstMapCid:
7935 return new (Z) MapSerializationCluster(is_canonical, kConstMapCid);
7936 case kSetCid:
7937 // We do not have mutable hash sets in snapshots.
7938 UNREACHABLE();
7939 case kConstSetCid:
7940 return new (Z) SetSerializationCluster(is_canonical, kConstSetCid);
7941 case kArrayCid:
7942 return new (Z) ArraySerializationCluster(is_canonical, kArrayCid);
7943 case kImmutableArrayCid:
7944 return new (Z)
7945 ArraySerializationCluster(is_canonical, kImmutableArrayCid);
7946 case kWeakArrayCid:
7947 return new (Z) WeakArraySerializationCluster();
7948 case kStringCid:
7949 return new (Z) StringSerializationCluster(
7950 is_canonical, cluster_represents_canonical_set && !vm_);
7951#define CASE_FFI_CID(name) case kFfi##name##Cid:
7953#undef CASE_FFI_CID
7954 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7955 case kDeltaEncodedTypedDataCid:
7956 return new (Z) DeltaEncodedTypedDataSerializationCluster();
7957 case kWeakSerializationReferenceCid:
7958#if defined(DART_PRECOMPILER)
7959 ASSERT(kind_ == Snapshot::kFullAOT);
7960 return new (Z) WeakSerializationReferenceSerializationCluster();
7961#endif
7962 default:
7963 break;
7964 }
7965
7966 // The caller will check for nullptr and provide an error with more context
7967 // than is available here.
7968 return nullptr;
7969#endif // !DART_PRECOMPILED_RUNTIME
7970}
#define CASE_FFI_CID(name)
#define UNREACHABLE()
Definition assert.h:248
#define Z
#define CLASS_LIST_FFI_TYPE_MARKER(V)
Definition class_id.h:165
void Push(ObjectPtr object, intptr_t cid_override=kIllegalCid)
static bool IncludesCode(Kind kind)
Definition snapshot.h:67
IsolateGroup * isolate_group() const
bool IsTypedDataViewClassId(intptr_t index)
Definition class_id.h:439
bool IsTypedDataClassId(intptr_t index)
Definition class_id.h:433
@ kNumPredefinedCids
Definition class_id.h:257
bool IsExternalTypedDataClassId(intptr_t index)
Definition class_id.h:447

◆ next_ref_index()

intptr_t dart::Serializer::next_ref_index ( ) const
inline

Definition at line 531 of file app_snapshot.cc.

531{ return next_ref_index_; }

◆ PrepareInstructions()

void dart::Serializer::PrepareInstructions ( const CompressedStackMaps canonical_smap)

Definition at line 8081 of file app_snapshot.cc.

8082 {
8083 if (!Snapshot::IncludesCode(kind())) return;
8084
8085 // Code objects that have identical/duplicate instructions must be adjacent in
8086 // the order that Code objects are written because the encoding of the
8087 // reference from the Code to the Instructions assumes monotonically
8088 // increasing offsets as part of a delta encoding. Also the code order table
8089 // that allows for mapping return addresses back to Code objects depends on
8090 // this sorting.
8091 if (code_cluster_ != nullptr) {
8092 CodeSerializationCluster::Sort(this, code_cluster_->objects());
8093 }
8094 if ((loading_units_ != nullptr) &&
8095 (current_loading_unit_id_ == LoadingUnit::kRootId)) {
8096 for (intptr_t i = LoadingUnit::kRootId + 1; i < loading_units_->length();
8097 i++) {
8098 auto unit_objects = loading_units_->At(i)->deferred_objects();
8099 CodeSerializationCluster::Sort(this, unit_objects);
8100 ASSERT(unit_objects->length() == 0 || code_cluster_ != nullptr);
8101 for (intptr_t j = 0; j < unit_objects->length(); j++) {
8102 code_cluster_->deferred_objects()->Add(unit_objects->At(j)->ptr());
8103 }
8104 }
8105 }
8106
8107#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8108 if (kind() == Snapshot::kFullAOT) {
8109 // Group the code objects whose instructions are not being deferred in this
8110 // snapshot unit in the order they will be written: first the code objects
8111 // encountered for the first time in this unit being written by the
8112 // CodeSerializationCluster, then code objects previously deferred whose
8113 // instructions are now written by UnitSerializationRoots. This order needs
8114 // to be known to finalize bare-instructions-mode's PC-relative calls.
8115 GrowableArray<CodePtr> code_objects;
8116 if (code_cluster_ != nullptr) {
8117 auto in = code_cluster_->objects();
8118 for (intptr_t i = 0; i < in->length(); i++) {
8119 code_objects.Add(in->At(i));
8120 }
8121 }
8122 if (loading_units_ != nullptr) {
8123 auto in =
8124 loading_units_->At(current_loading_unit_id_)->deferred_objects();
8125 for (intptr_t i = 0; i < in->length(); i++) {
8126 code_objects.Add(in->At(i)->ptr());
8127 }
8128 }
8129
8130 GrowableArray<ImageWriterCommand> writer_commands;
8131 RelocateCodeObjects(vm_, &code_objects, &writer_commands);
8132 image_writer_->PrepareForSerialization(&writer_commands);
8133
8134 if (code_objects.length() == 0) {
8135 return;
8136 }
8137
8138 // Build UntaggedInstructionsTable::Data object to be added to the
8139 // read-only data section of the snapshot. It contains:
8140 //
8141 // - a binary search table mapping an Instructions entry point to its
8142 // stack maps (by offset from the beginning of the Data object);
8143 // - followed by stack maps bytes;
8144 // - followed by canonical stack map entries.
8145 //
8146 struct StackMapInfo : public ZoneAllocated {
8147 CompressedStackMapsPtr map;
8148 intptr_t use_count;
8149 uint32_t offset;
8150 };
8151
8152 GrowableArray<StackMapInfo*> stack_maps;
8153 IntMap<StackMapInfo*> stack_maps_info;
8154
8155 // Build code_index_ (which maps Instructions object to the order in
8156 // which they appear in the code section in the end) and collect all
8157 // stack maps.
8158 // We also find the first Instructions object which is going to have
8159 // Code object associated with it. This will allow to reduce the binary
8160 // search space when searching specifically for the code object in runtime.
8161 uint32_t total = 0;
8162 intptr_t not_discarded_count = 0;
8163 uint32_t first_entry_with_code = 0;
8164 for (auto& cmd : writer_commands) {
8166 RELEASE_ASSERT(code_objects[total] ==
8167 cmd.insert_instruction_of_code.code);
8168 ASSERT(!Code::IsDiscarded(cmd.insert_instruction_of_code.code) ||
8169 (not_discarded_count == 0));
8170 if (!Code::IsDiscarded(cmd.insert_instruction_of_code.code)) {
8171 if (not_discarded_count == 0) {
8172 first_entry_with_code = total;
8173 }
8174 not_discarded_count++;
8175 }
8176 total++;
8177
8178 // Update code_index_.
8179 {
8180 const intptr_t instr = static_cast<intptr_t>(
8181 cmd.insert_instruction_of_code.code->untag()->instructions_);
8182 ASSERT(!code_index_.HasKey(instr));
8183 code_index_.Insert(instr, total);
8184 }
8185
8186 // Collect stack maps.
8187 CompressedStackMapsPtr stack_map =
8188 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8189 const intptr_t key = static_cast<intptr_t>(stack_map);
8190
8191 if (stack_maps_info.HasKey(key)) {
8192 stack_maps_info.Lookup(key)->use_count++;
8193 } else {
8194 auto info = new StackMapInfo();
8195 info->map = stack_map;
8196 info->use_count = 1;
8197 stack_maps.Add(info);
8198 stack_maps_info.Insert(key, info);
8199 }
8200 }
8201 }
8202 ASSERT(static_cast<intptr_t>(total) == code_index_.Length());
8203 instructions_table_len_ = not_discarded_count;
8204
8205 // Sort stack maps by usage so that most commonly used stack maps are
8206 // together at the start of the Data object.
8207 stack_maps.Sort([](StackMapInfo* const* a, StackMapInfo* const* b) {
8208 if ((*a)->use_count < (*b)->use_count) return 1;
8209 if ((*a)->use_count > (*b)->use_count) return -1;
8210 return 0;
8211 });
8212
8213 // Build Data object.
8214 MallocWriteStream pc_mapping(4 * KB);
8215
8216 // Write the header out.
8217 {
8218 UntaggedInstructionsTable::Data header;
8219 memset(&header, 0, sizeof(header));
8220 header.length = total;
8221 header.first_entry_with_code = first_entry_with_code;
8222 pc_mapping.WriteFixed<UntaggedInstructionsTable::Data>(header);
8223 }
8224
8225 // Reserve space for the binary search table.
8226 for (auto& cmd : writer_commands) {
8228 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>({0, 0});
8229 }
8230 }
8231
8232 // Now write collected stack maps after the binary search table.
8233 auto write_stack_map = [&](CompressedStackMapsPtr smap) {
8234 const auto flags_and_size = smap->untag()->payload()->flags_and_size();
8235 const auto payload_size =
8237 pc_mapping.WriteFixed<uint32_t>(flags_and_size);
8238 pc_mapping.WriteBytes(smap->untag()->payload()->data(), payload_size);
8239 };
8240
8241 for (auto sm : stack_maps) {
8242 sm->offset = pc_mapping.bytes_written();
8243 write_stack_map(sm->map);
8244 }
8245
8246 // Write canonical entries (if any).
8247 if (!canonical_stack_map_entries.IsNull()) {
8248 auto header = reinterpret_cast<UntaggedInstructionsTable::Data*>(
8249 pc_mapping.buffer());
8250 header->canonical_stack_map_entries_offset = pc_mapping.bytes_written();
8251 write_stack_map(canonical_stack_map_entries.ptr());
8252 }
8253 const auto total_bytes = pc_mapping.bytes_written();
8254
8255 // Now that we have offsets to all stack maps we can write binary
8256 // search table.
8257 pc_mapping.SetPosition(
8258 sizeof(UntaggedInstructionsTable::Data)); // Skip the header.
8259 for (auto& cmd : writer_commands) {
8261 CompressedStackMapsPtr smap =
8262 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8263 const auto offset =
8264 stack_maps_info.Lookup(static_cast<intptr_t>(smap))->offset;
8265 const auto entry = image_writer_->GetTextOffsetFor(
8266 Code::InstructionsOf(cmd.insert_instruction_of_code.code),
8267 cmd.insert_instruction_of_code.code);
8268
8269 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>(
8270 {static_cast<uint32_t>(entry), offset});
8271 }
8272 }
8273 // Restore position so that Steal does not truncate the buffer.
8274 pc_mapping.SetPosition(total_bytes);
8275
8276 intptr_t length = 0;
8277 uint8_t* bytes = pc_mapping.Steal(&length);
8278
8279 instructions_table_rodata_offset_ =
8280 image_writer_->AddBytesToData(bytes, length);
8281 // Attribute all bytes in this object to the root for simplicity.
8282 if (profile_writer_ != nullptr) {
8283 const auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8284 profile_writer_->AttributeReferenceTo(
8287 "<instructions-table-rodata>"),
8288 {offset_space, instructions_table_rodata_offset_});
8289 }
8290 }
8291#endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8292}
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
Definition DM.cpp:213
#define RELEASE_ASSERT(cond)
Definition assert.h:327
void Add(const T &value)
static void Sort(Serializer *s, GrowableArray< CodePtr > *codes)
GrowableArray< CodePtr > * objects()
GrowableArray< CodePtr > * deferred_objects()
static InstructionsPtr InstructionsOf(const CodePtr code)
Definition object.h:6748
int32_t GetTextOffsetFor(InstructionsPtr instructions, CodePtr code)
void PrepareForSerialization(GrowableArray< ImageWriterCommand > *commands)
uint32_t AddBytesToData(uint8_t *bytes, intptr_t length)
static const ObjectId kArtificialRootId
static bool b
struct MyStruct a[10]
size_t length
constexpr intptr_t KB
Definition globals.h:528
SI auto map(std::index_sequence< I... >, Fn &&fn, const Args &... args) -> skvx::Vec< sizeof...(I), decltype(fn(args[0]...))>
Definition SkVx.h:680

◆ PrintSnapshotSizes()

void dart::Serializer::PrintSnapshotSizes ( )

Definition at line 8832 of file app_snapshot.cc.

8832 {
8833#if !defined(DART_PRECOMPILED_RUNTIME)
8834 if (FLAG_print_snapshot_sizes_verbose) {
8835 TextBuffer buffer(1024);
8836 // Header, using format sizes matching those below to ensure alignment.
8837 buffer.Printf("%25s", "Cluster");
8838 buffer.Printf(" %6s", "Objs");
8839 buffer.Printf(" %8s", "Size");
8840 buffer.Printf(" %8s", "Fraction");
8841 buffer.Printf(" %10s", "Cumulative");
8842 buffer.Printf(" %8s", "HeapSize");
8843 buffer.Printf(" %5s", "Cid");
8844 buffer.Printf(" %9s", "Canonical");
8845 buffer.AddString("\n");
8846 GrowableArray<SerializationCluster*> clusters_by_size;
8847 for (intptr_t cid = 1; cid < num_cids_; cid++) {
8848 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8849 clusters_by_size.Add(cluster);
8850 }
8851 if (auto const cluster = clusters_by_cid_[cid]) {
8852 clusters_by_size.Add(cluster);
8853 }
8854 }
8855 intptr_t text_size = 0;
8856 if (image_writer_ != nullptr) {
8857 auto const text_object_count = image_writer_->GetTextObjectCount();
8858 text_size = image_writer_->text_size();
8859 intptr_t trampoline_count, trampoline_size;
8860 image_writer_->GetTrampolineInfo(&trampoline_count, &trampoline_size);
8861 auto const instructions_count = text_object_count - trampoline_count;
8862 auto const instructions_size = text_size - trampoline_size;
8863 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8864 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Instructions"),
8865 instructions_count, instructions_size));
8866 if (trampoline_size > 0) {
8867 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8868 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Trampoline"),
8869 trampoline_count, trampoline_size));
8870 }
8871 }
8872 // The dispatch_table_size_ will be 0 if the snapshot did not include a
8873 // dispatch table (i.e., the VM snapshot). For a precompiled isolate
8874 // snapshot, we always serialize at least _one_ byte for the DispatchTable.
8875 if (dispatch_table_size_ > 0) {
8876 const auto& dispatch_table_entries = Array::Handle(
8877 zone_,
8878 isolate_group()->object_store()->dispatch_table_code_entries());
8879 auto const entry_count =
8880 dispatch_table_entries.IsNull() ? 0 : dispatch_table_entries.Length();
8881 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8882 "DispatchTable", entry_count, dispatch_table_size_));
8883 }
8884 if (instructions_table_len_ > 0) {
8885 const intptr_t memory_size =
8886 compiler::target::InstructionsTable::InstanceSize() +
8887 compiler::target::Array::InstanceSize(instructions_table_len_);
8888 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8889 "InstructionsTable", instructions_table_len_, 0, memory_size));
8890 }
8891 clusters_by_size.Sort(CompareClusters);
8892 double total_size =
8893 static_cast<double>(bytes_written() + GetDataSize() + text_size);
8894 double cumulative_fraction = 0.0;
8895 for (intptr_t i = 0; i < clusters_by_size.length(); i++) {
8896 SerializationCluster* cluster = clusters_by_size[i];
8897 double fraction = static_cast<double>(cluster->size()) / total_size;
8898 cumulative_fraction += fraction;
8899 buffer.Printf("%25s", cluster->name());
8900 buffer.Printf(" %6" Pd "", cluster->num_objects());
8901 buffer.Printf(" %8" Pd "", cluster->size());
8902 buffer.Printf(" %1.6lf", fraction);
8903 buffer.Printf(" %1.8lf", cumulative_fraction);
8904 buffer.Printf(" %8" Pd "", cluster->target_memory_size());
8905 if (cluster->cid() != -1) {
8906 buffer.Printf(" %5" Pd "", cluster->cid());
8907 } else {
8908 buffer.Printf(" %5s", "");
8909 }
8910 if (cluster->is_canonical()) {
8911 buffer.Printf(" %9s", "canonical");
8912 } else {
8913 buffer.Printf(" %9s", "");
8914 }
8915 buffer.AddString("\n");
8916 }
8917 OS::PrintErr("%s", buffer.buffer());
8918 }
8919#endif // !defined(DART_PRECOMPILED_RUNTIME)
8920}
static size_t total_size(SkSBlockAllocator< N > &pool)
intptr_t GetTextObjectCount() const
static const char * TagObjectTypeAsReadOnly(Zone *zone, const char *type)
intptr_t text_size() const
void GetTrampolineInfo(intptr_t *count, intptr_t *size) const
static void static void PrintErr(const char *format,...) PRINTF_ATTRIBUTE(1
intptr_t bytes_written()
intptr_t GetDataSize() const
static const uint8_t buffer[]
static int CompareClusters(SerializationCluster *const *a, SerializationCluster *const *b)
#define Pd
Definition globals.h:408

◆ profile_writer()

V8SnapshotProfileWriter * dart::Serializer::profile_writer ( ) const
inline

Definition at line 535 of file app_snapshot.cc.

535{ return profile_writer_; }

◆ Push()

void dart::Serializer::Push ( ObjectPtr  object,
intptr_t  cid_override = kIllegalCid 
)

Definition at line 8363 of file app_snapshot.cc.

8363 {
8364 const bool is_code = object->IsHeapObject() && object->IsCode();
8365 if (is_code && !Snapshot::IncludesCode(kind_)) {
8366 return; // Do not trace, will write null.
8367 }
8368
8369 intptr_t id = heap_->GetObjectId(object);
8370 if (id == kUnreachableReference) {
8371 // When discovering the transitive closure of objects reachable from the
8372 // roots we do not trace references, e.g. inside [RawCode], to
8373 // [RawInstructions], since [RawInstructions] doesn't contain any references
8374 // and the serialization code uses an [ImageWriter] for those.
8375 if (object->IsHeapObject() && object->IsInstructions()) {
8376 UnexpectedObject(object,
8377 "Instructions should only be reachable from Code");
8378 }
8379
8380 heap_->SetObjectId(object, kUnallocatedReference);
8381 ASSERT(IsReachableReference(heap_->GetObjectId(object)));
8382 stack_.Add({object, cid_override});
8383 if (!(is_code && Code::IsDiscarded(Code::RawCast(object)))) {
8384 num_written_objects_++;
8385 }
8386#if defined(SNAPSHOT_BACKTRACE)
8387 parent_pairs_.Add(&Object::Handle(zone_, object));
8388 parent_pairs_.Add(&Object::Handle(zone_, current_parent_));
8389#endif
8390 }
8391}
void UnexpectedObject(ObjectPtr object, const char *message)

◆ PushFromTo()

template<typename T , typename... P>
void dart::Serializer::PushFromTo ( T  obj,
P &&...  args 
)
inline

Definition at line 494 of file app_snapshot.cc.

494 {
495 auto* from = obj->untag()->from();
496 auto* to = obj->untag()->to_snapshot(kind(), args...);
497 PushRange(obj, from, to);
498 }
DART_NOINLINE void PushRange(ObjectPtr obj, T from, T to)
G_BEGIN_DECLS G_MODULE_EXPORT FlValue * args

◆ PushRange()

template<typename T >
DART_NOINLINE void dart::Serializer::PushRange ( ObjectPtr  obj,
T  from,
T  to 
)
inline

Definition at line 501 of file app_snapshot.cc.

501 {
502 for (auto* p = from; p <= to; p++) {
503 Push(p->Decompress(obj->heap_base()));
504 }
505 }

◆ PushWeak()

void dart::Serializer::PushWeak ( ObjectPtr  object)

Definition at line 8393 of file app_snapshot.cc.

8393 {
8394 // The GC considers immediate objects to always be alive. This doesn't happen
8395 // automatically in the serializer because the serializer does not have
8396 // immediate objects: it handles Smis as ref indices like all other objects.
8397 // This visit causes the serializer to reproduce the GC's semantics for
8398 // weakness, which in particular allows the templates in hash_table.h to work
8399 // with weak arrays because the metadata Smis always survive.
8400 if (!object->IsHeapObject() || vm_) {
8401 Push(object);
8402 }
8403}

◆ RecordDeferredCode()

void dart::Serializer::RecordDeferredCode ( CodePtr  ptr)

Definition at line 7983 of file app_snapshot.cc.

7983 {
7984 const intptr_t unit_id = heap_->GetLoadingUnit(code);
7985 ASSERT(unit_id != WeakTable::kNoValue && unit_id != LoadingUnit::kRootId);
7986 (*loading_units_)[unit_id]->AddDeferredObject(code);
7987}

◆ RefId()

intptr_t dart::Serializer::RefId ( ObjectPtr  object) const

Definition at line 7733 of file app_snapshot.cc.

7733 {
7734 auto const id = UnsafeRefId(object);
7735 if (IsAllocatedReference(id)) {
7736 return id;
7737 }
7740 auto& handle = thread()->ObjectHandle();
7741 handle = object;
7742 FATAL("Reference to unreachable object %s", handle.ToCString());
7743}
#define REUSABLE_OBJECT_HANDLESCOPE(thread)
const uintptr_t id

◆ ReserveHeader()

void dart::Serializer::ReserveHeader ( )
inline

Definition at line 340 of file app_snapshot.cc.

340 {
341 // Make room for recording snapshot buffer size.
343 }
DART_FORCE_INLINE void SetPosition(intptr_t value)
Definition datastream.h:618
static constexpr intptr_t kHeaderSize
Definition snapshot.h:43

◆ Serialize()

ZoneGrowableArray< Object * > * dart::Serializer::Serialize ( SerializationRoots roots)

Definition at line 8525 of file app_snapshot.cc.

8525 {
8526 // While object_currently_writing_ is initialized to the artificial root, we
8527 // set up a scope to ensure proper flushing to the profile.
8528 Serializer::WritingObjectScope scope(
8530 roots->AddBaseObjects(this);
8531
8532 NoSafepointScope no_safepoint;
8533
8534 roots->PushRoots(this);
8535
8536 // Resolving WeakSerializationReferences and WeakProperties may cause new
8537 // objects to be pushed on the stack, and handling the changes to the stack
8538 // may cause the targets of WeakSerializationReferences and keys of
8539 // WeakProperties to become reachable, so we do this as a fixed point
8540 // computation. Note that reachability is computed monotonically (an object
8541 // can change from not reachable to reachable, but never the reverse), which
8542 // is technically a conservative approximation for WSRs, but doing a strict
8543 // analysis that allows non-monotonic reachability may not halt.
8544 //
8545 // To see this, take a WSR whose replacement causes the target of another WSR
8546 // to become reachable, which then causes the target of the first WSR to
8547 // become reachable, but the only way to reach the target is through the
8548 // target of the second WSR, which was only reachable via the replacement
8549 // of the first.
8550 //
8551 // In practice, this case doesn't come up as replacements tend to be either
8552 // null, smis, or singleton objects that do not contain WSRs currently.
8553 while (stack_.length() > 0) {
8554 // Strong references.
8555 while (stack_.length() > 0) {
8556 StackEntry entry = stack_.RemoveLast();
8557 Trace(entry.obj, entry.cid_override);
8558 }
8559
8560 // Ephemeron references.
8561#if defined(DART_PRECOMPILER)
8562 if (auto const cluster = CID_CLUSTER(WeakSerializationReference)) {
8563 cluster->RetraceEphemerons(this);
8564 }
8565#endif
8566 if (auto const cluster = CID_CLUSTER(WeakProperty)) {
8567 cluster->RetraceEphemerons(this);
8568 }
8569 }
8570
8571#if defined(DART_PRECOMPILER)
8572 auto const wsr_cluster = CID_CLUSTER(WeakSerializationReference);
8573 if (wsr_cluster != nullptr) {
8574 // Now that we have computed the reachability fixpoint, we remove the
8575 // count of now-reachable WSRs as they are not actually serialized.
8576 num_written_objects_ -= wsr_cluster->Count(this);
8577 // We don't need to write this cluster, so remove it from consideration.
8578 clusters_by_cid_[kWeakSerializationReferenceCid] = nullptr;
8579 }
8580 ASSERT(clusters_by_cid_[kWeakSerializationReferenceCid] == nullptr);
8581#endif
8582
8583 code_cluster_ = CID_CLUSTER(Code);
8584
8585 GrowableArray<SerializationCluster*> clusters;
8586 // The order that PostLoad runs matters for some classes because of
8587 // assumptions during canonicalization, read filling, or post-load filling of
8588 // some classes about what has already been read and/or canonicalized.
8589 // Explicitly add these clusters first, then add the rest ordered by class id.
8590#define ADD_CANONICAL_NEXT(cid) \
8591 if (auto const cluster = canonical_clusters_by_cid_[cid]) { \
8592 clusters.Add(cluster); \
8593 canonical_clusters_by_cid_[cid] = nullptr; \
8594 }
8595#define ADD_NON_CANONICAL_NEXT(cid) \
8596 if (auto const cluster = clusters_by_cid_[cid]) { \
8597 clusters.Add(cluster); \
8598 clusters_by_cid_[cid] = nullptr; \
8599 }
8600 ADD_CANONICAL_NEXT(kOneByteStringCid)
8601 ADD_CANONICAL_NEXT(kTwoByteStringCid)
8602 ADD_CANONICAL_NEXT(kStringCid)
8603 ADD_CANONICAL_NEXT(kMintCid)
8604 ADD_CANONICAL_NEXT(kDoubleCid)
8605 ADD_CANONICAL_NEXT(kTypeParameterCid)
8606 ADD_CANONICAL_NEXT(kTypeCid)
8607 ADD_CANONICAL_NEXT(kTypeArgumentsCid)
8608 // Code cluster should be deserialized before Function as
8609 // FunctionDeserializationCluster::ReadFill uses instructions table
8610 // which is filled in CodeDeserializationCluster::ReadFill.
8611 // Code cluster should also precede ObjectPool as its ReadFill uses
8612 // entry points of stubs.
8613 ADD_NON_CANONICAL_NEXT(kCodeCid)
8614 // The function cluster should be deserialized before any closures, as
8615 // PostLoad for closures caches the entry point found in the function.
8616 ADD_NON_CANONICAL_NEXT(kFunctionCid)
8617 ADD_CANONICAL_NEXT(kClosureCid)
8618#undef ADD_CANONICAL_NEXT
8619#undef ADD_NON_CANONICAL_NEXT
8620 const intptr_t out_of_order_clusters = clusters.length();
8621 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8622 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8623 clusters.Add(cluster);
8624 }
8625 }
8626 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8627 if (auto const cluster = clusters_by_cid_[cid]) {
8628 clusters.Add(clusters_by_cid_[cid]);
8629 }
8630 }
8631 // Put back any taken out temporarily to avoid re-adding them during the loop.
8632 for (intptr_t i = 0; i < out_of_order_clusters; i++) {
8633 const auto& cluster = clusters.At(i);
8634 const intptr_t cid = cluster->cid();
8635 auto const cid_clusters =
8636 cluster->is_canonical() ? canonical_clusters_by_cid_ : clusters_by_cid_;
8637 ASSERT(cid_clusters[cid] == nullptr);
8638 cid_clusters[cid] = cluster;
8639 }
8640
8641 PrepareInstructions(roots->canonicalized_stack_map_entries());
8642
8643 intptr_t num_objects = num_base_objects_ + num_written_objects_;
8644#if defined(ARCH_IS_64_BIT)
8645 if (!Utils::IsInt(32, num_objects)) {
8646 FATAL("Ref overflow");
8647 }
8648#endif
8649
8650 WriteUnsigned(num_base_objects_);
8651 WriteUnsigned(num_objects);
8652 WriteUnsigned(clusters.length());
8653 ASSERT((instructions_table_len_ == 0) || FLAG_precompiled_mode);
8654 WriteUnsigned(instructions_table_len_);
8655 WriteUnsigned(instructions_table_rodata_offset_);
8656
8657 for (SerializationCluster* cluster : clusters) {
8658 cluster->WriteAndMeasureAlloc(this);
8659 bytes_heap_allocated_ += cluster->target_memory_size();
8660#if defined(DEBUG)
8661 Write<int32_t>(next_ref_index_);
8662#endif
8663 }
8664
8665 // We should have assigned a ref to every object we pushed.
8666 ASSERT((next_ref_index_ - 1) == num_objects);
8667 // And recorded them all in [objects_].
8668 ASSERT(objects_->length() == num_objects);
8669
8670#if defined(DART_PRECOMPILER)
8671 if (profile_writer_ != nullptr && wsr_cluster != nullptr) {
8672 // Post-WriteAlloc, we eagerly create artificial nodes for any unreachable
8673 // targets in reachable WSRs if writing a v8 snapshot profile, since they
8674 // will be used in AttributeReference().
8675 //
8676 // Unreachable WSRs may also need artificial nodes, as they may be members
8677 // of other unreachable objects that have artificial nodes in the profile,
8678 // but they are instead lazily handled in CreateArtificialNodeIfNeeded().
8679 wsr_cluster->CreateArtificialTargetNodesIfNeeded(this);
8680 }
8681#endif
8682
8683 for (SerializationCluster* cluster : clusters) {
8684 cluster->WriteAndMeasureFill(this);
8685#if defined(DEBUG)
8686 Write<int32_t>(kSectionMarker);
8687#endif
8688 }
8689
8690 roots->WriteRoots(this);
8691
8692#if defined(DEBUG)
8693 Write<int32_t>(kSectionMarker);
8694#endif
8695
8697
8699
8700 return objects_;
8701}
#define CID_CLUSTER(Type)
#define ADD_CANONICAL_NEXT(cid)
#define ADD_NON_CANONICAL_NEXT(cid)
intptr_t length() const
void ResetObjectIdTable()
Definition heap.cc:888
Heap * heap() const
void Trace(ObjectPtr object, intptr_t cid_override)
void WriteUnsigned(intptr_t value)
void PrepareInstructions(const CompressedStackMaps &canonical_smap)
static bool IsInt(intptr_t N, T value)
Definition utils.h:298

◆ set_current_loading_unit_id()

void dart::Serializer::set_current_loading_unit_id ( intptr_t  id)
inline

Definition at line 552 of file app_snapshot.cc.

552 {
553 current_loading_unit_id_ = id;
554 }

◆ set_loading_units()

void dart::Serializer::set_loading_units ( GrowableArray< LoadingUnitSerializationData * > *  units)
inline

Definition at line 548 of file app_snapshot.cc.

548 {
549 loading_units_ = units;
550 }

◆ stream()

NonStreamingWriteStream * dart::Serializer::stream ( )
inline

Definition at line 357 of file app_snapshot.cc.

357{ return stream_; }

◆ Trace()

void dart::Serializer::Trace ( ObjectPtr  object,
intptr_t  cid_override 
)

Definition at line 8405 of file app_snapshot.cc.

8405 {
8406 intptr_t cid;
8407 bool is_canonical;
8408 if (!object->IsHeapObject()) {
8409 // Smis are merged into the Mint cluster because Smis for the writer might
8410 // become Mints for the reader and vice versa.
8411 cid = kMintCid;
8412 is_canonical = true;
8413 } else {
8414 cid = object->GetClassId();
8415 is_canonical = object->untag()->IsCanonical();
8416 }
8417 if (cid_override != kIllegalCid) {
8418 cid = cid_override;
8419 } else if (IsStringClassId(cid)) {
8420 cid = kStringCid;
8421 }
8422
8423 SerializationCluster** cluster_ref =
8424 is_canonical ? &canonical_clusters_by_cid_[cid] : &clusters_by_cid_[cid];
8425 if (*cluster_ref == nullptr) {
8426 *cluster_ref = NewClusterForClass(cid, is_canonical);
8427 if (*cluster_ref == nullptr) {
8428 UnexpectedObject(object, "No serialization cluster defined");
8429 }
8430 }
8431 SerializationCluster* cluster = *cluster_ref;
8432 ASSERT(cluster != nullptr);
8433 if (cluster->is_canonical() != is_canonical) {
8434 FATAL("cluster for %s (cid %" Pd ") %s as canonical, but %s",
8435 cluster->name(), cid,
8436 cluster->is_canonical() ? "marked" : "not marked",
8437 is_canonical ? "should be" : "should not be");
8438 }
8439
8440#if defined(SNAPSHOT_BACKTRACE)
8441 current_parent_ = object;
8442#endif
8443
8444 cluster->Trace(this, object);
8445
8446#if defined(SNAPSHOT_BACKTRACE)
8447 current_parent_ = Object::null();
8448#endif
8449}
SerializationCluster * NewClusterForClass(intptr_t cid, bool is_canonical)
@ kIllegalCid
Definition class_id.h:214
bool IsStringClassId(intptr_t index)
Definition class_id.h:350

◆ TraceDataOffset()

void dart::Serializer::TraceDataOffset ( uint32_t  offset)

Definition at line 8334 of file app_snapshot.cc.

8334 {
8335 if (profile_writer_ == nullptr) return;
8336 // ROData cannot be roots.
8337 ASSERT(object_currently_writing_.id_ !=
8339 auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8340 // TODO(sjindel): Give this edge a more appropriate type than element
8341 // (internal, maybe?).
8342 profile_writer_->AttributeReferenceTo(
8343 object_currently_writing_.id_,
8344 V8SnapshotProfileWriter::Reference::Element(0), {offset_space, offset});
8345}

◆ UnexpectedObject()

void dart::Serializer::UnexpectedObject ( ObjectPtr  object,
const char *  message 
)

Definition at line 8451 of file app_snapshot.cc.

8451 {
8452 // Exit the no safepoint scope so we can allocate while printing.
8453 while (thread()->no_safepoint_scope_depth() > 0) {
8455 }
8456 Object& object = Object::Handle(raw_object);
8457 OS::PrintErr("Unexpected object (%s, %s): 0x%" Px " %s\n", message,
8458 Snapshot::KindToCString(kind_), static_cast<uword>(object.ptr()),
8459 object.ToCString());
8460#if defined(SNAPSHOT_BACKTRACE)
8461 while (!object.IsNull()) {
8462 object = ParentOf(object);
8463 OS::PrintErr("referenced by 0x%" Px " %s\n",
8464 static_cast<uword>(object.ptr()), object.ToCString());
8465 }
8466#endif
8467 OS::Abort();
8468}
static DART_NORETURN void Abort()
static const char * KindToCString(Kind kind)
Definition snapshot.cc:12
void DecrementNoSafepointScopeDepth()
Definition thread.h:720
Win32Message message
DART_EXPORT bool IsNull(Dart_Handle object)
uintptr_t uword
Definition globals.h:501
#define Px
Definition globals.h:410

◆ UnsafeRefId()

intptr_t dart::Serializer::UnsafeRefId ( ObjectPtr  object) const

Definition at line 7745 of file app_snapshot.cc.

7745 {
7746 // The object id weak table holds image offsets for Instructions instead
7747 // of ref indices.
7748 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7749 if (!Snapshot::IncludesCode(kind_) &&
7750 object->GetClassIdMayBeSmi() == kCodeCid) {
7751 return RefId(Object::null());
7752 }
7753 auto id = heap_->GetObjectId(object);
7754 if (id != kUnallocatedReference) {
7755 return id;
7756 }
7757 // This is the only case where we may still see unallocated references after
7758 // WriteAlloc is finished.
7759 if (object->IsWeakSerializationReference()) {
7760 // Lazily set the object ID of the WSR to the object which will replace
7761 // it in the snapshot.
7762 auto const wsr = static_cast<WeakSerializationReferencePtr>(object);
7763 // Either the target or the replacement must be allocated, since the
7764 // WSR is reachable.
7765 id = HasRef(wsr->untag()->target()) ? RefId(wsr->untag()->target())
7766 : RefId(wsr->untag()->replacement());
7767 heap_->SetObjectId(wsr, id);
7768 return id;
7769 }
7771 auto& handle = thread()->ObjectHandle();
7772 handle = object;
7773 FATAL("Reference for object %s is unallocated", handle.ToCString());
7774}
intptr_t RefId(ObjectPtr object) const
raw_obj untag() -> num_entries()) VARIABLE_COMPRESSED_VISITOR(Array, Smi::Value(raw_obj->untag() ->length())) VARIABLE_COMPRESSED_VISITOR(TypedData, TypedData::ElementSizeInBytes(raw_obj->GetClassId()) *Smi::Value(raw_obj->untag() ->length())) VARIABLE_COMPRESSED_VISITOR(Record, RecordShape(raw_obj->untag() ->shape()).num_fields()) VARIABLE_NULL_VISITOR(CompressedStackMaps, CompressedStackMaps::PayloadSizeOf(raw_obj)) VARIABLE_NULL_VISITOR(OneByteString, Smi::Value(raw_obj->untag() ->length())) VARIABLE_NULL_VISITOR(TwoByteString, Smi::Value(raw_obj->untag() ->length())) intptr_t UntaggedField::VisitFieldPointers(FieldPtr raw_obj, ObjectPointerVisitor *visitor)

◆ Write()

template<typename T >
void dart::Serializer::Write ( T  value)
inline

Definition at line 410 of file app_snapshot.cc.

410 {
411 BaseWriteStream::Raw<sizeof(T), T>::Write(stream_, value);
412 }
void Write(T value)
#define T

◆ WriteBytes()

void dart::Serializer::WriteBytes ( const void *  addr,
intptr_t  len 
)
inline

Definition at line 421 of file app_snapshot.cc.

421 {
422 stream_->WriteBytes(addr, len);
423 }
void WriteBytes(const void *addr, intptr_t len)
Definition datastream.h:424

◆ WriteCid()

void dart::Serializer::WriteCid ( intptr_t  cid)
inline

Definition at line 509 of file app_snapshot.cc.

509 {
511 Write<int32_t>(cid);
512 }
#define COMPILE_ASSERT(expr)
Definition assert.h:339

◆ WriteDispatchTable()

void dart::Serializer::WriteDispatchTable ( const Array entries)

Definition at line 8727 of file app_snapshot.cc.

8727 {
8728#if defined(DART_PRECOMPILER)
8729 if (kind() != Snapshot::kFullAOT) return;
8730
8731 // Create an artificial node to which the bytes should be attributed. We
8732 // don't attribute them to entries.ptr(), as we don't want to attribute the
8733 // bytes for printing out a length of 0 to Object::null() when the dispatch
8734 // table is empty.
8735 const intptr_t profile_ref = AssignArtificialRef();
8736 const auto& dispatch_table_profile_id = GetProfileId(profile_ref);
8737 if (profile_writer_ != nullptr) {
8738 profile_writer_->SetObjectTypeAndName(dispatch_table_profile_id,
8739 "DispatchTable", "dispatch_table");
8740 profile_writer_->AddRoot(dispatch_table_profile_id);
8741 }
8742 WritingObjectScope scope(this, dispatch_table_profile_id);
8743 if (profile_writer_ != nullptr) {
8744 // We'll write the Array object as a property of the artificial dispatch
8745 // table node, so Code objects otherwise unreferenced will have it as an
8746 // ancestor.
8747 CreateArtificialNodeIfNeeded(entries.ptr());
8748 AttributePropertyRef(entries.ptr(), "<code entries>");
8749 }
8750
8751 const intptr_t bytes_before = bytes_written();
8752 const intptr_t table_length = entries.IsNull() ? 0 : entries.Length();
8753
8754 ASSERT(table_length <= compiler::target::kWordMax);
8755 WriteUnsigned(table_length);
8756 if (table_length == 0) {
8757 dispatch_table_size_ = bytes_written() - bytes_before;
8758 return;
8759 }
8760
8761 ASSERT(code_cluster_ != nullptr);
8762 // If instructions can be deduped, the code order table in the deserializer
8763 // may not contain all Code objects in the snapshot. Thus, we write the ID
8764 // for the first code object here so we can retrieve it during deserialization
8765 // and calculate the snapshot ID for Code objects from the cluster index.
8766 //
8767 // We could just use the snapshot reference ID of the Code object itself
8768 // instead of the cluster index and avoid this. However, since entries are
8769 // SLEB128 encoded, the size delta for serializing the first ID once is less
8770 // than the size delta of serializing the ID plus kIndexBase for each entry,
8771 // even when Code objects are allocated before all other non-base objects.
8772 //
8773 // We could also map Code objects to the first Code object in the cluster with
8774 // the same entry point and serialize that ID instead, but that loses
8775 // information about which Code object was originally referenced.
8776 WriteUnsigned(code_cluster_->first_ref());
8777
8778 CodePtr previous_code = nullptr;
8779 CodePtr recent[kDispatchTableRecentCount] = {nullptr};
8780 intptr_t recent_index = 0;
8781 intptr_t repeat_count = 0;
8782 for (intptr_t i = 0; i < table_length; i++) {
8783 auto const code = Code::RawCast(entries.At(i));
8784 // First, see if we're repeating the previous entry (invalid, recent, or
8785 // encoded).
8786 if (code == previous_code) {
8787 if (++repeat_count == kDispatchTableMaxRepeat) {
8788 Write(kDispatchTableMaxRepeat);
8789 repeat_count = 0;
8790 }
8791 continue;
8792 }
8793 // Emit any outstanding repeat count before handling the new code value.
8794 if (repeat_count > 0) {
8795 Write(repeat_count);
8796 repeat_count = 0;
8797 }
8798 previous_code = code;
8799 // The invalid entry can be repeated, but is never part of the recent list
 8800 // since it already encodes to a single byte.
8801 if (code == Code::null()) {
8802 Write(0);
8803 continue;
8804 }
8805 // Check against the recent entries, and write an encoded reference to
8806 // the recent entry if found.
8807 intptr_t found_index = 0;
8808 for (; found_index < kDispatchTableRecentCount; found_index++) {
8809 if (recent[found_index] == code) break;
8810 }
8811 if (found_index < kDispatchTableRecentCount) {
8812 Write(~found_index);
8813 continue;
8814 }
8815 // We have a non-repeated, non-recent entry, so encode the reference ID of
8816 // the code object and emit that.
8817 auto const code_index = GetCodeIndex(code);
 8818 // Use the index in the code cluster, not in the snapshot.
8819 auto const encoded = kDispatchTableIndexBase + code_index;
8820 ASSERT(encoded <= compiler::target::kWordMax);
8821 Write(encoded);
8822 recent[recent_index] = code;
8823 recent_index = (recent_index + 1) & kDispatchTableRecentMask;
8824 }
8825 if (repeat_count > 0) {
8826 Write(repeat_count);
8827 }
8828 dispatch_table_size_ = bytes_written() - bytes_before;
8829#endif // defined(DART_PRECOMPILER)
8830}
intptr_t GetCodeIndex(CodePtr code)
void AttributePropertyRef(ObjectPtr object, const char *property)

◆ WriteElementRef()

void dart::Serializer::WriteElementRef ( ObjectPtr  object,
intptr_t  index 
)
inline

Definition at line 449 of file app_snapshot.cc.

449 {
450 AttributeElementRef(object, index);
451 WriteRefId(RefId(object));
452 }
void WriteRefId(intptr_t value)
void AttributeElementRef(ObjectPtr object, intptr_t index)

◆ WriteFromTo()

template<typename T , typename... P>
void dart::Serializer::WriteFromTo ( T  obj,
P &&...  args 
)
inline

Definition at line 478 of file app_snapshot.cc.

478 {
479 auto* from = obj->untag()->from();
480 auto* to = obj->untag()->to_snapshot(kind(), args...);
481 WriteRange(obj, from, to);
482 }
DART_NOINLINE void WriteRange(ObjectPtr obj, T from, T to)

◆ WriteInstructions()

void dart::Serializer::WriteInstructions ( InstructionsPtr  instr,
uint32_t  unchecked_offset,
CodePtr  code,
bool  deferred 
)

Definition at line 8294 of file app_snapshot.cc.

8297 {
8298 ASSERT(code != Code::null());
8299
8300 ASSERT(InCurrentLoadingUnitOrRoot(code) != deferred);
8301 if (deferred) {
8302 return;
8303 }
8304
8305 const intptr_t offset = image_writer_->GetTextOffsetFor(instr, code);
8306#if defined(DART_PRECOMPILER)
8307 if (profile_writer_ != nullptr) {
 8308 ASSERT(object_currently_writing_.id_ !=
 8309 V8SnapshotProfileWriter::kArtificialRootId);
 8310 const auto offset_space = vm_ ? IdSpace::kVmText : IdSpace::kIsolateText;
 8311 profile_writer_->AttributeReferenceTo(
 8312 object_currently_writing_.id_,
 8313 V8SnapshotProfileWriter::Reference::Property("<instructions>"),
 8314 {offset_space, offset});
8315 }
8316
8317 if (Code::IsDiscarded(code)) {
8318 // Discarded Code objects are not supported in the vm isolate snapshot.
8319 ASSERT(!vm_);
8320 return;
8321 }
8322
8323 if (FLAG_precompiled_mode) {
8324 const uint32_t payload_info =
8325 (unchecked_offset << 1) | (Code::HasMonomorphicEntry(code) ? 0x1 : 0x0);
8326 WriteUnsigned(payload_info);
8327 return;
8328 }
8329#endif
8330 Write<uint32_t>(offset);
8331 WriteUnsigned(unchecked_offset);
8332}
bool HasMonomorphicEntry() const
Definition object.h:6812
bool InCurrentLoadingUnitOrRoot(ObjectPtr obj)

◆ WriteOffsetRef()

void dart::Serializer::WriteOffsetRef ( ObjectPtr  object,
intptr_t  offset 
)
inline

Definition at line 464 of file app_snapshot.cc.

464 {
465 intptr_t id = RefId(object);
466 WriteRefId(id);
467 if (profile_writer_ != nullptr) {
468 if (auto const property = offsets_table_->FieldNameForOffset(
469 object_currently_writing_.cid_, offset)) {
470 AttributePropertyRef(object, property);
471 } else {
 472 AttributeElementRef(object, offset);
 473 }
474 }
475 }
const char * FieldNameForOffset(intptr_t cid, intptr_t offset)

◆ WritePropertyRef()

void dart::Serializer::WritePropertyRef ( ObjectPtr  object,
const char *  property 
)
inline

Definition at line 459 of file app_snapshot.cc.

459 {
460 AttributePropertyRef(object, property);
461 WriteRefId(RefId(object));
462 }

◆ WriteRange()

template<typename T >
DART_NOINLINE void dart::Serializer::WriteRange ( ObjectPtr  obj,
T  from,
T  to 
)
inline

Definition at line 485 of file app_snapshot.cc.

485 {
486 for (auto* p = from; p <= to; p++) {
 487 WriteOffsetRef(
 488 p->Decompress(obj->heap_base()),
489 reinterpret_cast<uword>(p) - reinterpret_cast<uword>(obj->untag()));
490 }
491 }
void WriteOffsetRef(ObjectPtr object, intptr_t offset)

◆ WriteRefId()

void dart::Serializer::WriteRefId ( intptr_t  value)
inline

Definition at line 413 of file app_snapshot.cc.

413{ stream_->WriteRefId(value); }
void WriteRefId(intptr_t value)
Definition datastream.h:409

◆ WriteRootRef()

void dart::Serializer::WriteRootRef ( ObjectPtr  object,
const char *  name = nullptr 
)
inline

Definition at line 431 of file app_snapshot.cc.

431 {
432 intptr_t id = RefId(object);
433 WriteRefId(id);
434 if (profile_writer_ != nullptr) {
435 profile_writer_->AddRoot(GetProfileId(object), name);
436 }
437 }

◆ WriteTokenPosition()

void dart::Serializer::WriteTokenPosition ( TokenPosition  pos)
inline

Definition at line 507 of file app_snapshot.cc.

507{ Write(pos.Serialize()); }
SkPoint pos

◆ WriteUnsigned()

void dart::Serializer::WriteUnsigned ( intptr_t  value)
inline

Definition at line 414 of file app_snapshot.cc.

414{ stream_->WriteUnsigned(value); }
void WriteUnsigned(T value)
Definition datastream.h:400

◆ WriteUnsigned64()

void dart::Serializer::WriteUnsigned64 ( uint64_t  value)
inline

Definition at line 415 of file app_snapshot.cc.

415{ stream_->WriteUnsigned(value); }

◆ WriteVersionAndFeatures()

void dart::Serializer::WriteVersionAndFeatures ( bool  is_vm_snapshot)

Definition at line 8490 of file app_snapshot.cc.

8490 {
8491 const char* expected_version = Version::SnapshotString();
8492 ASSERT(expected_version != nullptr);
8493 const intptr_t version_len = strlen(expected_version);
8494 WriteBytes(reinterpret_cast<const uint8_t*>(expected_version), version_len);
8495
8496 char* expected_features =
8497 Dart::FeaturesString(IsolateGroup::Current(), is_vm_snapshot, kind_);
8498 ASSERT(expected_features != nullptr);
8499 const intptr_t features_len = strlen(expected_features);
8500 WriteBytes(reinterpret_cast<const uint8_t*>(expected_features),
8501 features_len + 1);
8502 free(expected_features);
8503}
static char * FeaturesString(IsolateGroup *isolate_group, bool is_vm_snapshot, Snapshot::Kind kind)
Definition dart.cc:998
static IsolateGroup * Current()
Definition isolate.h:534
void WriteBytes(const void *addr, intptr_t len)
static const char * SnapshotString()
Definition version_in.cc:15

◆ WriteWordWith32BitWrites()

void dart::Serializer::WriteWordWith32BitWrites ( uword  value)
inline

Definition at line 417 of file app_snapshot.cc.

417 {
418 stream_->WriteWordWith32BitWrites(value);
419 }
void WriteWordWith32BitWrites(uword value)
Definition datastream.h:389

◆ zone()

Zone * dart::Serializer::zone ( ) const
inline

Definition at line 529 of file app_snapshot.cc.

529{ return zone_; }

The documentation for this class was generated from the following file: