app_snapshot.cc
1// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include <memory>
6#include <utility>
7
8#include "vm/app_snapshot.h"
9
10#include "platform/assert.h"
11#include "vm/bootstrap.h"
12#include "vm/bss_relocs.h"
13#include "vm/canonical_tables.h"
14#include "vm/class_id.h"
15#include "vm/code_observers.h"
18#include "vm/dart.h"
19#include "vm/dart_entry.h"
20#include "vm/dispatch_table.h"
21#include "vm/flag_list.h"
22#include "vm/growable_array.h"
23#include "vm/heap/heap.h"
24#include "vm/image_snapshot.h"
25#include "vm/native_entry.h"
26#include "vm/object.h"
27#include "vm/object_store.h"
28#include "vm/program_visitor.h"
30#include "vm/stub_code.h"
31#include "vm/symbols.h"
32#include "vm/timeline.h"
34#include "vm/version.h"
35#include "vm/zone_text_buffer.h"
36
37#if !defined(DART_PRECOMPILED_RUNTIME)
41#endif // !defined(DART_PRECOMPILED_RUNTIME)
42
43namespace dart {
44
45#if !defined(DART_PRECOMPILED_RUNTIME)
46DEFINE_FLAG(bool,
47 print_cluster_information,
48 false,
49 "Print information about clusters written to snapshot");
50#endif
51
52#if defined(DART_PRECOMPILER)
53DEFINE_FLAG(charp,
54 write_v8_snapshot_profile_to,
55 nullptr,
56 "Write a snapshot profile in V8 format to a file.");
57DEFINE_FLAG(bool,
58 print_array_optimization_candidates,
59 false,
60 "Print information about how many array are candidates for Smi and "
61 "ROData optimizations.");
62#endif // defined(DART_PRECOMPILER)
63
64// Forward declarations.
65class Serializer;
66class Deserializer;
67
68namespace {
69
70// Serialized clusters are identified by their CID. So to insert custom clusters
71// we need to assign them a CID that is otherwise never serialized.
72static constexpr intptr_t kDeltaEncodedTypedDataCid = kNativePointer;
73
74// Storage traits for HashTable which allow creating hash tables backed by
75// zone memory. Used to compute cluster order for canonical clusters.
76struct GrowableArrayStorageTraits {
77 class Array : public ZoneAllocated {
78 public:
79 explicit Array(Zone* zone, intptr_t length)
80 : length_(length), array_(zone->Alloc<ObjectPtr>(length)) {}
81
82 intptr_t Length() const { return length_; }
83 void SetAt(intptr_t index, const Object& value) const {
84 array_[index] = value.ptr();
85 }
86 ObjectPtr At(intptr_t index) const { return array_[index]; }
87
88 private:
89 intptr_t length_ = 0;
90 ObjectPtr* array_ = nullptr;
92 };
93
94 using ArrayPtr = Array*;
95 class ArrayHandle : public ZoneAllocated {
96 public:
97 explicit ArrayHandle(ArrayPtr ptr) : ptr_(ptr) {}
98 ArrayHandle() {}
99
100 void SetFrom(const ArrayHandle& other) { ptr_ = other.ptr_; }
101 void Clear() { ptr_ = nullptr; }
102 bool IsNull() const { return ptr_ == nullptr; }
103 ArrayPtr ptr() { return ptr_; }
104
105 intptr_t Length() const { return ptr_->Length(); }
106 void SetAt(intptr_t index, const Object& value) const {
107 ptr_->SetAt(index, value);
108 }
109 ObjectPtr At(intptr_t index) const { return ptr_->At(index); }
110
111 private:
112 ArrayPtr ptr_ = nullptr;
113 DISALLOW_COPY_AND_ASSIGN(ArrayHandle);
114 };
115
116 static ArrayHandle& PtrToHandle(ArrayPtr ptr) {
117 return *new ArrayHandle(ptr);
118 }
119
120 static void SetHandle(ArrayHandle& dst, const ArrayHandle& src) { // NOLINT
121 dst.SetFrom(src);
122 }
123
124 static void ClearHandle(ArrayHandle& dst) { // NOLINT
125 dst.Clear();
126 }
127
128 static ArrayPtr New(Zone* zone, intptr_t length, Heap::Space space) {
129 return new (zone) Array(zone, length);
130 }
131
132 static bool IsImmutable(const ArrayHandle& handle) { return false; }
133
134 static ObjectPtr At(ArrayHandle* array, intptr_t index) {
135 return array->At(index);
136 }
137
138 static void SetAt(ArrayHandle* array, intptr_t index, const Object& value) {
139 array->SetAt(index, value);
140 }
141};
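// Note (illustrative, not part of the original source): these traits are the
// storage backend plugged into HashTable when the serializer needs a
// canonical-set table whose backing array lives in zone memory instead of the
// Dart heap; see the ZoneCanonicalSet alias and the
// HashTables::New<ZoneCanonicalSet>(...) call in the canonical-set
// serialization cluster further down in this file.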
142} // namespace
143
144#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
145
146static void RelocateCodeObjects(
147 bool is_vm,
148 GrowableArray<CodePtr>* code_objects,
149 GrowableArray<ImageWriterCommand>* image_writer_commands) {
150 auto thread = Thread::Current();
151 auto isolate_group =
152 is_vm ? Dart::vm_isolate()->group() : thread->isolate_group();
153
154 WritableCodePages writable_code_pages(thread, isolate_group);
155 CodeRelocator::Relocate(thread, code_objects, image_writer_commands, is_vm);
156}
157
158#endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
159
160class SerializationCluster : public ZoneAllocated {
161 public:
162 static constexpr intptr_t kSizeVaries = -1;
163 explicit SerializationCluster(const char* name,
164 intptr_t cid,
165 intptr_t target_instance_size = kSizeVaries,
166 bool is_canonical = false)
167 : name_(name),
168 cid_(cid),
169 target_instance_size_(target_instance_size),
170 is_canonical_(is_canonical),
171 is_immutable_(Object::ShouldHaveImmutabilityBitSet(cid)) {
172 ASSERT(target_instance_size == kSizeVaries || target_instance_size >= 0);
173 }
175
176 // Add [object] to the cluster and push its outgoing references.
177 virtual void Trace(Serializer* serializer, ObjectPtr object) = 0;
178
179 // Write the cluster type and information needed to allocate the cluster's
180 // objects. For fixed-size objects, this is just the object count. For
181 // variable-size objects, it is the object count and the length of each object.
182 virtual void WriteAlloc(Serializer* serializer) = 0;
183
184 // Write the byte and reference data of the cluster's objects.
185 virtual void WriteFill(Serializer* serializer) = 0;
186
187 void WriteAndMeasureAlloc(Serializer* serializer);
188 void WriteAndMeasureFill(Serializer* serializer);
189
190 const char* name() const { return name_; }
191 intptr_t cid() const { return cid_; }
192 bool is_canonical() const { return is_canonical_; }
193 bool is_immutable() const { return is_immutable_; }
194 intptr_t size() const { return size_; }
195 intptr_t num_objects() const { return num_objects_; }
196
197 // Returns the number of bytes needed for the deserialized objects in
198 // this cluster. Printed in --print_snapshot_sizes_verbose statistics.
199 //
200 // To calculate this size, clusters of fixed-size objects can pass the
201 // instance size as the [target_instance_size] constructor parameter.
202 // Otherwise clusters should accumulate [target_memory_size_] in
203 // their [WriteAlloc] methods.
204 intptr_t target_memory_size() const { return target_memory_size_; }
205
206 protected:
207 const char* const name_;
208 const intptr_t cid_;
209 const intptr_t target_instance_size_;
210 const bool is_canonical_;
211 const bool is_immutable_;
212 intptr_t size_ = 0;
213 intptr_t num_objects_ = 0;
214 intptr_t target_memory_size_ = 0;
215};
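// Illustrative sketch (not part of the original source): the write side of a
// snapshot drives each cluster through the phases declared above. The names
// of the local variables below are assumptions for illustration only.
//
//   // Phase 1: discover an object and push its outgoing references.
//   cluster->Trace(serializer, object);
//   // Phase 2: write the allocation info (object count, per-object lengths).
//   cluster->WriteAndMeasureAlloc(serializer);
//   // Phase 3: write the byte and reference data of the cluster's objects.
//   cluster->WriteAndMeasureFill(serializer);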
216
217class DeserializationCluster : public ZoneAllocated {
218 public:
219 explicit DeserializationCluster(const char* name,
220 bool is_canonical = false,
221 bool is_immutable = false)
222 : name_(name),
223 is_canonical_(is_canonical),
224 is_immutable_(is_immutable),
225 start_index_(-1),
226 stop_index_(-1) {}
228
229 // Allocate memory for all objects in the cluster and write their addresses
230 // into the ref array. Do not touch this memory.
231 virtual void ReadAlloc(Deserializer* deserializer) = 0;
232
233 // Initialize the cluster's objects. Do not touch the memory of other objects.
234 virtual void ReadFill(Deserializer* deserializer) = 0;
235
236 // Complete any action that requires the full graph to be deserialized, such
237 // as rehashing.
238 virtual void PostLoad(Deserializer* deserializer, const Array& refs) {
239 // We only need to worry about how canonical values are handled during
240 // deserialization if there may be multiple loading units, which only
241 // happens in the precompiled runtime.
242#if defined(DART_PRECOMPILED_RUNTIME)
243 if (is_canonical()) {
244 FATAL("%s needs canonicalization but doesn't define PostLoad", name());
245 }
246#endif
247 }
248
249 const char* name() const { return name_; }
250 bool is_canonical() const { return is_canonical_; }
251
252 protected:
253 void ReadAllocFixedSize(Deserializer* deserializer, intptr_t instance_size);
254
255 const char* const name_;
256 const bool is_canonical_;
257 const bool is_immutable_;
258 // The range of the ref array that belongs to this cluster.
259 intptr_t start_index_;
260 intptr_t stop_index_;
261};
262
263class SerializationRoots {
264 public:
266 virtual void AddBaseObjects(Serializer* serializer) = 0;
267 virtual void PushRoots(Serializer* serializer) = 0;
268 virtual void WriteRoots(Serializer* serializer) = 0;
269
271};
272
273class DeserializationRoots {
274 public:
276 virtual void AddBaseObjects(Deserializer* deserializer) = 0;
277 virtual void ReadRoots(Deserializer* deserializer) = 0;
278 virtual void PostLoad(Deserializer* deserializer, const Array& refs) = 0;
279};
280
281// Reference value for objects that either are not reachable from the roots or
282// should never have a reference in the snapshot (because they are dropped,
283// for example). Should be the default value for Heap::GetObjectId.
284static constexpr intptr_t kUnreachableReference = 0;
286static constexpr intptr_t kFirstReference = 1;
287
288// Reference value for traced objects that have not been allocated their final
289// reference ID.
290static constexpr intptr_t kUnallocatedReference = -1;
291
292static constexpr bool IsAllocatedReference(intptr_t ref) {
293 return ref > kUnreachableReference;
294}
295
296static constexpr bool IsArtificialReference(intptr_t ref) {
297 return ref < kUnallocatedReference;
298}
299
300static constexpr bool IsReachableReference(intptr_t ref) {
301 return ref == kUnallocatedReference || IsAllocatedReference(ref);
302}
303
304class CodeSerializationCluster;
305
306class Serializer : public ThreadStackResource {
307 public:
311 ImageWriter* image_writer_,
312 bool vm_,
314 ~Serializer();
315
316 void AddBaseObject(ObjectPtr base_object,
317 const char* type = nullptr,
318 const char* name = nullptr);
319
320 intptr_t AssignRef(ObjectPtr object);
321 intptr_t AssignArtificialRef(ObjectPtr object = nullptr);
322
323 intptr_t GetCodeIndex(CodePtr code);
324
325 void Push(ObjectPtr object, intptr_t cid_override = kIllegalCid);
326 void PushWeak(ObjectPtr object);
327
328 void AddUntracedRef() { num_written_objects_++; }
329
330 void Trace(ObjectPtr object, intptr_t cid_override);
331
332 void UnexpectedObject(ObjectPtr object, const char* message);
333#if defined(SNAPSHOT_BACKTRACE)
334 ObjectPtr ParentOf(ObjectPtr object) const;
335 ObjectPtr ParentOf(const Object& object) const;
336#endif
337
338 SerializationCluster* NewClusterForClass(intptr_t cid, bool is_canonical);
339
341 // Make room for recording snapshot buffer size.
343 }
344
346 Snapshot* header = reinterpret_cast<Snapshot*>(stream_->buffer());
347 header->set_magic();
348 header->set_length(stream_->bytes_written());
349 header->set_kind(kind);
350 }
351
352 void WriteVersionAndFeatures(bool is_vm_snapshot);
353
355 void PrintSnapshotSizes();
356
357 NonStreamingWriteStream* stream() { return stream_; }
358 intptr_t bytes_written() { return stream_->bytes_written(); }
359 intptr_t bytes_heap_allocated() { return bytes_heap_allocated_; }
360
361 class WritingObjectScope : ValueObject {
362 public:
364 const char* type,
365 ObjectPtr object,
366 StringPtr name)
368 serializer,
369 ReserveId(serializer,
370 type,
371 object,
372 String::ToCString(serializer->thread(), name)),
373 object) {}
374
376 const char* type,
377 ObjectPtr object,
378 const char* name)
379 : WritingObjectScope(serializer,
380 ReserveId(serializer, type, object, name),
381 object) {}
382
383 WritingObjectScope(Serializer* serializer,
385 ObjectPtr object = nullptr);
386
388 : WritingObjectScope(serializer,
389 serializer->GetProfileId(object),
390 object) {}
391
393
394 private:
395 static V8SnapshotProfileWriter::ObjectId ReserveId(Serializer* serializer,
396 const char* type,
397 ObjectPtr object,
398 const char* name);
399
400 private:
401 Serializer* const serializer_;
402 const ObjectPtr old_object_;
404 const classid_t old_cid_;
405 };
406
407 // Writes raw data to the stream (basic type).
408 // sizeof(T) must be in {1,2,4,8}.
409 template <typename T>
410 void Write(T value) {
411 BaseWriteStream::Raw<sizeof(T), T>::Write(stream_, value);
412 }
413 void WriteRefId(intptr_t value) { stream_->WriteRefId(value); }
414 void WriteUnsigned(intptr_t value) { stream_->WriteUnsigned(value); }
415 void WriteUnsigned64(uint64_t value) { stream_->WriteUnsigned(value); }
416
419 }
420
421 void WriteBytes(const void* addr, intptr_t len) {
422 stream_->WriteBytes(addr, len);
423 }
424 void Align(intptr_t alignment, intptr_t offset = 0) {
425 stream_->Align(alignment, offset);
426 }
427
430
431 void WriteRootRef(ObjectPtr object, const char* name = nullptr) {
432 intptr_t id = RefId(object);
433 WriteRefId(id);
434 if (profile_writer_ != nullptr) {
435 profile_writer_->AddRoot(GetProfileId(object), name);
436 }
437 }
438
439 // Record a reference from the currently written object to the given object
440 // and return reference id for the given object.
441 void AttributeReference(ObjectPtr object,
442 const V8SnapshotProfileWriter::Reference& reference);
443
444 void AttributeElementRef(ObjectPtr object, intptr_t index) {
445 AttributeReference(object,
447 }
448
449 void WriteElementRef(ObjectPtr object, intptr_t index) {
450 AttributeElementRef(object, index);
451 WriteRefId(RefId(object));
452 }
453
454 void AttributePropertyRef(ObjectPtr object, const char* property) {
455 AttributeReference(object,
457 }
458
459 void WritePropertyRef(ObjectPtr object, const char* property) {
460 AttributePropertyRef(object, property);
461 WriteRefId(RefId(object));
462 }
463
464 void WriteOffsetRef(ObjectPtr object, intptr_t offset) {
465 intptr_t id = RefId(object);
466 WriteRefId(id);
467 if (profile_writer_ != nullptr) {
468 if (auto const property = offsets_table_->FieldNameForOffset(
469 object_currently_writing_.cid_, offset)) {
470 AttributePropertyRef(object, property);
471 } else {
473 }
474 }
475 }
476
477 template <typename T, typename... P>
478 void WriteFromTo(T obj, P&&... args) {
479 auto* from = obj->untag()->from();
480 auto* to = obj->untag()->to_snapshot(kind(), args...);
481 WriteRange(obj, from, to);
482 }
483
484 template <typename T>
485 DART_NOINLINE void WriteRange(ObjectPtr obj, T from, T to) {
486 for (auto* p = from; p <= to; p++) {
488 p->Decompress(obj->heap_base()),
489 reinterpret_cast<uword>(p) - reinterpret_cast<uword>(obj->untag()));
490 }
491 }
492
493 template <typename T, typename... P>
494 void PushFromTo(T obj, P&&... args) {
495 auto* from = obj->untag()->from();
496 auto* to = obj->untag()->to_snapshot(kind(), args...);
497 PushRange(obj, from, to);
498 }
499
500 template <typename T>
501 DART_NOINLINE void PushRange(ObjectPtr obj, T from, T to) {
502 for (auto* p = from; p <= to; p++) {
503 Push(p->Decompress(obj->heap_base()));
504 }
505 }
506
508
509 void WriteCid(intptr_t cid) {
511 Write<int32_t>(cid);
512 }
513
514 // Sorts Code objects and reorders instructions before writing snapshot.
515 // Builds binary search table for stack maps.
516 void PrepareInstructions(const CompressedStackMaps& canonical_smap);
517
518 void WriteInstructions(InstructionsPtr instr,
519 uint32_t unchecked_offset,
520 CodePtr code,
521 bool deferred);
522 uint32_t GetDataOffset(ObjectPtr object) const;
523 void TraceDataOffset(uint32_t offset);
524 intptr_t GetDataSize() const;
525
526 void WriteDispatchTable(const Array& entries);
527
528 Heap* heap() const { return heap_; }
529 Zone* zone() const { return zone_; }
530 Snapshot::Kind kind() const { return kind_; }
531 intptr_t next_ref_index() const { return next_ref_index_; }
532
534
535 V8SnapshotProfileWriter* profile_writer() const { return profile_writer_; }
536
537 // If the given [obj] was not included in the snapshot and does not yet
538 // have an artificial node created for it, create an artificial node
539 // in the profile representing this object.
540 // Returns true if [obj] has an artificial profile node associated with it.
542
544 void RecordDeferredCode(CodePtr ptr);
546 return loading_units_;
547 }
549 loading_units_ = units;
550 }
551 intptr_t current_loading_unit_id() const { return current_loading_unit_id_; }
552 void set_current_loading_unit_id(intptr_t id) {
553 current_loading_unit_id_ = id;
554 }
555
556 // Returns the reference ID for the object. Fails for objects that have not
557 // been allocated a reference ID yet, so should be used only after all
558 // WriteAlloc calls.
559 intptr_t RefId(ObjectPtr object) const;
560
561 // Same as RefId, but allows artificial and unreachable references. Still
562 // fails for unallocated references.
563 intptr_t UnsafeRefId(ObjectPtr object) const;
564
565 // Whether the object is reachable.
566 bool IsReachable(ObjectPtr object) const {
567 return IsReachableReference(heap_->GetObjectId(object));
568 }
569 // Whether the object has an allocated reference.
570 bool HasRef(ObjectPtr object) const {
571 return IsAllocatedReference(heap_->GetObjectId(object));
572 }
573 // Whether the object only appears in the V8 snapshot profile.
574 bool HasArtificialRef(ObjectPtr object) const {
575 return IsArtificialReference(heap_->GetObjectId(object));
576 }
577 // Whether a node for the object has already been added to the V8 snapshot
578 // profile.
579 bool HasProfileNode(ObjectPtr object) const {
580 ASSERT(profile_writer_ != nullptr);
581 return profile_writer_->HasId(GetProfileId(object));
582 }
583 bool IsWritten(ObjectPtr object) const {
584 return heap_->GetObjectId(object) > num_base_objects_;
585 }
586
587 private:
588 const char* ReadOnlyObjectType(intptr_t cid);
589 void FlushProfile();
590
591 Heap* heap_;
592 Zone* zone_;
593 Snapshot::Kind kind_;
595 ImageWriter* image_writer_;
596 SerializationCluster** canonical_clusters_by_cid_;
597 SerializationCluster** clusters_by_cid_;
598 CodeSerializationCluster* code_cluster_ = nullptr;
599
600 struct StackEntry {
601 ObjectPtr obj;
602 intptr_t cid_override;
603 };
604 GrowableArray<StackEntry> stack_;
605
606 intptr_t num_cids_;
607 intptr_t num_tlc_cids_;
608 intptr_t num_base_objects_;
609 intptr_t num_written_objects_;
610 intptr_t next_ref_index_;
611
612 intptr_t dispatch_table_size_ = 0;
613 intptr_t bytes_heap_allocated_ = 0;
614 intptr_t instructions_table_len_ = 0;
615 intptr_t instructions_table_rodata_offset_ = 0;
616
617 // True if writing VM snapshot, false for Isolate snapshot.
618 bool vm_;
619
620 V8SnapshotProfileWriter* profile_writer_ = nullptr;
621 struct ProfilingObject {
622 ObjectPtr object_ = nullptr;
623 // Unless within a WritingObjectScope, any bytes written are attributed to
624 // the artificial root.
625 V8SnapshotProfileWriter::ObjectId id_ =
627 intptr_t last_stream_position_ = 0;
628 intptr_t cid_ = -1;
629 } object_currently_writing_;
630 OffsetsTable* offsets_table_ = nullptr;
631
632#if defined(SNAPSHOT_BACKTRACE)
633 ObjectPtr current_parent_;
634 GrowableArray<Object*> parent_pairs_;
635#endif
636
637#if defined(DART_PRECOMPILER)
638 IntMap<intptr_t> deduped_instructions_sources_;
639 IntMap<intptr_t> code_index_;
640#endif
641
642 intptr_t current_loading_unit_id_ = 0;
643 GrowableArray<LoadingUnitSerializationData*>* loading_units_ = nullptr;
644 ZoneGrowableArray<Object*>* objects_ = new ZoneGrowableArray<Object*>();
645
646 DISALLOW_IMPLICIT_CONSTRUCTORS(Serializer);
647};
648
649#define AutoTraceObject(obj) \
650 Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, nullptr)
651
652#define AutoTraceObjectName(obj, str) \
653 Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, str)
654
655#define WriteFieldValue(field, value) s->WritePropertyRef(value, #field);
656
657#define WriteFromTo(obj, ...) s->WriteFromTo(obj, ##__VA_ARGS__);
658
659#define PushFromTo(obj, ...) s->PushFromTo(obj, ##__VA_ARGS__);
660
661#define WriteField(obj, field) s->WritePropertyRef(obj->untag()->field, #field)
662#define WriteCompressedField(obj, name) \
663 s->WritePropertyRef(obj->untag()->name(), #name "_")
664
665class Deserializer : public ThreadStackResource {
666 public:
669 const uint8_t* buffer,
670 intptr_t size,
671 const uint8_t* data_buffer,
672 const uint8_t* instructions_buffer,
673 bool is_non_root_unit,
674 intptr_t offset = 0);
676
677 // Verifies the image alignment.
678 //
679 // Returns ApiError::null() on success and an ApiError with an appropriate
680 // message otherwise.
681 ApiErrorPtr VerifyImageAlignment();
682
683 ObjectPtr Allocate(intptr_t size);
685 intptr_t cid,
686 intptr_t size,
687 bool is_canonical = false) {
688 InitializeHeader(raw, cid, size, is_canonical,
690 }
691 static void InitializeHeader(ObjectPtr raw,
692 intptr_t cid,
693 intptr_t size,
694 bool is_canonical,
695 bool is_immutable);
696
697 // Reads raw data (for basic types).
698 // sizeof(T) must be in {1,2,4,8}.
699 template <typename T>
700 T Read() {
701 return ReadStream::Raw<sizeof(T), T>::Read(&stream_);
702 }
703 intptr_t ReadRefId() { return stream_.ReadRefId(); }
704 intptr_t ReadUnsigned() { return stream_.ReadUnsigned(); }
705 uint64_t ReadUnsigned64() { return stream_.ReadUnsigned<uint64_t>(); }
706 void ReadBytes(uint8_t* addr, intptr_t len) { stream_.ReadBytes(addr, len); }
707
709
710 intptr_t position() const { return stream_.Position(); }
711 void set_position(intptr_t p) { stream_.SetPosition(p); }
712 const uint8_t* AddressOfCurrentPosition() const {
713 return stream_.AddressOfCurrentPosition();
714 }
715
716 void Advance(intptr_t value) { stream_.Advance(value); }
717 void Align(intptr_t alignment, intptr_t offset = 0) {
718 stream_.Align(alignment, offset);
719 }
720
721 void AddBaseObject(ObjectPtr base_object) { AssignRef(base_object); }
722
723 void AssignRef(ObjectPtr object) {
724 ASSERT(next_ref_index_ <= num_objects_);
725 refs_->untag()->data()[next_ref_index_] = object;
726 next_ref_index_++;
727 }
728
729 ObjectPtr Ref(intptr_t index) const {
730 ASSERT(index > 0);
731 ASSERT(index <= num_objects_);
732 return refs_->untag()->element(index);
733 }
734
735 CodePtr GetCodeByIndex(intptr_t code_index, uword* entry_point) const;
736 uword GetEntryPointByCodeIndex(intptr_t code_index) const;
737
738 // If |code_index| corresponds to a non-discarded Code object, returns the
739 // index within the code cluster that corresponds to this Code object.
740 // Otherwise, if |code_index| corresponds to a discarded Code object,
741 // returns -1.
742 static intptr_t CodeIndexToClusterIndex(const InstructionsTable& table,
743 intptr_t code_index);
744
746
748 return TokenPosition::Deserialize(Read<int32_t>());
749 }
750
751 intptr_t ReadCid() {
753 return Read<int32_t>();
754 }
755
756 void ReadInstructions(CodePtr code, bool deferred);
757 void EndInstructions();
758 ObjectPtr GetObjectAt(uint32_t offset) const;
759
761
763
765 ReadDispatchTable(&stream_, /*deferred=*/false, InstructionsTable::Handle(),
766 -1, -1);
767 }
769 bool deferred,
770 const InstructionsTable& root_instruction_table,
771 intptr_t deferred_code_start_index,
772 intptr_t deferred_code_end_index);
773
774 intptr_t next_index() const { return next_ref_index_; }
775 Heap* heap() const { return heap_; }
776 Zone* zone() const { return zone_; }
778#if defined(DART_PRECOMPILED_RUNTIME)
779 return Snapshot::kFullAOT;
780#else
781 return kind_;
782#endif
783 }
784 bool is_non_root_unit() const { return is_non_root_unit_; }
785 void set_code_start_index(intptr_t value) { code_start_index_ = value; }
786 intptr_t code_start_index() const { return code_start_index_; }
787 void set_code_stop_index(intptr_t value) { code_stop_index_ = value; }
788 intptr_t code_stop_index() const { return code_stop_index_; }
790 return instructions_table_;
791 }
792 intptr_t num_base_objects() const { return num_base_objects_; }
793
794 // This serves to make the snapshot cursor, ref table and null locals
795 // during ReadFill, which allows the compiler to see that they are not
796 // aliased and can be kept in registers.
797 class Local : public ReadStream {
798 public:
800 : ReadStream(d->stream_.buffer_, d->stream_.current_, d->stream_.end_),
801 d_(d),
802 refs_(d->refs_),
803 null_(Object::null()) {
804#if defined(DEBUG)
805 // Can't mix use of Deserializer::Read*.
806 d->stream_.current_ = nullptr;
807#endif
808 }
809 ~Local() { d_->stream_.current_ = current_; }
810
811 ObjectPtr Ref(intptr_t index) const {
812 ASSERT(index > 0);
813 ASSERT(index <= d_->num_objects_);
814 return refs_->untag()->element(index);
815 }
816
817 template <typename T>
818 T Read() {
819 return ReadStream::Raw<sizeof(T), T>::Read(this);
820 }
821 uint64_t ReadUnsigned64() { return ReadUnsigned<uint64_t>(); }
822
825 return TokenPosition::Deserialize(Read<int32_t>());
826 }
827
828 intptr_t ReadCid() {
830 return Read<int32_t>();
831 }
832
833 template <typename T, typename... P>
834 void ReadFromTo(T obj, P&&... params) {
835 auto* from = obj->untag()->from();
836 auto* to_snapshot = obj->untag()->to_snapshot(d_->kind(), params...);
837 auto* to = obj->untag()->to(params...);
838 for (auto* p = from; p <= to_snapshot; p++) {
839 *p = ReadRef();
840 }
841 // This is necessary because, unlike Object::Allocate, the clustered
842 // deserializer allocates objects without null-initializing them. Instead,
843 // each deserialization cluster is responsible for initializing every
844 // field, ensuring that every field is written to exactly once.
845 for (auto* p = to_snapshot + 1; p <= to; p++) {
846 *p = null_;
847 }
848 }
849
850 private:
851 Deserializer* const d_;
852 const ArrayPtr refs_;
853 const ObjectPtr null_;
854 };
855
856 private:
857 Heap* heap_;
858 PageSpace* old_space_;
859 FreeList* freelist_;
860 Zone* zone_;
861 Snapshot::Kind kind_;
862 ReadStream stream_;
863 ImageReader* image_reader_;
864 intptr_t num_base_objects_;
865 intptr_t num_objects_;
866 intptr_t num_clusters_;
867 ArrayPtr refs_;
868 intptr_t next_ref_index_;
869 intptr_t code_start_index_ = 0;
870 intptr_t code_stop_index_ = 0;
871 intptr_t instructions_index_ = 0;
872 DeserializationCluster** clusters_;
873 const bool is_non_root_unit_;
874 InstructionsTable& instructions_table_;
875};
876
877DART_FORCE_INLINE
880 old_space_->AllocateSnapshotLocked(freelist_, size));
881}
882
884 intptr_t class_id,
885 intptr_t size,
886 bool is_canonical,
887 bool is_immutable) {
889 uword tags = 0;
890 tags = UntaggedObject::ClassIdTag::update(class_id, tags);
892 tags = UntaggedObject::CanonicalBit::update(is_canonical, tags);
893 tags = UntaggedObject::AlwaysSetBit::update(true, tags);
894 tags = UntaggedObject::NotMarkedBit::update(true, tags);
897 tags = UntaggedObject::ImmutableBit::update(is_immutable, tags);
898 raw->untag()->tags_ = tags;
899}
900
901#if !defined(DART_PRECOMPILED_RUNTIME)
903 intptr_t start_size = serializer->bytes_written();
904 intptr_t start_data = serializer->GetDataSize();
905 intptr_t start_objects = serializer->next_ref_index();
909 serializer->Write<uint32_t>(tags);
910 WriteAlloc(serializer);
911 intptr_t stop_size = serializer->bytes_written();
912 intptr_t stop_data = serializer->GetDataSize();
913 intptr_t stop_objects = serializer->next_ref_index();
914 if (FLAG_print_cluster_information) {
915 OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "), ", start_size,
916 stop_size - start_size);
917 OS::PrintErr("Data 0x%" Pp " (%" Pd "): ", start_data,
918 stop_data - start_data);
919 OS::PrintErr("Alloc %s (%" Pd ")\n", name(), stop_objects - start_objects);
920 }
921 size_ += (stop_size - start_size) + (stop_data - start_data);
922 num_objects_ += (stop_objects - start_objects);
925 }
926}
927
929 intptr_t start = serializer->bytes_written();
930 WriteFill(serializer);
931 intptr_t stop = serializer->bytes_written();
932 if (FLAG_print_cluster_information) {
933 OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "): Fill %s\n", start, stop - start,
934 name());
935 }
936 size_ += (stop - start);
937}
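// Example of the --print_cluster_information output produced by the two
// methods above (values are illustrative, format taken from the code):
//   Snapshot 0x1a40 (312), Data 0x0 (0): Alloc Class (17)
//   Snapshot 0x2008 (964): Fill Class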
938#endif // !DART_PRECOMPILED_RUNTIME
939
940DART_NOINLINE
942 intptr_t instance_size) {
943 start_index_ = d->next_index();
944 intptr_t count = d->ReadUnsigned();
945 for (intptr_t i = 0; i < count; i++) {
946 d->AssignRef(d->Allocate(instance_size));
947 }
948 stop_index_ = d->next_index();
949}
950
951#if !defined(DART_PRECOMPILED_RUNTIME)
953 Serializer* s,
954 intptr_t class_id) {
955 const auto unboxed_fields_bitmap_host =
956 s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(class_id);
957
958 UnboxedFieldBitmap unboxed_fields_bitmap;
959 if (unboxed_fields_bitmap_host.IsEmpty() ||
961 unboxed_fields_bitmap = unboxed_fields_bitmap_host;
962 } else {
964 // A new bitmap is built if the word sizes in the target and
965 // host are different
966 unboxed_fields_bitmap.Reset();
967 intptr_t target_i = 0, host_i = 0;
968
969 while (host_i < UnboxedFieldBitmap::Length()) {
970 // Each unboxed field has a constant length, therefore the number of
971 // words it uses doubles when compiling from 64-bit to 32-bit.
972 if (unboxed_fields_bitmap_host.Get(host_i++)) {
973 unboxed_fields_bitmap.Set(target_i++);
974 unboxed_fields_bitmap.Set(target_i++);
975 } else {
976 // For object pointers, the field is always one word long.
977 target_i++;
978 }
979 }
980 }
981
982 return unboxed_fields_bitmap;
983}
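// Worked example (illustrative, not part of the original source): when
// cross-compiling from a 64-bit host to a 32-bit target, a host bitmap with
// bits 0 and 2 set (0b101, i.e. unboxed fields at word offsets 0 and 2)
// becomes a target bitmap with bits 0, 1, 3 and 4 set (0b11011): each unboxed
// field now spans two 32-bit words, while the pointer field at host offset 1
// still occupies a single word (target offset 2).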
984
985class ClassSerializationCluster : public SerializationCluster {
986 public:
987 explicit ClassSerializationCluster(intptr_t num_cids)
988 : SerializationCluster("Class",
989 kClassCid,
990 compiler::target::Class::InstanceSize()),
991 predefined_(kNumPredefinedCids),
992 objects_(num_cids) {}
994
995 void Trace(Serializer* s, ObjectPtr object) {
996 ClassPtr cls = Class::RawCast(object);
997 intptr_t class_id = cls->untag()->id_;
998
999 if (class_id == kIllegalCid) {
1000 // Classes expected to be dropped by the precompiler should not be traced.
1001 s->UnexpectedObject(cls, "Class with illegal cid");
1002 }
1003 if (class_id < kNumPredefinedCids) {
1004 // These classes are allocated by Object::Init or Object::InitOnce, so the
1005 // deserializer must find them in the class table instead of allocating
1006 // them.
1007 predefined_.Add(cls);
1008 } else {
1009 objects_.Add(cls);
1010 }
1011
1012 PushFromTo(cls);
1013 }
1014
1015 void WriteAlloc(Serializer* s) {
1016 intptr_t count = predefined_.length();
1017 s->WriteUnsigned(count);
1018 for (intptr_t i = 0; i < count; i++) {
1019 ClassPtr cls = predefined_[i];
1020 s->AssignRef(cls);
1021 AutoTraceObject(cls);
1022 intptr_t class_id = cls->untag()->id_;
1023 s->WriteCid(class_id);
1024 }
1025 count = objects_.length();
1026 s->WriteUnsigned(count);
1027 for (intptr_t i = 0; i < count; i++) {
1028 ClassPtr cls = objects_[i];
1029 s->AssignRef(cls);
1030 }
1031 }
1032
1033 void WriteFill(Serializer* s) {
1034 intptr_t count = predefined_.length();
1035 for (intptr_t i = 0; i < count; i++) {
1036 WriteClass(s, predefined_[i]);
1037 }
1038 count = objects_.length();
1039 for (intptr_t i = 0; i < count; i++) {
1040 WriteClass(s, objects_[i]);
1041 }
1042 }
1043
1044 private:
1045 void WriteClass(Serializer* s, ClassPtr cls) {
1046 AutoTraceObjectName(cls, cls->untag()->name());
1047 WriteFromTo(cls);
1048 intptr_t class_id = cls->untag()->id_;
1049 if (class_id == kIllegalCid) {
1050 s->UnexpectedObject(cls, "Class with illegal cid");
1051 }
1052 s->WriteCid(class_id);
1053 if (s->kind() != Snapshot::kFullAOT) {
1054 s->Write<uint32_t>(cls->untag()->kernel_offset_);
1055 }
1056 s->Write<int32_t>(Class::target_instance_size_in_words(cls));
1057 s->Write<int32_t>(Class::target_next_field_offset_in_words(cls));
1059 s->Write<int16_t>(cls->untag()->num_type_arguments_);
1060 s->Write<uint16_t>(cls->untag()->num_native_fields_);
1061 if (s->kind() != Snapshot::kFullAOT) {
1062 s->WriteTokenPosition(cls->untag()->token_pos_);
1063 s->WriteTokenPosition(cls->untag()->end_token_pos_);
1064 s->WriteCid(cls->untag()->implementor_cid_);
1065 }
1066 s->Write<uint32_t>(cls->untag()->state_bits_);
1067
1068 if (!ClassTable::IsTopLevelCid(class_id)) {
1069 const auto unboxed_fields_map =
1071 s->WriteUnsigned64(unboxed_fields_map.Value());
1072 }
1073 }
1074
1075 GrowableArray<ClassPtr> predefined_;
1076 GrowableArray<ClassPtr> objects_;
1077};
1078#endif // !DART_PRECOMPILED_RUNTIME
1079
1080class ClassDeserializationCluster : public DeserializationCluster {
1081 public:
1084
1085 void ReadAlloc(Deserializer* d) override {
1086 predefined_start_index_ = d->next_index();
1087 intptr_t count = d->ReadUnsigned();
1088 ClassTable* table = d->isolate_group()->class_table();
1089 for (intptr_t i = 0; i < count; i++) {
1090 intptr_t class_id = d->ReadCid();
1091 ASSERT(table->HasValidClassAt(class_id));
1092 ClassPtr cls = table->At(class_id);
1093 ASSERT(cls != nullptr);
1094 d->AssignRef(cls);
1095 }
1096 predefined_stop_index_ = d->next_index();
1097
1098 start_index_ = d->next_index();
1099 count = d->ReadUnsigned();
1100 for (intptr_t i = 0; i < count; i++) {
1101 d->AssignRef(d->Allocate(Class::InstanceSize()));
1102 }
1103 stop_index_ = d->next_index();
1104 }
1105
1106 void ReadFill(Deserializer* d_) override {
1108
1109 for (intptr_t id = predefined_start_index_; id < predefined_stop_index_;
1110 id++) {
1111 ClassPtr cls = static_cast<ClassPtr>(d.Ref(id));
1112 d.ReadFromTo(cls);
1113 intptr_t class_id = d.ReadCid();
1114 cls->untag()->id_ = class_id;
1115#if !defined(DART_PRECOMPILED_RUNTIME)
1116 ASSERT(d_->kind() != Snapshot::kFullAOT);
1117 cls->untag()->kernel_offset_ = d.Read<uint32_t>();
1118#endif
1119 if (!IsInternalVMdefinedClassId(class_id)) {
1120 cls->untag()->host_instance_size_in_words_ = d.Read<int32_t>();
1121 cls->untag()->host_next_field_offset_in_words_ = d.Read<int32_t>();
1122#if defined(DART_PRECOMPILER)
1123 // Only one pair is serialized. The target field only exists when
1124 // DART_PRECOMPILER is defined
1125 cls->untag()->target_instance_size_in_words_ =
1126 cls->untag()->host_instance_size_in_words_;
1127 cls->untag()->target_next_field_offset_in_words_ =
1128 cls->untag()->host_next_field_offset_in_words_;
1129#endif // defined(DART_PRECOMPILER)
1130 } else {
1131 d.Read<int32_t>(); // Skip.
1132 d.Read<int32_t>(); // Skip.
1133 }
1134 cls->untag()->host_type_arguments_field_offset_in_words_ =
1135 d.Read<int32_t>();
1136#if defined(DART_PRECOMPILER)
1137 cls->untag()->target_type_arguments_field_offset_in_words_ =
1138 cls->untag()->host_type_arguments_field_offset_in_words_;
1139#endif // defined(DART_PRECOMPILER)
1140 cls->untag()->num_type_arguments_ = d.Read<int16_t>();
1141 cls->untag()->num_native_fields_ = d.Read<uint16_t>();
1142#if !defined(DART_PRECOMPILED_RUNTIME)
1143 ASSERT(d_->kind() != Snapshot::kFullAOT);
1144 cls->untag()->token_pos_ = d.ReadTokenPosition();
1145 cls->untag()->end_token_pos_ = d.ReadTokenPosition();
1146 cls->untag()->implementor_cid_ = d.ReadCid();
1147#endif // !defined(DART_PRECOMPILED_RUNTIME)
1148 cls->untag()->state_bits_ = d.Read<uint32_t>();
1149 d.ReadUnsigned64(); // Skip unboxed fields bitmap.
1150 }
1151
1153 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1154 ClassPtr cls = static_cast<ClassPtr>(d.Ref(id));
1156 d.ReadFromTo(cls);
1157
1158 intptr_t class_id = d.ReadCid();
1159 ASSERT(class_id >= kNumPredefinedCids);
1160 cls->untag()->id_ = class_id;
1161
1162#if !defined(DART_PRECOMPILED_RUNTIME)
1163 ASSERT(d_->kind() != Snapshot::kFullAOT);
1164 cls->untag()->kernel_offset_ = d.Read<uint32_t>();
1165#endif
1166 cls->untag()->host_instance_size_in_words_ = d.Read<int32_t>();
1167 cls->untag()->host_next_field_offset_in_words_ = d.Read<int32_t>();
1168 cls->untag()->host_type_arguments_field_offset_in_words_ =
1169 d.Read<int32_t>();
1170#if defined(DART_PRECOMPILER)
1171 cls->untag()->target_instance_size_in_words_ =
1172 cls->untag()->host_instance_size_in_words_;
1173 cls->untag()->target_next_field_offset_in_words_ =
1174 cls->untag()->host_next_field_offset_in_words_;
1175 cls->untag()->target_type_arguments_field_offset_in_words_ =
1176 cls->untag()->host_type_arguments_field_offset_in_words_;
1177#endif // defined(DART_PRECOMPILER)
1178 cls->untag()->num_type_arguments_ = d.Read<int16_t>();
1179 cls->untag()->num_native_fields_ = d.Read<uint16_t>();
1180#if !defined(DART_PRECOMPILED_RUNTIME)
1181 ASSERT(d_->kind() != Snapshot::kFullAOT);
1182 cls->untag()->token_pos_ = d.ReadTokenPosition();
1183 cls->untag()->end_token_pos_ = d.ReadTokenPosition();
1184 cls->untag()->implementor_cid_ = d.ReadCid();
1185#endif // !defined(DART_PRECOMPILED_RUNTIME)
1186 cls->untag()->state_bits_ = d.Read<uint32_t>();
1187
1188 table->AllocateIndex(class_id);
1189 table->SetAt(class_id, cls);
1190
1191 if (!ClassTable::IsTopLevelCid(class_id)) {
1192 const UnboxedFieldBitmap unboxed_fields_map(d.ReadUnsigned64());
1193 table->SetUnboxedFieldsMapAt(class_id, unboxed_fields_map);
1194 }
1195 }
1196 }
1197
1198 private:
1199 intptr_t predefined_start_index_;
1200 intptr_t predefined_stop_index_;
1201};
1202
1203// Superclasses for writing out clusters which contain objects grouped into
1204// a canonical set (e.g. String, Type, TypeArguments, etc.).
1205// To save space in the snapshot we avoid writing such canonical sets
1206// explicitly as Array objects into the snapshot and instead use a different
1207// encoding: objects in a cluster representing a canonical set are sorted
1208// to appear in the same order they appear in the Array representing the set,
1209// and we additionally write out an array of values describing the gaps
1210// between objects.
1211//
1212// In some situations not all canonical objects of the same type need to
1213// be added to the resulting canonical set because they are cached in some
1214// special way (see Type::Canonicalize as an example, which caches declaration
1215// types in a special way). In this case the subclass can set
1216// kAllCanonicalObjectsAreIncludedIntoSet to |false| and override the
1217// IsInCanonicalSet filter.
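// Worked example of the gap encoding (illustrative, not part of the original
// source): suppose the canonical set's backing array has its first key slot
// at index 2 and the occupied slots after hashing are
//   [ .., .., A, _, _, B, C, _ ]   (underscores are unused slots).
// The cluster then emits its objects in the order A, B, C together with the
// gap lengths 0, 2, 0: the deserializer rebuilds the table by skipping 0
// unused slots, writing A, skipping 2, writing B, skipping 0, writing C, and
// finally filling the trailing unused slots (see DeserializationFinger below).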
1218#if !defined(DART_PRECOMPILED_RUNTIME)
1219template <typename SetType,
1220 typename HandleType,
1221 typename PointerType,
1222 bool kAllCanonicalObjectsAreIncludedIntoSet = true>
1223class CanonicalSetSerializationCluster : public SerializationCluster {
1224 protected:
1226 bool is_canonical,
1227 bool represents_canonical_set,
1228 const char* name,
1229 intptr_t target_instance_size = 0)
1230 : SerializationCluster(name, cid, target_instance_size, is_canonical),
1231 represents_canonical_set_(represents_canonical_set) {}
1232
1233 virtual bool IsInCanonicalSet(Serializer* s, PointerType ptr) {
1234 // Must override this function if kAllCanonicalObjectsAreIncludedIntoSet
1235 // is set to |false|.
1236 ASSERT(kAllCanonicalObjectsAreIncludedIntoSet);
1237 return true;
1238 }
1239
1241 if (!represents_canonical_set_) {
1242 return;
1243 }
1244
1245 // Sort objects before writing them out so that they appear in the same
1246 // order as they would appear in a CanonicalStringSet.
1247 using ZoneCanonicalSet =
1249
1250 // Compute required capacity for the hashtable (to avoid overallocating).
1251 intptr_t required_capacity = 0;
1252 for (auto ptr : objects_) {
1253 if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) {
1254 required_capacity++;
1255 }
1256 }
1257 // Over-allocate capacity so a few inserts can happen at startup without
1258 // causing a rehash.
1259 const intptr_t kSpareCapacity = 32;
1260 required_capacity = static_cast<intptr_t>(
1261 static_cast<double>(required_capacity + kSpareCapacity) /
1263
1264 intptr_t num_occupied = 0;
1265
1266 // Build canonical set out of objects that should belong to it.
1267 // Objects that don't belong to it are copied to the prefix of objects_.
1268 ZoneCanonicalSet table(
1269 s->zone(), HashTables::New<ZoneCanonicalSet>(required_capacity));
1270 HandleType& element = HandleType::Handle(s->zone());
1271 for (auto ptr : objects_) {
1272 if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) {
1273 element ^= ptr;
1274 intptr_t entry = -1;
1275 const bool present = table.FindKeyOrDeletedOrUnused(element, &entry);
1276 ASSERT(!present);
1277 table.InsertKey(entry, element);
1278 } else {
1279 objects_[num_occupied++] = ptr;
1280 }
1281 }
1282
1283 const auto prefix_length = num_occupied;
1284
1285 // Compute objects_ order and gaps based on canonical set layout.
1286 auto& arr = table.Release();
1287 intptr_t last_occupied = ZoneCanonicalSet::kFirstKeyIndex - 1;
1288 for (intptr_t i = ZoneCanonicalSet::kFirstKeyIndex, length = arr.Length();
1289 i < length; i++) {
1290 ObjectPtr v = arr.At(i);
1291 ASSERT(v != ZoneCanonicalSet::DeletedMarker().ptr());
1292 if (v != ZoneCanonicalSet::UnusedMarker().ptr()) {
1293 const intptr_t unused_run_length = (i - 1) - last_occupied;
1294 gaps_.Add(unused_run_length);
1295 objects_[num_occupied++] = static_cast<PointerType>(v);
1296 last_occupied = i;
1297 }
1298 }
1299 ASSERT(num_occupied == objects_.length());
1300 ASSERT(prefix_length == (objects_.length() - gaps_.length()));
1301 table_length_ = arr.Length();
1302 }
1303
1305 if (represents_canonical_set_) {
1306 s->WriteUnsigned(table_length_);
1307 s->WriteUnsigned(objects_.length() - gaps_.length());
1308 for (auto gap : gaps_) {
1309 s->WriteUnsigned(gap);
1310 }
1313 }
1314 }
1315
1317
1318 private:
1319 const bool represents_canonical_set_;
1321 intptr_t table_length_ = 0;
1322};
1323#endif
1324
1325template <typename SetType, bool kAllCanonicalObjectsAreIncludedIntoSet = true>
1326class CanonicalSetDeserializationCluster : public DeserializationCluster {
1327 public:
1329 bool is_root_unit,
1330 const char* name)
1332 is_root_unit_(is_root_unit),
1333 table_(SetType::ArrayHandle::Handle()) {}
1334
1336 if (!is_root_unit_ || !is_canonical()) {
1337 return;
1338 }
1339
1340 const auto table_length = d->ReadUnsigned();
1341 first_element_ = d->ReadUnsigned();
1342 const intptr_t count = stop_index_ - (start_index_ + first_element_);
1343 auto table = StartDeserialization(d, table_length, count);
1344 for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) {
1345 table.FillGap(d->ReadUnsigned());
1346 table.WriteElement(d, d->Ref(i));
1347 }
1348 table_ = table.Finish();
1349 }
1350
1351 protected:
1352 const bool is_root_unit_;
1354 typename SetType::ArrayHandle& table_;
1355
1357 const Array& refs,
1358 const typename SetType::ArrayHandle& current_table) {
1359#if defined(DEBUG)
1360 // First check that we are not overwriting a table and losing information.
1361 if (!current_table.IsNull()) {
1362 SetType current_set(d->zone(), current_table.ptr());
1363 ASSERT(current_set.NumOccupied() == 0);
1364 current_set.Release();
1365 }
1366
1367 // Now check that the manually created table behaves correctly as a
1368 // canonical set.
1369 SetType canonical_set(d->zone(), table_.ptr());
1371 for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) {
1372 key = refs.At(i);
1373 ASSERT(canonical_set.GetOrNull(key) != Object::null());
1374 }
1375 canonical_set.Release();
1376#endif // defined(DEBUG)
1377 }
1378
1379 private:
1380 struct DeserializationFinger {
1381 typename SetType::ArrayPtr table;
1382 intptr_t current_index;
1383 ObjectPtr gap_element;
1384
1385 void FillGap(int length) {
1386 for (intptr_t j = 0; j < length; j++) {
1387 table->untag()->data()[current_index + j] = gap_element;
1388 }
1389 current_index += length;
1390 }
1391
1392 void WriteElement(Deserializer* d, ObjectPtr object) {
1393 table->untag()->data()[current_index++] = object;
1394 }
1395
1396 typename SetType::ArrayPtr Finish() {
1397 if (table != SetType::ArrayHandle::null()) {
1398 FillGap(Smi::Value(table->untag()->length()) - current_index);
1399 }
1400 auto result = table;
1401 table = SetType::ArrayHandle::null();
1402 return result;
1403 }
1404 };
1405
1406 static DeserializationFinger StartDeserialization(Deserializer* d,
1407 intptr_t length,
1408 intptr_t count) {
1409 const intptr_t instance_size = SetType::ArrayHandle::InstanceSize(length);
1410 typename SetType::ArrayPtr table =
1411 static_cast<typename SetType::ArrayPtr>(d->Allocate(instance_size));
1412 Deserializer::InitializeHeader(table, SetType::Storage::ArrayCid,
1413 instance_size);
1414 if ((SetType::Storage::ArrayCid == kArrayCid) &&
1416 table->untag()->SetCardRememberedBitUnsynchronized();
1417 }
1418 InitTypeArgsOrNext(table);
1419 table->untag()->length_ = Smi::New(length);
1420 for (intptr_t i = 0; i < SetType::kFirstKeyIndex; i++) {
1421 table->untag()->data()[i] = Smi::New(0);
1422 }
1423 table->untag()->data()[SetType::kOccupiedEntriesIndex] = Smi::New(count);
1424 return {table, SetType::kFirstKeyIndex, SetType::UnusedMarker().ptr()};
1425 }
1426
1427 static void InitTypeArgsOrNext(ArrayPtr table) {
1428 table->untag()->type_arguments_ = TypeArguments::null();
1429 }
1430 static void InitTypeArgsOrNext(WeakArrayPtr table) {
1431 table->untag()->next_seen_by_gc_ = WeakArray::null();
1432 }
1433};
1434
1435#if !defined(DART_PRECOMPILED_RUNTIME)
1436class TypeParametersSerializationCluster : public SerializationCluster {
1437 public:
1439 : SerializationCluster("TypeParameters",
1440 kTypeParametersCid,
1441 compiler::target::TypeParameters::InstanceSize()) {
1442 }
1444
1445 void Trace(Serializer* s, ObjectPtr object) {
1446 TypeParametersPtr type_params = TypeParameters::RawCast(object);
1447 objects_.Add(type_params);
1448 PushFromTo(type_params);
1449 }
1450
1451 void WriteAlloc(Serializer* s) {
1452 const intptr_t count = objects_.length();
1453 s->WriteUnsigned(count);
1454 for (intptr_t i = 0; i < count; i++) {
1455 TypeParametersPtr type_params = objects_[i];
1456 s->AssignRef(type_params);
1457 }
1458 }
1459
1460 void WriteFill(Serializer* s) {
1461 const intptr_t count = objects_.length();
1462 for (intptr_t i = 0; i < count; i++) {
1463 TypeParametersPtr type_params = objects_[i];
1464 AutoTraceObject(type_params);
1465 WriteFromTo(type_params);
1466 }
1467 }
1468
1469 private:
1471};
1472#endif // !DART_PRECOMPILED_RUNTIME
1473
1474class TypeParametersDeserializationCluster : public DeserializationCluster {
1475 public:
1477 : DeserializationCluster("TypeParameters") {}
1479
1480 void ReadAlloc(Deserializer* d) override {
1482 }
1483
1484 void ReadFill(Deserializer* d_) override {
1486
1487 ASSERT(!is_canonical()); // Never canonical.
1488 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1489 TypeParametersPtr type_params = static_cast<TypeParametersPtr>(d.Ref(id));
1490 Deserializer::InitializeHeader(type_params, kTypeParametersCid,
1492 d.ReadFromTo(type_params);
1493 }
1494 }
1495};
1496
1497#if !defined(DART_PRECOMPILED_RUNTIME)
1498class TypeArgumentsSerializationCluster
1499 : public CanonicalSetSerializationCluster<CanonicalTypeArgumentsSet,
1500 TypeArguments,
1501 TypeArgumentsPtr> {
1502 public:
1504 bool represents_canonical_set)
1505 : CanonicalSetSerializationCluster(kTypeArgumentsCid,
1507 represents_canonical_set,
1508 "TypeArguments") {}
1510
1511 void Trace(Serializer* s, ObjectPtr object) {
1512 TypeArgumentsPtr type_args = TypeArguments::RawCast(object);
1513 objects_.Add(type_args);
1514
1515 s->Push(type_args->untag()->instantiations());
1516 const intptr_t length = Smi::Value(type_args->untag()->length());
1517 for (intptr_t i = 0; i < length; i++) {
1518 s->Push(type_args->untag()->element(i));
1519 }
1520 }
1521
1522 void WriteAlloc(Serializer* s) {
1523 const intptr_t count = objects_.length();
1524 s->WriteUnsigned(count);
1526 for (intptr_t i = 0; i < count; i++) {
1527 TypeArgumentsPtr type_args = objects_[i];
1528 s->AssignRef(type_args);
1529 AutoTraceObject(type_args);
1530 const intptr_t length = Smi::Value(type_args->untag()->length());
1531 s->WriteUnsigned(length);
1534 }
1536 }
1537
1538 void WriteFill(Serializer* s) {
1539 const intptr_t count = objects_.length();
1540 for (intptr_t i = 0; i < count; i++) {
1541 TypeArgumentsPtr type_args = objects_[i];
1542 AutoTraceObject(type_args);
1543 const intptr_t length = Smi::Value(type_args->untag()->length());
1544 s->WriteUnsigned(length);
1545 intptr_t hash = Smi::Value(type_args->untag()->hash());
1546 s->Write<int32_t>(hash);
1547 const intptr_t nullability =
1548 Smi::Value(type_args->untag()->nullability());
1549 s->WriteUnsigned(nullability);
1550 WriteField(type_args, instantiations());
1551 for (intptr_t j = 0; j < length; j++) {
1552 s->WriteElementRef(type_args->untag()->element(j), j);
1553 }
1554 }
1555 }
1556};
1557#endif // !DART_PRECOMPILED_RUNTIME
1558
1559class TypeArgumentsDeserializationCluster
1560 : public CanonicalSetDeserializationCluster<CanonicalTypeArgumentsSet> {
1561 public:
1563 bool is_root_unit)
1565 is_root_unit,
1566 "TypeArguments") {}
1568
1569 void ReadAlloc(Deserializer* d) override {
1570 start_index_ = d->next_index();
1571 const intptr_t count = d->ReadUnsigned();
1572 for (intptr_t i = 0; i < count; i++) {
1573 const intptr_t length = d->ReadUnsigned();
1574 d->AssignRef(d->Allocate(TypeArguments::InstanceSize(length)));
1575 }
1576 stop_index_ = d->next_index();
1578 }
1579
1580 void ReadFill(Deserializer* d_) override {
1582
1583 const bool mark_canonical = is_root_unit_ && is_canonical();
1584 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1585 TypeArgumentsPtr type_args = static_cast<TypeArgumentsPtr>(d.Ref(id));
1586 const intptr_t length = d.ReadUnsigned();
1587 Deserializer::InitializeHeader(type_args, kTypeArgumentsCid,
1589 mark_canonical);
1590 type_args->untag()->length_ = Smi::New(length);
1591 type_args->untag()->hash_ = Smi::New(d.Read<int32_t>());
1592 type_args->untag()->nullability_ = Smi::New(d.ReadUnsigned());
1593 type_args->untag()->instantiations_ = static_cast<ArrayPtr>(d.ReadRef());
1594 for (intptr_t j = 0; j < length; j++) {
1595 type_args->untag()->types()[j] =
1596 static_cast<AbstractTypePtr>(d.ReadRef());
1597 }
1598 }
1599 }
1600
1601 void PostLoad(Deserializer* d, const Array& refs) override {
1602 if (!table_.IsNull()) {
1603 auto object_store = d->isolate_group()->object_store();
1605 d, refs, Array::Handle(object_store->canonical_type_arguments()));
1606 object_store->set_canonical_type_arguments(table_);
1607 } else if (!is_root_unit_ && is_canonical()) {
1608 TypeArguments& type_arg = TypeArguments::Handle(d->zone());
1609 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1610 type_arg ^= refs.At(i);
1611 type_arg = type_arg.Canonicalize(d->thread());
1612 refs.SetAt(i, type_arg);
1613 }
1614 }
1615 }
1616};
1617
1618#if !defined(DART_PRECOMPILED_RUNTIME)
1619class PatchClassSerializationCluster : public SerializationCluster {
1620 public:
1622 : SerializationCluster("PatchClass",
1623 kPatchClassCid,
1624 compiler::target::PatchClass::InstanceSize()) {}
1626
1627 void Trace(Serializer* s, ObjectPtr object) {
1628 PatchClassPtr cls = PatchClass::RawCast(object);
1629 objects_.Add(cls);
1630 PushFromTo(cls);
1631 }
1632
1633 void WriteAlloc(Serializer* s) {
1634 const intptr_t count = objects_.length();
1635 s->WriteUnsigned(count);
1636 for (intptr_t i = 0; i < count; i++) {
1637 PatchClassPtr cls = objects_[i];
1638 s->AssignRef(cls);
1639 }
1640 }
1641
1642 void WriteFill(Serializer* s) {
1643 const intptr_t count = objects_.length();
1644 for (intptr_t i = 0; i < count; i++) {
1645 PatchClassPtr cls = objects_[i];
1646 AutoTraceObject(cls);
1647 WriteFromTo(cls);
1648 if (s->kind() != Snapshot::kFullAOT) {
1649 s->Write<int32_t>(cls->untag()->kernel_library_index_);
1650 }
1651 }
1652 }
1653
1654 private:
1656};
1657#endif // !DART_PRECOMPILED_RUNTIME
1658
1659class PatchClassDeserializationCluster : public DeserializationCluster {
1660 public:
1663
1664 void ReadAlloc(Deserializer* d) override {
1666 }
1667
1668 void ReadFill(Deserializer* d_) override {
1670
1671 ASSERT(!is_canonical()); // Never canonical.
1672 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1673 PatchClassPtr cls = static_cast<PatchClassPtr>(d.Ref(id));
1674 Deserializer::InitializeHeader(cls, kPatchClassCid,
1676 d.ReadFromTo(cls);
1677#if !defined(DART_PRECOMPILED_RUNTIME)
1678 ASSERT(d_->kind() != Snapshot::kFullAOT);
1679 cls->untag()->kernel_library_index_ = d.Read<int32_t>();
1680#endif
1681 }
1682 }
1683};
1684
1685#if !defined(DART_PRECOMPILED_RUNTIME)
1686class FunctionSerializationCluster : public SerializationCluster {
1687 public:
1689 : SerializationCluster("Function",
1690 kFunctionCid,
1691 compiler::target::Function::InstanceSize()) {}
1693
1694 void Trace(Serializer* s, ObjectPtr object) {
1695 Snapshot::Kind kind = s->kind();
1696 FunctionPtr func = Function::RawCast(object);
1697 objects_.Add(func);
1698
1699 PushFromTo(func);
1700 if (kind == Snapshot::kFullAOT) {
1701 s->Push(func->untag()->code());
1702 } else if (kind == Snapshot::kFullJIT) {
1703 NOT_IN_PRECOMPILED(s->Push(func->untag()->unoptimized_code()));
1704 s->Push(func->untag()->code());
1705 s->Push(func->untag()->ic_data_array());
1706 }
1707 if (kind != Snapshot::kFullAOT) {
1708 NOT_IN_PRECOMPILED(s->Push(func->untag()->positional_parameter_names()));
1709 }
1710 }
1711
1712 void WriteAlloc(Serializer* s) {
1713 const intptr_t count = objects_.length();
1714 s->WriteUnsigned(count);
1715 for (intptr_t i = 0; i < count; i++) {
1716 FunctionPtr func = objects_[i];
1717 s->AssignRef(func);
1718 }
1719 }
1720
1721 void WriteFill(Serializer* s) {
1722 Snapshot::Kind kind = s->kind();
1723 const intptr_t count = objects_.length();
1724 for (intptr_t i = 0; i < count; i++) {
1725 FunctionPtr func = objects_[i];
1727 WriteFromTo(func);
1728 if (kind == Snapshot::kFullAOT) {
1729#if defined(DART_PRECOMPILER)
1730 CodePtr code = func->untag()->code();
1731 const auto code_index = s->GetCodeIndex(code);
1732 s->WriteUnsigned(code_index);
1733 s->AttributePropertyRef(code, "code_");
1734#else
1735 UNREACHABLE();
1736#endif
1737 } else if (s->kind() == Snapshot::kFullJIT) {
1738 NOT_IN_PRECOMPILED(WriteCompressedField(func, unoptimized_code));
1740 WriteCompressedField(func, ic_data_array);
1741 }
1742
1743 if (kind != Snapshot::kFullAOT) {
1745 WriteCompressedField(func, positional_parameter_names));
1746 }
1747
1748#if defined(DART_PRECOMPILER) && !defined(PRODUCT)
1749 TokenPosition token_pos = func->untag()->token_pos_;
1750 if (kind == Snapshot::kFullAOT) {
1751 // We use the token_pos property to store the line number
1752 // in AOT snapshots.
1753 intptr_t line = -1;
1754 const Function& function = Function::Handle(func);
1755 const Script& script = Script::Handle(function.script());
1756 if (!script.IsNull()) {
1757 script.GetTokenLocation(token_pos, &line, nullptr);
1758 }
1759 token_pos = line == -1 ? TokenPosition::kNoSource
1761 }
1762 s->WriteTokenPosition(token_pos);
1763#else
1764 if (kind != Snapshot::kFullAOT) {
1765 s->WriteTokenPosition(func->untag()->token_pos_);
1766 }
1767#endif
1768 if (kind != Snapshot::kFullAOT) {
1769 s->WriteTokenPosition(func->untag()->end_token_pos_);
1770 s->Write<uint32_t>(func->untag()->kernel_offset_);
1771 s->Write<uint32_t>(func->untag()->packed_fields_);
1772 }
1773 s->Write<uint32_t>(func->untag()->kind_tag_);
1774 }
1775 }
1776
1778 FunctionPtr f) {
1779 if (s->profile_writer() == nullptr) {
1780 return nullptr;
1781 }
1782
1784 Function& fun = reused_function_handle.Handle();
1785 fun = f;
1786 ZoneTextBuffer printer(s->thread()->zone());
1788 Object::NameVisibility::kInternalName),
1789 &printer);
1790 return printer.buffer();
1791 }
1792
1793 private:
1795};
1796#endif // !DART_PRECOMPILED_RUNTIME
1797
1798template <bool need_entry_point_for_non_discarded>
1799DART_FORCE_INLINE static CodePtr GetCodeAndEntryPointByIndex(
1800 const Deserializer* d,
1801 intptr_t code_index,
1802 uword* entry_point) {
1803 code_index -= 1; // 0 is reserved for LazyCompile stub.
1804
1805 // In the root unit and in the VM isolate snapshot, code indices are
1806 // self-contained: they point into the instructions table and/or into the
1807 // code cluster. In non-root units we might also refer to code objects
1808 // from the parent unit, which means code_index is biased by num_base_objects_.
1809 const intptr_t base = d->is_non_root_unit() ? d->num_base_objects() : 0;
1810 if (code_index < base) {
1811 CodePtr code = static_cast<CodePtr>(d->Ref(code_index));
1812 if (need_entry_point_for_non_discarded) {
1813 *entry_point = Code::EntryPointOf(code);
1814 }
1815 return code;
1816 }
1817 code_index -= base;
1818
1819 // At this point code_index refers to a code object which is either
1820 // discarded or exists in the Code cluster. Non-discarded Code objects
1821 // are associated with the tail of the instructions table and have the
1822 // same order there and in the Code cluster. This means that
1823 // subtracting first_entry_with_code yields the index into the Code cluster.
1824 // This also works for deferred code objects in the root unit's snapshot
1825 // due to the choice of encoding (see Serializer::GetCodeIndex).
1826 const intptr_t first_entry_with_code =
1827 d->instructions_table().rodata()->first_entry_with_code;
1828 if (code_index < first_entry_with_code) {
1829 *entry_point = d->instructions_table().EntryPointAt(code_index);
1830 return StubCode::UnknownDartCode().ptr();
1831 } else {
1832 const intptr_t cluster_index = code_index - first_entry_with_code;
1833 CodePtr code =
1834 static_cast<CodePtr>(d->Ref(d->code_start_index() + cluster_index));
1835 if (need_entry_point_for_non_discarded) {
1836 *entry_point = Code::EntryPointOf(code);
1837 }
1838 return code;
1839 }
1840}
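// Illustrative summary (not part of the original source) of the code_index
// decoding implemented above and in Deserializer::GetCodeByIndex:
//   code_index == 0                        -> LazyCompile stub
//   1 .. base                              -> Code object from the parent unit
//   base+1 .. base+first_entry_with_code   -> discarded Code; only an entry
//                                             point in the instructions table
//                                             is recovered
//   above that                             -> Code in this unit's code cluster
// where base is num_base_objects() for non-root units and 0 otherwise.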
1841
1842CodePtr Deserializer::GetCodeByIndex(intptr_t code_index,
1843 uword* entry_point) const {
1844 // See Serializer::GetCodeIndex for how code_index is encoded.
1845 if (code_index == 0) {
1846 return StubCode::LazyCompile().ptr();
1847 } else if (FLAG_precompiled_mode) {
1849 /*need_entry_point_for_non_discarded=*/false>(this, code_index,
1850 entry_point);
1851 } else {
1852 // -1 below because 0 is reserved for LazyCompile stub.
1853 const intptr_t ref = code_start_index_ + code_index - 1;
1854 ASSERT(code_start_index_ <= ref && ref < code_stop_index_);
1855 return static_cast<CodePtr>(Ref(ref));
1856 }
1857}
1858
1860 intptr_t code_index) {
1861 // Note: the code indices we are interpreting here originate from the root
1862 // loading unit, which means the base is equal to 0.
1863 // See the comments above which clarify the connection between code_index
1864 // and the index into the Code cluster.
1865 ASSERT(FLAG_precompiled_mode);
1866 const intptr_t first_entry_with_code = table.rodata()->first_entry_with_code;
1867 return code_index - 1 - first_entry_with_code;
1868}
1869
1871 // See Deserializer::GetCodeByIndex which this code repeats.
1872 ASSERT(FLAG_precompiled_mode);
1873 uword entry_point = 0;
1874 GetCodeAndEntryPointByIndex</*need_entry_point_for_non_discarded=*/true>(
1875 this, code_index, &entry_point);
1876 return entry_point;
1877}
1878
1880 public:
1883
1884 void ReadAlloc(Deserializer* d) override {
1886 }
1887
1888 void ReadFill(Deserializer* d_) override {
1890
1891 ASSERT(!is_canonical()); // Never canonical.
1892 Snapshot::Kind kind = d_->kind();
1893
1894 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1895 FunctionPtr func = static_cast<FunctionPtr>(d.Ref(id));
1896 Deserializer::InitializeHeader(func, kFunctionCid,
1898 d.ReadFromTo(func);
1899
1900#if defined(DEBUG)
1901 func->untag()->entry_point_ = 0;
1902 func->untag()->unchecked_entry_point_ = 0;
1903#endif
1904
1905#if defined(DART_PRECOMPILED_RUNTIME)
1906 ASSERT(kind == Snapshot::kFullAOT);
1907 const intptr_t code_index = d.ReadUnsigned();
1908 uword entry_point = 0;
1909 CodePtr code = d_->GetCodeByIndex(code_index, &entry_point);
1910 func->untag()->code_ = code;
1911 if (entry_point != 0) {
1912 func->untag()->entry_point_ = entry_point;
1913 func->untag()->unchecked_entry_point_ = entry_point;
1914 }
1915#else
1916 ASSERT(kind != Snapshot::kFullAOT);
1917 if (kind == Snapshot::kFullJIT) {
1918 func->untag()->unoptimized_code_ = static_cast<CodePtr>(d.ReadRef());
1919 func->untag()->code_ = static_cast<CodePtr>(d.ReadRef());
1920 func->untag()->ic_data_array_ = static_cast<ArrayPtr>(d.ReadRef());
1921 }
1922#endif
1923
1924#if !defined(DART_PRECOMPILED_RUNTIME)
1925 ASSERT(kind != Snapshot::kFullAOT);
1926 func->untag()->positional_parameter_names_ =
1927 static_cast<ArrayPtr>(d.ReadRef());
1928#endif
1929#if !defined(DART_PRECOMPILED_RUNTIME) || \
1930 (defined(DART_PRECOMPILED_RUNTIME) && !defined(PRODUCT))
1931 func->untag()->token_pos_ = d.ReadTokenPosition();
1932#endif
1933#if !defined(DART_PRECOMPILED_RUNTIME)
1934 func->untag()->end_token_pos_ = d.ReadTokenPosition();
1935 func->untag()->kernel_offset_ = d.Read<uint32_t>();
1936 func->untag()->unboxed_parameters_info_.Reset();
1937 func->untag()->packed_fields_ = d.Read<uint32_t>();
1938#endif
1939
1940 func->untag()->kind_tag_ = d.Read<uint32_t>();
1941#if !defined(DART_PRECOMPILED_RUNTIME)
1942 func->untag()->usage_counter_ = 0;
1943 func->untag()->optimized_instruction_count_ = 0;
1944 func->untag()->optimized_call_site_count_ = 0;
1945 func->untag()->deoptimization_counter_ = 0;
1946 func->untag()->state_bits_ = 0;
1947 func->untag()->inlining_depth_ = 0;
1948#endif
1949 }
1950 }
1951
1952 void PostLoad(Deserializer* d, const Array& refs) override {
1953 if (d->kind() == Snapshot::kFullAOT) {
1954 Function& func = Function::Handle(d->zone());
1955 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1956 func ^= refs.At(i);
1957 auto const code = func.ptr()->untag()->code();
1958 ASSERT(code->IsCode());
1960 uword entry_point = code->untag()->entry_point_;
1961 ASSERT(entry_point != 0);
1962 func.ptr()->untag()->entry_point_ = entry_point;
1963 uword unchecked_entry_point = code->untag()->unchecked_entry_point_;
1964 ASSERT(unchecked_entry_point != 0);
1965 func.ptr()->untag()->unchecked_entry_point_ = unchecked_entry_point;
1966 }
1967 }
1968 } else if (d->kind() == Snapshot::kFullJIT) {
1969 Function& func = Function::Handle(d->zone());
1970 Code& code = Code::Handle(d->zone());
1971 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1972 func ^= refs.At(i);
1973 code = func.CurrentCode();
1974 if (func.HasCode() && !code.IsDisabled()) {
1975 func.SetInstructionsSafe(code); // Set entrypoint.
1976 func.SetWasCompiled(true);
1977 } else {
1978 func.ClearCodeSafe(); // Set code and entrypoint to lazy compile stub
1979 }
1980 }
1981 } else {
1982 Function& func = Function::Handle(d->zone());
1983 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1984 func ^= refs.At(i);
1985 func.ClearCodeSafe(); // Set code and entrypoint to lazy compile stub.
1986 }
1987 }
1988 }
1989};
1990
1991#if !defined(DART_PRECOMPILED_RUNTIME)
1993 public:
1995 : SerializationCluster("ClosureData",
1996 kClosureDataCid,
1997 compiler::target::ClosureData::InstanceSize()) {}
1999
2000 void Trace(Serializer* s, ObjectPtr object) {
2001 ClosureDataPtr data = ClosureData::RawCast(object);
2002 objects_.Add(data);
2003
2004 if (s->kind() != Snapshot::kFullAOT) {
2005 s->Push(data->untag()->context_scope());
2006 }
2007 s->Push(data->untag()->parent_function());
2008 s->Push(data->untag()->closure());
2009 }
2010
2012 const intptr_t count = objects_.length();
2013 s->WriteUnsigned(count);
2014 for (intptr_t i = 0; i < count; i++) {
2015 ClosureDataPtr data = objects_[i];
2016 s->AssignRef(data);
2017 }
2018 }
2019
2021 const intptr_t count = objects_.length();
2022 for (intptr_t i = 0; i < count; i++) {
2023 ClosureDataPtr data = objects_[i];
2025 if (s->kind() != Snapshot::kFullAOT) {
2026 WriteCompressedField(data, context_scope);
2027 }
2028 WriteCompressedField(data, parent_function);
2030 s->WriteUnsigned(static_cast<uint32_t>(data->untag()->packed_fields_));
2031 }
2032 }
2033
2034 private:
2036};
2037#endif // !DART_PRECOMPILED_RUNTIME
2038
2040 public:
2043
2044 void ReadAlloc(Deserializer* d) override {
2046 }
2047
2048 void ReadFill(Deserializer* d_) override {
2050
2051 ASSERT(!is_canonical()); // Never canonical.
2052 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2053 ClosureDataPtr data = static_cast<ClosureDataPtr>(d.Ref(id));
2054 Deserializer::InitializeHeader(data, kClosureDataCid,
2056 if (d_->kind() == Snapshot::kFullAOT) {
2057 data->untag()->context_scope_ = ContextScope::null();
2058 } else {
2059 data->untag()->context_scope_ =
2060 static_cast<ContextScopePtr>(d.ReadRef());
2061 }
2062 data->untag()->parent_function_ = static_cast<FunctionPtr>(d.ReadRef());
2063 data->untag()->closure_ = static_cast<ClosurePtr>(d.ReadRef());
2064 data->untag()->packed_fields_ = d.ReadUnsigned<uint32_t>();
2065 }
2066 }
2067};
2068
2069#if !defined(DART_PRECOMPILED_RUNTIME)
2071 public:
2074 "FfiTrampolineData",
2075 kFfiTrampolineDataCid,
2076 compiler::target::FfiTrampolineData::InstanceSize()) {}
2078
2079 void Trace(Serializer* s, ObjectPtr object) {
2080 FfiTrampolineDataPtr data = FfiTrampolineData::RawCast(object);
2081 objects_.Add(data);
2083 }
2084
2086 const intptr_t count = objects_.length();
2087 s->WriteUnsigned(count);
2088 for (intptr_t i = 0; i < count; i++) {
2089 s->AssignRef(objects_[i]);
2090 }
2091 }
2092
2094 const intptr_t count = objects_.length();
2095 for (intptr_t i = 0; i < count; i++) {
2096 FfiTrampolineDataPtr const data = objects_[i];
2099 s->Write<int32_t>(data->untag()->callback_id_);
2100 s->Write<uint8_t>(data->untag()->ffi_function_kind_);
2101 }
2102 }
2103
2104 private:
2106};
2107#endif // !DART_PRECOMPILED_RUNTIME
2108
2110 public:
2112 : DeserializationCluster("FfiTrampolineData") {}
2114
2115 void ReadAlloc(Deserializer* d) override {
2117 }
2118
2119 void ReadFill(Deserializer* d_) override {
2121
2122 ASSERT(!is_canonical()); // Never canonical.
2123 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2124 FfiTrampolineDataPtr data = static_cast<FfiTrampolineDataPtr>(d.Ref(id));
2125 Deserializer::InitializeHeader(data, kFfiTrampolineDataCid,
2127 d.ReadFromTo(data);
2128 data->untag()->callback_id_ = d.Read<int32_t>();
2129 data->untag()->ffi_function_kind_ = d.Read<uint8_t>();
2130 }
2131 }
2132};
2133
2134#if !defined(DART_PRECOMPILED_RUNTIME)
2136 public:
2138 : SerializationCluster("Field",
2139 kFieldCid,
2140 compiler::target::Field::InstanceSize()) {}
2142
2143 void Trace(Serializer* s, ObjectPtr object) {
2144 FieldPtr field = Field::RawCast(object);
2145 objects_.Add(field);
2146
2147 Snapshot::Kind kind = s->kind();
2148
2149 s->Push(field->untag()->name());
2150 s->Push(field->untag()->owner());
2151 s->Push(field->untag()->type());
2152 // Write out the initializer function
2153 s->Push(field->untag()->initializer_function());
2154
2155 if (kind != Snapshot::kFullAOT) {
2156 s->Push(field->untag()->guarded_list_length());
2157 }
2158 if (kind == Snapshot::kFullJIT) {
2159 s->Push(field->untag()->dependent_code());
2160 }
2161 // Write out either the initial static value or field offset.
2162 if (Field::StaticBit::decode(field->untag()->kind_bits_)) {
2163 s->Push(field->untag()->host_offset_or_field_id());
2164 } else {
2165 s->Push(Smi::New(Field::TargetOffsetOf(field)));
2166 }
2167 }
2168
2170 const intptr_t count = objects_.length();
2171 s->WriteUnsigned(count);
2172 for (intptr_t i = 0; i < count; i++) {
2173 FieldPtr field = objects_[i];
2174 s->AssignRef(field);
2175 }
2176 }
2177
2179 Snapshot::Kind kind = s->kind();
2180 const intptr_t count = objects_.length();
2181 for (intptr_t i = 0; i < count; i++) {
2182 FieldPtr field = objects_[i];
2183 AutoTraceObjectName(field, field->untag()->name());
2184
2185 WriteCompressedField(field, name);
2186 WriteCompressedField(field, owner);
2187 WriteCompressedField(field, type);
2188 // Write out the initializer function and initial value if not in AOT.
2189 WriteCompressedField(field, initializer_function);
2190 if (kind != Snapshot::kFullAOT) {
2191 WriteCompressedField(field, guarded_list_length);
2192 }
2193 if (kind == Snapshot::kFullJIT) {
2194 WriteCompressedField(field, dependent_code);
2195 }
2196
2197 if (kind != Snapshot::kFullAOT) {
2198 s->WriteTokenPosition(field->untag()->token_pos_);
2199 s->WriteTokenPosition(field->untag()->end_token_pos_);
2200 s->WriteCid(field->untag()->guarded_cid_);
2201 s->WriteCid(field->untag()->is_nullable_);
2202 s->Write<int8_t>(field->untag()->static_type_exactness_state_);
2203 s->Write<uint32_t>(field->untag()->kernel_offset_);
2204 }
2205 s->Write<uint16_t>(field->untag()->kind_bits_);
2206
2207 // Write out either the initial static value or field offset.
2208 if (Field::StaticBit::decode(field->untag()->kind_bits_)) {
2209 WriteFieldValue("id", field->untag()->host_offset_or_field_id());
2210 } else {
2212 }
2213 }
2214 }
2215
2216 private:
2217 GrowableArray<FieldPtr> objects_;
2218};
2219#endif // !DART_PRECOMPILED_RUNTIME
2220
2222 public:
2225
2226 void ReadAlloc(Deserializer* d) override {
2228 }
2229
2230 void ReadFill(Deserializer* d_) override {
2232
2233 ASSERT(!is_canonical()); // Never canonical.
2234#if !defined(DART_PRECOMPILED_RUNTIME)
2235 Snapshot::Kind kind = d_->kind();
2236#endif
2237 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2238 FieldPtr field = static_cast<FieldPtr>(d.Ref(id));
2240 d.ReadFromTo(field);
2241#if !defined(DART_PRECOMPILED_RUNTIME)
2242 ASSERT(d_->kind() != Snapshot::kFullAOT);
2243 field->untag()->guarded_list_length_ = static_cast<SmiPtr>(d.ReadRef());
2244 if (kind == Snapshot::kFullJIT) {
2245 field->untag()->dependent_code_ =
2246 static_cast<WeakArrayPtr>(d.ReadRef());
2247 }
2248 field->untag()->token_pos_ = d.ReadTokenPosition();
2249 field->untag()->end_token_pos_ = d.ReadTokenPosition();
2250 field->untag()->guarded_cid_ = d.ReadCid();
2251 field->untag()->is_nullable_ = d.ReadCid();
2252 const int8_t static_type_exactness_state = d.Read<int8_t>();
2253#if defined(TARGET_ARCH_X64)
2254 field->untag()->static_type_exactness_state_ =
2255 static_type_exactness_state;
2256#else
2257 // We might produce core snapshots using an X64 VM and then consume them
2258 // in an IA32 or ARM VM, in which case we simply ignore the static type
2259 // exactness state written into the snapshot, because non-X64 builds don't
2260 // have this feature enabled.
2261 // TODO(dartbug.com/34170) Support other architectures.
2262 USE(static_type_exactness_state);
2263 field->untag()->static_type_exactness_state_ =
2265#endif // defined(TARGET_ARCH_X64)
2266 field->untag()->kernel_offset_ = d.Read<uint32_t>();
2267#endif
2268 field->untag()->kind_bits_ = d.Read<uint16_t>();
2269
2270 field->untag()->host_offset_or_field_id_ =
2271 static_cast<SmiPtr>(d.ReadRef());
2272#if !defined(DART_PRECOMPILED_RUNTIME)
2273 field->untag()->target_offset_ =
2274 Smi::Value(field->untag()->host_offset_or_field_id());
2275#endif // !defined(DART_PRECOMPILED_RUNTIME)
2276 }
2277 }
2278
2279 void PostLoad(Deserializer* d, const Array& refs) override {
2280 Field& field = Field::Handle(d->zone());
2281 if (!IsolateGroup::Current()->use_field_guards()) {
2282 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
2283 field ^= refs.At(i);
2285 field.set_is_nullable_unsafe(true);
2291 }
2292 } else {
2293 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
2294 field ^= refs.At(i);
2295 field.InitializeGuardedListLengthInObjectOffset(/*unsafe=*/true);
2296 }
2297 }
2298 }
2299};
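// --- Illustrative sketch; not part of the original source. ---
// The pattern used above for static_type_exactness_state_: the byte is always
// read from the stream so the stream stays in sync across configurations, but
// it is only kept when the consuming build supports the feature. The
// parameters here (supports_feature, not_tracking_value) are made up for
// illustration and stand in for the TARGET_ARCH_X64 check and the
// "not tracking" encoding.
static int8_t SketchReadExactnessState(const int8_t* stream,
                                       intptr_t* cursor,
                                       bool supports_feature,
                                       int8_t not_tracking_value) {
  const int8_t value = stream[(*cursor)++];  // Always consume the byte.
  return supports_feature ? value : not_tracking_value;
}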
2300
2301#if !defined(DART_PRECOMPILED_RUNTIME)
2303 public:
2305 : SerializationCluster("Script",
2306 kScriptCid,
2307 compiler::target::Script::InstanceSize()) {}
2309
2310 void Trace(Serializer* s, ObjectPtr object) {
2311 ScriptPtr script = Script::RawCast(object);
2312 objects_.Add(script);
2313 auto* from = script->untag()->from();
2314 auto* to = script->untag()->to_snapshot(s->kind());
2315 for (auto* p = from; p <= to; p++) {
2316 const intptr_t offset =
2317 reinterpret_cast<uword>(p) - reinterpret_cast<uword>(script->untag());
2318 const ObjectPtr obj = p->Decompress(script->heap_base());
2320 // Line starts are delta encoded.
2321 s->Push(obj, kDeltaEncodedTypedDataCid);
2322 } else {
2323 s->Push(obj);
2324 }
2325 }
2326 }
2327
2329 const intptr_t count = objects_.length();
2330 s->WriteUnsigned(count);
2331 for (intptr_t i = 0; i < count; i++) {
2332 ScriptPtr script = objects_[i];
2333 s->AssignRef(script);
2334 }
2335 }
2336
2338 const intptr_t count = objects_.length();
2339 for (intptr_t i = 0; i < count; i++) {
2340 ScriptPtr script = objects_[i];
2341 AutoTraceObjectName(script, script->untag()->url());
2343 if (s->kind() != Snapshot::kFullAOT) {
2344 // Clear out the max position cache in snapshots to ensure no
2345 // differences in the snapshot due to triggering caching vs. not.
2346 int32_t written_flags =
2348 0, script->untag()->flags_and_max_position_);
2350 false, written_flags);
2351 s->Write<int32_t>(written_flags);
2352 }
2353 s->Write<int32_t>(script->untag()->kernel_script_index_);
2354 }
2355 }
2356
2357 private:
2358 GrowableArray<ScriptPtr> objects_;
2359};
2360#endif // !DART_PRECOMPILED_RUNTIME
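// --- Illustrative sketch; not part of the original source. ---
// "Line starts are delta encoded" (ScriptSerializationCluster::Trace above):
// a sorted list of positions is stored as differences between neighbours,
// which keeps the values small for the variable-length unsigned encoding used
// by the snapshot stream. Decoding is the running sum of the deltas.
static void SketchDeltaEncode(const intptr_t* line_starts,
                              intptr_t length,
                              intptr_t* deltas) {
  intptr_t previous = 0;
  for (intptr_t i = 0; i < length; i++) {
    deltas[i] = line_starts[i] - previous;  // Non-negative for sorted input.
    previous = line_starts[i];
  }
}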
2361
2363 public:
2366
2367 void ReadAlloc(Deserializer* d) override {
2369 }
2370
2371 void ReadFill(Deserializer* d_) override {
2373
2374 ASSERT(!is_canonical()); // Never canonical.
2375 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2376 ScriptPtr script = static_cast<ScriptPtr>(d.Ref(id));
2379 d.ReadFromTo(script);
2380#if !defined(DART_PRECOMPILED_RUNTIME)
2381 script->untag()->flags_and_max_position_ = d.Read<int32_t>();
2382#endif
2383 script->untag()->kernel_script_index_ = d.Read<int32_t>();
2384 script->untag()->load_timestamp_ = 0;
2385 }
2386 }
2387};
2388
2389#if !defined(DART_PRECOMPILED_RUNTIME)
2391 public:
2393 : SerializationCluster("Library",
2394 kLibraryCid,
2395 compiler::target::Library::InstanceSize()) {}
2397
2398 void Trace(Serializer* s, ObjectPtr object) {
2399 LibraryPtr lib = Library::RawCast(object);
2400 objects_.Add(lib);
2401 PushFromTo(lib);
2402 }
2403
2405 const intptr_t count = objects_.length();
2406 s->WriteUnsigned(count);
2407 for (intptr_t i = 0; i < count; i++) {
2408 LibraryPtr lib = objects_[i];
2409 s->AssignRef(lib);
2410 }
2411 }
2412
2414 const intptr_t count = objects_.length();
2415 for (intptr_t i = 0; i < count; i++) {
2416 LibraryPtr lib = objects_[i];
2417 AutoTraceObjectName(lib, lib->untag()->url());
2418 WriteFromTo(lib);
2419 s->Write<int32_t>(lib->untag()->index_);
2420 s->Write<uint16_t>(lib->untag()->num_imports_);
2421 s->Write<int8_t>(lib->untag()->load_state_);
2422 s->Write<uint8_t>(lib->untag()->flags_);
2423 if (s->kind() != Snapshot::kFullAOT) {
2424 s->Write<uint32_t>(lib->untag()->kernel_library_index_);
2425 }
2426 }
2427 }
2428
2429 private:
2431};
2432#endif // !DART_PRECOMPILED_RUNTIME
2433
2435 public:
2438
2439 void ReadAlloc(Deserializer* d) override {
2441 }
2442
2443 void ReadFill(Deserializer* d_) override {
2445
2446 ASSERT(!is_canonical()); // Never canonical.
2447 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2448 LibraryPtr lib = static_cast<LibraryPtr>(d.Ref(id));
2450 d.ReadFromTo(lib);
2451 lib->untag()->native_entry_resolver_ = nullptr;
2452 lib->untag()->native_entry_symbol_resolver_ = nullptr;
2453 lib->untag()->ffi_native_resolver_ = nullptr;
2454 lib->untag()->index_ = d.Read<int32_t>();
2455 lib->untag()->num_imports_ = d.Read<uint16_t>();
2456 lib->untag()->load_state_ = d.Read<int8_t>();
2457 lib->untag()->flags_ =
2458 UntaggedLibrary::InFullSnapshotBit::update(true, d.Read<uint8_t>());
2459#if !defined(DART_PRECOMPILED_RUNTIME)
2460 ASSERT(d_->kind() != Snapshot::kFullAOT);
2461 lib->untag()->kernel_library_index_ = d.Read<uint32_t>();
2462#endif
2463 }
2464 }
2465};
2466
2467#if !defined(DART_PRECOMPILED_RUNTIME)
2469 public:
2471 : SerializationCluster("Namespace",
2472 kNamespaceCid,
2473 compiler::target::Namespace::InstanceSize()) {}
2475
2476 void Trace(Serializer* s, ObjectPtr object) {
2477 NamespacePtr ns = Namespace::RawCast(object);
2478 objects_.Add(ns);
2479 PushFromTo(ns);
2480 }
2481
2483 const intptr_t count = objects_.length();
2484 s->WriteUnsigned(count);
2485 for (intptr_t i = 0; i < count; i++) {
2486 NamespacePtr ns = objects_[i];
2487 s->AssignRef(ns);
2488 }
2489 }
2490
2492 const intptr_t count = objects_.length();
2493 for (intptr_t i = 0; i < count; i++) {
2494 NamespacePtr ns = objects_[i];
2495 AutoTraceObject(ns);
2496 WriteFromTo(ns);
2497 }
2498 }
2499
2500 private:
2502};
2503#endif // !DART_PRECOMPILED_RUNTIME
2504
2506 public:
2509
2510 void ReadAlloc(Deserializer* d) override {
2512 }
2513
2514 void ReadFill(Deserializer* d_) override {
2516
2517 ASSERT(!is_canonical()); // Never canonical.
2518 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2519 NamespacePtr ns = static_cast<NamespacePtr>(d.Ref(id));
2520 Deserializer::InitializeHeader(ns, kNamespaceCid,
2522 d.ReadFromTo(ns);
2523 }
2524 }
2525};
2526
2527#if !defined(DART_PRECOMPILED_RUNTIME)
2528// KernelProgramInfo objects are not written into a full AOT snapshot.
2530 public:
2533 "KernelProgramInfo",
2534 kKernelProgramInfoCid,
2535 compiler::target::KernelProgramInfo::InstanceSize()) {}
2537
2538 void Trace(Serializer* s, ObjectPtr object) {
2539 KernelProgramInfoPtr info = KernelProgramInfo::RawCast(object);
2540 objects_.Add(info);
2542 }
2543
2545 const intptr_t count = objects_.length();
2546 s->WriteUnsigned(count);
2547 for (intptr_t i = 0; i < count; i++) {
2548 KernelProgramInfoPtr info = objects_[i];
2549 s->AssignRef(info);
2550 }
2551 }
2552
2554 const intptr_t count = objects_.length();
2555 for (intptr_t i = 0; i < count; i++) {
2556 KernelProgramInfoPtr info = objects_[i];
2559 }
2560 }
2561
2562 private:
2564};
2565
2566// Since KernelProgramInfo objects are not written into full AOT snapshots,
2567// one will never need to read them from a full AOT snapshot.
2569 public:
2571 : DeserializationCluster("KernelProgramInfo") {}
2573
2574 void ReadAlloc(Deserializer* d) override {
2576 }
2577
2578 void ReadFill(Deserializer* d_) override {
2580
2581 ASSERT(!is_canonical()); // Never canonical.
2582 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2583 KernelProgramInfoPtr info = static_cast<KernelProgramInfoPtr>(d.Ref(id));
2584 Deserializer::InitializeHeader(info, kKernelProgramInfoCid,
2586 d.ReadFromTo(info);
2587 }
2588 }
2589
2590 void PostLoad(Deserializer* d, const Array& refs) override {
2591 Array& array = Array::Handle(d->zone());
2593 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2594 info ^= refs.At(id);
2595 array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld);
2596 info.set_libraries_cache(array);
2597 array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld);
2598 info.set_classes_cache(array);
2599 }
2600 }
2601};
2602
2604 public:
2606 : SerializationCluster("Code", kCodeCid), array_(Array::Handle()) {}
2608
2609 void Trace(Serializer* s, ObjectPtr object) {
2610 CodePtr code = Code::RawCast(object);
2611
2612 const bool is_deferred = !s->InCurrentLoadingUnitOrRoot(code);
2613 if (is_deferred) {
2614 s->RecordDeferredCode(code);
2615 } else {
2616 objects_.Add(code);
2617 }
2618
2619 // Even if this code object is itself deferred we still need to scan
2620 // the pool for references to other code objects (which might reside
2621 // in the current loading unit).
2622 ObjectPoolPtr pool = code->untag()->object_pool_;
2623 if (s->kind() == Snapshot::kFullAOT) {
2624 TracePool(s, pool, /*only_call_targets=*/is_deferred);
2625 } else {
2626 if (s->InCurrentLoadingUnitOrRoot(pool)) {
2627 s->Push(pool);
2628 } else {
2629 TracePool(s, pool, /*only_call_targets=*/true);
2630 }
2631 }
2632
2633 if (s->kind() == Snapshot::kFullJIT) {
2634 s->Push(code->untag()->deopt_info_array_);
2635 s->Push(code->untag()->static_calls_target_table_);
2636 s->Push(code->untag()->compressed_stackmaps_);
2637 } else if (s->kind() == Snapshot::kFullAOT) {
2638 // Note: we don't trace compressed_stackmaps_ because we are going to emit
2639 // a separate mapping table into RO data which is not going to be a real
2640 // heap object.
2641#if defined(DART_PRECOMPILER)
2642 auto const calls_array = code->untag()->static_calls_target_table_;
2643 if (calls_array != Array::null()) {
2644 // Some Code entries in the static calls target table may only be
2645 // reachable through this table, so push the Code objects.
2646 array_ = calls_array;
2647 for (auto entry : StaticCallsTable(array_)) {
2648 auto kind = Code::KindField::decode(
2650 switch (kind) {
2651 case Code::kCallViaCode:
2652 // Code object in the pool.
2653 continue;
2655 // TTS will be reachable through type object which itself is
2656 // in the pool.
2657 continue;
2660 auto destination = entry.Get<Code::kSCallTableCodeOrTypeTarget>();
2661 ASSERT(destination->IsHeapObject() && destination->IsCode());
2662 s->Push(destination);
2663 }
2664 }
2665 }
2666#else
2667 UNREACHABLE();
2668#endif
2669 }
2670
2671 if (Code::IsDiscarded(code)) {
2672 ASSERT(s->kind() == Snapshot::kFullAOT && FLAG_dwarf_stack_traces_mode &&
2673 !FLAG_retain_code_objects);
2674 // Only object pool and static call table entries and the compressed
2675 // stack maps should be pushed.
2676 return;
2677 }
2678
2679 s->Push(code->untag()->owner_);
2680 s->Push(code->untag()->exception_handlers_);
2681 s->Push(code->untag()->pc_descriptors_);
2682 s->Push(code->untag()->catch_entry_);
2683 if (!FLAG_precompiled_mode || !FLAG_dwarf_stack_traces_mode) {
2684 s->Push(code->untag()->inlined_id_to_function_);
2685 if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) {
2686 s->Push(code->untag()->code_source_map_);
2687 }
2688 }
2689#if !defined(PRODUCT)
2690 s->Push(code->untag()->return_address_metadata_);
2691 if (FLAG_code_comments) {
2692 s->Push(code->untag()->comments_);
2693 }
2694#endif
2695 }
2696
2697 void TracePool(Serializer* s, ObjectPoolPtr pool, bool only_call_targets) {
2698 if (pool == ObjectPool::null()) {
2699 return;
2700 }
2701
2702 const intptr_t length = pool->untag()->length_;
2703 uint8_t* entry_bits = pool->untag()->entry_bits();
2704 for (intptr_t i = 0; i < length; i++) {
2705 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
2706 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
2707 const ObjectPtr target = pool->untag()->data()[i].raw_obj_;
2708 // A field is a call target because its initializer may be called
2709 // indirectly by passing the field to the runtime. A const closure
2710 // is a call target because its function may be called indirectly
2711 // via a closure call.
2712 intptr_t cid = target->GetClassIdMayBeSmi();
2713 if (!only_call_targets || (cid == kCodeCid) || (cid == kFunctionCid) ||
2714 (cid == kFieldCid) || (cid == kClosureCid)) {
2715 s->Push(target);
2716 } else if (cid >= kNumPredefinedCids) {
2717 s->Push(s->isolate_group()->class_table()->At(cid));
2718 }
2719 }
2720 }
2721 }
2722
2723 struct CodeOrderInfo {
2724 CodePtr code;
2725 intptr_t not_discarded; // 1 if this code was not discarded and
2726 // 0 otherwise.
2727 intptr_t instructions_id;
2728 };
2729
2730 // We sort code objects so that those sharing the same instructions are
2731 // grouped together, and so that all instructions without associated Code
2732 // objects come first in the code section. The InstructionsTable encoding
2733 // assumes that all instructions with non-discarded Code objects are
2734 // grouped at the end.
2735 //
2736 // Note that in AOT mode we expect that all Code objects pointing to
2737 // the same instructions are deduplicated, as in bare instructions mode
2738 // there is no way to identify which specific Code object (out of those
2739 // which point to the specific instructions range) actually corresponds
2740 // to a particular frame.
2741 static int CompareCodeOrderInfo(CodeOrderInfo const* a,
2742 CodeOrderInfo const* b) {
2743 if (a->not_discarded < b->not_discarded) return -1;
2744 if (a->not_discarded > b->not_discarded) return 1;
2745 if (a->instructions_id < b->instructions_id) return -1;
2746 if (a->instructions_id > b->instructions_id) return 1;
2747 return 0;
2748 }
2749
2750 static void Insert(Serializer* s,
2751 GrowableArray<CodeOrderInfo>* order_list,
2752 IntMap<intptr_t>* order_map,
2753 CodePtr code) {
2754 InstructionsPtr instr = code->untag()->instructions_;
2755 intptr_t key = static_cast<intptr_t>(instr);
2756 intptr_t instructions_id = 0;
2757
2758 if (order_map->HasKey(key)) {
2759 // We are expected to merge code objects which point to the same
2760 // instructions in the precompiled mode.
2761 RELEASE_ASSERT(!FLAG_precompiled_mode);
2762 instructions_id = order_map->Lookup(key);
2763 } else {
2764 instructions_id = order_map->Length() + 1;
2765 order_map->Insert(key, instructions_id);
2766 }
2767 CodeOrderInfo info;
2768 info.code = code;
2769 info.instructions_id = instructions_id;
2770 info.not_discarded = Code::IsDiscarded(code) ? 0 : 1;
2771 order_list->Add(info);
2772 }
2773
2774 static void Sort(Serializer* s, GrowableArray<CodePtr>* codes) {
2775 GrowableArray<CodeOrderInfo> order_list;
2776 IntMap<intptr_t> order_map;
2777 for (intptr_t i = 0; i < codes->length(); i++) {
2778 Insert(s, &order_list, &order_map, (*codes)[i]);
2779 }
2780 order_list.Sort(CompareCodeOrderInfo);
2781 ASSERT(order_list.length() == codes->length());
2782 for (intptr_t i = 0; i < order_list.length(); i++) {
2783 (*codes)[i] = order_list[i].code;
2784 }
2785 }
2786
2787 static void Sort(Serializer* s, GrowableArray<Code*>* codes) {
2788 GrowableArray<CodeOrderInfo> order_list;
2789 IntMap<intptr_t> order_map;
2790 for (intptr_t i = 0; i < codes->length(); i++) {
2791 Insert(s, &order_list, &order_map, (*codes)[i]->ptr());
2792 }
2793 order_list.Sort(CompareCodeOrderInfo);
2794 ASSERT(order_list.length() == codes->length());
2795 for (intptr_t i = 0; i < order_list.length(); i++) {
2796 *(*codes)[i] = order_list[i].code;
2797 }
2798 }
2799
2800 intptr_t NonDiscardedCodeCount() {
2801 intptr_t count = 0;
2802 for (auto code : objects_) {
2803 if (!Code::IsDiscarded(code)) {
2804 count++;
2805 }
2806 }
2807 return count;
2808 }
2809
2811 const intptr_t non_discarded_count = NonDiscardedCodeCount();
2812 const intptr_t count = objects_.length();
2813 ASSERT(count == non_discarded_count || (s->kind() == Snapshot::kFullAOT));
2814
2815 first_ref_ = s->next_ref_index();
2816 s->WriteUnsigned(non_discarded_count);
2817 for (auto code : objects_) {
2818 if (!Code::IsDiscarded(code)) {
2819 WriteAlloc(s, code);
2820 } else {
2821 // Mark discarded code unreachable, so that we can later
2822 // assign artificial references to it.
2823 s->heap()->SetObjectId(code, kUnreachableReference);
2824 }
2825 }
2826
2827 s->WriteUnsigned(deferred_objects_.length());
2828 first_deferred_ref_ = s->next_ref_index();
2829 for (auto code : deferred_objects_) {
2831 WriteAlloc(s, code);
2832 }
2833 last_ref_ = s->next_ref_index() - 1;
2834 }
2835
2836 void WriteAlloc(Serializer* s, CodePtr code) {
2838 s->AssignRef(code);
2840 const int32_t state_bits = code->untag()->state_bits_;
2841 s->Write<int32_t>(state_bits);
2843 }
2844
2846 Snapshot::Kind kind = s->kind();
2847 const intptr_t count = objects_.length();
2848 for (intptr_t i = 0; i < count; i++) {
2849 CodePtr code = objects_[i];
2850#if defined(DART_PRECOMPILER)
2851 if (FLAG_write_v8_snapshot_profile_to != nullptr &&
2853 s->CreateArtificialNodeIfNeeded(code);
2854 }
2855#endif
2856 // Note: for discarded code this function will not write anything out;
2857 // it is only called to record information for the snapshot profile.
2858 WriteFill(s, kind, code, /*deferred=*/false);
2859 }
2860 const intptr_t deferred_count = deferred_objects_.length();
2861 for (intptr_t i = 0; i < deferred_count; i++) {
2862 CodePtr code = deferred_objects_[i];
2863 WriteFill(s, kind, code, /*deferred=*/true);
2864 }
2865 }
2866
2868 Snapshot::Kind kind,
2869 CodePtr code,
2870 bool deferred) {
2871 const intptr_t bytes_written = s->bytes_written();
2873
2874 intptr_t pointer_offsets_length =
2875 Code::PtrOffBits::decode(code->untag()->state_bits_);
2876 if (pointer_offsets_length != 0) {
2877 FATAL("Cannot serialize code with embedded pointers");
2878 }
2879 if (kind == Snapshot::kFullAOT && Code::IsDisabled(code)) {
2880 // Disabled code is fatal in AOT since we cannot recompile.
2881 s->UnexpectedObject(code, "Disabled code");
2882 }
2883
2884 s->WriteInstructions(code->untag()->instructions_,
2885 code->untag()->unchecked_offset_, code, deferred);
2886 if (kind == Snapshot::kFullJIT) {
2887 // TODO(rmacnak): Fix references to disabled code before serializing.
2888 // For now, we may write the FixCallersTarget or equivalent stub. This
2889 // will cause a fixup if this code is called.
2890 const uint32_t active_unchecked_offset =
2891 code->untag()->unchecked_entry_point_ - code->untag()->entry_point_;
2892 s->WriteInstructions(code->untag()->active_instructions_,
2893 active_unchecked_offset, code, deferred);
2894 }
2895
2896#if defined(DART_PRECOMPILER)
2897 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
2898 // If we are writing V8 snapshot profile then attribute references going
2899 // through the object pool and static calls to the code object itself.
2900 if (kind == Snapshot::kFullAOT &&
2901 code->untag()->object_pool_ != ObjectPool::null()) {
2902 ObjectPoolPtr pool = code->untag()->object_pool_;
2903 // Non-empty per-code object pools should not be reachable in this mode.
2904 ASSERT(!s->HasRef(pool) || pool == Object::empty_object_pool().ptr());
2905 s->CreateArtificialNodeIfNeeded(pool);
2906 s->AttributePropertyRef(pool, "object_pool_");
2907 }
2908 if (kind != Snapshot::kFullJIT &&
2909 code->untag()->static_calls_target_table_ != Array::null()) {
2910 auto const table = code->untag()->static_calls_target_table_;
2911 // Non-empty static call target tables shouldn't be reachable in this
2912 // mode.
2913 ASSERT(!s->HasRef(table) || table == Object::empty_array().ptr());
2914 s->CreateArtificialNodeIfNeeded(table);
2915 s->AttributePropertyRef(table, "static_calls_target_table_");
2916 }
2917 }
2918#endif // defined(DART_PRECOMPILER)
2919
2920 if (Code::IsDiscarded(code)) {
2921 // No bytes should be written to represent this code.
2922 ASSERT(s->bytes_written() == bytes_written);
2923 // Only write instructions, compressed stackmaps and state bits
2924 // for the discarded Code objects.
2925 ASSERT(kind == Snapshot::kFullAOT && FLAG_dwarf_stack_traces_mode &&
2926 !FLAG_retain_code_objects);
2927#if defined(DART_PRECOMPILER)
2928 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
2929 // Keep the owner as a (possibly artificial) node for snapshot analysis.
2930 const auto& owner = code->untag()->owner_;
2931 s->CreateArtificialNodeIfNeeded(owner);
2932 s->AttributePropertyRef(owner, "owner_");
2933 }
2934#endif
2935 return;
2936 }
2937
2938 // No need to write object pool out if we are producing full AOT
2939 // snapshot with bare instructions.
2940 if (kind != Snapshot::kFullAOT) {
2941 if (s->InCurrentLoadingUnitOrRoot(code->untag()->object_pool_)) {
2942 WriteField(code, object_pool_);
2943 } else {
2944 WriteFieldValue(object_pool_, ObjectPool::null());
2945 }
2946 }
2947 WriteField(code, owner_);
2948 WriteField(code, exception_handlers_);
2949 WriteField(code, pc_descriptors_);
2950 WriteField(code, catch_entry_);
2951 if (s->kind() == Snapshot::kFullJIT) {
2952 WriteField(code, compressed_stackmaps_);
2953 }
2954 if (FLAG_precompiled_mode && FLAG_dwarf_stack_traces_mode) {
2955 WriteFieldValue(inlined_id_to_function_, Array::null());
2956 WriteFieldValue(code_source_map_, CodeSourceMap::null());
2957 } else {
2958 WriteField(code, inlined_id_to_function_);
2959 if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) {
2960 WriteField(code, code_source_map_);
2961 } else {
2962 WriteFieldValue(code_source_map_, CodeSourceMap::null());
2963 }
2964 }
2965 if (kind == Snapshot::kFullJIT) {
2966 WriteField(code, deopt_info_array_);
2967 WriteField(code, static_calls_target_table_);
2968 }
2969
2970#if !defined(PRODUCT)
2971 WriteField(code, return_address_metadata_);
2972 if (FLAG_code_comments) {
2973 WriteField(code, comments_);
2974 }
2975#endif
2976 }
2977
2978 GrowableArray<CodePtr>* objects() { return &objects_; }
2979 GrowableArray<CodePtr>* deferred_objects() { return &deferred_objects_; }
2980
2981 static const char* MakeDisambiguatedCodeName(Serializer* s, CodePtr c) {
2982 if (s->profile_writer() == nullptr) {
2983 return nullptr;
2984 }
2985
2986 REUSABLE_CODE_HANDLESCOPE(s->thread());
2987 Code& code = reused_code_handle.Handle();
2988 code = c;
2989 return code.QualifiedName(
2991 Object::NameVisibility::kInternalName));
2992 }
2993
2994 intptr_t first_ref() const { return first_ref_; }
2995 intptr_t first_deferred_ref() const { return first_deferred_ref_; }
2996 intptr_t last_ref() const { return last_ref_; }
2997
2998 private:
2999 intptr_t first_ref_;
3000 intptr_t first_deferred_ref_;
3001 intptr_t last_ref_;
3002 GrowableArray<CodePtr> objects_;
3003 GrowableArray<CodePtr> deferred_objects_;
3004 Array& array_;
3005};
3006#endif // !DART_PRECOMPILED_RUNTIME
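// --- Illustrative sketch; not part of the original source. ---
// The ordering invariant described before CompareCodeOrderInfo above, checked
// on plain data: after sorting, all entries without a retained Code object
// come first, and within each group entries sharing an instructions id stay
// adjacent because ids are non-decreasing.
struct SketchCodeOrder {
  int not_discarded;    // 0 for discarded code, 1 otherwise.
  int instructions_id;  // Equal ids mean the same instructions.
};
static bool SketchIsValidCodeOrder(const SketchCodeOrder* entries,
                                   intptr_t length) {
  for (intptr_t i = 1; i < length; i++) {
    if (entries[i - 1].not_discarded > entries[i].not_discarded) {
      return false;  // A discarded entry appeared after a retained one.
    }
    if (entries[i - 1].not_discarded == entries[i].not_discarded &&
        entries[i - 1].instructions_id > entries[i].instructions_id) {
      return false;  // Ids interleave within the same group.
    }
  }
  return true;
}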
3007
3009 public:
3012
3013 void ReadAlloc(Deserializer* d) override {
3014 start_index_ = d->next_index();
3015 d->set_code_start_index(start_index_);
3016 const intptr_t count = d->ReadUnsigned();
3017 for (intptr_t i = 0; i < count; i++) {
3019 }
3020 stop_index_ = d->next_index();
3021 d->set_code_stop_index(stop_index_);
3022 deferred_start_index_ = d->next_index();
3023 const intptr_t deferred_count = d->ReadUnsigned();
3024 for (intptr_t i = 0; i < deferred_count; i++) {
3026 }
3027 deferred_stop_index_ = d->next_index();
3028 }
3029
3031 const int32_t state_bits = d->Read<int32_t>();
3032 ASSERT(!Code::DiscardedBit::decode(state_bits));
3033 auto code = static_cast<CodePtr>(d->Allocate(Code::InstanceSize(0)));
3034 d->AssignRef(code);
3035 code->untag()->state_bits_ = state_bits;
3036 }
3037
3038 void ReadFill(Deserializer* d) override {
3039 ASSERT(!is_canonical()); // Never canonical.
3041#if defined(DART_PRECOMPILED_RUNTIME)
3042 ReadFill(d, deferred_start_index_, deferred_stop_index_, true);
3043#else
3044 ASSERT(deferred_start_index_ == deferred_stop_index_);
3045#endif
3046 }
3047
3049 intptr_t start_index,
3050 intptr_t stop_index,
3051 bool deferred) {
3052 for (intptr_t id = start_index, n = stop_index; id < n; id++) {
3053 auto const code = static_cast<CodePtr>(d->Ref(id));
3054
3056
3059
3060 d->ReadInstructions(code, deferred);
3061
3062#if !defined(DART_PRECOMPILED_RUNTIME)
3063 ASSERT(d->kind() == Snapshot::kFullJIT);
3064 code->untag()->object_pool_ = static_cast<ObjectPoolPtr>(d->ReadRef());
3065#else
3066 ASSERT(d->kind() == Snapshot::kFullAOT);
3067 // There is a single global pool.
3068 code->untag()->object_pool_ = ObjectPool::null();
3069#endif
3070 code->untag()->owner_ = d->ReadRef();
3071 code->untag()->exception_handlers_ =
3072 static_cast<ExceptionHandlersPtr>(d->ReadRef());
3073 code->untag()->pc_descriptors_ =
3074 static_cast<PcDescriptorsPtr>(d->ReadRef());
3075 code->untag()->catch_entry_ = d->ReadRef();
3076#if !defined(DART_PRECOMPILED_RUNTIME)
3077 ASSERT(d->kind() == Snapshot::kFullJIT);
3078 code->untag()->compressed_stackmaps_ =
3079 static_cast<CompressedStackMapsPtr>(d->ReadRef());
3080#else
3081 ASSERT(d->kind() == Snapshot::kFullAOT);
3082 code->untag()->compressed_stackmaps_ = CompressedStackMaps::null();
3083#endif
3084 code->untag()->inlined_id_to_function_ =
3085 static_cast<ArrayPtr>(d->ReadRef());
3086 code->untag()->code_source_map_ =
3087 static_cast<CodeSourceMapPtr>(d->ReadRef());
3088
3089#if !defined(DART_PRECOMPILED_RUNTIME)
3090 ASSERT(d->kind() == Snapshot::kFullJIT);
3091 code->untag()->deopt_info_array_ = static_cast<ArrayPtr>(d->ReadRef());
3092 code->untag()->static_calls_target_table_ =
3093 static_cast<ArrayPtr>(d->ReadRef());
3094#endif // !DART_PRECOMPILED_RUNTIME
3095
3096#if !defined(PRODUCT)
3097 code->untag()->return_address_metadata_ = d->ReadRef();
3098 code->untag()->var_descriptors_ = LocalVarDescriptors::null();
3099 code->untag()->comments_ = FLAG_code_comments
3100 ? static_cast<ArrayPtr>(d->ReadRef())
3101 : Array::null();
3102 code->untag()->compile_timestamp_ = 0;
3103#endif
3104 }
3105 }
3106
3107 void PostLoad(Deserializer* d, const Array& refs) override {
3108 d->EndInstructions();
3109
3110#if !defined(PRODUCT)
3111 if (!CodeObservers::AreActive() && !FLAG_support_disassembler) return;
3112#endif
3113 Code& code = Code::Handle(d->zone());
3114#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
3115 Object& owner = Object::Handle(d->zone());
3116#endif
3117 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3118 code ^= refs.At(id);
3119#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(PRODUCT)
3121 Code::NotifyCodeObservers(code, code.is_optimized());
3122 }
3123#endif
3124#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
3125 owner = code.owner();
3126 if (owner.IsFunction()) {
3127 if ((FLAG_disassemble ||
3128 (code.is_optimized() && FLAG_disassemble_optimized)) &&
3129 compiler::PrintFilter::ShouldPrint(Function::Cast(owner))) {
3130 Disassembler::DisassembleCode(Function::Cast(owner), code,
3131 code.is_optimized());
3132 }
3133 } else if (FLAG_disassemble_stubs) {
3135 }
3136#endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
3137 }
3138 }
3139
3140 private:
3141 intptr_t deferred_start_index_;
3142 intptr_t deferred_stop_index_;
3143};
3144
3145#if !defined(DART_PRECOMPILED_RUNTIME)
3147 public:
3149 : SerializationCluster("ObjectPool", kObjectPoolCid) {}
3151
3152 void Trace(Serializer* s, ObjectPtr object) {
3153 ObjectPoolPtr pool = ObjectPool::RawCast(object);
3154 objects_.Add(pool);
3155
3156 if (s->kind() != Snapshot::kFullAOT) {
3157 const intptr_t length = pool->untag()->length_;
3158 uint8_t* entry_bits = pool->untag()->entry_bits();
3159 for (intptr_t i = 0; i < length; i++) {
3160 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
3161 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
3162 s->Push(pool->untag()->data()[i].raw_obj_);
3163 }
3164 }
3165 }
3166 }
3167
3169 const intptr_t count = objects_.length();
3170 s->WriteUnsigned(count);
3171 for (intptr_t i = 0; i < count; i++) {
3172 ObjectPoolPtr pool = objects_[i];
3173 s->AssignRef(pool);
3175 const intptr_t length = pool->untag()->length_;
3176 s->WriteUnsigned(length);
3178 }
3179 }
3180
3182 bool weak = s->kind() == Snapshot::kFullAOT;
3183
3184 const intptr_t count = objects_.length();
3185 for (intptr_t i = 0; i < count; i++) {
3186 ObjectPoolPtr pool = objects_[i];
3188 const intptr_t length = pool->untag()->length_;
3189 s->WriteUnsigned(length);
3190 uint8_t* entry_bits = pool->untag()->entry_bits();
3191 for (intptr_t j = 0; j < length; j++) {
3192 UntaggedObjectPool::Entry& entry = pool->untag()->data()[j];
3193 uint8_t bits = entry_bits[j];
3195 auto snapshot_behavior = ObjectPool::SnapshotBehaviorBits::decode(bits);
3196 ASSERT(snapshot_behavior !=
3197 ObjectPool::SnapshotBehavior::kNotSnapshotable);
3198 s->Write<uint8_t>(bits);
3199 if (snapshot_behavior != ObjectPool::SnapshotBehavior::kSnapshotable) {
3200 // The deserializer will reset this to a specific value, no need to
3201 // write anything.
3202 continue;
3203 }
3204 switch (type) {
3205 case ObjectPool::EntryType::kTaggedObject: {
3206 if (weak && !s->HasRef(entry.raw_obj_)) {
3207 // Any value will do, but null has the shortest id.
3208 s->WriteElementRef(Object::null(), j);
3209 } else {
3210 s->WriteElementRef(entry.raw_obj_, j);
3211 }
3212 break;
3213 }
3214 case ObjectPool::EntryType::kImmediate: {
3215 s->Write<intptr_t>(entry.raw_value_);
3216 break;
3217 }
3218 case ObjectPool::EntryType::kNativeFunction: {
3219 // Write nothing. Will initialize with the lazy link entry.
3220 break;
3221 }
3222 default:
3223 UNREACHABLE();
3224 }
3225 }
3226 }
3227 }
3228
3229 private:
3231};
3232#endif // !DART_PRECOMPILED_RUNTIME
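// --- Illustrative sketch; not part of the original source. ---
// ObjectPool keeps one metadata byte per entry combining the entry type,
// patchability and snapshot behavior (decoded above via ObjectPool::TypeBits
// and ObjectPool::SnapshotBehaviorBits). A minimal model of that packing;
// the bit layout here is made up and does not match the real BitField
// definitions.
static uint8_t SketchEncodePoolEntryBits(uint8_t type,  // 0..7
                                         bool patchable,
                                         uint8_t snapshot_behavior) {  // 0..15
  return static_cast<uint8_t>((type & 0x7) | (patchable ? 0x8 : 0x0) |
                              ((snapshot_behavior & 0xF) << 4));
}
static uint8_t SketchDecodePoolEntryType(uint8_t bits) {
  return bits & 0x7;
}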
3233
3235 public:
3238
3239 void ReadAlloc(Deserializer* d) override {
3240 start_index_ = d->next_index();
3241 const intptr_t count = d->ReadUnsigned();
3242 for (intptr_t i = 0; i < count; i++) {
3243 const intptr_t length = d->ReadUnsigned();
3244 d->AssignRef(d->Allocate(ObjectPool::InstanceSize(length)));
3245 }
3246 stop_index_ = d->next_index();
3247 }
3248
3249 void ReadFill(Deserializer* d_) override {
3251
3252 ASSERT(!is_canonical()); // Never canonical.
3253 fill_position_ = d.Position();
3254#if defined(DART_PRECOMPILED_RUNTIME)
3255 const uint8_t immediate_bits = ObjectPool::EncodeBits(
3256 ObjectPool::EntryType::kImmediate, ObjectPool::Patchability::kPatchable,
3257 ObjectPool::SnapshotBehavior::kSnapshotable);
3258 uword switchable_call_miss_entry_point =
3259 StubCode::SwitchableCallMiss().MonomorphicEntryPoint();
3260#endif // defined(DART_PRECOMPILED_RUNTIME)
3261
3262 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3263 const intptr_t length = d.ReadUnsigned();
3264 ObjectPoolPtr pool = static_cast<ObjectPoolPtr>(d.Ref(id));
3265 Deserializer::InitializeHeader(pool, kObjectPoolCid,
3267 pool->untag()->length_ = length;
3268 for (intptr_t j = 0; j < length; j++) {
3269 const uint8_t entry_bits = d.Read<uint8_t>();
3270 pool->untag()->entry_bits()[j] = entry_bits;
3271 UntaggedObjectPool::Entry& entry = pool->untag()->data()[j];
3272 const auto snapshot_behavior =
3274 ASSERT(snapshot_behavior !=
3275 ObjectPool::SnapshotBehavior::kNotSnapshotable);
3276 switch (snapshot_behavior) {
3277 case ObjectPool::SnapshotBehavior::kSnapshotable:
3278 // Handled below.
3279 break;
3280 case ObjectPool::SnapshotBehavior::kResetToBootstrapNative:
3281 entry.raw_obj_ = StubCode::CallBootstrapNative().ptr();
3282 continue;
3283#if defined(DART_PRECOMPILED_RUNTIME)
3284 case ObjectPool::SnapshotBehavior::
3285 kResetToSwitchableCallMissEntryPoint:
3286 pool->untag()->entry_bits()[j] = immediate_bits;
3287 entry.raw_value_ =
3288 static_cast<intptr_t>(switchable_call_miss_entry_point);
3289 continue;
3290#endif // defined(DART_PRECOMPILED_RUNTIME)
3291 case ObjectPool::SnapshotBehavior::kSetToZero:
3292 entry.raw_value_ = 0;
3293 continue;
3294 default:
3295 FATAL("Unexpected snapshot behavior: %d\n", snapshot_behavior);
3296 }
3297 switch (ObjectPool::TypeBits::decode(entry_bits)) {
3298 case ObjectPool::EntryType::kTaggedObject:
3299 entry.raw_obj_ = d.ReadRef();
3300 break;
3301 case ObjectPool::EntryType::kImmediate:
3302 entry.raw_value_ = d.Read<intptr_t>();
3303 break;
3304 case ObjectPool::EntryType::kNativeFunction: {
3305 // Read nothing. Initialize with the lazy link entry.
3307 entry.raw_value_ = static_cast<intptr_t>(new_entry);
3308 break;
3309 }
3310 default:
3311 UNREACHABLE();
3312 }
3313 }
3314 }
3315 }
3316
3317 void PostLoad(Deserializer* d, const Array& refs) override {
3318#if defined(DART_PRECOMPILED_RUNTIME) && \
3319 (!defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER))
3320 if (FLAG_disassemble) {
3322 d->isolate_group()->object_store()->global_object_pool());
3323 THR_Print("Global object pool:\n");
3324 pool.DebugPrint();
3325 }
3326#endif
3327 }
3328
3329 private:
3330 intptr_t fill_position_ = 0;
3331};
3332
3333#if defined(DART_PRECOMPILER)
3334class WeakSerializationReferenceSerializationCluster
3335 : public SerializationCluster {
3336 public:
3337 WeakSerializationReferenceSerializationCluster()
3338 : SerializationCluster(
3339 "WeakSerializationReference",
3340 compiler::target::WeakSerializationReference::InstanceSize()) {}
3341 ~WeakSerializationReferenceSerializationCluster() {}
3342
3343 void Trace(Serializer* s, ObjectPtr object) {
3344 ASSERT(s->kind() == Snapshot::kFullAOT);
3345 objects_.Add(WeakSerializationReference::RawCast(object));
3346 }
3347
3348 void RetraceEphemerons(Serializer* s) {
3349 for (intptr_t i = 0; i < objects_.length(); i++) {
3350 WeakSerializationReferencePtr weak = objects_[i];
3351 if (!s->IsReachable(weak->untag()->target())) {
3352 s->Push(weak->untag()->replacement());
3353 }
3354 }
3355 }
3356
3357 intptr_t Count(Serializer* s) { return objects_.length(); }
3358
3359 void CreateArtificialTargetNodesIfNeeded(Serializer* s) {
3360 for (intptr_t i = 0; i < objects_.length(); i++) {
3361 WeakSerializationReferencePtr weak = objects_[i];
3362 s->CreateArtificialNodeIfNeeded(weak->untag()->target());
3363 }
3364 }
3365
3366 void WriteAlloc(Serializer* s) {
3367 UNREACHABLE(); // No WSRs are serialized, and so this cluster is not added.
3368 }
3369
3370 void WriteFill(Serializer* s) {
3371 UNREACHABLE(); // No WSRs are serialized, and so this cluster is not added.
3372 }
3373
3374 private:
3375 GrowableArray<WeakSerializationReferencePtr> objects_;
3376};
3377#endif
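// --- Illustrative sketch; not part of the original source. ---
// The weak serialization reference behavior modeled with plain values: a WSR
// serializes as its target when the target is otherwise reachable, and as its
// replacement when it is not (RetraceEphemerons above pushes the replacement
// in exactly that case). Reachability is passed in explicitly here instead of
// being tracked by the Serializer.
struct SketchWeakRef {
  intptr_t target;
  intptr_t replacement;
};
static intptr_t SketchResolveWeakRef(const SketchWeakRef& weak,
                                     bool target_reachable) {
  return target_reachable ? weak.target : weak.replacement;
}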
3378
3379#if !defined(DART_PRECOMPILED_RUNTIME)
3381 public:
3383 : SerializationCluster("PcDescriptors", kPcDescriptorsCid) {}
3385
3386 void Trace(Serializer* s, ObjectPtr object) {
3387 PcDescriptorsPtr desc = PcDescriptors::RawCast(object);
3388 objects_.Add(desc);
3389 }
3390
3392 const intptr_t count = objects_.length();
3393 s->WriteUnsigned(count);
3394 for (intptr_t i = 0; i < count; i++) {
3395 PcDescriptorsPtr desc = objects_[i];
3396 s->AssignRef(desc);
3398 const intptr_t length = desc->untag()->length_;
3399 s->WriteUnsigned(length);
3402 }
3403 }
3404
3406 const intptr_t count = objects_.length();
3407 for (intptr_t i = 0; i < count; i++) {
3408 PcDescriptorsPtr desc = objects_[i];
3410 const intptr_t length = desc->untag()->length_;
3411 s->WriteUnsigned(length);
3412 uint8_t* cdata = reinterpret_cast<uint8_t*>(desc->untag()->data());
3413 s->WriteBytes(cdata, length);
3414 }
3415 }
3416
3417 private:
3419};
3420#endif // !DART_PRECOMPILED_RUNTIME
3421
3423 public:
3425 : DeserializationCluster("PcDescriptors") {}
3427
3428 void ReadAlloc(Deserializer* d) override {
3429 start_index_ = d->next_index();
3430 const intptr_t count = d->ReadUnsigned();
3431 for (intptr_t i = 0; i < count; i++) {
3432 const intptr_t length = d->ReadUnsigned();
3433 d->AssignRef(d->Allocate(PcDescriptors::InstanceSize(length)));
3434 }
3435 stop_index_ = d->next_index();
3436 }
3437
3438 void ReadFill(Deserializer* d_) override {
3440
3441 ASSERT(!is_canonical()); // Never canonical.
3442 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3443 const intptr_t length = d.ReadUnsigned();
3444 PcDescriptorsPtr desc = static_cast<PcDescriptorsPtr>(d.Ref(id));
3445 Deserializer::InitializeHeader(desc, kPcDescriptorsCid,
3447 desc->untag()->length_ = length;
3448 uint8_t* cdata = reinterpret_cast<uint8_t*>(desc->untag()->data());
3449 d.ReadBytes(cdata, length);
3450 }
3451 }
3452};
3453
3454#if !defined(DART_PRECOMPILED_RUNTIME)
3456 public:
3458 : SerializationCluster("CodeSourceMap", kCodeSourceMapCid) {}
3460
3461 void Trace(Serializer* s, ObjectPtr object) {
3462 CodeSourceMapPtr map = CodeSourceMap::RawCast(object);
3463 objects_.Add(map);
3464 }
3465
3467 const intptr_t count = objects_.length();
3468 s->WriteUnsigned(count);
3469 for (intptr_t i = 0; i < count; i++) {
3470 CodeSourceMapPtr map = objects_[i];
3471 s->AssignRef(map);
3473 const intptr_t length = map->untag()->length_;
3474 s->WriteUnsigned(length);
3477 }
3478 }
3479
3481 const intptr_t count = objects_.length();
3482 for (intptr_t i = 0; i < count; i++) {
3483 CodeSourceMapPtr map = objects_[i];
3485 const intptr_t length = map->untag()->length_;
3486 s->WriteUnsigned(length);
3487 uint8_t* cdata = reinterpret_cast<uint8_t*>(map->untag()->data());
3488 s->WriteBytes(cdata, length);
3489 }
3490 }
3491
3492 private:
3494};
3495#endif // !DART_PRECOMPILED_RUNTIME
3496
3498 public:
3500 : DeserializationCluster("CodeSourceMap") {}
3502
3503 void ReadAlloc(Deserializer* d) override {
3504 start_index_ = d->next_index();
3505 const intptr_t count = d->ReadUnsigned();
3506 for (intptr_t i = 0; i < count; i++) {
3507 const intptr_t length = d->ReadUnsigned();
3508 d->AssignRef(d->Allocate(CodeSourceMap::InstanceSize(length)));
3509 }
3510 stop_index_ = d->next_index();
3511 }
3512
3513 void ReadFill(Deserializer* d_) override {
3515
3516 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3517 const intptr_t length = d.ReadUnsigned();
3518 CodeSourceMapPtr map = static_cast<CodeSourceMapPtr>(d.Ref(id));
3519 Deserializer::InitializeHeader(map, kPcDescriptorsCid,
3521 map->untag()->length_ = length;
3522 uint8_t* cdata = reinterpret_cast<uint8_t*>(map->untag()->data());
3523 d.ReadBytes(cdata, length);
3524 }
3525 }
3526};
3527
3528#if !defined(DART_PRECOMPILED_RUNTIME)
3530 public:
3532 : SerializationCluster("CompressedStackMaps", kCompressedStackMapsCid) {}
3534
3535 void Trace(Serializer* s, ObjectPtr object) {
3536 CompressedStackMapsPtr desc = CompressedStackMaps::RawCast(object);
3537 objects_.Add(desc);
3538 }
3539
3541 const intptr_t count = objects_.length();
3542 s->WriteUnsigned(count);
3543 for (intptr_t i = 0; i < count; i++) {
3544 CompressedStackMapsPtr map = objects_[i];
3545 s->AssignRef(map);
3548 map->untag()->payload()->flags_and_size());
3549 s->WriteUnsigned(length);
3552 }
3553 }
3554
3556 const intptr_t count = objects_.length();
3557 for (intptr_t i = 0; i < count; i++) {
3558 CompressedStackMapsPtr map = objects_[i];
3560 s->WriteUnsigned(map->untag()->payload()->flags_and_size());
3562 map->untag()->payload()->flags_and_size());
3563 uint8_t* cdata =
3564 reinterpret_cast<uint8_t*>(map->untag()->payload()->data());
3565 s->WriteBytes(cdata, length);
3566 }
3567 }
3568
3569 private:
3571};
3572#endif // !DART_PRECOMPILED_RUNTIME
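// --- Illustrative sketch; not part of the original source. ---
// CompressedStackMaps store their payload size together with a couple of
// flags in a single flags_and_size value, which is why the clusters above
// write flags_and_size but allocate and copy only the decoded payload length.
// A minimal model with a made-up layout of two low flag bits:
static uintptr_t SketchPackFlagsAndSize(uintptr_t payload_size,
                                        bool is_global_table,
                                        bool uses_global_table) {
  return (payload_size << 2) | (is_global_table ? 2u : 0u) |
         (uses_global_table ? 1u : 0u);
}
static uintptr_t SketchUnpackPayloadSize(uintptr_t flags_and_size) {
  return flags_and_size >> 2;
}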
3573
3575 : public DeserializationCluster {
3576 public:
3578 : DeserializationCluster("CompressedStackMaps") {}
3580
3581 void ReadAlloc(Deserializer* d) override {
3582 start_index_ = d->next_index();
3583 const intptr_t count = d->ReadUnsigned();
3584 for (intptr_t i = 0; i < count; i++) {
3585 const intptr_t length = d->ReadUnsigned();
3586 d->AssignRef(d->Allocate(CompressedStackMaps::InstanceSize(length)));
3587 }
3588 stop_index_ = d->next_index();
3589 }
3590
3591 void ReadFill(Deserializer* d_) override {
3593
3594 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3595 const intptr_t flags_and_size = d.ReadUnsigned();
3596 const intptr_t length =
3598 CompressedStackMapsPtr map =
3599 static_cast<CompressedStackMapsPtr>(d.Ref(id));
3600 Deserializer::InitializeHeader(map, kCompressedStackMapsCid,
3602 map->untag()->payload()->set_flags_and_size(flags_and_size);
3603 uint8_t* cdata =
3604 reinterpret_cast<uint8_t*>(map->untag()->payload()->data());
3605 d.ReadBytes(cdata, length);
3606 }
3607 }
3608};
3609
3610#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_COMPRESSED_POINTERS)
3611// PcDescriptor, CompressedStackMaps, OneByteString, TwoByteString
3613 : public CanonicalSetSerializationCluster<CanonicalStringSet,
3614 String,
3615 ObjectPtr> {
3616 public:
3618 const char* type,
3619 intptr_t cid,
3620 bool is_canonical)
3622 cid,
3625 ImageWriter::TagObjectTypeAsReadOnly(zone, type)),
3626 zone_(zone),
3627 cid_(cid),
3628 type_(type) {}
3630
3631 void Trace(Serializer* s, ObjectPtr object) {
3632 // A string's hash must already be computed when we write it because it
3633 // will be loaded into read-only memory. Extra bytes due to allocation
3634 // rounding need to be deterministically set for reliable deduplication in
3635 // shared images.
3636 if (object->untag()->InVMIsolateHeap() ||
3637 s->heap()->old_space()->IsObjectFromImagePages(object)) {
3638 // This object is already read-only.
3639 } else {
3641 }
3642
3643 objects_.Add(object);
3644 }
3645
3647 const bool is_string_cluster = IsStringClassId(cid_);
3648
3649 intptr_t count = objects_.length();
3650 s->WriteUnsigned(count);
3652
3653 uint32_t running_offset = 0;
3654 for (intptr_t i = 0; i < count; i++) {
3655 ObjectPtr object = objects_[i];
3656 s->AssignRef(object);
3657 const StringPtr name =
3658 is_string_cluster ? String::RawCast(object) : nullptr;
3659 Serializer::WritingObjectScope scope(s, type_, object, name);
3660 uint32_t offset = s->GetDataOffset(object);
3661 s->TraceDataOffset(offset);
3664 ASSERT(offset > running_offset);
3665 s->WriteUnsigned((offset - running_offset) >>
3667 running_offset = offset;
3668 }
3670 }
3671
3673 // No-op.
3674 }
3675
3676 private:
3677 Zone* zone_;
3678 const intptr_t cid_;
3679 const char* const type_;
3680};
3681#endif // !DART_PRECOMPILED_RUNTIME && !DART_COMPRESSED_POINTERS
3682
3683#if !defined(DART_COMPRESSED_POINTERS)
3685 : public CanonicalSetDeserializationCluster<CanonicalStringSet> {
3686 public:
3688 bool is_canonical,
3689 bool is_root_unit)
3691 is_root_unit,
3692 "ROData"),
3693 cid_(cid) {}
3695
3696 void ReadAlloc(Deserializer* d) override {
3697 start_index_ = d->next_index();
3698 intptr_t count = d->ReadUnsigned();
3699 uint32_t running_offset = 0;
3700 for (intptr_t i = 0; i < count; i++) {
3701 running_offset += d->ReadUnsigned() << kObjectAlignmentLog2;
3702 ObjectPtr object = d->GetObjectAt(running_offset);
3703 d->AssignRef(object);
3704 }
3705 stop_index_ = d->next_index();
3706 if (cid_ == kStringCid) {
3708 }
3709 }
3710
3711 void ReadFill(Deserializer* d_) override {
3713
3714 // No-op.
3715 }
3716
3717 void PostLoad(Deserializer* d, const Array& refs) override {
3718 if (!table_.IsNull()) {
3719 auto object_store = d->isolate_group()->object_store();
3720 VerifyCanonicalSet(d, refs,
3721 WeakArray::Handle(object_store->symbol_table()));
3722 object_store->set_symbol_table(table_);
3723 if (d->isolate_group() == Dart::vm_isolate_group()) {
3724 Symbols::InitFromSnapshot(d->isolate_group());
3725 }
3726 } else if (!is_root_unit_ && is_canonical()) {
3727 FATAL("Cannot recanonicalize RO objects.");
3728 }
3729 }
3730
3731 private:
3732 const intptr_t cid_;
3733};
3734#endif // !DART_COMPRESSED_POINTERS
3735
3736#if !defined(DART_PRECOMPILED_RUNTIME)
3738 public:
3740 : SerializationCluster("ExceptionHandlers", kExceptionHandlersCid) {}
3742
3743 void Trace(Serializer* s, ObjectPtr object) {
3744 ExceptionHandlersPtr handlers = ExceptionHandlers::RawCast(object);
3745 objects_.Add(handlers);
3746
3747 s->Push(handlers->untag()->handled_types_data());
3748 }
3749
3751 const intptr_t count = objects_.length();
3752 s->WriteUnsigned(count);
3753 for (intptr_t i = 0; i < count; i++) {
3754 ExceptionHandlersPtr handlers = objects_[i];
3755 s->AssignRef(handlers);
3756 AutoTraceObject(handlers);
3757 const intptr_t length = handlers->untag()->num_entries();
3758 s->WriteUnsigned(length);
3761 }
3762 }
3763
3765 const intptr_t count = objects_.length();
3766 for (intptr_t i = 0; i < count; i++) {
3767 ExceptionHandlersPtr handlers = objects_[i];
3768 AutoTraceObject(handlers);
3769 const intptr_t packed_fields = handlers->untag()->packed_fields_;
3770 const intptr_t length =
3772 s->WriteUnsigned(packed_fields);
3773 WriteCompressedField(handlers, handled_types_data);
3774 for (intptr_t j = 0; j < length; j++) {
3775 const ExceptionHandlerInfo& info = handlers->untag()->data()[j];
3776 s->Write<uint32_t>(info.handler_pc_offset);
3777 s->Write<int16_t>(info.outer_try_index);
3778 s->Write<int8_t>(info.needs_stacktrace);
3779 s->Write<int8_t>(info.has_catch_all);
3780 s->Write<int8_t>(info.is_generated);
3781 }
3782 }
3783 }
3784
3785 private:
3787};
3788#endif // !DART_PRECOMPILED_RUNTIME
3789
3791 public:
3793 : DeserializationCluster("ExceptionHandlers") {}
3795
3796 void ReadAlloc(Deserializer* d) override {
3797 start_index_ = d->next_index();
3798 const intptr_t count = d->ReadUnsigned();
3799 for (intptr_t i = 0; i < count; i++) {
3800 const intptr_t length = d->ReadUnsigned();
3801 d->AssignRef(d->Allocate(ExceptionHandlers::InstanceSize(length)));
3802 }
3803 stop_index_ = d->next_index();
3804 }
3805
3806 void ReadFill(Deserializer* d_) override {
3808
3809 ASSERT(!is_canonical()); // Never canonical.
3810 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3811 ExceptionHandlersPtr handlers =
3812 static_cast<ExceptionHandlersPtr>(d.Ref(id));
3813 const intptr_t packed_fields = d.ReadUnsigned();
3814 const intptr_t length = UntaggedExceptionHandlers::NumEntriesBits::decode(packed_fields);
3816 Deserializer::InitializeHeader(handlers, kExceptionHandlersCid, ExceptionHandlers::InstanceSize(length));
3818 handlers->untag()->packed_fields_ = packed_fields;
3819 handlers->untag()->handled_types_data_ =
3820 static_cast<ArrayPtr>(d.ReadRef());
3821 for (intptr_t j = 0; j < length; j++) {
3822 ExceptionHandlerInfo& info = handlers->untag()->data()[j];
3823 info.handler_pc_offset = d.Read<uint32_t>();
3824 info.outer_try_index = d.Read<int16_t>();
3825 info.needs_stacktrace = d.Read<int8_t>();
3826 info.has_catch_all = d.Read<int8_t>();
3827 info.is_generated = d.Read<int8_t>();
3828 }
3829 }
3830 }
3831};
3832
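// Context objects are variable-length: num_variables_ is written in both the
// alloc and fill streams, followed by the parent reference and one element
// reference per captured variable.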
3833#if !defined(DART_PRECOMPILED_RUNTIME)
3835 public:
3837 : SerializationCluster("Context", kContextCid) {}
3839
3840 void Trace(Serializer* s, ObjectPtr object) {
3841 ContextPtr context = Context::RawCast(object);
3842 objects_.Add(context);
3843
3844 s->Push(context->untag()->parent());
3845 const intptr_t length = context->untag()->num_variables_;
3846 for (intptr_t i = 0; i < length; i++) {
3847 s->Push(context->untag()->element(i));
3848 }
3849 }
3850
3852 const intptr_t count = objects_.length();
3853 s->WriteUnsigned(count);
3854 for (intptr_t i = 0; i < count; i++) {
3855 ContextPtr context = objects_[i];
3856 s->AssignRef(context);
3857 AutoTraceObject(context);
3858 const intptr_t length = context->untag()->num_variables_;
3859 s->WriteUnsigned(length);
3861 }
3862 }
3863
3865 const intptr_t count = objects_.length();
3866 for (intptr_t i = 0; i < count; i++) {
3867 ContextPtr context = objects_[i];
3868 AutoTraceObject(context);
3869 const intptr_t length = context->untag()->num_variables_;
3870 s->WriteUnsigned(length);
3871 WriteField(context, parent());
3872 for (intptr_t j = 0; j < length; j++) {
3873 s->WriteElementRef(context->untag()->element(j), j);
3874 }
3875 }
3876 }
3877
3878 private:
3880};
3881#endif // !DART_PRECOMPILED_RUNTIME
3882
3884 public:
3887
3888 void ReadAlloc(Deserializer* d) override {
3889 start_index_ = d->next_index();
3890 const intptr_t count = d->ReadUnsigned();
3891 for (intptr_t i = 0; i < count; i++) {
3892 const intptr_t length = d->ReadUnsigned();
3893 d->AssignRef(d->Allocate(Context::InstanceSize(length)));
3894 }
3895 stop_index_ = d->next_index();
3896 }
3897
3898 void ReadFill(Deserializer* d_) override {
3900
3901 ASSERT(!is_canonical()); // Never canonical.
3902 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3903 ContextPtr context = static_cast<ContextPtr>(d.Ref(id));
3904 const intptr_t length = d.ReadUnsigned();
3905 Deserializer::InitializeHeader(context, kContextCid, Context::InstanceSize(length));
3907 context->untag()->num_variables_ = length;
3908 context->untag()->parent_ = static_cast<ContextPtr>(d.ReadRef());
3909 for (intptr_t j = 0; j < length; j++) {
3910 context->untag()->data()[j] = d.ReadRef();
3911 }
3912 }
3913 }
3914};
3915
3916#if !defined(DART_PRECOMPILED_RUNTIME)
3918 public:
3920 : SerializationCluster("ContextScope", kContextScopeCid) {}
3922
3923 void Trace(Serializer* s, ObjectPtr object) {
3924 ContextScopePtr scope = ContextScope::RawCast(object);
3925 objects_.Add(scope);
3926
3927 const intptr_t length = scope->untag()->num_variables_;
3928 PushFromTo(scope, length);
3929 }
3930
3932 const intptr_t count = objects_.length();
3933 s->WriteUnsigned(count);
3934 for (intptr_t i = 0; i < count; i++) {
3935 ContextScopePtr scope = objects_[i];
3936 s->AssignRef(scope);
3937 AutoTraceObject(scope);
3938 const intptr_t length = scope->untag()->num_variables_;
3939 s->WriteUnsigned(length);
3942 }
3943 }
3944
3946 const intptr_t count = objects_.length();
3947 for (intptr_t i = 0; i < count; i++) {
3948 ContextScopePtr scope = objects_[i];
3949 AutoTraceObject(scope);
3950 const intptr_t length = scope->untag()->num_variables_;
3951 s->WriteUnsigned(length);
3952 s->Write<bool>(scope->untag()->is_implicit_);
3953 WriteFromTo(scope, length);
3954 }
3955 }
3956
3957 private:
3959};
3960#endif // !DART_PRECOMPILED_RUNTIME
3961
3963 public:
3965 : DeserializationCluster("ContextScope") {}
3967
3968 void ReadAlloc(Deserializer* d) override {
3969 start_index_ = d->next_index();
3970 const intptr_t count = d->ReadUnsigned();
3971 for (intptr_t i = 0; i < count; i++) {
3972 const intptr_t length = d->ReadUnsigned();
3973 d->AssignRef(d->Allocate(ContextScope::InstanceSize(length)));
3974 }
3975 stop_index_ = d->next_index();
3976 }
3977
3978 void ReadFill(Deserializer* d_) override {
3980
3981 ASSERT(!is_canonical()); // Never canonical.
3982 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3983 ContextScopePtr scope = static_cast<ContextScopePtr>(d.Ref(id));
3984 const intptr_t length = d.ReadUnsigned();
3985 Deserializer::InitializeHeader(scope, kContextScopeCid, ContextScope::InstanceSize(length));
3987 scope->untag()->num_variables_ = length;
3988 scope->untag()->is_implicit_ = d.Read<bool>();
3989 d.ReadFromTo(scope, length);
3990 }
3991 }
3992};
3993
3994#if !defined(DART_PRECOMPILED_RUNTIME)
3996 public:
3998 : SerializationCluster("UnlinkedCall",
3999 kUnlinkedCallCid,
4000 compiler::target::UnlinkedCall::InstanceSize()) {}
4002
4003 void Trace(Serializer* s, ObjectPtr object) {
4004 UnlinkedCallPtr unlinked = UnlinkedCall::RawCast(object);
4005 objects_.Add(unlinked);
4006 PushFromTo(unlinked);
4007 }
4008
4010 const intptr_t count = objects_.length();
4011 s->WriteUnsigned(count);
4012 for (intptr_t i = 0; i < count; i++) {
4013 UnlinkedCallPtr unlinked = objects_[i];
4014 s->AssignRef(unlinked);
4015 }
4016 }
4017
4019 const intptr_t count = objects_.length();
4020 for (intptr_t i = 0; i < count; i++) {
4021 UnlinkedCallPtr unlinked = objects_[i];
4022 AutoTraceObjectName(unlinked, unlinked->untag()->target_name_);
4023 WriteFromTo(unlinked);
4024 s->Write<bool>(unlinked->untag()->can_patch_to_monomorphic_);
4025 }
4026 }
4027
4028 private:
4030};
4031#endif // !DART_PRECOMPILED_RUNTIME
4032
4034 public:
4036 : DeserializationCluster("UnlinkedCall") {}
4038
4039 void ReadAlloc(Deserializer* d) override {
4041 }
4042
4043 void ReadFill(Deserializer* d_) override {
4045
4046 ASSERT(!is_canonical()); // Never canonical.
4047 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4048 UnlinkedCallPtr unlinked = static_cast<UnlinkedCallPtr>(d.Ref(id));
4049 Deserializer::InitializeHeader(unlinked, kUnlinkedCallCid, UnlinkedCall::InstanceSize());
4051 d.ReadFromTo(unlinked);
4052 unlinked->untag()->can_patch_to_monomorphic_ = d.Read<bool>();
4053 }
4054 }
4055};
4056
4057#if !defined(DART_PRECOMPILED_RUNTIME)
4059 public:
4061 : SerializationCluster("ICData",
4062 kICDataCid,
4063 compiler::target::ICData::InstanceSize()) {}
4065
4066 void Trace(Serializer* s, ObjectPtr object) {
4067 ICDataPtr ic = ICData::RawCast(object);
4068 objects_.Add(ic);
4069 PushFromTo(ic);
4070 }
4071
4073 const intptr_t count = objects_.length();
4074 s->WriteUnsigned(count);
4075 for (intptr_t i = 0; i < count; i++) {
4076 ICDataPtr ic = objects_[i];
4077 s->AssignRef(ic);
4078 }
4079 }
4080
4082 Snapshot::Kind kind = s->kind();
4083 const intptr_t count = objects_.length();
4084 for (intptr_t i = 0; i < count; i++) {
4085 ICDataPtr ic = objects_[i];
4086 AutoTraceObjectName(ic, ic->untag()->target_name_);
4087 WriteFromTo(ic);
4088 if (kind != Snapshot::kFullAOT) {
4089 NOT_IN_PRECOMPILED(s->Write<int32_t>(ic->untag()->deopt_id_));
4090 }
4091 s->Write<uint32_t>(ic->untag()->state_bits_);
4092 }
4093 }
4094
4095 private:
4096 GrowableArray<ICDataPtr> objects_;
4097};
4098#endif // !DART_PRECOMPILED_RUNTIME
4099
4101 public:
4104
4105 void ReadAlloc(Deserializer* d) override {
4107 }
4108
4109 void ReadFill(Deserializer* d_) override {
4111
4112 ASSERT(!is_canonical()); // Never canonical.
4113 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4114 ICDataPtr ic = static_cast<ICDataPtr>(d.Ref(id));
4115 Deserializer::InitializeHeader(ic, kICDataCid, ICData::InstanceSize());
4116 d.ReadFromTo(ic);
4117 NOT_IN_PRECOMPILED(ic->untag()->deopt_id_ = d.Read<int32_t>());
4118 ic->untag()->state_bits_ = d.Read<uint32_t>();
4119 }
4120 }
4121};
4122
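// MegamorphicCache is a fixed-size object; besides its object references the
// only scalar state carried through the snapshot is filled_entry_count_.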
4123#if !defined(DART_PRECOMPILED_RUNTIME)
4125 public:
4128 "MegamorphicCache",
4129 kMegamorphicCacheCid,
4130 compiler::target::MegamorphicCache::InstanceSize()) {}
4132
4133 void Trace(Serializer* s, ObjectPtr object) {
4134 MegamorphicCachePtr cache = MegamorphicCache::RawCast(object);
4135 objects_.Add(cache);
4137 }
4138
4140 const intptr_t count = objects_.length();
4141 s->WriteUnsigned(count);
4142 for (intptr_t i = 0; i < count; i++) {
4143 MegamorphicCachePtr cache = objects_[i];
4144 s->AssignRef(cache);
4145 }
4146 }
4147
4149 const intptr_t count = objects_.length();
4150 for (intptr_t i = 0; i < count; i++) {
4151 MegamorphicCachePtr cache = objects_[i];
4152 AutoTraceObjectName(cache, cache->untag()->target_name_);
4154 s->Write<int32_t>(cache->untag()->filled_entry_count_);
4155 }
4156 }
4157
4158 private:
4160};
4161#endif // !DART_PRECOMPILED_RUNTIME
4162
4164 public:
4166 : DeserializationCluster("MegamorphicCache") {}
4168
4169 void ReadAlloc(Deserializer* d) override {
4171 }
4172
4173 void ReadFill(Deserializer* d_) override {
4175
4176 ASSERT(!is_canonical()); // Never canonical.
4177 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4178 MegamorphicCachePtr cache = static_cast<MegamorphicCachePtr>(d.Ref(id));
4179 Deserializer::InitializeHeader(cache, kMegamorphicCacheCid, MegamorphicCache::InstanceSize());
4181 d.ReadFromTo(cache);
4182 cache->untag()->filled_entry_count_ = d.Read<int32_t>();
4183 }
4184 }
4185};
4186
4187#if !defined(DART_PRECOMPILED_RUNTIME)
4189 public:
4192 "SubtypeTestCache",
4193 kSubtypeTestCacheCid,
4194 compiler::target::SubtypeTestCache::InstanceSize()) {}
4196
4197 void Trace(Serializer* s, ObjectPtr object) {
4198 SubtypeTestCachePtr cache = SubtypeTestCache::RawCast(object);
4199 objects_.Add(cache);
4200 s->Push(cache->untag()->cache_);
4201 }
4202
4204 const intptr_t count = objects_.length();
4205 s->WriteUnsigned(count);
4206 for (intptr_t i = 0; i < count; i++) {
4207 SubtypeTestCachePtr cache = objects_[i];
4208 s->AssignRef(cache);
4209 }
4210 }
4211
4213 const intptr_t count = objects_.length();
4214 for (intptr_t i = 0; i < count; i++) {
4215 SubtypeTestCachePtr cache = objects_[i];
4217 WriteField(cache, cache_);
4218 s->Write<uint32_t>(cache->untag()->num_inputs_);
4219 s->Write<uint32_t>(cache->untag()->num_occupied_);
4220 }
4221 }
4222
4223 private:
4225};
4226#endif // !DART_PRECOMPILED_RUNTIME
4227
4229 public:
4231 : DeserializationCluster("SubtypeTestCache") {}
4233
4234 void ReadAlloc(Deserializer* d) override {
4236 }
4237
4238 void ReadFill(Deserializer* d_) override {
4240
4241 ASSERT(!is_canonical()); // Never canonical.
4242 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4243 SubtypeTestCachePtr cache = static_cast<SubtypeTestCachePtr>(d.Ref(id));
4244 Deserializer::InitializeHeader(cache, kSubtypeTestCacheCid, SubtypeTestCache::InstanceSize());
4246 cache->untag()->cache_ = static_cast<ArrayPtr>(d.ReadRef());
4247 cache->untag()->num_inputs_ = d.Read<uint32_t>();
4248 cache->untag()->num_occupied_ = d.Read<uint32_t>();
4249 }
4250 }
4251};
4252
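// LoadingUnit serializes only its parent reference and its id (taken from
// packed_fields_ via IdBits). On deserialization the unit starts out empty:
// base_objects_ is null, instructions_image_ is cleared, and the load state
// is reset to kNotLoaded.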
4253#if !defined(DART_PRECOMPILED_RUNTIME)
4255 public:
4257 : SerializationCluster("LoadingUnit",
4258 kLoadingUnitCid,
4259 compiler::target::LoadingUnit::InstanceSize()) {}
4261
4262 void Trace(Serializer* s, ObjectPtr object) {
4263 LoadingUnitPtr unit = LoadingUnit::RawCast(object);
4264 objects_.Add(unit);
4265 s->Push(unit->untag()->parent());
4266 }
4267
4269 const intptr_t count = objects_.length();
4270 s->WriteUnsigned(count);
4271 for (intptr_t i = 0; i < count; i++) {
4272 LoadingUnitPtr unit = objects_[i];
4273 s->AssignRef(unit);
4274 }
4275 }
4276
4278 const intptr_t count = objects_.length();
4279 for (intptr_t i = 0; i < count; i++) {
4280 LoadingUnitPtr unit = objects_[i];
4281 AutoTraceObject(unit);
4282 WriteCompressedField(unit, parent);
4283 s->Write<intptr_t>(
4284 unit->untag()->packed_fields_.Read<UntaggedLoadingUnit::IdBits>());
4285 }
4286 }
4287
4288 private:
4290};
4291#endif // !DART_PRECOMPILED_RUNTIME
4292
4294 public:
4297
4298 void ReadAlloc(Deserializer* d) override {
4300 }
4301
4302 void ReadFill(Deserializer* d_) override {
4304
4305 ASSERT(!is_canonical()); // Never canonical.
4306 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4307 LoadingUnitPtr unit = static_cast<LoadingUnitPtr>(d.Ref(id));
4308 Deserializer::InitializeHeader(unit, kLoadingUnitCid, LoadingUnit::InstanceSize());
4310 unit->untag()->parent_ = static_cast<LoadingUnitPtr>(d.ReadRef());
4311 unit->untag()->base_objects_ = Array::null();
4312 unit->untag()->instructions_image_ = nullptr;
4313 unit->untag()->packed_fields_ =
4315 UntaggedLoadingUnit::LoadStateBits::encode(UntaggedLoadingUnit::kNotLoaded) |
4316 UntaggedLoadingUnit::IdBits::encode(d.Read<intptr_t>());
4317 }
4318 }
4319};
4320
4321#if !defined(DART_PRECOMPILED_RUNTIME)
4323 public:
4325 : SerializationCluster("LanguageError",
4326 kLanguageErrorCid,
4327 compiler::target::LanguageError::InstanceSize()) {}
4329
4330 void Trace(Serializer* s, ObjectPtr object) {
4331 LanguageErrorPtr error = LanguageError::RawCast(object);
4332 objects_.Add(error);
4334 }
4335
4337 const intptr_t count = objects_.length();
4338 s->WriteUnsigned(count);
4339 for (intptr_t i = 0; i < count; i++) {
4340 LanguageErrorPtr error = objects_[i];
4341 s->AssignRef(error);
4342 }
4343 }
4344
4346 const intptr_t count = objects_.length();
4347 for (intptr_t i = 0; i < count; i++) {
4348 LanguageErrorPtr error = objects_[i];
4351 s->WriteTokenPosition(error->untag()->token_pos_);
4352 s->Write<bool>(error->untag()->report_after_token_);
4353 s->Write<int8_t>(error->untag()->kind_);
4354 }
4355 }
4356
4357 private:
4359};
4360#endif // !DART_PRECOMPILED_RUNTIME
4361
4363 public:
4365 : DeserializationCluster("LanguageError") {}
4367
4368 void ReadAlloc(Deserializer* d) override {
4370 }
4371
4372 void ReadFill(Deserializer* d_) override {
4374
4375 ASSERT(!is_canonical()); // Never canonical.
4376 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4377 LanguageErrorPtr error = static_cast<LanguageErrorPtr>(d.Ref(id));
4378 Deserializer::InitializeHeader(error, kLanguageErrorCid, LanguageError::InstanceSize());
4380 d.ReadFromTo(error);
4381 error->untag()->token_pos_ = d.ReadTokenPosition();
4382 error->untag()->report_after_token_ = d.Read<bool>();
4383 error->untag()->kind_ = d.Read<int8_t>();
4384 }
4385 }
4386};
4387
4388#if !defined(DART_PRECOMPILED_RUNTIME)
4390 public:
4393 "UnhandledException",
4394 kUnhandledExceptionCid,
4395 compiler::target::UnhandledException::InstanceSize()) {}
4397
4398 void Trace(Serializer* s, ObjectPtr object) {
4399 UnhandledExceptionPtr exception = UnhandledException::RawCast(object);
4400 objects_.Add(exception);
4401 PushFromTo(exception);
4402 }
4403
4405 const intptr_t count = objects_.length();
4406 s->WriteUnsigned(count);
4407 for (intptr_t i = 0; i < count; i++) {
4408 UnhandledExceptionPtr exception = objects_[i];
4409 s->AssignRef(exception);
4410 }
4411 }
4412
4414 const intptr_t count = objects_.length();
4415 for (intptr_t i = 0; i < count; i++) {
4416 UnhandledExceptionPtr exception = objects_[i];
4417 AutoTraceObject(exception);
4418 WriteFromTo(exception);
4419 }
4420 }
4421
4422 private:
4424};
4425#endif // !DART_PRECOMPILED_RUNTIME
4426
4428 public:
4430 : DeserializationCluster("UnhandledException") {}
4432
4433 void ReadAlloc(Deserializer* d) override {
4435 }
4436
4437 void ReadFill(Deserializer* d_) override {
4439
4440 ASSERT(!is_canonical()); // Never canonical.
4441 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4442 UnhandledExceptionPtr exception =
4443 static_cast<UnhandledExceptionPtr>(d.Ref(id));
4444 Deserializer::InitializeHeader(exception, kUnhandledExceptionCid, UnhandledException::InstanceSize());
4446 d.ReadFromTo(exception);
4447 }
4448 }
4449};
4450
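// Plain instances are serialized field by field. Fields marked in the class's
// unboxed-fields bitmap are written as raw machine words (32 bits at a time);
// all other fields are written as object references. Under the precompiler the
// target's field offsets and instance size may differ from the host's, so both
// are tracked separately.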
4451#if !defined(DART_PRECOMPILED_RUNTIME)
4453 public:
4456 ClassPtr cls = IsolateGroup::Current()->class_table()->At(cid);
4457 host_next_field_offset_in_words_ =
4458 cls->untag()->host_next_field_offset_in_words_;
4459 ASSERT(host_next_field_offset_in_words_ > 0);
4460#if defined(DART_PRECOMPILER)
4461 target_next_field_offset_in_words_ =
4462 cls->untag()->target_next_field_offset_in_words_;
4463 target_instance_size_in_words_ =
4464 cls->untag()->target_instance_size_in_words_;
4465#else
4466 target_next_field_offset_in_words_ =
4467 cls->untag()->host_next_field_offset_in_words_;
4468 target_instance_size_in_words_ = cls->untag()->host_instance_size_in_words_;
4469#endif // defined(DART_PRECOMPILER)
4470 ASSERT(target_next_field_offset_in_words_ > 0);
4471 ASSERT(target_instance_size_in_words_ > 0);
4472 }
4474
4475 void Trace(Serializer* s, ObjectPtr object) {
4476 InstancePtr instance = Instance::RawCast(object);
4477 objects_.Add(instance);
4478 const intptr_t next_field_offset = host_next_field_offset_in_words_ << kCompressedWordSizeLog2;
4480 const auto unboxed_fields_bitmap =
4481 s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(cid_);
4482 intptr_t offset = Instance::NextFieldOffset();
4483 while (offset < next_field_offset) {
4484 // Skips unboxed fields
4485 if (!unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
4486 ObjectPtr raw_obj =
4487 reinterpret_cast<CompressedObjectPtr*>(
4488 reinterpret_cast<uword>(instance->untag()) + offset)
4489 ->Decompress(instance->untag()->heap_base());
4490 s->Push(raw_obj);
4491 }
4492 offset += kCompressedWordSize;
4493 }
4494 }
4495
4497 const intptr_t count = objects_.length();
4498 s->WriteUnsigned(count);
4499
4500 s->Write<int32_t>(target_next_field_offset_in_words_);
4501 s->Write<int32_t>(target_instance_size_in_words_);
4502
4503 for (intptr_t i = 0; i < count; i++) {
4504 InstancePtr instance = objects_[i];
4505 s->AssignRef(instance);
4506 }
4507
4508 const intptr_t instance_size = compiler::target::RoundedAllocationSize(
4509 target_instance_size_in_words_ * compiler::target::kCompressedWordSize);
4510 target_memory_size_ += instance_size * count;
4511 }
4512
4514 intptr_t next_field_offset = host_next_field_offset_in_words_ << kCompressedWordSizeLog2;
4516 const intptr_t count = objects_.length();
4517 s->WriteUnsigned64(CalculateTargetUnboxedFieldsBitmap(s, cid_).Value());
4518 const auto unboxed_fields_bitmap =
4519 s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(cid_);
4520
4521 for (intptr_t i = 0; i < count; i++) {
4522 InstancePtr instance = objects_[i];
4524#if defined(DART_PRECOMPILER)
4525 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
4526 ClassPtr cls = s->isolate_group()->class_table()->At(cid_);
4527 s->AttributePropertyRef(cls, "<class>");
4528 }
4529#endif
4530 intptr_t offset = Instance::NextFieldOffset();
4531 while (offset < next_field_offset) {
4532 if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
4533 // Writes 32 bits of the unboxed value at a time.
4534 const compressed_uword value = *reinterpret_cast<compressed_uword*>(
4535 reinterpret_cast<uword>(instance->untag()) + offset);
4536 s->WriteWordWith32BitWrites(value);
4537 } else {
4538 ObjectPtr raw_obj =
4539 reinterpret_cast<CompressedObjectPtr*>(
4540 reinterpret_cast<uword>(instance->untag()) + offset)
4541 ->Decompress(instance->untag()->heap_base());
4542 s->WriteElementRef(raw_obj, offset);
4543 }
4544 offset += kCompressedWordSize;
4545 }
4546 }
4547 }
4548
4549 private:
4550 intptr_t host_next_field_offset_in_words_;
4551 intptr_t target_next_field_offset_in_words_;
4552 intptr_t target_instance_size_in_words_;
4554};
4555#endif // !DART_PRECOMPILED_RUNTIME
4556
4558 protected:
4560 bool is_canonical,
4561 bool is_root_unit)
4563 is_root_unit_(is_root_unit) {}
4564
4565 const bool is_root_unit_;
4566
4567 public:
4568#if defined(DART_PRECOMPILED_RUNTIME)
4569 void PostLoad(Deserializer* d, const Array& refs) override {
4570 if (!is_root_unit_ && is_canonical()) {
4572 SafepointMutexLocker ml(d->isolate_group()->constant_canonicalization_mutex());
4573 Instance& instance = Instance::Handle(d->zone());
4574 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
4575 instance ^= refs.At(i);
4576 instance = instance.CanonicalizeLocked(d->thread());
4577 refs.SetAt(i, instance);
4578 }
4579 }
4580 }
4581#endif
4582};
4583
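// Concrete instance deserialization: boxed fields are filled from refs,
// unboxed fields from raw words, and any slots between the class's
// next_field_offset and its instance size are null-initialized. Canonical
// instances loaded into a non-root unit are re-canonicalized in the base
// cluster's PostLoad above.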
4586 public:
4588 bool is_canonical,
4589 bool is_immutable,
4590 bool is_root_unit)
4593 is_root_unit),
4594 cid_(cid),
4595 is_immutable_(is_immutable) {}
4597
4598 void ReadAlloc(Deserializer* d) override {
4599 start_index_ = d->next_index();
4600 const intptr_t count = d->ReadUnsigned();
4601 next_field_offset_in_words_ = d->Read<int32_t>();
4602 instance_size_in_words_ = d->Read<int32_t>();
4603 intptr_t instance_size = Object::RoundedAllocationSize(
4604 instance_size_in_words_ * kCompressedWordSize);
4605 for (intptr_t i = 0; i < count; i++) {
4606 d->AssignRef(d->Allocate(instance_size));
4607 }
4608 stop_index_ = d->next_index();
4609 }
4610
4611 void ReadFill(Deserializer* d_) override {
4613
4614 const intptr_t cid = cid_;
4615 const bool mark_canonical = is_root_unit_ && is_canonical();
4616 const bool is_immutable = is_immutable_;
4617 intptr_t next_field_offset = next_field_offset_in_words_ << kCompressedWordSizeLog2;
4619 intptr_t instance_size = Object::RoundedAllocationSize(
4620 instance_size_in_words_ * kCompressedWordSize);
4621 const UnboxedFieldBitmap unboxed_fields_bitmap(d.ReadUnsigned64());
4622
4623 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4624 InstancePtr instance = static_cast<InstancePtr>(d.Ref(id));
4625 Deserializer::InitializeHeader(instance, cid, instance_size,
4626 mark_canonical, is_immutable);
4627 intptr_t offset = Instance::NextFieldOffset();
4628 while (offset < next_field_offset) {
4629 if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
4630 compressed_uword* p = reinterpret_cast<compressed_uword*>(
4631 reinterpret_cast<uword>(instance->untag()) + offset);
4632 // Reads 32 bits of the unboxed value at a time
4633 *p = d.ReadWordWith32BitReads();
4634 } else {
4635 CompressedObjectPtr* p = reinterpret_cast<CompressedObjectPtr*>(
4636 reinterpret_cast<uword>(instance->untag()) + offset);
4637 *p = d.ReadRef();
4638 }
4639 offset += kCompressedWordSize;
4640 }
4641 while (offset < instance_size) {
4642 CompressedObjectPtr* p = reinterpret_cast<CompressedObjectPtr*>(
4643 reinterpret_cast<uword>(instance->untag()) + offset);
4644 *p = Object::null();
4645 offset += kCompressedWordSize;
4646 }
4647 ASSERT(offset == instance_size);
4648 }
4649 }
4650
4651 private:
4652 const intptr_t cid_;
4653 const bool is_immutable_;
4654 intptr_t next_field_offset_in_words_;
4655 intptr_t instance_size_in_words_;
4656};
4657
4658#if !defined(DART_PRECOMPILED_RUNTIME)
4660 public:
4662 : SerializationCluster("LibraryPrefix",
4663 kLibraryPrefixCid,
4664 compiler::target::LibraryPrefix::InstanceSize()) {}
4666
4667 void Trace(Serializer* s, ObjectPtr object) {
4668 LibraryPrefixPtr prefix = LibraryPrefix::RawCast(object);
4669 objects_.Add(prefix);
4671 }
4672
4674 const intptr_t count = objects_.length();
4675 s->WriteUnsigned(count);
4676 for (intptr_t i = 0; i < count; i++) {
4677 LibraryPrefixPtr prefix = objects_[i];
4678 s->AssignRef(prefix);
4679 }
4680 }
4681
4683 const intptr_t count = objects_.length();
4684 for (intptr_t i = 0; i < count; i++) {
4685 LibraryPrefixPtr prefix = objects_[i];
4688 s->Write<uint16_t>(prefix->untag()->num_imports_);
4689 s->Write<bool>(prefix->untag()->is_deferred_load_);
4690 }
4691 }
4692
4693 private:
4695};
4696#endif // !DART_PRECOMPILED_RUNTIME
4697
4699 public:
4701 : DeserializationCluster("LibraryPrefix") {}
4703
4704 void ReadAlloc(Deserializer* d) override {
4706 }
4707
4708 void ReadFill(Deserializer* d_) override {
4710
4711 ASSERT(!is_canonical()); // Never canonical.
4712 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4713 LibraryPrefixPtr prefix = static_cast<LibraryPrefixPtr>(d.Ref(id));
4714 Deserializer::InitializeHeader(prefix, kLibraryPrefixCid, LibraryPrefix::InstanceSize());
4716 d.ReadFromTo(prefix);
4717 prefix->untag()->num_imports_ = d.Read<uint16_t>();
4718 prefix->untag()->is_deferred_load_ = d.Read<bool>();
4719 }
4720 }
4721};
4722
4723#if !defined(DART_PRECOMPILED_RUNTIME)
4726 CanonicalTypeSet,
4727 Type,
4728 TypePtr,
4729 /*kAllCanonicalObjectsAreIncludedIntoSet=*/false> {
4730 public:
4731 TypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
4733 kTypeCid,
4735 represents_canonical_set,
4736 "Type",
4737 compiler::target::Type::InstanceSize()) {}
4739
4740 void Trace(Serializer* s, ObjectPtr object) {
4741 TypePtr type = Type::RawCast(object);
4742 objects_.Add(type);
4743
4745
4746 ASSERT(type->untag()->type_class_id() != kIllegalCid);
4747 ClassPtr type_class =
4748 s->isolate_group()->class_table()->At(type->untag()->type_class_id());
4749 s->Push(type_class);
4750 }
4751
4753 intptr_t count = objects_.length();
4754 s->WriteUnsigned(count);
4756 for (intptr_t i = 0; i < count; i++) {
4757 TypePtr type = objects_[i];
4758 s->AssignRef(type);
4759 }
4761 }
4762
4764 intptr_t count = objects_.length();
4765 for (intptr_t i = 0; i < count; i++) {
4766 WriteType(s, objects_[i]);
4767 }
4768 }
4769
4770 private:
4771 Type& type_ = Type::Handle();
4772 Class& cls_ = Class::Handle();
4773
4774 // Type::Canonicalize does not actually put all canonical Type objects into
4775 // canonical_types set. Some of the canonical declaration types (but not all
4776 // of them) are simply cached in UntaggedClass::declaration_type_ and are not
4777 // inserted into the canonical_types set.
4778 // Keep in sync with Type::Canonicalize.
4779 virtual bool IsInCanonicalSet(Serializer* s, TypePtr type) {
4780 ClassPtr type_class =
4781 s->isolate_group()->class_table()->At(type->untag()->type_class_id());
4782 if (type_class->untag()->declaration_type() != type) {
4783 return true;
4784 }
4785
4786 type_ = type;
4787 cls_ = type_class;
4788 return !type_.IsDeclarationTypeOf(cls_);
4789 }
4790
4791 void WriteType(Serializer* s, TypePtr type) {
4793#if defined(DART_PRECOMPILER)
4794 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
4795 ClassPtr type_class =
4796 s->isolate_group()->class_table()->At(type->untag()->type_class_id());
4797 s->AttributePropertyRef(type_class, "<type_class>");
4798 }
4799#endif
4801 s->WriteUnsigned(type->untag()->flags());
4802 }
4803};
4804#endif // !DART_PRECOMPILED_RUNTIME
4805
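// After types are read back, PostLoad either keeps the type-testing-stub
// entry points that came with the snapshot (when the snapshot includes code)
// or installs default stubs, and re-canonicalizes types loaded into non-root
// units.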
4808 CanonicalTypeSet,
4809 /*kAllCanonicalObjectsAreIncludedIntoSet=*/false> {
4810 public:
4811 explicit TypeDeserializationCluster(bool is_canonical, bool is_root_unit)
4812 : CanonicalSetDeserializationCluster(is_canonical, is_root_unit, "Type") {
4813 }
4815
4816 void ReadAlloc(Deserializer* d) override {
4819 }
4820
4821 void ReadFill(Deserializer* d_) override {
4823
4824 const bool mark_canonical = is_root_unit_ && is_canonical();
4825 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4826 TypePtr type = static_cast<TypePtr>(d.Ref(id));
4827 Deserializer::InitializeHeader(type, kTypeCid, Type::InstanceSize(),
4828 mark_canonical);
4829 d.ReadFromTo(type);
4830 type->untag()->set_flags(d.ReadUnsigned());
4831 }
4832 }
4833
4834 void PostLoad(Deserializer* d, const Array& refs) override {
4835 if (!table_.IsNull()) {
4836 auto object_store = d->isolate_group()->object_store();
4837 VerifyCanonicalSet(d, refs,
4838 Array::Handle(object_store->canonical_types()));
4839 object_store->set_canonical_types(table_);
4840 } else if (!is_root_unit_ && is_canonical()) {
4842 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
4843 type ^= refs.At(i);
4844 type = type.Canonicalize(d->thread());
4845 refs.SetAt(i, type);
4846 }
4847 }
4848
4849 Type& type = Type::Handle(d->zone());
4850 Code& stub = Code::Handle(d->zone());
4851
4852 if (Snapshot::IncludesCode(d->kind())) {
4853 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4854 type ^= refs.At(id);
4855 type.UpdateTypeTestingStubEntryPoint();
4856 }
4857 } else {
4858 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4859 type ^= refs.At(id);
4860 stub = TypeTestingStubGenerator::DefaultCodeForType(type);
4861 type.InitializeTypeTestingStubNonAtomic(stub);
4862 }
4863 }
4864 }
4865};
4866
4867#if !defined(DART_PRECOMPILED_RUNTIME)
4869 : public CanonicalSetSerializationCluster<CanonicalFunctionTypeSet,
4870 FunctionType,
4871 FunctionTypePtr> {
4872 public:
4874 bool represents_canonical_set)
4876 kFunctionTypeCid,
4878 represents_canonical_set,
4879 "FunctionType",
4880 compiler::target::FunctionType::InstanceSize()) {}
4882
4883 void Trace(Serializer* s, ObjectPtr object) {
4884 FunctionTypePtr type = FunctionType::RawCast(object);
4885 objects_.Add(type);
4887 }
4888
4890 intptr_t count = objects_.length();
4891 s->WriteUnsigned(count);
4893
4894 for (intptr_t i = 0; i < count; i++) {
4895 FunctionTypePtr type = objects_[i];
4896 s->AssignRef(type);
4897 }
4899 }
4900
4902 intptr_t count = objects_.length();
4903 for (intptr_t i = 0; i < count; i++) {
4904 WriteFunctionType(s, objects_[i]);
4905 }
4906 }
4907
4908 private:
4909 void WriteFunctionType(Serializer* s, FunctionTypePtr type) {
4912 ASSERT(Utils::IsUint(8, type->untag()->flags()));
4913 s->Write<uint8_t>(type->untag()->flags());
4914 s->Write<uint32_t>(type->untag()->packed_parameter_counts_);
4915 s->Write<uint16_t>(type->untag()->packed_type_parameter_counts_);
4916 }
4917};
4918#endif // !DART_PRECOMPILED_RUNTIME
4919
4921 : public CanonicalSetDeserializationCluster<CanonicalFunctionTypeSet> {
4922 public:
4924 bool is_root_unit)
4926 is_root_unit,
4927 "FunctionType") {}
4929
4930 void ReadAlloc(Deserializer* d) override {
4933 }
4934
4935 void ReadFill(Deserializer* d_) override {
4937
4938 const bool mark_canonical = is_root_unit_ && is_canonical();
4939 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4940 FunctionTypePtr type = static_cast<FunctionTypePtr>(d.Ref(id));
4942 Deserializer::InitializeHeader(type, kFunctionTypeCid, FunctionType::InstanceSize(), mark_canonical);
4943 d.ReadFromTo(type);
4944 type->untag()->set_flags(d.Read<uint8_t>());
4945 type->untag()->packed_parameter_counts_ = d.Read<uint32_t>();
4946 type->untag()->packed_type_parameter_counts_ = d.Read<uint16_t>();
4947 }
4948 }
4949
4950 void PostLoad(Deserializer* d, const Array& refs) override {
4951 if (!table_.IsNull()) {
4952 auto object_store = d->isolate_group()->object_store();
4954 d, refs, Array::Handle(object_store->canonical_function_types()));
4955 object_store->set_canonical_function_types(table_);
4956 } else if (!is_root_unit_ && is_canonical()) {
4958 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
4959 type ^= refs.At(i);
4960 type = type.Canonicalize(d->thread());
4961 refs.SetAt(i, type);
4962 }
4963 }
4964
4966 Code& stub = Code::Handle(d->zone());
4967
4968 if (Snapshot::IncludesCode(d->kind())) {
4969 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4970 type ^= refs.At(id);
4971 type.UpdateTypeTestingStubEntryPoint();
4972 }
4973 } else {
4974 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4975 type ^= refs.At(id);
4976 stub = TypeTestingStubGenerator::DefaultCodeForType(type);
4977 type.InitializeTypeTestingStubNonAtomic(stub);
4978 }
4979 }
4980 }
4981};
4982
4983#if !defined(DART_PRECOMPILED_RUNTIME)
4985 : public CanonicalSetSerializationCluster<CanonicalRecordTypeSet,
4986 RecordType,
4987 RecordTypePtr> {
4988 public:
4990 bool represents_canonical_set)
4992 kRecordTypeCid,
4994 represents_canonical_set,
4995 "RecordType",
4996 compiler::target::RecordType::InstanceSize()) {}
4998
4999 void Trace(Serializer* s, ObjectPtr object) {
5000 RecordTypePtr type = RecordType::RawCast(object);
5001 objects_.Add(type);
5003 }
5004
5006 intptr_t count = objects_.length();
5007 s->WriteUnsigned(count);
5009
5010 for (intptr_t i = 0; i < count; i++) {
5011 RecordTypePtr type = objects_[i];
5012 s->AssignRef(type);
5013 }
5015 }
5016
5018 intptr_t count = objects_.length();
5019 for (intptr_t i = 0; i < count; i++) {
5020 WriteRecordType(s, objects_[i]);
5021 }
5022 }
5023
5024 private:
5025 void WriteRecordType(Serializer* s, RecordTypePtr type) {
5028 ASSERT(Utils::IsUint(8, type->untag()->flags()));
5029 s->Write<uint8_t>(type->untag()->flags());
5030 }
5031};
5032#endif // !DART_PRECOMPILED_RUNTIME
5033
5035 : public CanonicalSetDeserializationCluster<CanonicalRecordTypeSet> {
5036 public:
5039 is_root_unit,
5040 "RecordType") {}
5042
5043 void ReadAlloc(Deserializer* d) override {
5046 }
5047
5048 void ReadFill(Deserializer* d_) override {
5050
5051 const bool mark_canonical = is_root_unit_ && is_canonical();
5052 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5053 RecordTypePtr type = static_cast<RecordTypePtr>(d.Ref(id));
5055 Deserializer::InitializeHeader(type, kRecordTypeCid, RecordType::InstanceSize(), mark_canonical);
5056 d.ReadFromTo(type);
5057 type->untag()->set_flags(d.Read<uint8_t>());
5058 }
5059 }
5060
5061 void PostLoad(Deserializer* d, const Array& refs) override {
5062 if (!table_.IsNull()) {
5063 auto object_store = d->isolate_group()->object_store();
5064 VerifyCanonicalSet(d, refs,
5065 Array::Handle(object_store->canonical_record_types()));
5066 object_store->set_canonical_record_types(table_);
5067 } else if (!is_root_unit_ && is_canonical()) {
5069 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
5070 type ^= refs.At(i);
5071 type = type.Canonicalize(d->thread());
5072 refs.SetAt(i, type);
5073 }
5074 }
5075
5076 RecordType& type = RecordType::Handle(d->zone());
5077 Code& stub = Code::Handle(d->zone());
5078
5079 if (Snapshot::IncludesCode(d->kind())) {
5080 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5081 type ^= refs.At(id);
5082 type.UpdateTypeTestingStubEntryPoint();
5083 }
5084 } else {
5085 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5086 type ^= refs.At(id);
5087 stub = TypeTestingStubGenerator::DefaultCodeForType(type);
5088 type.InitializeTypeTestingStubNonAtomic(stub);
5089 }
5090 }
5091 }
5092};
5093
5094#if !defined(DART_PRECOMPILED_RUNTIME)
5096 : public CanonicalSetSerializationCluster<CanonicalTypeParameterSet,
5097 TypeParameter,
5098 TypeParameterPtr> {
5099 public:
5101 bool cluster_represents_canonical_set)
5103 kTypeParameterCid,
5105 cluster_represents_canonical_set,
5106 "TypeParameter",
5107 compiler::target::TypeParameter::InstanceSize()) {}
5109
5110 void Trace(Serializer* s, ObjectPtr object) {
5111 TypeParameterPtr type = TypeParameter::RawCast(object);
5112 objects_.Add(type);
5113
5115 }
5116
5118 intptr_t count = objects_.length();
5119 s->WriteUnsigned(count);
5121 for (intptr_t i = 0; i < count; i++) {
5122 TypeParameterPtr type = objects_[i];
5123 s->AssignRef(type);
5124 }
5126 }
5127
5129 intptr_t count = objects_.length();
5130 for (intptr_t i = 0; i < count; i++) {
5131 WriteTypeParameter(s, objects_[i]);
5132 }
5133 }
5134
5135 private:
5136 void WriteTypeParameter(Serializer* s, TypeParameterPtr type) {
5139 s->Write<uint16_t>(type->untag()->base_);
5140 s->Write<uint16_t>(type->untag()->index_);
5141 ASSERT(Utils::IsUint(8, type->untag()->flags()));
5142 s->Write<uint8_t>(type->untag()->flags());
5143 }
5144};
5145#endif // !DART_PRECOMPILED_RUNTIME
5146
5148 : public CanonicalSetDeserializationCluster<CanonicalTypeParameterSet> {
5149 public:
5151 bool is_root_unit)
5153 is_root_unit,
5154 "TypeParameter") {}
5156
5157 void ReadAlloc(Deserializer* d) override {
5160 }
5161
5162 void ReadFill(Deserializer* d_) override {
5164
5165 const bool mark_canonical = is_root_unit_ && is_canonical();
5166 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5167 TypeParameterPtr type = static_cast<TypeParameterPtr>(d.Ref(id));
5168 Deserializer::InitializeHeader(type, kTypeParameterCid, TypeParameter::InstanceSize(), mark_canonical);
5171 d.ReadFromTo(type);
5172 type->untag()->base_ = d.Read<uint16_t>();
5173 type->untag()->index_ = d.Read<uint16_t>();
5174 type->untag()->set_flags(d.Read<uint8_t>());
5175 }
5176 }
5177
5178 void PostLoad(Deserializer* d, const Array& refs) override {
5179 if (!table_.IsNull()) {
5180 auto object_store = d->isolate_group()->object_store();
5182 d, refs, Array::Handle(object_store->canonical_type_parameters()));
5183 object_store->set_canonical_type_parameters(table_);
5184 } else if (!is_root_unit_ && is_canonical()) {
5185 TypeParameter& type_param = TypeParameter::Handle(d->zone());
5186 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
5187 type_param ^= refs.At(i);
5188 type_param ^= type_param.Canonicalize(d->thread());
5189 refs.SetAt(i, type_param);
5190 }
5191 }
5192
5193 TypeParameter& type_param = TypeParameter::Handle(d->zone());
5194 Code& stub = Code::Handle(d->zone());
5195
5196 if (Snapshot::IncludesCode(d->kind())) {
5197 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5198 type_param ^= refs.At(id);
5199 type_param.UpdateTypeTestingStubEntryPoint();
5200 }
5201 } else {
5202 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5203 type_param ^= refs.At(id);
5204 stub = TypeTestingStubGenerator::DefaultCodeForType(type_param);
5205 type_param.InitializeTypeTestingStubNonAtomic(stub);
5206 }
5207 }
5208 }
5209};
5210
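// Closures: in the precompiled runtime the cached entry_point_ is zeroed
// during ReadFill and refilled from the closure's function in PostLoad, since
// only AOT snapshots cache the entry point in the closure itself.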
5211#if !defined(DART_PRECOMPILED_RUNTIME)
5213 public:
5215 : SerializationCluster("Closure",
5216 kClosureCid,
5217 compiler::target::Closure::InstanceSize(),
5218 is_canonical) {}
5220
5221 void Trace(Serializer* s, ObjectPtr object) {
5222 ClosurePtr closure = Closure::RawCast(object);
5223 objects_.Add(closure);
5225 }
5226
5228 const intptr_t count = objects_.length();
5229 s->WriteUnsigned(count);
5230 for (intptr_t i = 0; i < count; i++) {
5231 ClosurePtr closure = objects_[i];
5232 s->AssignRef(closure);
5233 }
5234 }
5235
5237 const intptr_t count = objects_.length();
5238 for (intptr_t i = 0; i < count; i++) {
5239 ClosurePtr closure = objects_[i];
5242 }
5243 }
5244
5245 private:
5247};
5248#endif // !DART_PRECOMPILED_RUNTIME
5249
5252 public:
5253 explicit ClosureDeserializationCluster(bool is_canonical, bool is_root_unit)
5256 is_root_unit) {}
5258
5259 void ReadAlloc(Deserializer* d) override {
5261 }
5262
5263 void ReadFill(Deserializer* d_) override {
5265
5266 const bool mark_canonical = is_root_unit_ && is_canonical();
5267 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5268 ClosurePtr closure = static_cast<ClosurePtr>(d.Ref(id));
5270 Deserializer::InitializeHeader(closure, kClosureCid, Closure::InstanceSize(), mark_canonical);
5271 d.ReadFromTo(closure);
5272#if defined(DART_PRECOMPILED_RUNTIME)
5273 closure->untag()->entry_point_ = 0;
5274#endif
5275 }
5276 }
5277
5278#if defined(DART_PRECOMPILED_RUNTIME)
5279 void PostLoad(Deserializer* d, const Array& refs) override {
5280 // We only cache the entry point in bare instructions mode (as we need
5281 // to load the function anyway otherwise).
5282 ASSERT(d->kind() == Snapshot::kFullAOT);
5283 auto& closure = Closure::Handle(d->zone());
5284 auto& func = Function::Handle(d->zone());
5285 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
5286 closure ^= refs.At(i);
5287 func = closure.function();
5288 uword entry_point = func.entry_point();
5289 ASSERT(entry_point != 0);
5290 closure.ptr()->untag()->entry_point_ = entry_point;
5291 }
5292 }
5293#endif
5294};
5295
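// Smis and Mints are written together as a single "int" cluster of int64
// values. On load, values that still fit in a Smi are materialized as Smis;
// the rest get Mint boxes, so no separate fill pass is needed.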
5296#if !defined(DART_PRECOMPILED_RUNTIME)
5298 public:
5300 : SerializationCluster("int", kMintCid, kSizeVaries, is_canonical) {}
5302
5303 void Trace(Serializer* s, ObjectPtr object) {
5304 if (!object->IsHeapObject()) {
5305 SmiPtr smi = Smi::RawCast(object);
5306 smis_.Add(smi);
5307 } else {
5308 MintPtr mint = Mint::RawCast(object);
5309 mints_.Add(mint);
5310 }
5311 }
5312
5314 s->WriteUnsigned(smis_.length() + mints_.length());
5315 for (intptr_t i = 0; i < smis_.length(); i++) {
5316 SmiPtr smi = smis_[i];
5317 s->AssignRef(smi);
5318 AutoTraceObject(smi);
5319 const int64_t value = Smi::Value(smi);
5320 s->Write<int64_t>(value);
5321 if (!Smi::IsValid(value)) {
5322 // This Smi will become a Mint when loaded.
5324 }
5325 }
5326 for (intptr_t i = 0; i < mints_.length(); i++) {
5327 MintPtr mint = mints_[i];
5328 s->AssignRef(mint);
5329 AutoTraceObject(mint);
5330 s->Write<int64_t>(mint->untag()->value_);
5331 // All Mints on the host should be Mints on the target.
5332 ASSERT(!Smi::IsValid(mint->untag()->value_));
5334 }
5335 }
5336
5338
5339 private:
5342};
5343#endif // !DART_PRECOMPILED_RUNTIME
5344
5347 public:
5348 explicit MintDeserializationCluster(bool is_canonical, bool is_root_unit)
5351 is_root_unit) {}
5353
5354 void ReadAlloc(Deserializer* d) override {
5355 start_index_ = d->next_index();
5356 const intptr_t count = d->ReadUnsigned();
5357 const bool mark_canonical = is_canonical();
5358 for (intptr_t i = 0; i < count; i++) {
5359 int64_t value = d->Read<int64_t>();
5360 if (Smi::IsValid(value)) {
5361 d->AssignRef(Smi::New(value));
5362 } else {
5363 MintPtr mint = static_cast<MintPtr>(d->Allocate(Mint::InstanceSize()));
5365 Deserializer::InitializeHeader(mint, kMintCid, Mint::InstanceSize(), mark_canonical);
5366 mint->untag()->value_ = value;
5367 d->AssignRef(mint);
5368 }
5369 }
5370 stop_index_ = d->next_index();
5371 }
5372
5373 void ReadFill(Deserializer* d_) override { Deserializer::Local d(d_); }
5374};
5375
5376#if !defined(DART_PRECOMPILED_RUNTIME)
5378 public:
5380 : SerializationCluster("double",
5381 kDoubleCid,
5382 compiler::target::Double::InstanceSize(),
5383 is_canonical) {}
5385
5386 void Trace(Serializer* s, ObjectPtr object) {
5387 DoublePtr dbl = Double::RawCast(object);
5388 objects_.Add(dbl);
5389 }
5390
5392 const intptr_t count = objects_.length();
5393 s->WriteUnsigned(count);
5394 for (intptr_t i = 0; i < count; i++) {
5395 DoublePtr dbl = objects_[i];
5396 s->AssignRef(dbl);
5397 }
5398 }
5399
5401 const intptr_t count = objects_.length();
5402 for (intptr_t i = 0; i < count; i++) {
5403 DoublePtr dbl = objects_[i];
5404 AutoTraceObject(dbl);
5405 s->Write<double>(dbl->untag()->value_);
5406 }
5407 }
5408
5409 private:
5410 GrowableArray<DoublePtr> objects_;
5411};
5412#endif // !DART_PRECOMPILED_RUNTIME
5413
5416 public:
5417 explicit DoubleDeserializationCluster(bool is_canonical, bool is_root_unit)
5420 is_root_unit) {}
5422
5423 void ReadAlloc(Deserializer* d) override {
5425 }
5426
5427 void ReadFill(Deserializer* d_) override {
5429 const bool mark_canonical = is_root_unit_ && is_canonical();
5430 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5431 DoublePtr dbl = static_cast<DoublePtr>(d.Ref(id));
5433 Deserializer::InitializeHeader(dbl, kDoubleCid, Double::InstanceSize(), mark_canonical);
5434 dbl->untag()->value_ = d.Read<double>();
5435 }
5436 }
5437};
5438
5439#if !defined(DART_PRECOMPILED_RUNTIME)
5441 public:
5443 : SerializationCluster("Simd128",
5444 cid,
5445 compiler::target::Int32x4::InstanceSize(),
5446 is_canonical) {
5451 }
5453
5454 void Trace(Serializer* s, ObjectPtr object) { objects_.Add(object); }
5455
5457 const intptr_t count = objects_.length();
5458 s->WriteUnsigned(count);
5459 for (intptr_t i = 0; i < count; i++) {
5460 ObjectPtr vector = objects_[i];
5461 s->AssignRef(vector);
5462 }
5463 }
5464
5466 const intptr_t count = objects_.length();
5467 for (intptr_t i = 0; i < count; i++) {
5468 ObjectPtr vector = objects_[i];
5469 AutoTraceObject(vector);
5472 s->WriteBytes(&(static_cast<Int32x4Ptr>(vector)->untag()->value_),
5473 sizeof(simd128_value_t));
5474 }
5475 }
5476
5477 private:
5478 GrowableArray<ObjectPtr> objects_;
5479};
5480#endif // !DART_PRECOMPILED_RUNTIME
5481
5484 public:
5486 bool is_canonical,
5487 bool is_root_unit)
5490 is_root_unit),
5491 cid_(cid) {}
5493
5494 void ReadAlloc(Deserializer* d) override {
5498 }
5499
5500 void ReadFill(Deserializer* d_) override {
5502 const intptr_t cid = cid_;
5503 const bool mark_canonical = is_root_unit_ && is_canonical();
5504 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5505 ObjectPtr vector = d.Ref(id);
5507 Deserializer::InitializeHeader(vector, cid, Int32x4::InstanceSize(), mark_canonical);
5508 d.ReadBytes(&(static_cast<Int32x4Ptr>(vector)->untag()->value_),
5509 sizeof(simd128_value_t));
5510 }
5511 }
5512
5513 private:
5514 intptr_t cid_;
5515};
5516
5517#if !defined(DART_PRECOMPILED_RUNTIME)
5519 public:
5522 "GrowableObjectArray",
5523 kGrowableObjectArrayCid,
5524 compiler::target::GrowableObjectArray::InstanceSize()) {}
5526
5527 void Trace(Serializer* s, ObjectPtr object) {
5528 GrowableObjectArrayPtr array = GrowableObjectArray::RawCast(object);
5529 objects_.Add(array);
5530 PushFromTo(array);
5531 }
5532
5534 const intptr_t count = objects_.length();
5535 s->WriteUnsigned(count);
5536 for (intptr_t i = 0; i < count; i++) {
5537 GrowableObjectArrayPtr array = objects_[i];
5538 s->AssignRef(array);
5539 }
5540 }
5541
5543 const intptr_t count = objects_.length();
5544 for (intptr_t i = 0; i < count; i++) {
5545 GrowableObjectArrayPtr array = objects_[i];
5546 AutoTraceObject(array);
5547 WriteFromTo(array);
5548 }
5549 }
5550
5551 private:
5553};
5554#endif // !DART_PRECOMPILED_RUNTIME
5555
5557 : public DeserializationCluster {
5558 public:
5560 : DeserializationCluster("GrowableObjectArray") {}
5562
5563 void ReadAlloc(Deserializer* d) override {
5565 }
5566
5567 void ReadFill(Deserializer* d_) override {
5569
5570 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5571 GrowableObjectArrayPtr list =
5572 static_cast<GrowableObjectArrayPtr>(d.Ref(id));
5573 Deserializer::InitializeHeader(list, kGrowableObjectArrayCid, GrowableObjectArray::InstanceSize());
5575 d.ReadFromTo(list);
5576 }
5577 }
5578};
5579
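// Records: the alloc stream carries the field count, the fill stream carries
// the Smi-encoded shape followed by one element reference per field.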
5580#if !defined(DART_PRECOMPILED_RUNTIME)
5582 public:
5584 : SerializationCluster("Record", kRecordCid, kSizeVaries, is_canonical) {}
5586
5587 void Trace(Serializer* s, ObjectPtr object) {
5588 RecordPtr record = Record::RawCast(object);
5589 objects_.Add(record);
5590
5591 const intptr_t num_fields = Record::NumFields(record);
5592 for (intptr_t i = 0; i < num_fields; ++i) {
5593 s->Push(record->untag()->field(i));
5594 }
5595 }
5596
5598 const intptr_t count = objects_.length();
5599 s->WriteUnsigned(count);
5600 for (intptr_t i = 0; i < count; ++i) {
5601 RecordPtr record = objects_[i];
5602 s->AssignRef(record);
5603 AutoTraceObject(record);
5604 const intptr_t num_fields = Record::NumFields(record);
5605 s->WriteUnsigned(num_fields);
5607 }
5608 }
5609
5611 const intptr_t count = objects_.length();
5612 for (intptr_t i = 0; i < count; ++i) {
5613 RecordPtr record = objects_[i];
5614 AutoTraceObject(record);
5615 const RecordShape shape(record->untag()->shape());
5616 s->WriteUnsigned(shape.AsInt());
5617 const intptr_t num_fields = shape.num_fields();
5618 for (intptr_t j = 0; j < num_fields; ++j) {
5619 s->WriteElementRef(record->untag()->field(j), j);
5620 }
5621 }
5622 }
5623
5624 private:
5625 GrowableArray<RecordPtr> objects_;
5626};
5627#endif // !DART_PRECOMPILED_RUNTIME
5628
5631 public:
5632 explicit RecordDeserializationCluster(bool is_canonical, bool is_root_unit)
5635 is_root_unit) {}
5637
5638 void ReadAlloc(Deserializer* d) override {
5639 start_index_ = d->next_index();
5640 const intptr_t count = d->ReadUnsigned();
5641 for (intptr_t i = 0; i < count; i++) {
5642 const intptr_t num_fields = d->ReadUnsigned();
5643 d->AssignRef(d->Allocate(Record::InstanceSize(num_fields)));
5644 }
5645 stop_index_ = d->next_index();
5646 }
5647
5648 void ReadFill(Deserializer* d_) override {
5650
5651 const bool stamp_canonical = is_root_unit_ && is_canonical();
5652 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5653 RecordPtr record = static_cast<RecordPtr>(d.Ref(id));
5654 const intptr_t shape = d.ReadUnsigned();
5655 const intptr_t num_fields = RecordShape(shape).num_fields();
5656 Deserializer::InitializeHeader(record, kRecordCid,
5657 Record::InstanceSize(num_fields),
5658 stamp_canonical);
5659 record->untag()->shape_ = Smi::New(shape);
5660 for (intptr_t j = 0; j < num_fields; ++j) {
5661 record->untag()->data()[j] = d.ReadRef();
5662 }
5663 }
5664 }
5665};
5666
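// Internal typed data is written as its element count followed by the raw
// bytes; the element size is derived from the class id on both ends.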
5667#if !defined(DART_PRECOMPILED_RUNTIME)
5669 public:
5671 : SerializationCluster("TypedData", cid) {}
5673
5674 void Trace(Serializer* s, ObjectPtr object) {
5675 TypedDataPtr data = TypedData::RawCast(object);
5676 objects_.Add(data);
5677 }
5678
5680 const intptr_t count = objects_.length();
5681 s->WriteUnsigned(count);
5683 for (intptr_t i = 0; i < count; i++) {
5684 TypedDataPtr data = objects_[i];
5685 s->AssignRef(data);
5687 const intptr_t length = Smi::Value(data->untag()->length());
5688 s->WriteUnsigned(length);
5691 }
5692 }
5693
5695 const intptr_t count = objects_.length();
5696 intptr_t element_size = TypedData::ElementSizeInBytes(cid_);
5697 for (intptr_t i = 0; i < count; i++) {
5698 TypedDataPtr data = objects_[i];
5700 const intptr_t length = Smi::Value(data->untag()->length());
5701 s->WriteUnsigned(length);
5702 uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data());
5703 s->WriteBytes(cdata, length * element_size);
5704 }
5705 }
5706
5707 private:
5709};
5710#endif // !DART_PRECOMPILED_RUNTIME
5711
5713 public:
5715 : DeserializationCluster("TypedData"), cid_(cid) {}
5717
5718 void ReadAlloc(Deserializer* d) override {
5719 start_index_ = d->next_index();
5720 const intptr_t count = d->ReadUnsigned();
5721 intptr_t element_size = TypedData::ElementSizeInBytes(cid_);
5722 for (intptr_t i = 0; i < count; i++) {
5723 const intptr_t length = d->ReadUnsigned();
5724 d->AssignRef(d->Allocate(TypedData::InstanceSize(length * element_size)));
5725 }
5726 stop_index_ = d->next_index();
5727 }
5728
5729 void ReadFill(Deserializer* d_) override {
5731
5732 ASSERT(!is_canonical()); // Never canonical.
5733 intptr_t element_size = TypedData::ElementSizeInBytes(cid_);
5734
5735 const intptr_t cid = cid_;
5736 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5737 TypedDataPtr data = static_cast<TypedDataPtr>(d.Ref(id));
5738 const intptr_t length = d.ReadUnsigned();
5739 const intptr_t length_in_bytes = length * element_size;
5741 Deserializer::InitializeHeader(data, cid, TypedData::InstanceSize(length_in_bytes));
5742 data->untag()->length_ = Smi::New(length);
5743 data->untag()->RecomputeDataField();
5744 uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data());
5745 d.ReadBytes(cdata, length_in_bytes);
5746 }
5747 }
5748
5749 private:
5750 const intptr_t cid_;
5751};
5752
5753#if !defined(DART_PRECOMPILED_RUNTIME)
5755 public:
5757 : SerializationCluster("TypedDataView",
5758 cid,
5759 compiler::target::TypedDataView::InstanceSize()) {}
5761
5762 void Trace(Serializer* s, ObjectPtr object) {
5763 TypedDataViewPtr view = TypedDataView::RawCast(object);
5764 objects_.Add(view);
5765
5766 PushFromTo(view);
5767 }
5768
5770 const intptr_t count = objects_.length();
5771 s->WriteUnsigned(count);
5772 for (intptr_t i = 0; i < count; i++) {
5773 TypedDataViewPtr view = objects_[i];
5774 s->AssignRef(view);
5775 }
5776 }
5777
5779 const intptr_t count = objects_.length();
5780 for (intptr_t i = 0; i < count; i++) {
5781 TypedDataViewPtr view = objects_[i];
5782 AutoTraceObject(view);
5783 WriteFromTo(view);
5784 }
5785 }
5786
5787 private:
5789};
5790#endif // !DART_PRECOMPILED_RUNTIME
5791
5793 public:
5795 : DeserializationCluster("TypedDataView"), cid_(cid) {}
5797
5798 void ReadAlloc(Deserializer* d) override {
5800 }
5801
5802 void ReadFill(Deserializer* d_) override {
5804
5805 const intptr_t cid = cid_;
5806 ASSERT(!is_canonical()); // Never canonical.
5807 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5808 TypedDataViewPtr view = static_cast<TypedDataViewPtr>(d.Ref(id));
5809 Deserializer::InitializeHeader(view, cid, TypedDataView::InstanceSize());
5810 d.ReadFromTo(view);
5811 }
5812 }
5813
5814 void PostLoad(Deserializer* d, const Array& refs) override {
5815 auto& view = TypedDataView::Handle(d->zone());
5816 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5817 view ^= refs.At(id);
5818 view.RecomputeDataField();
5819 }
5820 }
5821
5822 private:
5823 const intptr_t cid_;
5824};
5825
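// External typed data read from a snapshot is not copied: data_ is pointed
// directly at the snapshot buffer and the reader just advances past the
// bytes, with no finalizer attached.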
5826#if !defined(DART_PRECOMPILED_RUNTIME)
5828 public:
5831 "ExternalTypedData",
5832 cid,
5833 compiler::target::ExternalTypedData::InstanceSize()) {}
5835
5836 void Trace(Serializer* s, ObjectPtr object) {
5837 ExternalTypedDataPtr data = ExternalTypedData::RawCast(object);
5838 objects_.Add(data);
5839 }
5840
5842 const intptr_t count = objects_.length();
5843 s->WriteUnsigned(count);
5844 for (intptr_t i = 0; i < count; i++) {
5845 ExternalTypedDataPtr data = objects_[i];
5846 s->AssignRef(data);
5847 }
5848 }
5849
5851 const intptr_t count = objects_.length();
5853 for (intptr_t i = 0; i < count; i++) {
5854 ExternalTypedDataPtr data = objects_[i];
5856 const intptr_t length = Smi::Value(data->untag()->length());
5857 s->WriteUnsigned(length);
5858 uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data_);
5860 s->WriteBytes(cdata, length * element_size);
5861 }
5862 }
5863
5864 private:
5866};
5867#endif // !DART_PRECOMPILED_RUNTIME
5868
5870 public:
5872 : DeserializationCluster("ExternalTypedData"), cid_(cid) {}
5874
5875 void ReadAlloc(Deserializer* d) override {
5877 }
5878
5879 void ReadFill(Deserializer* d_) override {
5881
5882 ASSERT(!is_canonical()); // Never canonical.
5883 const intptr_t cid = cid_;
5885 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5886 ExternalTypedDataPtr data = static_cast<ExternalTypedDataPtr>(d.Ref(id));
5887 const intptr_t length = d.ReadUnsigned();
5888 Deserializer::InitializeHeader(data, cid, ExternalTypedData::InstanceSize());
5890 data->untag()->length_ = Smi::New(length);
5892 data->untag()->data_ = const_cast<uint8_t*>(d.AddressOfCurrentPosition());
5893 d.Advance(length * element_size);
5894 // No finalizer / external size 0.
5895 }
5896 }
5897
5898 private:
5899 const intptr_t cid_;
5900};
5901
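// Delta-encoded typed data stores monotonically non-decreasing Uint16/Uint32
// arrays as unsigned deltas from the previous element, with the element width
// in the low bit of the (length << 1) header. The deserializer reverses this
// by accumulating the deltas.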
5902#if !defined(DART_PRECOMPILED_RUNTIME)
5904 public:
5906 : SerializationCluster("DeltaEncodedTypedData",
5907 kDeltaEncodedTypedDataCid) {}
5909
5910 void Trace(Serializer* s, ObjectPtr object) {
5911 TypedDataPtr data = TypedData::RawCast(object);
5912 objects_.Add(data);
5913 }
5914
5916 const intptr_t count = objects_.length();
5917 s->WriteUnsigned(count);
5918 for (intptr_t i = 0; i < count; i++) {
5919 const TypedDataPtr data = objects_[i];
5920 const intptr_t element_size =
5921 TypedData::ElementSizeInBytes(data->GetClassId());
5922 s->AssignRef(data);
5924 const intptr_t length_in_bytes =
5925 Smi::Value(data->untag()->length()) * element_size;
5926 s->WriteUnsigned(length_in_bytes);
5929 }
5930 }
5931
5933 const intptr_t count = objects_.length();
5934 TypedData& typed_data = TypedData::Handle(s->zone());
5935 for (intptr_t i = 0; i < count; i++) {
5936 const TypedDataPtr data = objects_[i];
5938 const intptr_t cid = data->GetClassId();
5939 // Only Uint16 and Uint32 typed data is supported at the moment, so the element
5940 // width is encoded in the low bit of the length: 0 for Uint16, 1 for Uint32.
5941 ASSERT(cid == kTypedDataUint16ArrayCid ||
5942 cid == kTypedDataUint32ArrayCid);
5943 const intptr_t cid_flag = cid == kTypedDataUint16ArrayCid ? 0 : 1;
5944 const intptr_t length = Smi::Value(data->untag()->length());
5945 const intptr_t encoded_length = (length << 1) | cid_flag;
5946 s->WriteUnsigned(encoded_length);
5947 intptr_t prev = 0;
5948 typed_data = data;
5949 for (intptr_t j = 0; j < length; ++j) {
5950 const intptr_t value = (cid == kTypedDataUint16ArrayCid)
5951 ? typed_data.GetUint16(j << 1)
5952 : typed_data.GetUint32(j << 2);
5953 ASSERT(value >= prev);
5954 s->WriteUnsigned(value - prev);
5955 prev = value;
5956 }
5957 }
5958 }
5959
5960 private:
5962};
5963#endif // !DART_PRECOMPILED_RUNTIME
5964
5966 : public DeserializationCluster {
5967 public:
5969 : DeserializationCluster("DeltaEncodedTypedData") {}
5971
5972 void ReadAlloc(Deserializer* d) override {
5973 start_index_ = d->next_index();
5974 const intptr_t count = d->ReadUnsigned();
5975 for (intptr_t i = 0; i < count; i++) {
5976 const intptr_t length_in_bytes = d->ReadUnsigned();
5977 d->AssignRef(d->Allocate(TypedData::InstanceSize(length_in_bytes)));
5978 }
5979 stop_index_ = d->next_index();
5980 }
5981
5982 void ReadFill(Deserializer* d_) override {
5984 TypedData& typed_data = TypedData::Handle(d_->zone());
5985
5986 ASSERT(!is_canonical()); // Never canonical.
5987
5988 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5989 TypedDataPtr data = static_cast<TypedDataPtr>(d.Ref(id));
5990 const intptr_t encoded_length = d.ReadUnsigned();
5991 const intptr_t length = encoded_length >> 1;
5992 const intptr_t cid = (encoded_length & 0x1) == 0
5993 ? kTypedDataUint16ArrayCid
5994 : kTypedDataUint32ArrayCid;
5995 const intptr_t element_size = TypedData::ElementSizeInBytes(cid);
5996 const intptr_t length_in_bytes = length * element_size;
5997 Deserializer::InitializeHeader(data, cid,
5998 TypedData::InstanceSize(length_in_bytes));
5999 data->untag()->length_ = Smi::New(length);
6000 data->untag()->RecomputeDataField();
6001 intptr_t value = 0;
6002 typed_data = data;
6003 for (intptr_t j = 0; j < length; ++j) {
6004 value += d.ReadUnsigned();
6005 if (cid == kTypedDataUint16ArrayCid) {
6006 typed_data.SetUint16(j << 1, static_cast<uint16_t>(value));
6007 } else {
6008 typed_data.SetUint32(j << 2, value);
6009 }
6010 }
6011 }
6012 }
6013};
6014
6015#if !defined(DART_PRECOMPILED_RUNTIME)
6017 public:
6019 : SerializationCluster("StackTrace",
6020 kStackTraceCid,
6021 compiler::target::StackTrace::InstanceSize()) {}
6023
6024 void Trace(Serializer* s, ObjectPtr object) {
6025 StackTracePtr trace = StackTrace::RawCast(object);
6026 objects_.Add(trace);
6027 PushFromTo(trace);
6028 }
6029
6031 const intptr_t count = objects_.length();
6032 s->WriteUnsigned(count);
6033 for (intptr_t i = 0; i < count; i++) {
6034 StackTracePtr trace = objects_[i];
6035 s->AssignRef(trace);
6036 }
6037 }
6038
6040 const intptr_t count = objects_.length();
6041 for (intptr_t i = 0; i < count; i++) {
6042 StackTracePtr trace = objects_[i];
6043 AutoTraceObject(trace);
6044 WriteFromTo(trace);
6045 }
6046 }
6047
6048 private:
6050};
6051#endif // !DART_PRECOMPILED_RUNTIME
6052
6054 public:
6057
6058 void ReadAlloc(Deserializer* d) override {
6060 }
6061
6062 void ReadFill(Deserializer* d_) override {
6064
6065 ASSERT(!is_canonical()); // Never canonical.
6066 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6067 StackTracePtr trace = static_cast<StackTracePtr>(d.Ref(id));
6068 Deserializer::InitializeHeader(trace, kStackTraceCid,
6070 d.ReadFromTo(trace);
6071 }
6072 }
6073};
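// Editor's note: every deserialization cluster in this file follows the same
// two-pass protocol visible above: ReadAlloc() reserves one reference per
// object so references can later be resolved purely by index, and ReadFill()
// populates object contents in a second pass, which makes forward references
// unproblematic. A hypothetical, VM-free sketch of that shape:
#include <cstddef>
#include <memory>
#include <vector>

struct Node {
  Node* next = nullptr;
};

// Pass 1: allocate every object up front so indices are stable.
inline std::vector<std::unique_ptr<Node>> AllocPass(size_t count) {
  std::vector<std::unique_ptr<Node>> refs(count);
  for (auto& ref : refs) ref = std::make_unique<Node>();
  return refs;
}

// Pass 2: wire up references by index; an index may point forward or backward.
inline void FillPass(const std::vector<std::unique_ptr<Node>>& refs,
                     const std::vector<size_t>& next_index) {
  for (size_t i = 0; i < refs.size(); ++i) {
    refs[i]->next = refs[next_index[i]].get();
  }
}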
6074
6075#if !defined(DART_PRECOMPILED_RUNTIME)
6077 public:
6079 : SerializationCluster("RegExp",
6080 kRegExpCid,
6081 compiler::target::RegExp::InstanceSize()) {}
6083
6084 void Trace(Serializer* s, ObjectPtr object) {
6085 RegExpPtr regexp = RegExp::RawCast(object);
6086 objects_.Add(regexp);
6087 PushFromTo(regexp);
6088 }
6089
6091 const intptr_t count = objects_.length();
6092 s->WriteUnsigned(count);
6093 for (intptr_t i = 0; i < count; i++) {
6094 RegExpPtr regexp = objects_[i];
6095 s->AssignRef(regexp);
6096 }
6097 }
6098
6100 const intptr_t count = objects_.length();
6101 for (intptr_t i = 0; i < count; i++) {
6102 RegExpPtr regexp = objects_[i];
6103 AutoTraceObject(regexp);
6104 WriteFromTo(regexp);
6105 s->Write<int32_t>(regexp->untag()->num_one_byte_registers_);
6106 s->Write<int32_t>(regexp->untag()->num_two_byte_registers_);
6107 s->Write<int8_t>(regexp->untag()->type_flags_);
6108 }
6109 }
6110
6111 private:
6112 GrowableArray<RegExpPtr> objects_;
6113};
6114#endif // !DART_PRECOMPILED_RUNTIME
6115
6117 public:
6120
6121 void ReadAlloc(Deserializer* d) override {
6123 }
6124
6125 void ReadFill(Deserializer* d_) override {
6127
6128 ASSERT(!is_canonical()); // Never canonical.
6129 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6130 RegExpPtr regexp = static_cast<RegExpPtr>(d.Ref(id));
6131 Deserializer::InitializeHeader(regexp, kRegExpCid,
6133 d.ReadFromTo(regexp);
6134 regexp->untag()->num_one_byte_registers_ = d.Read<int32_t>();
6135 regexp->untag()->num_two_byte_registers_ = d.Read<int32_t>();
6136 regexp->untag()->type_flags_ = d.Read<int8_t>();
6137 }
6138 }
6139};
6140
6141#if !defined(DART_PRECOMPILED_RUNTIME)
6143 public:
6145 : SerializationCluster("WeakProperty",
6146 kWeakPropertyCid,
6147 compiler::target::WeakProperty::InstanceSize()) {}
6149
6150 void Trace(Serializer* s, ObjectPtr object) {
6151 WeakPropertyPtr property = WeakProperty::RawCast(object);
6152 objects_.Add(property);
6153
6154 s->PushWeak(property->untag()->key());
6155 }
6156
6158 for (intptr_t i = 0; i < objects_.length(); i++) {
6159 WeakPropertyPtr property = objects_[i];
6160 if (s->IsReachable(property->untag()->key())) {
6161 s->Push(property->untag()->value());
6162 }
6163 }
6164 }
6165
6167 const intptr_t count = objects_.length();
6168 s->WriteUnsigned(count);
6169 for (intptr_t i = 0; i < count; i++) {
6170 WeakPropertyPtr property = objects_[i];
6171 s->AssignRef(property);
6172 }
6173 }
6174
6176 const intptr_t count = objects_.length();
6177 for (intptr_t i = 0; i < count; i++) {
6178 WeakPropertyPtr property = objects_[i];
6179 AutoTraceObject(property);
6180 if (s->HasRef(property->untag()->key())) {
6181 s->WriteOffsetRef(property->untag()->key(), WeakProperty::key_offset());
6182 s->WriteOffsetRef(property->untag()->value(),
6184 } else {
6185 s->WriteOffsetRef(Object::null(), WeakProperty::key_offset());
6186 s->WriteOffsetRef(Object::null(), WeakProperty::value_offset());
6187 }
6188 }
6189 }
6190
6191 private:
6193};
6194#endif // !DART_PRECOMPILED_RUNTIME
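// Editor's note: a hypothetical sketch (not VM code) of the ephemeron rule the
// WeakProperty cluster applies above: the value is only retained if the key is
// otherwise reachable, and an unreachable key causes both slots to be written
// as null. WeakArray elements further below are filtered in the same spirit.
#include <unordered_set>

struct WeakPair {
  const void* key = nullptr;
  const void* value = nullptr;
};

// 'reachable' stands in for Serializer::IsReachable()/HasRef().
inline WeakPair FilterWeakPairForSnapshot(
    const WeakPair& pair,
    const std::unordered_set<const void*>& reachable) {
  if (reachable.count(pair.key) != 0) {
    return pair;  // the key survives, so the value is kept alive too
  }
  return WeakPair{};  // key dropped: serialize a null key and a null value
}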
6195
6197 public:
6199 : DeserializationCluster("WeakProperty") {}
6201
6202 void ReadAlloc(Deserializer* d) override {
6204 }
6205
6206 void ReadFill(Deserializer* d_) override {
6208
6209 ASSERT(!is_canonical()); // Never canonical.
6210 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6211 WeakPropertyPtr property = static_cast<WeakPropertyPtr>(d.Ref(id));
6212 Deserializer::InitializeHeader(property, kWeakPropertyCid,
6214 d.ReadFromTo(property);
6215 property->untag()->next_seen_by_gc_ = WeakProperty::null();
6216 }
6217 }
6218};
6219
6220#if !defined(DART_PRECOMPILED_RUNTIME)
6222 public:
6224 : SerializationCluster("Map",
6225 cid,
6226 compiler::target::Map::InstanceSize(),
6227 is_canonical) {}
6229
6230 void Trace(Serializer* s, ObjectPtr object) {
6231 MapPtr map = Map::RawCast(object);
6232 // We never have mutable hashmaps in snapshots.
6233 ASSERT(map->untag()->IsCanonical());
6234 ASSERT_EQUAL(map.GetClassId(), kConstMapCid);
6235 objects_.Add(map);
6236 PushFromTo(map);
6237 }
6238
6240 const intptr_t count = objects_.length();
6241 s->WriteUnsigned(count);
6242 for (intptr_t i = 0; i < count; i++) {
6243 MapPtr map = objects_[i];
6244 s->AssignRef(map);
6245 }
6246 }
6247
6249 const intptr_t count = objects_.length();
6250 for (intptr_t i = 0; i < count; i++) {
6251 MapPtr map = objects_[i];
6254 }
6255 }
6256
6257 private:
6258 GrowableArray<MapPtr> objects_;
6259};
6260#endif // !DART_PRECOMPILED_RUNTIME
6261
6264 public:
6266 bool is_canonical,
6267 bool is_root_unit)
6270 is_root_unit),
6271 cid_(cid) {}
6273
6274 void ReadAlloc(Deserializer* d) override {
6276 }
6277
6278 void ReadFill(Deserializer* d_) override {
6280
6281 const intptr_t cid = cid_;
6282 const bool mark_canonical = is_root_unit_ && is_canonical();
6283 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6284 MapPtr map = static_cast<MapPtr>(d.Ref(id));
6286 mark_canonical);
6287 d.ReadFromTo(map);
6288 }
6289 }
6290
6291 private:
6292 const intptr_t cid_;
6293};
6294
6295#if !defined(DART_PRECOMPILED_RUNTIME)
6297 public:
6299 : SerializationCluster("Set",
6300 cid,
6301 compiler::target::Set::InstanceSize(),
6302 is_canonical) {}
6304
6305 void Trace(Serializer* s, ObjectPtr object) {
6306 SetPtr set = Set::RawCast(object);
6307 // We never have mutable hashsets in snapshots.
6308 ASSERT(set->untag()->IsCanonical());
6309 ASSERT_EQUAL(set.GetClassId(), kConstSetCid);
6310 objects_.Add(set);
6311 PushFromTo(set);
6312 }
6313
6315 const intptr_t count = objects_.length();
6316 s->WriteUnsigned(count);
6317 for (intptr_t i = 0; i < count; i++) {
6318 SetPtr set = objects_[i];
6319 s->AssignRef(set);
6320 }
6321 }
6322
6324 const intptr_t count = objects_.length();
6325 for (intptr_t i = 0; i < count; i++) {
6326 SetPtr set = objects_[i];
6329 }
6330 }
6331
6332 private:
6333 GrowableArray<SetPtr> objects_;
6334};
6335#endif // !DART_PRECOMPILED_RUNTIME
6336
6339 public:
6341 bool is_canonical,
6342 bool is_root_unit)
6345 is_root_unit),
6346 cid_(cid) {}
6348
6349 void ReadAlloc(Deserializer* d) override {
6351 }
6352
6353 void ReadFill(Deserializer* d_) override {
6355
6356 const intptr_t cid = cid_;
6357 const bool mark_canonical = is_root_unit_ && is_canonical();
6358 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6359 SetPtr set = static_cast<SetPtr>(d.Ref(id));
6361 mark_canonical);
6362 d.ReadFromTo(set);
6363 }
6364 }
6365
6366 private:
6367 const intptr_t cid_;
6368};
6369
6370#if !defined(DART_PRECOMPILED_RUNTIME)
6372 public:
6376
6377 void Trace(Serializer* s, ObjectPtr object) {
6378 ArrayPtr array = Array::RawCast(object);
6379 objects_.Add(array);
6380
6381 s->Push(array->untag()->type_arguments());
6382 const intptr_t length = Smi::Value(array->untag()->length());
6383 for (intptr_t i = 0; i < length; i++) {
6384 s->Push(array->untag()->element(i));
6385 }
6386 }
6387
6388#if defined(DART_PRECOMPILER)
6389 static bool IsReadOnlyCid(intptr_t cid) {
6390 switch (cid) {
6391 case kPcDescriptorsCid:
6392 case kCodeSourceMapCid:
6393 case kCompressedStackMapsCid:
6394 case kOneByteStringCid:
6395 case kTwoByteStringCid:
6396 return true;
6397 default:
6398 return false;
6399 }
6400 }
6401#endif // defined(DART_PRECOMPILER)
6402
6404#if defined(DART_PRECOMPILER)
6405 if (FLAG_print_array_optimization_candidates) {
6406 intptr_t array_count = objects_.length();
6407 intptr_t array_count_allsmi = 0;
6408 intptr_t array_count_allro = 0;
6409 intptr_t array_count_empty = 0;
6410 intptr_t element_count = 0;
6411 intptr_t element_count_allsmi = 0;
6412 intptr_t element_count_allro = 0;
6413 for (intptr_t i = 0; i < array_count; i++) {
6414 ArrayPtr array = objects_[i];
6415 bool allsmi = true;
6416 bool allro = true;
6417 const intptr_t length = Smi::Value(array->untag()->length());
6418 for (intptr_t i = 0; i < length; i++) {
6419 ObjectPtr element = array->untag()->element(i);
6420 intptr_t cid = element->GetClassIdMayBeSmi();
6421 if (!IsReadOnlyCid(cid)) allro = false;
6422 if (cid != kSmiCid) allsmi = false;
6423 }
6424 element_count += length;
6425 if (length == 0) {
6426 array_count_empty++;
6427 } else if (allsmi) {
6428 array_count_allsmi++;
6429 element_count_allsmi += length;
6430 } else if (allro) {
6431 array_count_allro++;
6432 element_count_allro += length;
6433 }
6434 }
6435 OS::PrintErr("Arrays\n");
6436 OS::PrintErr(" total: %" Pd ", % " Pd " elements\n", array_count,
6437 element_count);
6438 OS::PrintErr(" smi-only:%" Pd ", % " Pd " elements\n",
6439 array_count_allsmi, element_count_allsmi);
6440 OS::PrintErr(" ro-only:%" Pd " , % " Pd " elements\n", array_count_allro,
6441 element_count_allro);
6442 OS::PrintErr(" empty:%" Pd "\n", array_count_empty);
6443 }
6444#endif // defined(DART_PRECOMPILER)
6445
6446 const intptr_t count = objects_.length();
6447 s->WriteUnsigned(count);
6448 for (intptr_t i = 0; i < count; i++) {
6449 ArrayPtr array = objects_[i];
6450 s->AssignRef(array);
6451 AutoTraceObject(array);
6452 const intptr_t length = Smi::Value(array->untag()->length());
6453 s->WriteUnsigned(length);
6455 }
6456 }
6457
6459 const intptr_t count = objects_.length();
6460 for (intptr_t i = 0; i < count; i++) {
6461 ArrayPtr array = objects_[i];
6462 AutoTraceObject(array);
6463 const intptr_t length = Smi::Value(array->untag()->length());
6464 s->WriteUnsigned(length);
6465 WriteCompressedField(array, type_arguments);
6466 for (intptr_t j = 0; j < length; j++) {
6467 s->WriteElementRef(array->untag()->element(j), j);
6468 }
6469 }
6470 }
6471
6472 private:
6473 GrowableArray<ArrayPtr> objects_;
6474};
6475#endif // !DART_PRECOMPILED_RUNTIME
6476
6479 public:
6481 bool is_canonical,
6482 bool is_root_unit)
6485 is_root_unit),
6486 cid_(cid) {}
6488
6489 void ReadAlloc(Deserializer* d) override {
6490 start_index_ = d->next_index();
6491 const intptr_t count = d->ReadUnsigned();
6492 for (intptr_t i = 0; i < count; i++) {
6493 const intptr_t length = d->ReadUnsigned();
6494 d->AssignRef(d->Allocate(Array::InstanceSize(length)));
6495 }
6496 stop_index_ = d->next_index();
6497 }
6498
6499 void ReadFill(Deserializer* d_) override {
6501
6502 const intptr_t cid = cid_;
6503 const bool stamp_canonical = is_root_unit_ && is_canonical();
6504 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6505 ArrayPtr array = static_cast<ArrayPtr>(d.Ref(id));
6506 const intptr_t length = d.ReadUnsigned();
6508 stamp_canonical);
6510 array->untag()->SetCardRememberedBitUnsynchronized();
6511 }
6512 array->untag()->type_arguments_ =
6513 static_cast<TypeArgumentsPtr>(d.ReadRef());
6514 array->untag()->length_ = Smi::New(length);
6515 for (intptr_t j = 0; j < length; j++) {
6516 array->untag()->data()[j] = d.ReadRef();
6517 }
6518 }
6519 }
6520
6521 private:
6522 const intptr_t cid_;
6523};
6524
6525#if !defined(DART_PRECOMPILED_RUNTIME)
6527 public:
6529 : SerializationCluster("WeakArray", kWeakArrayCid, kSizeVaries) {}
6531
6532 void Trace(Serializer* s, ObjectPtr object) {
6533 WeakArrayPtr array = WeakArray::RawCast(object);
6534 objects_.Add(array);
6535
6536 const intptr_t length = Smi::Value(array->untag()->length());
6537 for (intptr_t i = 0; i < length; i++) {
6538 s->PushWeak(array->untag()->element(i));
6539 }
6540 }
6541
6543 const intptr_t count = objects_.length();
6544 s->WriteUnsigned(count);
6545 for (intptr_t i = 0; i < count; i++) {
6546 WeakArrayPtr array = objects_[i];
6547 s->AssignRef(array);
6548 AutoTraceObject(array);
6549 const intptr_t length = Smi::Value(array->untag()->length());
6550 s->WriteUnsigned(length);
6552 }
6553 }
6554
6556 const intptr_t count = objects_.length();
6557 for (intptr_t i = 0; i < count; i++) {
6558 WeakArrayPtr array = objects_[i];
6559 AutoTraceObject(array);
6560 const intptr_t length = Smi::Value(array->untag()->length());
6561 s->WriteUnsigned(length);
6562 for (intptr_t j = 0; j < length; j++) {
6563 if (s->HasRef(array->untag()->element(j))) {
6564 s->WriteElementRef(array->untag()->element(j), j);
6565 } else {
6566 s->WriteElementRef(Object::null(), j);
6567 }
6568 }
6569 }
6570 }
6571
6572 private:
6574};
6575#endif // !DART_PRECOMPILED_RUNTIME
6576
6578 public:
6581
6582 void ReadAlloc(Deserializer* d) override {
6583 start_index_ = d->next_index();
6584 const intptr_t count = d->ReadUnsigned();
6585 for (intptr_t i = 0; i < count; i++) {
6586 const intptr_t length = d->ReadUnsigned();
6587 d->AssignRef(d->Allocate(WeakArray::InstanceSize(length)));
6588 }
6589 stop_index_ = d->next_index();
6590 }
6591
6592 void ReadFill(Deserializer* d_) override {
6594
6595 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6596 WeakArrayPtr array = static_cast<WeakArrayPtr>(d.Ref(id));
6597 const intptr_t length = d.ReadUnsigned();
6598 Deserializer::InitializeHeader(array, kWeakArrayCid,
6600 array->untag()->next_seen_by_gc_ = WeakArray::null();
6601 array->untag()->length_ = Smi::New(length);
6602 for (intptr_t j = 0; j < length; j++) {
6603 array->untag()->data()[j] = d.ReadRef();
6604 }
6605 }
6606 }
6607};
6608
6609#if !defined(DART_PRECOMPILED_RUNTIME)
6611 : public CanonicalSetSerializationCluster<CanonicalStringSet,
6612 String,
6613 StringPtr> {
6614 public:
6615 // To distinguish one- and two-byte strings, we put a bit in the length to
6616 // indicate which it is. The length is an unsigned SMI, so we actually have
6617 // two spare bits available. Keep in sync with DecodeLengthAndCid.
6618 static intptr_t EncodeLengthAndCid(intptr_t length, intptr_t cid) {
6619 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
6621 return (length << 1) | (cid == kTwoByteStringCid ? 0x1 : 0x0);
6622 }
6623
6625 bool represents_canonical_set)
6628 represents_canonical_set,
6629 "String",
6630 kSizeVaries) {}
6632
6633 void Trace(Serializer* s, ObjectPtr object) {
6634 StringPtr str = static_cast<StringPtr>(object);
6635 objects_.Add(str);
6636 }
6637
6639 const intptr_t count = objects_.length();
6640 s->WriteUnsigned(count);
6642 for (intptr_t i = 0; i < count; i++) {
6643 StringPtr str = objects_[i];
6644 s->AssignRef(str);
6645 AutoTraceObject(str);
6646 const intptr_t cid = str->GetClassId();
6647 const intptr_t length = Smi::Value(str->untag()->length());
6648 const intptr_t encoded = EncodeLengthAndCid(length, cid);
6649 s->WriteUnsigned(encoded);
6651 cid == kOneByteStringCid
6654 }
6656 }
6657
6659 const intptr_t count = objects_.length();
6660 for (intptr_t i = 0; i < count; i++) {
6661 StringPtr str = objects_[i];
6662 AutoTraceObject(str);
6663 const intptr_t cid = str->GetClassId();
6664 const intptr_t length = Smi::Value(str->untag()->length());
6665 const intptr_t encoded = EncodeLengthAndCid(length, cid);
6666 s->WriteUnsigned(encoded);
6667 if (cid == kOneByteStringCid) {
6668 s->WriteBytes(static_cast<OneByteStringPtr>(str)->untag()->data(),
6669 length);
6670 } else {
6671 s->WriteBytes(reinterpret_cast<uint8_t*>(
6672 static_cast<TwoByteStringPtr>(str)->untag()->data()),
6673 length * 2);
6674 }
6675 }
6676 }
6677};
6678#endif // !DART_PRECOMPILED_RUNTIME
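// Editor's note: illustrative only. EncodeLengthAndCid above packs the string
// kind into bit 0 of the unsigned length; DecodeLengthAndCid in the
// deserialization cluster below undoes it. A standalone sketch with
// hypothetical names:
#include <cstdint>

enum class StringKind : uint8_t { kOneByte = 0, kTwoByte = 1 };

inline uint64_t PackLengthAndKind(uint64_t length, StringKind kind) {
  return (length << 1) | static_cast<uint64_t>(kind);
}

inline uint64_t UnpackLengthAndKind(uint64_t encoded, StringKind* out_kind) {
  *out_kind = (encoded & 1) != 0 ? StringKind::kTwoByte : StringKind::kOneByte;
  return encoded >> 1;
}

// Round trip: UnpackLengthAndKind(PackLengthAndKind(42, StringKind::kTwoByte),
// &kind) yields 42 with kind == StringKind::kTwoByte.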
6679
6681 : public CanonicalSetDeserializationCluster<CanonicalStringSet> {
6682 public:
6683 static intptr_t DecodeLengthAndCid(intptr_t encoded, intptr_t* out_cid) {
6684 *out_cid = (encoded & 0x1) != 0 ? kTwoByteStringCid : kOneByteStringCid;
6685 return encoded >> 1;
6686 }
6687
6688 static intptr_t InstanceSize(intptr_t length, intptr_t cid) {
6689 return cid == kOneByteStringCid ? OneByteString::InstanceSize(length)
6691 }
6692
6693 explicit StringDeserializationCluster(bool is_canonical, bool is_root_unit)
6695 is_root_unit,
6696 "String") {}
6698
6699 void ReadAlloc(Deserializer* d) override {
6700 start_index_ = d->next_index();
6701 const intptr_t count = d->ReadUnsigned();
6702 for (intptr_t i = 0; i < count; i++) {
6703 const intptr_t encoded = d->ReadUnsigned();
6704 intptr_t cid = 0;
6705 const intptr_t length = DecodeLengthAndCid(encoded, &cid);
6706 d->AssignRef(d->Allocate(InstanceSize(length, cid)));
6707 }
6708 stop_index_ = d->next_index();
6710 }
6711
6712 void ReadFill(Deserializer* d_) override {
6714
6715 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6716 StringPtr str = static_cast<StringPtr>(d.Ref(id));
6717 const intptr_t encoded = d.ReadUnsigned();
6718 intptr_t cid = 0;
6719 const intptr_t length = DecodeLengthAndCid(encoded, &cid);
6720 const intptr_t instance_size = InstanceSize(length, cid);
6721 // Clean up the last two words of the string object to simplify future
6722 // string comparisons.
6723 // Objects are rounded up to a two-word size boundary.
6724 *reinterpret_cast<word*>(reinterpret_cast<uint8_t*>(str->untag()) +
6725 instance_size - 1 * kWordSize) = 0;
6726 *reinterpret_cast<word*>(reinterpret_cast<uint8_t*>(str->untag()) +
6727 instance_size - 2 * kWordSize) = 0;
6728 Deserializer::InitializeHeader(str, cid, instance_size, is_canonical());
6729#if DART_COMPRESSED_POINTERS
6730 // Gap caused by less-than-a-word length_ smi sitting before data_.
6731 const intptr_t length_offset =
6732 reinterpret_cast<intptr_t>(&str->untag()->length_);
6733 const intptr_t data_offset =
6734 cid == kOneByteStringCid
6735 ? reinterpret_cast<intptr_t>(
6736 static_cast<OneByteStringPtr>(str)->untag()->data())
6737 : reinterpret_cast<intptr_t>(
6738 static_cast<TwoByteStringPtr>(str)->untag()->data());
6739 const intptr_t length_with_gap = data_offset - length_offset;
6740 ASSERT(length_with_gap > kCompressedWordSize);
6741 ASSERT(length_with_gap == kWordSize);
6742 memset(reinterpret_cast<void*>(length_offset), 0, length_with_gap);
6743#endif
6744 str->untag()->length_ = Smi::New(length);
6745
6746 StringHasher hasher;
6747 if (cid == kOneByteStringCid) {
6748 for (intptr_t j = 0; j < length; j++) {
6749 uint8_t code_unit = d.Read<uint8_t>();
6750 static_cast<OneByteStringPtr>(str)->untag()->data()[j] = code_unit;
6751 hasher.Add(code_unit);
6752 }
6753
6754 } else {
6755 for (intptr_t j = 0; j < length; j++) {
6756 uint16_t code_unit = d.Read<uint8_t>();
6757 code_unit = code_unit | (d.Read<uint8_t>() << 8);
6758 static_cast<TwoByteStringPtr>(str)->untag()->data()[j] = code_unit;
6759 hasher.Add(code_unit);
6760 }
6761 }
6762 String::SetCachedHash(str, hasher.Finalize());
6763 }
6764 }
6765
6766 void PostLoad(Deserializer* d, const Array& refs) override {
6767 if (!table_.IsNull()) {
6768 auto object_store = d->isolate_group()->object_store();
6769 VerifyCanonicalSet(d, refs,
6770 WeakArray::Handle(object_store->symbol_table()));
6771 object_store->set_symbol_table(table_);
6772 if (d->isolate_group() == Dart::vm_isolate_group()) {
6773 Symbols::InitFromSnapshot(d->isolate_group());
6774 }
6775#if defined(DEBUG)
6776 Symbols::New(Thread::Current(), ":some:new:symbol:");
6777 ASSERT(object_store->symbol_table() == table_.ptr()); // Did not rehash.
6778#endif
6779 }
6780 }
6781};
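// Editor's note: an illustrative sketch of the padding trick in ReadFill
// above. Zeroing the tail of the allocation before copying the character data
// makes the bytes between the logical end of the string and the rounded-up
// object size deterministic, so later equality checks can compare whole words
// without reading uninitialized padding. Names here are hypothetical.
#include <cstddef>
#include <cstdint>
#include <cstring>

// 'capacity' stands in for the rounded-up instance size; 'length' is the
// number of payload bytes actually used.
inline void CopyWithZeroedTail(uint8_t* payload, size_t capacity,
                               const uint8_t* data, size_t length) {
  std::memset(payload + length, 0, capacity - length);  // deterministic tail
  std::memcpy(payload, data, length);
}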
6782
6783#if !defined(DART_PRECOMPILED_RUNTIME)
6785 public:
6787 intptr_t num_objects,
6788 intptr_t size,
6789 intptr_t target_memory_size = 0)
6790 : SerializationCluster(name, -1) {
6792 size_ = size;
6794 }
6796
6800};
6801#endif // !DART_PRECOMPILED_RUNTIME
6802
6803#if !defined(DART_PRECOMPILED_RUNTIME)
6805 public:
6807 bool should_write_symbols)
6808 : symbols_(symbols),
6809 should_write_symbols_(should_write_symbols),
6810 zone_(Thread::Current()->zone()) {}
6811
6813 // These objects are always allocated by Object::InitOnce, so they are not
6814 // written into the snapshot.
6815
6816 s->AddBaseObject(Object::null(), "Null", "null");
6817 s->AddBaseObject(Object::sentinel().ptr(), "Null", "sentinel");
6818 s->AddBaseObject(Object::transition_sentinel().ptr(), "Null",
6819 "transition_sentinel");
6820 s->AddBaseObject(Object::optimized_out().ptr(), "Null", "<optimized out>");
6821 s->AddBaseObject(Object::empty_array().ptr(), "Array", "<empty_array>");
6822 s->AddBaseObject(Object::empty_instantiations_cache_array().ptr(), "Array",
6823 "<empty_instantiations_cache_array>");
6824 s->AddBaseObject(Object::empty_subtype_test_cache_array().ptr(), "Array",
6825 "<empty_subtype_test_cache_array>");
6826 s->AddBaseObject(Object::dynamic_type().ptr(), "Type", "<dynamic type>");
6827 s->AddBaseObject(Object::void_type().ptr(), "Type", "<void type>");
6828 s->AddBaseObject(Object::empty_type_arguments().ptr(), "TypeArguments",
6829 "[]");
6830 s->AddBaseObject(Bool::True().ptr(), "bool", "true");
6831 s->AddBaseObject(Bool::False().ptr(), "bool", "false");
6832 ASSERT(Object::synthetic_getter_parameter_types().ptr() != Object::null());
6833 s->AddBaseObject(Object::synthetic_getter_parameter_types().ptr(), "Array",
6834 "<synthetic getter parameter types>");
6835 ASSERT(Object::synthetic_getter_parameter_names().ptr() != Object::null());
6836 s->AddBaseObject(Object::synthetic_getter_parameter_names().ptr(), "Array",
6837 "<synthetic getter parameter names>");
6838 s->AddBaseObject(Object::empty_context_scope().ptr(), "ContextScope",
6839 "<empty>");
6840 s->AddBaseObject(Object::empty_object_pool().ptr(), "ObjectPool",
6841 "<empty>");
6842 s->AddBaseObject(Object::empty_compressed_stackmaps().ptr(),
6843 "CompressedStackMaps", "<empty>");
6844 s->AddBaseObject(Object::empty_descriptors().ptr(), "PcDescriptors",
6845 "<empty>");
6846 s->AddBaseObject(Object::empty_var_descriptors().ptr(),
6847 "LocalVarDescriptors", "<empty>");
6848 s->AddBaseObject(Object::empty_exception_handlers().ptr(),
6849 "ExceptionHandlers", "<empty>");
6850 s->AddBaseObject(Object::empty_async_exception_handlers().ptr(),
6851 "ExceptionHandlers", "<empty async>");
6852
6853 for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) {
6854 s->AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i],
6855 "ArgumentsDescriptor", "<cached arguments descriptor>");
6856 }
6857 for (intptr_t i = 0; i < ICData::kCachedICDataArrayCount; i++) {
6858 s->AddBaseObject(ICData::cached_icdata_arrays_[i], "Array",
6859 "<empty icdata entries>");
6860 }
6861
6862 ClassTable* table = s->isolate_group()->class_table();
6864 cid++) {
6865 // Error and CallSiteData have no class object.
6866 if (cid != kErrorCid && cid != kCallSiteDataCid) {
6867 ASSERT(table->HasValidClassAt(cid));
6868 s->AddBaseObject(
6869 table->At(cid), "Class",
6870 Class::Handle(table->At(cid))
6871 .NameCString(Object::NameVisibility::kInternalName));
6872 }
6873 }
6874 s->AddBaseObject(table->At(kDynamicCid), "Class", "dynamic");
6875 s->AddBaseObject(table->At(kVoidCid), "Class", "void");
6876
6877 if (!Snapshot::IncludesCode(s->kind())) {
6878 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6879 s->AddBaseObject(StubCode::EntryAt(i).ptr());
6880 }
6881 }
6882 }
6883
6885 if (should_write_symbols_) {
6886 s->Push(symbols_.ptr());
6887 } else {
6888 for (intptr_t i = 0; i < symbols_.Length(); i++) {
6889 s->Push(symbols_.At(i));
6890 }
6891 }
6892 if (Snapshot::IncludesCode(s->kind())) {
6893 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6894 s->Push(StubCode::EntryAt(i).ptr());
6895 }
6896 }
6897 }
6898
6900 s->WriteRootRef(should_write_symbols_ ? symbols_.ptr() : Object::null(),
6901 "symbol-table");
6902 if (Snapshot::IncludesCode(s->kind())) {
6903 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6904 s->WriteRootRef(StubCode::EntryAt(i).ptr(),
6905 zone_->PrintToString("Stub:%s", StubCode::NameAt(i)));
6906 }
6907 }
6908
6909 if (!should_write_symbols_ && s->profile_writer() != nullptr) {
6910 // If writing a V8 snapshot profile, create an artificial node representing
6911 // the VM isolate symbol table.
6912 ASSERT(!s->IsReachable(symbols_.ptr()));
6913 s->AssignArtificialRef(symbols_.ptr());
6914 const auto& symbols_snapshot_id = s->GetProfileId(symbols_.ptr());
6915 s->profile_writer()->SetObjectTypeAndName(symbols_snapshot_id, "Symbols",
6916 "vm_symbols");
6917 s->profile_writer()->AddRoot(symbols_snapshot_id);
6918 for (intptr_t i = 0; i < symbols_.Length(); i++) {
6919 s->profile_writer()->AttributeReferenceTo(
6920 symbols_snapshot_id, V8SnapshotProfileWriter::Reference::Element(i),
6921 s->GetProfileId(symbols_.At(i)));
6922 }
6923 }
6924 }
6925
6926 private:
6927 const WeakArray& symbols_;
6928 const bool should_write_symbols_;
6929 Zone* zone_;
6930};
6931#endif // !DART_PRECOMPILED_RUNTIME
6932
6934 public:
6935 VMDeserializationRoots() : symbol_table_(WeakArray::Handle()) {}
6936
6938 // These objects are always allocated by Object::InitOnce, so they are not
6939 // written into the snapshot.
6940
6941 d->AddBaseObject(Object::null());
6942 d->AddBaseObject(Object::sentinel().ptr());
6943 d->AddBaseObject(Object::transition_sentinel().ptr());
6944 d->AddBaseObject(Object::optimized_out().ptr());
6945 d->AddBaseObject(Object::empty_array().ptr());
6946 d->AddBaseObject(Object::empty_instantiations_cache_array().ptr());
6947 d->AddBaseObject(Object::empty_subtype_test_cache_array().ptr());
6948 d->AddBaseObject(Object::dynamic_type().ptr());
6949 d->AddBaseObject(Object::void_type().ptr());
6950 d->AddBaseObject(Object::empty_type_arguments().ptr());
6951 d->AddBaseObject(Bool::True().ptr());
6952 d->AddBaseObject(Bool::False().ptr());
6953 ASSERT(Object::synthetic_getter_parameter_types().ptr() != Object::null());
6954 d->AddBaseObject(Object::synthetic_getter_parameter_types().ptr());
6955 ASSERT(Object::synthetic_getter_parameter_names().ptr() != Object::null());
6956 d->AddBaseObject(Object::synthetic_getter_parameter_names().ptr());
6957 d->AddBaseObject(Object::empty_context_scope().ptr());
6958 d->AddBaseObject(Object::empty_object_pool().ptr());
6959 d->AddBaseObject(Object::empty_compressed_stackmaps().ptr());
6960 d->AddBaseObject(Object::empty_descriptors().ptr());
6961 d->AddBaseObject(Object::empty_var_descriptors().ptr());
6962 d->AddBaseObject(Object::empty_exception_handlers().ptr());
6963 d->AddBaseObject(Object::empty_async_exception_handlers().ptr());
6964
6965 for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) {
6966 d->AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i]);
6967 }
6968 for (intptr_t i = 0; i < ICData::kCachedICDataArrayCount; i++) {
6969 d->AddBaseObject(ICData::cached_icdata_arrays_[i]);
6970 }
6971
6972 ClassTable* table = d->isolate_group()->class_table();
6974 cid++) {
6975 // Error and CallSiteData have no class object.
6976 if (cid != kErrorCid && cid != kCallSiteDataCid) {
6977 ASSERT(table->HasValidClassAt(cid));
6978 d->AddBaseObject(table->At(cid));
6979 }
6980 }
6981 d->AddBaseObject(table->At(kDynamicCid));
6982 d->AddBaseObject(table->At(kVoidCid));
6983
6984 if (!Snapshot::IncludesCode(d->kind())) {
6985 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6986 d->AddBaseObject(StubCode::EntryAt(i).ptr());
6987 }
6988 }
6989 }
6990
6991 void ReadRoots(Deserializer* d) override {
6992 symbol_table_ ^= d->ReadRef();
6993 if (!symbol_table_.IsNull()) {
6994 d->isolate_group()->object_store()->set_symbol_table(symbol_table_);
6995 }
6996 if (Snapshot::IncludesCode(d->kind())) {
6997 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6999 *code ^= d->ReadRef();
7001 }
7003 }
7004 }
7005
7006 void PostLoad(Deserializer* d, const Array& refs) override {
7007 // Move remaining bump allocation space to the freelist so it can be used
7008 // by C++ allocations (e.g., FinalizeVMIsolate) before allocating new pages.
7009 d->heap()->old_space()->ReleaseBumpAllocation();
7010
7011 if (!symbol_table_.IsNull()) {
7012 Symbols::InitFromSnapshot(d->isolate_group());
7013 }
7014
7016 }
7017
7018 private:
7019 WeakArray& symbol_table_;
7020};
7021
7022#if !defined(DART_PRECOMPILED_RUNTIME)
7023static const char* const kObjectStoreFieldNames[] = {
7024#define DECLARE_OBJECT_STORE_FIELD(Type, Name) #Name,
7034#undef DECLARE_OBJECT_STORE_FIELD
7035};
7036
7038 public:
7039#define RESET_ROOT_LIST(V) \
7040 V(symbol_table, WeakArray, HashTables::New<CanonicalStringSet>(4)) \
7041 V(canonical_types, Array, HashTables::New<CanonicalTypeSet>(4)) \
7042 V(canonical_function_types, Array, \
7043 HashTables::New<CanonicalFunctionTypeSet>(4)) \
7044 V(canonical_record_types, Array, HashTables::New<CanonicalRecordTypeSet>(4)) \
7045 V(canonical_type_arguments, Array, \
7046 HashTables::New<CanonicalTypeArgumentsSet>(4)) \
7047 V(canonical_type_parameters, Array, \
7048 HashTables::New<CanonicalTypeParameterSet>(4)) \
7049 ONLY_IN_PRODUCT(ONLY_IN_AOT( \
7050 V(closure_functions, GrowableObjectArray, GrowableObjectArray::null()))) \
7051 ONLY_IN_AOT(V(closure_functions_table, Array, Array::null())) \
7052 ONLY_IN_AOT(V(canonicalized_stack_map_entries, CompressedStackMaps, \
7053 CompressedStackMaps::null()))
7054
7056 ObjectStore* object_store,
7058 : base_objects_(base_objects),
7059 object_store_(object_store),
7060 snapshot_kind_(snapshot_kind) {
7061#define ONLY_IN_AOT(code) \
7062 if (snapshot_kind_ == Snapshot::kFullAOT) { \
7063 code \
7064 }
7065#define SAVE_AND_RESET_ROOT(name, Type, init) \
7066 do { \
7067 saved_##name##_ = object_store->name(); \
7068 object_store->set_##name(Type::Handle(init)); \
7069 } while (0);
7070
7072#undef SAVE_AND_RESET_ROOT
7073#undef ONLY_IN_AOT
7074 }
7076#define ONLY_IN_AOT(code) \
7077 if (snapshot_kind_ == Snapshot::kFullAOT) { \
7078 code \
7079 }
7080#define RESTORE_ROOT(name, Type, init) \
7081 object_store_->set_##name(saved_##name##_);
7083#undef RESTORE_ROOT
7084#undef ONLY_IN_AOT
7085 }
7086
7088 if (base_objects_ == nullptr) {
7089 // Not writing a new vm isolate: use the one this VM was loaded from.
7090 const Array& base_objects = Object::vm_isolate_snapshot_object_table();
7091 for (intptr_t i = kFirstReference; i < base_objects.Length(); i++) {
7092 s->AddBaseObject(base_objects.At(i));
7093 }
7094 } else {
7095 // Base objects carried over from WriteVMSnapshot.
7096 for (intptr_t i = 0; i < base_objects_->length(); i++) {
7097 s->AddBaseObject((*base_objects_)[i]->ptr());
7098 }
7099 }
7100 }
7101
7103 ObjectPtr* from = object_store_->from();
7104 ObjectPtr* to = object_store_->to_snapshot(s->kind());
7105 for (ObjectPtr* p = from; p <= to; p++) {
7106 s->Push(*p);
7107 }
7108
7109 FieldTable* initial_field_table =
7110 s->thread()->isolate_group()->initial_field_table();
7111 for (intptr_t i = 0, n = initial_field_table->NumFieldIds(); i < n; i++) {
7112 s->Push(initial_field_table->At(i));
7113 }
7114
7115 FieldTable* shared_initial_field_table =
7116 s->thread()->isolate_group()->shared_initial_field_table();
7117 for (intptr_t i = 0, n = shared_initial_field_table->NumFieldIds(); i < n;
7118 i++) {
7119 s->Push(shared_initial_field_table->At(i));
7120 }
7121
7122 dispatch_table_entries_ = object_store_->dispatch_table_code_entries();
7123 // We should only have a dispatch table in precompiled mode.
7124 ASSERT(dispatch_table_entries_.IsNull() || s->kind() == Snapshot::kFullAOT);
7125
7126#if defined(DART_PRECOMPILER)
7127 // We treat the dispatch table as a root object and trace the Code objects
7128 // it references. Otherwise, a non-empty entry could be invalid on
7129 // deserialization if the corresponding Code object was not reachable from
7130 // the existing snapshot roots.
7131 if (!dispatch_table_entries_.IsNull()) {
7132 for (intptr_t i = 0; i < dispatch_table_entries_.Length(); i++) {
7133 s->Push(dispatch_table_entries_.At(i));
7134 }
7135 }
7136#endif
7137 }
7138
7140 ObjectPtr* from = object_store_->from();
7141 ObjectPtr* to = object_store_->to_snapshot(s->kind());
7142 for (ObjectPtr* p = from; p <= to; p++) {
7143 s->WriteRootRef(*p, kObjectStoreFieldNames[p - from]);
7144 }
7145
7146 FieldTable* initial_field_table =
7147 s->thread()->isolate_group()->initial_field_table();
7148 intptr_t n = initial_field_table->NumFieldIds();
7149 s->WriteUnsigned(n);
7150 for (intptr_t i = 0; i < n; i++) {
7151 s->WriteRootRef(initial_field_table->At(i), "some-static-field");
7152 }
7153
7154 FieldTable* shared_initial_field_table =
7155 s->thread()->isolate_group()->shared_initial_field_table();
7156 intptr_t n_shared = shared_initial_field_table->NumFieldIds();
7157 s->WriteUnsigned(n_shared);
7158 for (intptr_t i = 0; i < n_shared; i++) {
7159 s->WriteRootRef(shared_initial_field_table->At(i),
7160 "some-shared-static-field");
7161 }
7162
7163 // The dispatch table is serialized only for precompiled snapshots.
7164 s->WriteDispatchTable(dispatch_table_entries_);
7165 }
7166
7168 return saved_canonicalized_stack_map_entries_;
7169 }
7170
7171 private:
7172 ZoneGrowableArray<Object*>* const base_objects_;
7173 ObjectStore* const object_store_;
7174 const Snapshot::Kind snapshot_kind_;
7175 Array& dispatch_table_entries_ = Array::Handle();
7176
7177#define ONLY_IN_AOT(code) code
7178#define DECLARE_FIELD(name, Type, init) Type& saved_##name##_ = Type::Handle();
7180#undef DECLARE_FIELD
7181#undef ONLY_IN_AOT
7182};
7183#endif // !DART_PRECOMPILED_RUNTIME
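// Editor's note: the SAVE_AND_RESET_ROOT / RESTORE_ROOT macro pairs above
// implement a scoped save, reset, and restore of selected object-store roots
// around serialization. A hypothetical, macro-free sketch of the same pattern:
#include <utility>

template <typename T>
class ScopedRootReset {
 public:
  // Installs 'fresh_value' for the lifetime of the scope and restores the
  // previously saved value on destruction.
  ScopedRootReset(T* slot, T fresh_value)
      : slot_(slot), saved_(std::move(*slot)) {
    *slot_ = std::move(fresh_value);
  }
  ~ScopedRootReset() { *slot_ = std::move(saved_); }

  ScopedRootReset(const ScopedRootReset&) = delete;
  ScopedRootReset& operator=(const ScopedRootReset&) = delete;

 private:
  T* const slot_;
  T saved_;
};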
7184
7186 public:
7188 : object_store_(object_store) {}
7189
7191 // N.B.: Skipping index 0 because ref 0 is illegal.
7192 const Array& base_objects = Object::vm_isolate_snapshot_object_table();
7193 for (intptr_t i = kFirstReference; i < base_objects.Length(); i++) {
7194 d->AddBaseObject(base_objects.At(i));
7195 }
7196 }
7197
7198 void ReadRoots(Deserializer* d) override {
7199 // Read roots.
7200 ObjectPtr* from = object_store_->from();
7201 ObjectPtr* to = object_store_->to_snapshot(d->kind());
7202 for (ObjectPtr* p = from; p <= to; p++) {
7203 *p = d->ReadRef();
7204 }
7205
7206 {
7207 FieldTable* initial_field_table =
7208 d->thread()->isolate_group()->initial_field_table();
7209 intptr_t n = d->ReadUnsigned();
7210 initial_field_table->AllocateIndex(n - 1);
7211 for (intptr_t i = 0; i < n; i++) {
7212 initial_field_table->SetAt(i, d->ReadRef());
7213 }
7214 }
7215
7216 {
7217 FieldTable* shared_initial_field_table =
7218 d->thread()->isolate_group()->shared_initial_field_table();
7219 intptr_t n_shared = d->ReadUnsigned();
7220 if (n_shared > 0) {
7221 shared_initial_field_table->AllocateIndex(n_shared - 1);
7222 for (intptr_t i = 0; i < n_shared; i++) {
7223 shared_initial_field_table->SetAt(i, d->ReadRef());
7224 }
7225 }
7226 }
7227
7228 // Deserialize dispatch table (when applicable)
7229 d->ReadDispatchTable();
7230 }
7231
7232 void PostLoad(Deserializer* d, const Array& refs) override {
7233 auto isolate_group = d->isolate_group();
7234 {
7235 isolate_group->class_table()->CopySizesFromClassObjects();
7236 }
7237 d->heap()->old_space()->EvaluateAfterLoading();
7238
7239 auto object_store = isolate_group->object_store();
7240 const Array& units = Array::Handle(object_store->loading_units());
7241 if (!units.IsNull()) {
7243 unit ^= units.At(LoadingUnit::kRootId);
7244 unit.set_base_objects(refs);
7245 }
7246
7247 // Setup native resolver for bootstrap impl.
7249 }
7250
7251 private:
7252 ObjectStore* object_store_;
7253};
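// Editor's note: the initial field tables handled above are written as plain
// length-prefixed lists of root references and read back in the same order.
// A sketch with integers standing in for refs (hypothetical helpers, not the
// VM stream API):
#include <cstddef>
#include <cstdint>
#include <vector>

inline void WriteLengthPrefixedList(std::vector<uint64_t>* out,
                                    const std::vector<uint64_t>& refs) {
  out->push_back(refs.size());                    // count first...
  for (uint64_t ref : refs) out->push_back(ref);  // ...then one ref per slot
}

inline std::vector<uint64_t> ReadLengthPrefixedList(
    const std::vector<uint64_t>& in, size_t* pos) {
  std::vector<uint64_t> refs(in[(*pos)++]);
  for (uint64_t& ref : refs) ref = in[(*pos)++];
  return refs;
}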
7254
7255#if !defined(DART_PRECOMPILED_RUNTIME)
7257 public:
7259 : unit_(unit) {}
7260
7262 ZoneGrowableArray<Object*>* objects = unit_->parent()->objects();
7263 for (intptr_t i = 0; i < objects->length(); i++) {
7264 s->AddBaseObject(objects->At(i)->ptr());
7265 }
7266 }
7267
7269 for (auto deferred_object : *unit_->deferred_objects()) {
7270 ASSERT(deferred_object->IsCode());
7271 CodePtr code = static_cast<CodePtr>(deferred_object->ptr());
7272 ObjectPoolPtr pool = code->untag()->object_pool_;
7273 if (pool != ObjectPool::null()) {
7274 const intptr_t length = pool->untag()->length_;
7275 uint8_t* entry_bits = pool->untag()->entry_bits();
7276 for (intptr_t i = 0; i < length; i++) {
7277 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
7278 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
7279 s->Push(pool->untag()->data()[i].raw_obj_);
7280 }
7281 }
7282 }
7283 s->Push(code->untag()->code_source_map_);
7284 }
7285 }
7286
7288#if defined(DART_PRECOMPILER)
7289 intptr_t start_index = 0;
7290 intptr_t num_deferred_objects = unit_->deferred_objects()->length();
7291 if (num_deferred_objects != 0) {
7292 start_index = s->RefId(unit_->deferred_objects()->At(0)->ptr());
7293 ASSERT(start_index > 0);
7294 }
7295 s->WriteUnsigned(start_index);
7296 s->WriteUnsigned(num_deferred_objects);
7297 for (intptr_t i = 0; i < num_deferred_objects; i++) {
7298 const Object* deferred_object = (*unit_->deferred_objects())[i];
7299 ASSERT(deferred_object->IsCode());
7300 CodePtr code = static_cast<CodePtr>(deferred_object->ptr());
7301 ASSERT(s->RefId(code) == (start_index + i));
7303 s->WriteInstructions(code->untag()->instructions_,
7304 code->untag()->unchecked_offset_, code, false);
7305 s->WriteRootRef(code->untag()->code_source_map_, "deferred-code");
7306 }
7307
7308 ObjectPoolPtr pool =
7309 s->isolate_group()->object_store()->global_object_pool();
7310 const intptr_t length = pool->untag()->length_;
7311 uint8_t* entry_bits = pool->untag()->entry_bits();
7312 intptr_t last_write = 0;
7313 for (intptr_t i = 0; i < length; i++) {
7314 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
7315 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
7316 if (s->IsWritten(pool->untag()->data()[i].raw_obj_)) {
7317 intptr_t skip = i - last_write;
7318 s->WriteUnsigned(skip);
7319 s->WriteRootRef(pool->untag()->data()[i].raw_obj_,
7320 "deferred-literal");
7321 last_write = i;
7322 }
7323 }
7324 }
7325 s->WriteUnsigned(length - last_write);
7326#endif
7327 }
7328
7329 private:
7331};
7332#endif // !DART_PRECOMPILED_RUNTIME
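// Editor's note: a self-contained sketch (hypothetical names, not the VM
// stream API) of the skip encoding used for deferred object-pool literals
// above: the writer emits, for each updated pool entry, the gap from the
// previously written index followed by the entry, and finishes with one gap
// to the pool length; UnitDeserializationRoots below walks the pool with the
// same gaps.
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// 'updates' must be sorted by pool index; values stand in for object refs.
inline std::vector<uint64_t> WriteSkipEncoded(
    const std::vector<std::pair<size_t, uint64_t>>& updates,
    size_t pool_length) {
  std::vector<uint64_t> stream;
  size_t last = 0;
  for (const auto& update : updates) {
    stream.push_back(update.first - last);  // gap since the last written index
    stream.push_back(update.second);        // the updated entry itself
    last = update.first;
  }
  stream.push_back(pool_length - last);     // final gap terminates the walk
  return stream;
}

inline void ApplySkipEncoded(const std::vector<uint64_t>& stream,
                             std::vector<uint64_t>* pool) {
  size_t pos = 0;
  for (size_t i = stream[pos++]; i < pool->size(); i += stream[pos++]) {
    (*pool)[i] = stream[pos++];
  }
}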
7333
7335 public:
7336 explicit UnitDeserializationRoots(const LoadingUnit& unit) : unit_(unit) {}
7337
7339 const Array& base_objects =
7340 Array::Handle(LoadingUnit::Handle(unit_.parent()).base_objects());
7341 for (intptr_t i = kFirstReference; i < base_objects.Length(); i++) {
7342 d->AddBaseObject(base_objects.At(i));
7343 }
7344 }
7345
7346 void ReadRoots(Deserializer* d) override {
7347 deferred_start_index_ = d->ReadUnsigned();
7348 deferred_stop_index_ = deferred_start_index_ + d->ReadUnsigned();
7349 for (intptr_t id = deferred_start_index_; id < deferred_stop_index_; id++) {
7350 CodePtr code = static_cast<CodePtr>(d->Ref(id));
7352 d->ReadInstructions(code, /*deferred=*/false);
7353 if (code->untag()->owner_->IsHeapObject() &&
7354 code->untag()->owner_->IsFunction()) {
7355 FunctionPtr func = static_cast<FunctionPtr>(code->untag()->owner_);
7356 uword entry_point = code->untag()->entry_point_;
7357 ASSERT(entry_point != 0);
7358 func->untag()->entry_point_ = entry_point;
7359 uword unchecked_entry_point = code->untag()->unchecked_entry_point_;
7360 ASSERT(unchecked_entry_point != 0);
7361 func->untag()->unchecked_entry_point_ = unchecked_entry_point;
7362#if defined(DART_PRECOMPILED_RUNTIME)
7363 if (func->untag()->data()->IsHeapObject() &&
7364 func->untag()->data()->IsClosureData()) {
7365 // For closure functions in bare instructions mode, also update the
7366 // cache inside the static implicit closure object, if any.
7367 auto data = static_cast<ClosureDataPtr>(func->untag()->data());
7368 if (data->untag()->closure() != Closure::null()) {
7369 // Closure functions only have one entry point.
7370 ASSERT_EQUAL(entry_point, unchecked_entry_point);
7371 data->untag()->closure()->untag()->entry_point_ = entry_point;
7372 }
7373 }
7374#endif
7375 }
7376 code->untag()->code_source_map_ =
7377 static_cast<CodeSourceMapPtr>(d->ReadRef());
7378 }
7379
7380 ObjectPoolPtr pool =
7381 d->isolate_group()->object_store()->global_object_pool();
7382 const intptr_t length = pool->untag()->length_;
7383 uint8_t* entry_bits = pool->untag()->entry_bits();
7384 for (intptr_t i = d->ReadUnsigned(); i < length; i += d->ReadUnsigned()) {
7385 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
7386 ASSERT(entry_type == ObjectPool::EntryType::kTaggedObject);
7387 // The existing entry will usually be null, but it might also be an
7388 // equivalent object that was duplicated in another loading unit.
7389 pool->untag()->data()[i].raw_obj_ = d->ReadRef();
7390 }
7391
7392 // Reinitialize the dispatch table by rereading the table's serialization
7393 // in the root snapshot.
7394 auto isolate_group = d->isolate_group();
7395 if (isolate_group->dispatch_table_snapshot() != nullptr) {
7396 ReadStream stream(isolate_group->dispatch_table_snapshot(),
7397 isolate_group->dispatch_table_snapshot_size());
7399 isolate_group->object_store()->instructions_tables());
7401 root_table ^= tables.At(0);
7402 d->ReadDispatchTable(&stream, /*deferred=*/true, root_table,
7403 deferred_start_index_, deferred_stop_index_);
7404 }
7405 }
7406
7407 void PostLoad(Deserializer* d, const Array& refs) override {
7408 d->EndInstructions();
7409 unit_.set_base_objects(refs);
7410 }
7411
7412 private:
7413 const LoadingUnit& unit_;
7414 intptr_t deferred_start_index_;
7415 intptr_t deferred_stop_index_;
7416};
7417
7418#if defined(DEBUG)
7419static constexpr int32_t kSectionMarker = 0xABAB;
7420#endif
7421
7423 Snapshot::Kind kind,
7425 ImageWriter* image_writer,
7426 bool vm,
7427 V8SnapshotProfileWriter* profile_writer)
7428 : ThreadStackResource(thread),
7429 heap_(thread->isolate_group()->heap()),
7430 zone_(thread->zone()),
7431 kind_(kind),
7432 stream_(stream),
7433 image_writer_(image_writer),
7434 canonical_clusters_by_cid_(nullptr),
7435 clusters_by_cid_(nullptr),
7436 stack_(),
7437 num_cids_(0),
7438 num_tlc_cids_(0),
7439 num_base_objects_(0),
7440 num_written_objects_(0),
7441 next_ref_index_(kFirstReference),
7442 vm_(vm),
7443 profile_writer_(profile_writer)
7444#if defined(SNAPSHOT_BACKTRACE)
7445 ,
7446 current_parent_(Object::null()),
7447 parent_pairs_()
7448#endif
7449#if defined(DART_PRECOMPILER)
7450 ,
7451 deduped_instructions_sources_(zone_)
7452#endif
7453{
7454 num_cids_ = thread->isolate_group()->class_table()->NumCids();
7455 num_tlc_cids_ = thread->isolate_group()->class_table()->NumTopLevelCids();
7456 canonical_clusters_by_cid_ = new SerializationCluster*[num_cids_];
7457 for (intptr_t i = 0; i < num_cids_; i++) {
7458 canonical_clusters_by_cid_[i] = nullptr;
7459 }
7460 clusters_by_cid_ = new SerializationCluster*[num_cids_];
7461 for (intptr_t i = 0; i < num_cids_; i++) {
7462 clusters_by_cid_[i] = nullptr;
7463 }
7464 if (profile_writer_ != nullptr) {
7465 offsets_table_ = new (zone_) OffsetsTable(zone_);
7466 }
7467}
7468
7470 delete[] canonical_clusters_by_cid_;
7471 delete[] clusters_by_cid_;
7472}
7473
7475 const char* type,
7476 const char* name) {
7477 // Don't assign references to the discarded code.
7478 const bool is_discarded_code = base_object->IsHeapObject() &&
7479 base_object->IsCode() &&
7480 Code::IsDiscarded(Code::RawCast(base_object));
7481 if (!is_discarded_code) {
7482 AssignRef(base_object);
7483 }
7484 num_base_objects_++;
7485
7486 if ((profile_writer_ != nullptr) && (type != nullptr)) {
7487 const auto& profile_id = GetProfileId(base_object);
7488 profile_writer_->SetObjectTypeAndName(profile_id, type, name);
7489 profile_writer_->AddRoot(profile_id);
7490 }
7491}
7492
7494 ASSERT(IsAllocatedReference(next_ref_index_));
7495
7496 // The object id weak table holds image offsets for Instructions instead
7497 // of ref indices.
7498 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7499 heap_->SetObjectId(object, next_ref_index_);
7500 ASSERT(heap_->GetObjectId(object) == next_ref_index_);
7501
7502 objects_->Add(&Object::ZoneHandle(object));
7503
7504 return next_ref_index_++;
7505}
7506
7508 const intptr_t ref = -(next_ref_index_++);
7510 if (object != nullptr) {
7511 ASSERT(!object.IsHeapObject() || !object.IsInstructions());
7512 ASSERT(heap_->GetObjectId(object) == kUnreachableReference);
7513 heap_->SetObjectId(object, ref);
7514 ASSERT(heap_->GetObjectId(object) == ref);
7515 }
7516 return ref;
7517}
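// Editor's note: a hypothetical sketch of the reference-numbering convention
// used by AssignRef and AssignArtificialRef above. Real snapshot objects get
// increasing positive ids from a shared counter, artificial (profile-only)
// nodes get the negation of that counter's next value, and reference 0 is
// never used for a real object.
#include <cstdint>
#include <unordered_map>

class RefIdTable {
 public:
  int64_t AssignReal(const void* obj) { return ids_[obj] = next_id_++; }
  int64_t AssignArtificial(const void* obj) { return ids_[obj] = -(next_id_++); }

  // Returns 0 when no id has been assigned yet (0 is never a valid reference).
  int64_t Lookup(const void* obj) const {
    const auto it = ids_.find(obj);
    return it == ids_.end() ? 0 : it->second;
  }

 private:
  int64_t next_id_ = 1;  // real ids start at 1
  std::unordered_map<const void*, int64_t> ids_;
};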
7518
7519void Serializer::FlushProfile() {
7520 if (profile_writer_ == nullptr) return;
7521 const intptr_t bytes =
7522 stream_->Position() - object_currently_writing_.last_stream_position_;
7523 profile_writer_->AttributeBytesTo(object_currently_writing_.id_, bytes);
7524 object_currently_writing_.last_stream_position_ = stream_->Position();
7525}
7526
7528 ObjectPtr object) const {
7529 // Instructions are handled separately.
7530 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7531 return GetProfileId(UnsafeRefId(object));
7532}
7533
7535 intptr_t heap_id) const {
7536 if (IsArtificialReference(heap_id)) {
7537 return {IdSpace::kArtificial, -heap_id};
7538 }
7539 ASSERT(IsAllocatedReference(heap_id));
7540 return {IdSpace::kSnapshot, heap_id};
7541}
7542
7544 ObjectPtr object,
7545 const V8SnapshotProfileWriter::Reference& reference) {
7546 if (profile_writer_ == nullptr) return;
7547 const auto& object_id = GetProfileId(object);
7548#if defined(DART_PRECOMPILER)
7549 if (object->IsHeapObject() && object->IsWeakSerializationReference()) {
7550 auto const wsr = WeakSerializationReference::RawCast(object);
7551 auto const target = wsr->untag()->target();
7552 const auto& target_id = GetProfileId(target);
7553 if (object_id != target_id) {
7554 const auto& replacement_id = GetProfileId(wsr->untag()->replacement());
7555 ASSERT(object_id == replacement_id);
7556 // The target of the WSR will be replaced in the snapshot, so write
7557 // attributions for both the dropped target and for the replacement.
7558 profile_writer_->AttributeDroppedReferenceTo(
7559 object_currently_writing_.id_, reference, target_id, replacement_id);
7560 return;
7561 }
7562 // The replacement isn't used for this WSR in the snapshot, as either the
7563 // target is strongly referenced or the WSR itself is unreachable, so fall
7564 // through to attributing a reference to the WSR (which shares the profile
7565 // ID of the target).
7566 }
7567#endif
7568 profile_writer_->AttributeReferenceTo(object_currently_writing_.id_,
7569 reference, object_id);
7570}
7571
7573 Serializer* serializer,
7575 ObjectPtr object)
7576 : serializer_(serializer),
7577 old_object_(serializer->object_currently_writing_.object_),
7578 old_id_(serializer->object_currently_writing_.id_),
7579 old_cid_(serializer->object_currently_writing_.cid_) {
7580 if (serializer_->profile_writer_ == nullptr) return;
7581 // The ID should correspond to one already added appropriately to the
7582 // profile writer.
7583 ASSERT(serializer_->profile_writer_->HasId(id));
7584 serializer_->FlushProfile();
7585 serializer_->object_currently_writing_.object_ = object;
7586 serializer_->object_currently_writing_.id_ = id;
7587 serializer_->object_currently_writing_.cid_ =
7588 object == nullptr ? -1 : object->GetClassIdMayBeSmi();
7589}
7590
7592 if (serializer_->profile_writer_ == nullptr) return;
7593 serializer_->FlushProfile();
7594 serializer_->object_currently_writing_.object_ = old_object_;
7595 serializer_->object_currently_writing_.id_ = old_id_;
7596 serializer_->object_currently_writing_.cid_ = old_cid_;
7597}
7598
7599V8SnapshotProfileWriter::ObjectId Serializer::WritingObjectScope::ReserveId(
7600 Serializer* s,
7601 const char* type,
7602 ObjectPtr obj,
7603 const char* name) {
7604 if (s->profile_writer_ == nullptr) {
7606 }
7607 if (name == nullptr) {
7608 // Handle some cases where there are obvious names to assign.
7609 switch (obj->GetClassIdMayBeSmi()) {
7610 case kSmiCid: {
7611 name = OS::SCreate(s->zone(), "%" Pd "", Smi::Value(Smi::RawCast(obj)));
7612 break;
7613 }
7614 case kMintCid: {
7615 name = OS::SCreate(s->zone(), "%" Pd64 "",
7616 Mint::RawCast(obj)->untag()->value_);
7617 break;
7618 }
7619 case kOneByteStringCid:
7620 case kTwoByteStringCid: {
7621 name = String::ToCString(s->thread(), String::RawCast(obj));
7622 break;
7623 }
7624 }
7625 }
7626 const auto& obj_id = s->GetProfileId(obj);
7627 s->profile_writer_->SetObjectTypeAndName(obj_id, type, name);
7628 return obj_id;
7629}
7630
7631#if !defined(DART_PRECOMPILED_RUNTIME)
7633 ASSERT(profile_writer() != nullptr);
7634
7635 // UnsafeRefId will do lazy reference allocation for WSRs.
7636 intptr_t id = UnsafeRefId(obj);
7638 if (id != kUnreachableReference) {
7639 return IsArtificialReference(id);
7640 }
7641 if (obj->IsHeapObject() && obj->IsWeakSerializationReference()) {
7642 auto const target =
7645 // Since the WSR is unreachable, we can replace its id with whatever the
7646 // ID of the target is, whether real or artificial.
7647 id = heap_->GetObjectId(target);
7648 heap_->SetObjectId(obj, id);
7649 return IsArtificialReference(id);
7650 }
7651
7652 const char* type = nullptr;
7653 const char* name = nullptr;
7655 const classid_t cid = obj->GetClassIdMayBeSmi();
7656 switch (cid) {
7657 // For profiling static call target tables in AOT mode.
7658 case kSmiCid: {
7659 type = "Smi";
7660 break;
7661 }
7662 // For profiling per-code object pools in bare instructions mode.
7663 case kObjectPoolCid: {
7664 type = "ObjectPool";
7665 auto const pool = ObjectPool::RawCast(obj);
7666 for (intptr_t i = 0; i < pool->untag()->length_; i++) {
7667 uint8_t bits = pool->untag()->entry_bits()[i];
7669 ObjectPool::EntryType::kTaggedObject) {
7670 auto const elem = pool->untag()->data()[i].raw_obj_;
7671 // Elements should be reachable from the global object pool.
7672 ASSERT(HasRef(elem));
7674 }
7675 }
7676 break;
7677 }
7678 // For profiling static call target tables and the dispatch table in AOT.
7679 case kImmutableArrayCid:
7680 case kArrayCid: {
7681 type = "Array";
7682 auto const array = Array::RawCast(obj);
7683 for (intptr_t i = 0, n = Smi::Value(array->untag()->length()); i < n;
7684 i++) {
7685 ObjectPtr elem = array->untag()->element(i);
7687 }
7688 break;
7689 }
7690 // For profiling the dispatch table.
7691 case kCodeCid: {
7692 type = "Code";
7693 auto const code = Code::RawCast(obj);
7695 links.Add({code->untag()->owner(),
7697 break;
7698 }
7699 case kFunctionCid: {
7700 FunctionPtr func = static_cast<FunctionPtr>(obj);
7701 type = "Function";
7703 func);
7704 links.Add({func->untag()->owner(),
7706 ObjectPtr data = func->untag()->data();
7707 if (data->GetClassId() == kClosureDataCid) {
7708 links.Add(
7710 }
7711 break;
7712 }
7713 case kClosureDataCid: {
7714 auto data = static_cast<ClosureDataPtr>(obj);
7715 type = "ClosureData";
7716 links.Add(
7717 {data->untag()->parent_function(),
7719 break;
7720 }
7721 case kClassCid: {
7722 ClassPtr cls = static_cast<ClassPtr>(obj);
7723 type = "Class";
7724 name = String::ToCString(thread(), cls->untag()->name());
7725 links.Add({cls->untag()->library(),
7727 break;
7728 }
7729 case kPatchClassCid: {
7730 PatchClassPtr patch_cls = static_cast<PatchClassPtr>(obj);
7731 type = "PatchClass";
7732 links.Add(
7733 {patch_cls->untag()->wrapped_class(),
7735 break;
7736 }
7737 case kLibraryCid: {
7738 LibraryPtr lib = static_cast<LibraryPtr>(obj);
7739 type = "Library";
7740 name = String::ToCString(thread(), lib->untag()->url());
7741 break;
7742 }
7743 case kFunctionTypeCid: {
7744 type = "FunctionType";
7745 break;
7746 };
7747 case kRecordTypeCid: {
7748 type = "RecordType";
7749 break;
7750 };
7751 default:
7752 FATAL("Request to create artificial node for object with cid %d", cid);
7753 }
7754
7755 id = AssignArtificialRef(obj);
7756 Serializer::WritingObjectScope scope(this, type, obj, name);
7757 for (const auto& link : links) {
7759 AttributeReference(link.first, link.second);
7760 }
7761 return true;
7762}
7763#endif // !defined(DART_PRECOMPILED_RUNTIME)
7764
7765intptr_t Serializer::RefId(ObjectPtr object) const {
7766 auto const id = UnsafeRefId(object);
7767 if (IsAllocatedReference(id)) {
7768 return id;
7769 }
7772 auto& handle = thread()->ObjectHandle();
7773 handle = object;
7774 FATAL("Reference to unreachable object %s", handle.ToCString());
7775}
7776
7777intptr_t Serializer::UnsafeRefId(ObjectPtr object) const {
7778 // The object id weak table holds image offsets for Instructions instead
7779 // of ref indices.
7780 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7781 if (!Snapshot::IncludesCode(kind_) &&
7782 object->GetClassIdMayBeSmi() == kCodeCid) {
7783 return RefId(Object::null());
7784 }
7785 auto id = heap_->GetObjectId(object);
7786 if (id != kUnallocatedReference) {
7787 return id;
7788 }
7789 // This is the only case where we may still see unallocated references after
7790 // WriteAlloc is finished.
7791 if (object->IsWeakSerializationReference()) {
7792 // Lazily set the object ID of the WSR to the object which will replace
7793 // it in the snapshot.
7794 auto const wsr = static_cast<WeakSerializationReferencePtr>(object);
7795 // Either the target or the replacement must be allocated, since the
7796 // WSR is reachable.
7797 id = HasRef(wsr->untag()->target()) ? RefId(wsr->untag()->target())
7798 : RefId(wsr->untag()->replacement());
7799 heap_->SetObjectId(wsr, id);
7800 return id;
7801 }
7803 auto& handle = thread()->ObjectHandle();
7804 handle = object;
7805 FATAL("Reference for object %s is unallocated", handle.ToCString());
7806}
7807
7808const char* Serializer::ReadOnlyObjectType(intptr_t cid) {
7809 switch (cid) {
7810 case kPcDescriptorsCid:
7811 return "PcDescriptors";
7812 case kCodeSourceMapCid:
7813 return "CodeSourceMap";
7814 case kCompressedStackMapsCid:
7815 return "CompressedStackMaps";
7816 case kStringCid:
7817 return current_loading_unit_id_ <= LoadingUnit::kRootId
7818 ? "CanonicalString"
7819 : nullptr;
7820 case kOneByteStringCid:
7821 return current_loading_unit_id_ <= LoadingUnit::kRootId
7822 ? "OneByteStringCid"
7823 : nullptr;
7824 case kTwoByteStringCid:
7825 return current_loading_unit_id_ <= LoadingUnit::kRootId
7826 ? "TwoByteStringCid"
7827 : nullptr;
7828 default:
7829 return nullptr;
7830 }
7831}
7832
7834 bool is_canonical) {
7835#if defined(DART_PRECOMPILED_RUNTIME)
7836 UNREACHABLE();
7837 return nullptr;
7838#else
7839 Zone* Z = zone_;
7840 if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
7841 Push(isolate_group()->class_table()->At(cid));
7842 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7843 }
7846 }
7849 }
7850 if (IsTypedDataClassId(cid)) {
7851 return new (Z) TypedDataSerializationCluster(cid);
7852 }
7853
7854#if !defined(DART_COMPRESSED_POINTERS)
7855 // Sometimes we write memory images for read-only objects that contain no
7856 // pointers. These can be mmapped directly, needing no relocation, and added
7857 // to the list of heap pages. This gives us lazy/demand paging from the OS.
7858 // We do not do this for snapshots without code to keep snapshots portable
7859 // between machines with different word sizes. We do not do this when we use
7860 // compressed pointers because we cannot always control the load address of
7861 // the memory image, and it might be outside the 4GB region addressable by
7862 // compressed pointers.
7863 if (Snapshot::IncludesCode(kind_)) {
7864 if (auto const type = ReadOnlyObjectType(cid)) {
7865 return new (Z) RODataSerializationCluster(Z, type, cid, is_canonical);
7866 }
7867 }
7868#endif
7869
7870 const bool cluster_represents_canonical_set =
7871 current_loading_unit_id_ <= LoadingUnit::kRootId && is_canonical;
7872
7873 switch (cid) {
7874 case kClassCid:
7875 return new (Z) ClassSerializationCluster(num_cids_ + num_tlc_cids_);
7876 case kTypeParametersCid:
7878 case kTypeArgumentsCid:
7880 is_canonical, cluster_represents_canonical_set);
7881 case kPatchClassCid:
7882 return new (Z) PatchClassSerializationCluster();
7883 case kFunctionCid:
7884 return new (Z) FunctionSerializationCluster();
7885 case kClosureDataCid:
7886 return new (Z) ClosureDataSerializationCluster();
7887 case kFfiTrampolineDataCid:
7889 case kFieldCid:
7890 return new (Z) FieldSerializationCluster();
7891 case kScriptCid:
7892 return new (Z) ScriptSerializationCluster();
7893 case kLibraryCid:
7894 return new (Z) LibrarySerializationCluster();
7895 case kNamespaceCid:
7896 return new (Z) NamespaceSerializationCluster();
7897 case kKernelProgramInfoCid:
7899 case kCodeCid:
7900 return new (Z) CodeSerializationCluster(heap_);
7901 case kObjectPoolCid:
7902 return new (Z) ObjectPoolSerializationCluster();
7903 case kPcDescriptorsCid:
7904 return new (Z) PcDescriptorsSerializationCluster();
7905 case kCodeSourceMapCid:
7906 return new (Z) CodeSourceMapSerializationCluster();
7907 case kCompressedStackMapsCid:
7909 case kExceptionHandlersCid:
7911 case kContextCid:
7912 return new (Z) ContextSerializationCluster();
7913 case kContextScopeCid:
7914 return new (Z) ContextScopeSerializationCluster();
7915 case kUnlinkedCallCid:
7916 return new (Z) UnlinkedCallSerializationCluster();
7917 case kICDataCid:
7918 return new (Z) ICDataSerializationCluster();
7919 case kMegamorphicCacheCid:
7920 return new (Z) MegamorphicCacheSerializationCluster();
7921 case kSubtypeTestCacheCid:
7922 return new (Z) SubtypeTestCacheSerializationCluster();
7923 case kLoadingUnitCid:
7924 return new (Z) LoadingUnitSerializationCluster();
7925 case kLanguageErrorCid:
7926 return new (Z) LanguageErrorSerializationCluster();
7927 case kUnhandledExceptionCid:
7928 return new (Z) UnhandledExceptionSerializationCluster();
7929 case kLibraryPrefixCid:
7930 return new (Z) LibraryPrefixSerializationCluster();
7931 case kTypeCid:
7932 return new (Z) TypeSerializationCluster(is_canonical,
7933 cluster_represents_canonical_set);
7934 case kFunctionTypeCid:
7935 return new (Z) FunctionTypeSerializationCluster(
7936 is_canonical, cluster_represents_canonical_set);
7937 case kRecordTypeCid:
7938 return new (Z) RecordTypeSerializationCluster(
7939 is_canonical, cluster_represents_canonical_set);
7940 case kTypeParameterCid:
7941 return new (Z) TypeParameterSerializationCluster(
7942 is_canonical, cluster_represents_canonical_set);
7943 case kClosureCid:
7944 return new (Z) ClosureSerializationCluster(is_canonical);
7945 case kMintCid:
7946 return new (Z) MintSerializationCluster(is_canonical);
7947 case kDoubleCid:
7948 return new (Z) DoubleSerializationCluster(is_canonical);
7949 case kInt32x4Cid:
7950 case kFloat32x4Cid:
7951 case kFloat64x2Cid:
7952 return new (Z) Simd128SerializationCluster(cid, is_canonical);
7953 case kGrowableObjectArrayCid:
7954 return new (Z) GrowableObjectArraySerializationCluster();
7955 case kRecordCid:
7956 return new (Z) RecordSerializationCluster(is_canonical);
7957 case kStackTraceCid:
7958 return new (Z) StackTraceSerializationCluster();
7959 case kRegExpCid:
7960 return new (Z) RegExpSerializationCluster();
7961 case kWeakPropertyCid:
7962 return new (Z) WeakPropertySerializationCluster();
7963 case kMapCid:
7964 // We do not have mutable hash maps in snapshots.
7965 UNREACHABLE();
7966 case kConstMapCid:
7967 return new (Z) MapSerializationCluster(is_canonical, kConstMapCid);
7968 case kSetCid:
7969 // We do not have mutable hash sets in snapshots.
7970 UNREACHABLE();
7971 case kConstSetCid:
7972 return new (Z) SetSerializationCluster(is_canonical, kConstSetCid);
7973 case kArrayCid:
7974 return new (Z) ArraySerializationCluster(is_canonical, kArrayCid);
7975 case kImmutableArrayCid:
7976 return new (Z)
7977 ArraySerializationCluster(is_canonical, kImmutableArrayCid);
7978 case kWeakArrayCid:
7979 return new (Z) WeakArraySerializationCluster();
7980 case kStringCid:
7981 return new (Z) StringSerializationCluster(
7982 is_canonical, cluster_represents_canonical_set && !vm_);
7983#define CASE_FFI_CID(name) case kFfi##name##Cid:
7985#undef CASE_FFI_CID
7986 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7987 case kDeltaEncodedTypedDataCid:
7988 return new (Z) DeltaEncodedTypedDataSerializationCluster();
7989 case kWeakSerializationReferenceCid:
7990#if defined(DART_PRECOMPILER)
7991 ASSERT(kind_ == Snapshot::kFullAOT);
7992 return new (Z) WeakSerializationReferenceSerializationCluster();
7993#endif
7994 default:
7995 break;
7996 }
7997
7998 // The caller will check for nullptr and provide an error with more context
7999 // than is available here.
8000 return nullptr;
8001#endif // !DART_PRECOMPILED_RUNTIME
8002}
8003
8004bool Serializer::InCurrentLoadingUnitOrRoot(ObjectPtr obj) {
8005 if (loading_units_ == nullptr) return true;
8006
8007 intptr_t unit_id = heap_->GetLoadingUnit(obj);
8008 if (unit_id == WeakTable::kNoValue) {
8009 FATAL("Missing loading unit assignment: %s\n",
8010 Object::Handle(obj).ToCString());
8011 }
8012 return unit_id == LoadingUnit::kRootId || unit_id == current_loading_unit_id_;
8013}
8014
8015void Serializer::RecordDeferredCode(CodePtr code) {
8016 const intptr_t unit_id = heap_->GetLoadingUnit(code);
8017 ASSERT(unit_id != WeakTable::kNoValue && unit_id != LoadingUnit::kRootId);
8018 (*loading_units_)[unit_id]->AddDeferredObject(code);
8019}
8020
8021#if !defined(DART_PRECOMPILED_RUNTIME)
8022#if defined(DART_PRECOMPILER)
8023// We use the following encoding schemes when encoding references to Code
8024// objects.
8025//
8026// In AOT mode:
8027//
8028// 0 -- LazyCompile stub
8029// 1 -+
8030// | for non-root-unit/non-VM snapshots
8031// ... > reference into parent snapshot objects
8032// | (base is num_base_objects_ in this case, 0 otherwise).
8033// base -+
8034// base + 1 -+
8035// | for non-deferred Code objects (those with instructions)
8036// > index into the instructions table (code_index_).
8037// | (L is code_index_.Length()).
8038// base + L -+
8039// ... -+
8040// | for deferred Code objects (those without instructions)
8041// > index of this Code object in the deferred part of the
8042// | Code cluster.
8043//
8044// Note that this encoding has the following property: non-discarded
8045// non-deferred Code objects form the tail of the instructions table,
8046// which makes indices assigned to non-discarded non-deferred Code objects
8047// and deferred Code objects contiguous. This means when decoding
8048// code_index - (base + 1) - first_entry_with_code yields an index of the
8049// Code object in the Code cluster both for non-deferred and deferred
8050// Code objects.
8051//
8052// For JIT snapshots we do:
8053//
8054// 0 -- LazyCompile stub
8055// 1 -+
8056// |
8057// ... > index of the Code object in the Code cluster.
8058// |
8059//
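// Editorial worked example (not part of the original source; values are
// hypothetical): in a non-root AOT unit with num_base_objects_ = 10 and
// code_index_.Length() = 5, the LazyCompile stub encodes as 0, a Code object
// inherited from the parent snapshot with ref 7 encodes as 1 + 7 = 8, a
// non-deferred Code object whose instructions sit at (1-based) slot 3 of
// code_index_ encodes as 10 + 3 = 13, and the first deferred Code object in
// the cluster encodes as 1 + 10 + 5 + 0 = 16.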
8060intptr_t Serializer::GetCodeIndex(CodePtr code) {
8061 // In precompiled mode a Code object is uniquely identified by its
8062 // instructions (because ProgramVisitor::DedupInstructions will dedup Code
8063 // objects with the same instructions).
8064 if (code == StubCode::LazyCompile().ptr() && !vm_) {
8065 return 0;
8066 } else if (FLAG_precompiled_mode) {
8067 const intptr_t ref = heap_->GetObjectId(code);
8069
8070 const intptr_t base =
8072 ? 0
8073 : num_base_objects_;
8074
8075 // Check if we are referring to the Code object which originates from the
8076 // parent loading unit. In this case we write out the reference of this
8077 // object.
8078 if (!Code::IsDiscarded(code) && ref < base) {
8080 return 1 + ref;
8081 }
8082
8083 // Otherwise the code object must either be discarded or originate from
8084 // the Code cluster.
8085 ASSERT(Code::IsDiscarded(code) || (code_cluster_->first_ref() <= ref &&
8086 ref <= code_cluster_->last_ref()));
8087
8088 // If the Code object is non-deferred then simply write out the index of the
8089 // entry point, otherwise write out the index of the deferred code object.
8090 if (ref < code_cluster_->first_deferred_ref()) {
8091 const intptr_t key = static_cast<intptr_t>(code->untag()->instructions_);
8092 ASSERT(code_index_.HasKey(key));
8093 const intptr_t result = code_index_.Lookup(key);
8094 ASSERT(0 < result && result <= code_index_.Length());
8095 // Note: result already has + 1.
8096 return base + result;
8097 } else {
8098 // Note: only root snapshot can have deferred Code objects in the
8099 // cluster.
8100 const intptr_t cluster_index = ref - code_cluster_->first_deferred_ref();
8101 return 1 + base + code_index_.Length() + cluster_index;
8102 }
8103 } else {
8104 const intptr_t ref = heap_->GetObjectId(code);
8106 ASSERT(code_cluster_->first_ref() <= ref &&
8107 ref <= code_cluster_->last_ref());
8108 return 1 + (ref - code_cluster_->first_ref());
8109 }
8110}
8111#endif // defined(DART_PRECOMPILER)
8112
8113void Serializer::PrepareInstructions(
8114 const CompressedStackMaps& canonical_stack_map_entries) {
8115 if (!Snapshot::IncludesCode(kind())) return;
8116
8117 // Code objects that have identical/duplicate instructions must be adjacent in
8118 // the order that Code objects are written because the encoding of the
8119 // reference from the Code to the Instructions assumes monotonically
8120 // increasing offsets as part of a delta encoding. Also the code order table
8121 // that allows for mapping return addresses back to Code objects depends on
8122 // this sorting.
8123 if (code_cluster_ != nullptr) {
8124 CodeSerializationCluster::Sort(this, code_cluster_->objects());
8125 }
8126 if ((loading_units_ != nullptr) &&
8127 (current_loading_unit_id_ == LoadingUnit::kRootId)) {
8128 for (intptr_t i = LoadingUnit::kRootId + 1; i < loading_units_->length();
8129 i++) {
8130 auto unit_objects = loading_units_->At(i)->deferred_objects();
8131 CodeSerializationCluster::Sort(this, unit_objects);
8132 ASSERT(unit_objects->length() == 0 || code_cluster_ != nullptr);
8133 for (intptr_t j = 0; j < unit_objects->length(); j++) {
8134 code_cluster_->deferred_objects()->Add(unit_objects->At(j)->ptr());
8135 }
8136 }
8137 }
8138
8139#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8140 if (kind() == Snapshot::kFullAOT) {
8141 // Group the code objects whose instructions are not being deferred in this
8142 // snapshot unit in the order they will be written: first the code objects
8143 // encountered for the first time in this unit, written by the
8144 // CodeSerializationCluster, then code objects previously deferred whose
8145 // instructions are now written by UnitSerializationRoots. This order needs
8146 // to be known to finalize bare-instructions-mode's PC-relative calls.
8147 GrowableArray<CodePtr> code_objects;
8148 if (code_cluster_ != nullptr) {
8149 auto in = code_cluster_->objects();
8150 for (intptr_t i = 0; i < in->length(); i++) {
8151 code_objects.Add(in->At(i));
8152 }
8153 }
8154 if (loading_units_ != nullptr) {
8155 auto in =
8156 loading_units_->At(current_loading_unit_id_)->deferred_objects();
8157 for (intptr_t i = 0; i < in->length(); i++) {
8158 code_objects.Add(in->At(i)->ptr());
8159 }
8160 }
8161
8162 GrowableArray<ImageWriterCommand> writer_commands;
8163 RelocateCodeObjects(vm_, &code_objects, &writer_commands);
8164 image_writer_->PrepareForSerialization(&writer_commands);
8165
8166 if (code_objects.length() == 0) {
8167 return;
8168 }
8169
8170 // Build UntaggedInstructionsTable::Data object to be added to the
8171 // read-only data section of the snapshot. It contains:
8172 //
8173 // - a binary search table mapping an Instructions entry point to its
8174 // stack maps (by offset from the beginning of the Data object);
8175 // - followed by stack maps bytes;
8176 // - followed by canonical stack map entries.
8177 //
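 // Editorial sketch of the resulting layout (not part of the original
 // source):
 //
 //   [Data header | DataEntry 0 .. DataEntry N-1 | stack map payloads | canonical entries]
 //
 // where each DataEntry pairs an instructions text offset with the offset of
 // its stack map inside this Data object (see the binary search table
 // fill-in further below).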
8178 struct StackMapInfo : public ZoneAllocated {
8179 CompressedStackMapsPtr map;
8180 intptr_t use_count;
8181 uint32_t offset;
8182 };
8183
8184 GrowableArray<StackMapInfo*> stack_maps;
8185 IntMap<StackMapInfo*> stack_maps_info;
8186
8187 // Build code_index_ (which maps each Instructions object to the order in
8188 // which it appears in the code section in the end) and collect all
8189 // stack maps.
8190 // We also find the first Instructions object which is going to have a
8191 // Code object associated with it. This allows us to reduce the binary
8192 // search space when searching specifically for the Code object at runtime.
8193 uint32_t total = 0;
8194 intptr_t not_discarded_count = 0;
8195 uint32_t first_entry_with_code = 0;
8196 for (auto& cmd : writer_commands) {
8197 if (cmd.op == ImageWriterCommand::InsertInstructionOfCode) {
8198 RELEASE_ASSERT(code_objects[total] ==
8199 cmd.insert_instruction_of_code.code);
8200 ASSERT(!Code::IsDiscarded(cmd.insert_instruction_of_code.code) ||
8201 (not_discarded_count == 0));
8202 if (!Code::IsDiscarded(cmd.insert_instruction_of_code.code)) {
8203 if (not_discarded_count == 0) {
8204 first_entry_with_code = total;
8205 }
8206 not_discarded_count++;
8207 }
8208 total++;
8209
8210 // Update code_index_.
8211 {
8212 const intptr_t instr = static_cast<intptr_t>(
8213 cmd.insert_instruction_of_code.code->untag()->instructions_);
8214 ASSERT(!code_index_.HasKey(instr));
8215 code_index_.Insert(instr, total);
8216 }
8217
8218 // Collect stack maps.
8219 CompressedStackMapsPtr stack_map =
8220 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8221 const intptr_t key = static_cast<intptr_t>(stack_map);
8222
8223 if (stack_maps_info.HasKey(key)) {
8224 stack_maps_info.Lookup(key)->use_count++;
8225 } else {
8226 auto info = new StackMapInfo();
8227 info->map = stack_map;
8228 info->use_count = 1;
8229 stack_maps.Add(info);
8230 stack_maps_info.Insert(key, info);
8231 }
8232 }
8233 }
8234 ASSERT(static_cast<intptr_t>(total) == code_index_.Length());
8235 instructions_table_len_ = not_discarded_count;
8236
8237 // Sort stack maps by usage so that most commonly used stack maps are
8238 // together at the start of the Data object.
8239 stack_maps.Sort([](StackMapInfo* const* a, StackMapInfo* const* b) {
8240 if ((*a)->use_count < (*b)->use_count) return 1;
8241 if ((*a)->use_count > (*b)->use_count) return -1;
8242 return 0;
8243 });
8244
8245 // Build Data object.
8246 MallocWriteStream pc_mapping(4 * KB);
8247
8248 // Write the header out.
8249 {
8250 UntaggedInstructionsTable::Data header;
8251 memset(&header, 0, sizeof(header));
8252 header.length = total;
8253 header.first_entry_with_code = first_entry_with_code;
8254 pc_mapping.WriteFixed<UntaggedInstructionsTable::Data>(header);
8255 }
8256
8257 // Reserve space for the binary search table.
8258 for (auto& cmd : writer_commands) {
8259 if (cmd.op == ImageWriterCommand::InsertInstructionOfCode) {
8260 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>({0, 0});
8261 }
8262 }
8263
8264 // Now write collected stack maps after the binary search table.
8265 auto write_stack_map = [&](CompressedStackMapsPtr smap) {
8266 const auto flags_and_size = smap->untag()->payload()->flags_and_size();
8267 const auto payload_size =
8269 pc_mapping.WriteFixed<uint32_t>(flags_and_size);
8270 pc_mapping.WriteBytes(smap->untag()->payload()->data(), payload_size);
8271 };
8272
8273 for (auto sm : stack_maps) {
8274 sm->offset = pc_mapping.bytes_written();
8275 write_stack_map(sm->map);
8276 }
8277
8278 // Write canonical entries (if any).
8279 if (!canonical_stack_map_entries.IsNull()) {
8280 auto header = reinterpret_cast<UntaggedInstructionsTable::Data*>(
8281 pc_mapping.buffer());
8282 header->canonical_stack_map_entries_offset = pc_mapping.bytes_written();
8283 write_stack_map(canonical_stack_map_entries.ptr());
8284 }
8285 const auto total_bytes = pc_mapping.bytes_written();
8286
8287 // Now that we have offsets to all stack maps we can write binary
8288 // search table.
8289 pc_mapping.SetPosition(
8290 sizeof(UntaggedInstructionsTable::Data)); // Skip the header.
8291 for (auto& cmd : writer_commands) {
8292 if (cmd.op == ImageWriterCommand::InsertInstructionOfCode) {
8293 CompressedStackMapsPtr smap =
8294 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8295 const auto offset =
8296 stack_maps_info.Lookup(static_cast<intptr_t>(smap))->offset;
8297 const auto entry = image_writer_->GetTextOffsetFor(
8298 Code::InstructionsOf(cmd.insert_instruction_of_code.code),
8299 cmd.insert_instruction_of_code.code);
8300
8301 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>(
8302 {static_cast<uint32_t>(entry), offset});
8303 }
8304 }
8305 // Restore position so that Steal does not truncate the buffer.
8306 pc_mapping.SetPosition(total_bytes);
8307
8308 intptr_t length = 0;
8309 uint8_t* bytes = pc_mapping.Steal(&length);
8310
8311 instructions_table_rodata_offset_ =
8312 image_writer_->AddBytesToData(bytes, length);
8313 // Attribute all bytes in this object to the root for simplicity.
8314 if (profile_writer_ != nullptr) {
8315 const auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8316 profile_writer_->AttributeReferenceTo(
8319 "<instructions-table-rodata>"),
8320 {offset_space, instructions_table_rodata_offset_});
8321 }
8322 }
8323#endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8324}
8325
8326void Serializer::WriteInstructions(InstructionsPtr instr,
8327 uint32_t unchecked_offset,
8328 CodePtr code,
8329 bool deferred) {
8330 ASSERT(code != Code::null());
8331
8333 if (deferred) {
8334 return;
8335 }
8336
8337 const intptr_t offset = image_writer_->GetTextOffsetFor(instr, code);
8338#if defined(DART_PRECOMPILER)
8339 if (profile_writer_ != nullptr) {
8340 ASSERT(object_currently_writing_.id_ !=
8342 const auto offset_space = vm_ ? IdSpace::kVmText : IdSpace::kIsolateText;
8343 profile_writer_->AttributeReferenceTo(
8344 object_currently_writing_.id_,
8346 {offset_space, offset});
8347 }
8348
8349 if (Code::IsDiscarded(code)) {
8350 // Discarded Code objects are not supported in the vm isolate snapshot.
8351 ASSERT(!vm_);
8352 return;
8353 }
8354
8355 if (FLAG_precompiled_mode) {
8356 const uint32_t payload_info =
8357 (unchecked_offset << 1) | (Code::HasMonomorphicEntry(code) ? 0x1 : 0x0);
8358 WriteUnsigned(payload_info);
8359 return;
8360 }
8361#endif
8362 Write<uint32_t>(offset);
8363 WriteUnsigned(unchecked_offset);
8364}
8365
8366void Serializer::TraceDataOffset(uint32_t offset) {
8367 if (profile_writer_ == nullptr) return;
8368 // ROData cannot be roots.
8369 ASSERT(object_currently_writing_.id_ !=
8371 auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8372 // TODO(sjindel): Give this edge a more appropriate type than element
8373 // (internal, maybe?).
8374 profile_writer_->AttributeReferenceTo(
8375 object_currently_writing_.id_,
8376 V8SnapshotProfileWriter::Reference::Element(0), {offset_space, offset});
8377}
8378
8379uint32_t Serializer::GetDataOffset(ObjectPtr object) const {
8380#if defined(SNAPSHOT_BACKTRACE)
8381 return image_writer_->GetDataOffsetFor(object, ParentOf(object));
8382#else
8383 return image_writer_->GetDataOffsetFor(object);
8384#endif
8385}
8386
8387intptr_t Serializer::GetDataSize() const {
8388 if (image_writer_ == nullptr) {
8389 return 0;
8390 }
8391 return image_writer_->data_size();
8392}
8393#endif // !defined(DART_PRECOMPILED_RUNTIME)
8394
8395void Serializer::Push(ObjectPtr object, intptr_t cid_override) {
8396 const bool is_code = object->IsHeapObject() && object->IsCode();
8397 if (is_code && !Snapshot::IncludesCode(kind_)) {
8398 return; // Do not trace, will write null.
8399 }
8400
8401 intptr_t id = heap_->GetObjectId(object);
8402 if (id == kUnreachableReference) {
8403 // When discovering the transitive closure of objects reachable from the
8404 // roots we do not trace references, e.g. inside [RawCode], to
8405 // [RawInstructions], since [RawInstructions] doesn't contain any references
8406 // and the serialization code uses an [ImageWriter] for those.
8407 if (object->IsHeapObject() && object->IsInstructions()) {
8408 UnexpectedObject(object,
8409 "Instructions should only be reachable from Code");
8410 }
8411
8412 heap_->SetObjectId(object, kUnallocatedReference);
8413 ASSERT(IsReachableReference(heap_->GetObjectId(object)));
8414 stack_.Add({object, cid_override});
8415 if (!(is_code && Code::IsDiscarded(Code::RawCast(object)))) {
8416 num_written_objects_++;
8417 }
8418#if defined(SNAPSHOT_BACKTRACE)
8419 parent_pairs_.Add(&Object::Handle(zone_, object));
8420 parent_pairs_.Add(&Object::Handle(zone_, current_parent_));
8421#endif
8422 }
8423}
8424
8425void Serializer::PushWeak(ObjectPtr object) {
8426 // The GC considers immediate objects to always be alive. This doesn't happen
8427 // automatically in the serializer because the serializer does not have
8428 // immediate objects: it handles Smis as ref indices like all other objects.
8429 // This visit causes the serializer to reproduce the GC's semantics for
8430 // weakness, which in particular allows the templates in hash_table.h to work
8431 // with weak arrays because the metadata Smis always survive.
8432 if (!object->IsHeapObject() || vm_) {
8433 Push(object);
8434 }
8435}
8436
8437void Serializer::Trace(ObjectPtr object, intptr_t cid_override) {
8438 intptr_t cid;
8439 bool is_canonical;
8440 if (!object->IsHeapObject()) {
8441 // Smis are merged into the Mint cluster because Smis for the writer might
8442 // become Mints for the reader and vice versa.
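 // (Editorial note, not part of the original source: the Smi range depends
 // on the target word size, so a value written as a Smi by a 64-bit writer
 // may have to be boxed as a Mint by a 32-bit reader, and vice versa;
 // putting both in the Mint cluster keeps the cluster assignment independent
 // of that choice.)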
8443 cid = kMintCid;
8444 is_canonical = true;
8445 } else {
8446 cid = object->GetClassId();
8447 is_canonical = object->untag()->IsCanonical();
8448 }
8449 if (cid_override != kIllegalCid) {
8450 cid = cid_override;
8451 } else if (IsStringClassId(cid)) {
8452 cid = kStringCid;
8453 }
8454
8455 SerializationCluster** cluster_ref =
8456 is_canonical ? &canonical_clusters_by_cid_[cid] : &clusters_by_cid_[cid];
8457 if (*cluster_ref == nullptr) {
8458 *cluster_ref = NewClusterForClass(cid, is_canonical);
8459 if (*cluster_ref == nullptr) {
8460 UnexpectedObject(object, "No serialization cluster defined");
8461 }
8462 }
8463 SerializationCluster* cluster = *cluster_ref;
8464 ASSERT(cluster != nullptr);
8465 if (cluster->is_canonical() != is_canonical) {
8466 FATAL("cluster for %s (cid %" Pd ") %s as canonical, but %s",
8467 cluster->name(), cid,
8468 cluster->is_canonical() ? "marked" : "not marked",
8469 is_canonical ? "should be" : "should not be");
8470 }
8471
8472#if defined(SNAPSHOT_BACKTRACE)
8473 current_parent_ = object;
8474#endif
8475
8476 cluster->Trace(this, object);
8477
8478#if defined(SNAPSHOT_BACKTRACE)
8479 current_parent_ = Object::null();
8480#endif
8481}
8482
8483void Serializer::UnexpectedObject(ObjectPtr raw_object, const char* message) {
8484 // Exit the no safepoint scope so we can allocate while printing.
8485 while (thread()->no_safepoint_scope_depth() > 0) {
8486 thread()->DecrementNoSafepointScopeDepth();
8487 }
8488 Object& object = Object::Handle(raw_object);
8489 OS::PrintErr("Unexpected object (%s, %s): 0x%" Px " %s\n", message,
8490 Snapshot::KindToCString(kind_), static_cast<uword>(object.ptr()),
8491 object.ToCString());
8492#if defined(SNAPSHOT_BACKTRACE)
8493 while (!object.IsNull()) {
8494 object = ParentOf(object);
8495 OS::PrintErr("referenced by 0x%" Px " %s\n",
8496 static_cast<uword>(object.ptr()), object.ToCString());
8497 }
8498#endif
8499 OS::Abort();
8500}
8501
8502#if defined(SNAPSHOT_BACKTRACE)
8503ObjectPtr Serializer::ParentOf(ObjectPtr object) const {
8504 for (intptr_t i = 0; i < parent_pairs_.length(); i += 2) {
8505 if (parent_pairs_[i]->ptr() == object) {
8506 return parent_pairs_[i + 1]->ptr();
8507 }
8508 }
8509 return Object::null();
8510}
8511
8512ObjectPtr Serializer::ParentOf(const Object& object) const {
8513 for (intptr_t i = 0; i < parent_pairs_.length(); i += 2) {
8514 if (parent_pairs_[i]->ptr() == object.ptr()) {
8515 return parent_pairs_[i + 1]->ptr();
8516 }
8517 }
8518 return Object::null();
8519}
8520#endif // SNAPSHOT_BACKTRACE
8521
8522void Serializer::WriteVersionAndFeatures(bool is_vm_snapshot) {
8523 const char* expected_version = Version::SnapshotString();
8524 ASSERT(expected_version != nullptr);
8525 const intptr_t version_len = strlen(expected_version);
8526 WriteBytes(reinterpret_cast<const uint8_t*>(expected_version), version_len);
8527
8528 char* expected_features =
8529 Dart::FeaturesString(IsolateGroup::Current(), is_vm_snapshot, kind_);
8530 ASSERT(expected_features != nullptr);
8531 const intptr_t features_len = strlen(expected_features);
8532 WriteBytes(reinterpret_cast<const uint8_t*>(expected_features),
8533 features_len + 1);
8534 free(expected_features);
8535}
8536
8537#if !defined(DART_PRECOMPILED_RUNTIME)
8538static int CompareClusters(SerializationCluster* const* a,
8539 SerializationCluster* const* b) {
8540 if ((*a)->size() > (*b)->size()) {
8541 return -1;
8542 } else if ((*a)->size() < (*b)->size()) {
8543 return 1;
8544 } else {
8545 return 0;
8546 }
8547}
8548
8549#define CID_CLUSTER(Type) \
8550 reinterpret_cast<Type##SerializationCluster*>(clusters_by_cid_[k##Type##Cid])
8551
8553 const {
8555}
8556
8557ZoneGrowableArray<Object*>* Serializer::Serialize(SerializationRoots* roots) {
8558 // While object_currently_writing_ is initialized to the artificial root, we
8559 // set up a scope to ensure proper flushing to the profile.
8562 roots->AddBaseObjects(this);
8563
8564 NoSafepointScope no_safepoint;
8565
8566 roots->PushRoots(this);
8567
8568 // Resolving WeakSerializationReferences and WeakProperties may cause new
8569 // objects to be pushed on the stack, and handling the changes to the stack
8570 // may cause the targets of WeakSerializationReferences and keys of
8571 // WeakProperties to become reachable, so we do this as a fixed point
8572 // computation. Note that reachability is computed monotonically (an object
8573 // can change from not reachable to reachable, but never the reverse), which
8574 // is technically a conservative approximation for WSRs, but doing a strict
8575 // analysis that allows non-monotonic reachability may not halt.
8576 //
8577 // To see this, take a WSR whose replacement causes the target of another WSR
8578 // to become reachable, which then causes the target of the first WSR to
8579 // become reachable, but the only way to reach the target is through the
8580 // target of the second WSR, which was only reachable via the replacement
8581 // of the first.
8582 //
8583 // In practice, this case doesn't come up as replacements tend to be either
8584 // null, smis, or singleton objects that do not contain WSRs currently.
8585 while (stack_.length() > 0) {
8586 // Strong references.
8587 while (stack_.length() > 0) {
8588 StackEntry entry = stack_.RemoveLast();
8589 Trace(entry.obj, entry.cid_override);
8590 }
8591
8592 // Ephemeron references.
8593#if defined(DART_PRECOMPILER)
8594 if (auto const cluster = CID_CLUSTER(WeakSerializationReference)) {
8595 cluster->RetraceEphemerons(this);
8596 }
8597#endif
8598 if (auto const cluster = CID_CLUSTER(WeakProperty)) {
8599 cluster->RetraceEphemerons(this);
8600 }
8601 }
8602
8603#if defined(DART_PRECOMPILER)
8604 auto const wsr_cluster = CID_CLUSTER(WeakSerializationReference);
8605 if (wsr_cluster != nullptr) {
8606 // Now that we have computed the reachability fixpoint, we remove the
8607 // count of now-reachable WSRs as they are not actually serialized.
8608 num_written_objects_ -= wsr_cluster->Count(this);
8609 // We don't need to write this cluster, so remove it from consideration.
8610 clusters_by_cid_[kWeakSerializationReferenceCid] = nullptr;
8611 }
8612 ASSERT(clusters_by_cid_[kWeakSerializationReferenceCid] == nullptr);
8613#endif
8614
8615 code_cluster_ = CID_CLUSTER(Code);
8616
8618 // The order that PostLoad runs matters for some classes because of
8619 // assumptions during canonicalization, read filling, or post-load filling of
8620 // some classes about what has already been read and/or canonicalized.
8621 // Explicitly add these clusters first, then add the rest ordered by class id.
8622#define ADD_CANONICAL_NEXT(cid) \
8623 if (auto const cluster = canonical_clusters_by_cid_[cid]) { \
8624 clusters.Add(cluster); \
8625 canonical_clusters_by_cid_[cid] = nullptr; \
8626 }
8627#define ADD_NON_CANONICAL_NEXT(cid) \
8628 if (auto const cluster = clusters_by_cid_[cid]) { \
8629 clusters.Add(cluster); \
8630 clusters_by_cid_[cid] = nullptr; \
8631 }
8632 ADD_CANONICAL_NEXT(kOneByteStringCid)
8633 ADD_CANONICAL_NEXT(kTwoByteStringCid)
8634 ADD_CANONICAL_NEXT(kStringCid)
8635 ADD_CANONICAL_NEXT(kMintCid)
8636 ADD_CANONICAL_NEXT(kDoubleCid)
8637 ADD_CANONICAL_NEXT(kTypeParameterCid)
8638 ADD_CANONICAL_NEXT(kTypeCid)
8639 ADD_CANONICAL_NEXT(kTypeArgumentsCid)
8640 // Code cluster should be deserialized before Function as
8641 // FunctionDeserializationCluster::ReadFill uses instructions table
8642 // which is filled in CodeDeserializationCluster::ReadFill.
8643 // Code cluster should also precede ObjectPool as its ReadFill uses
8644 // entry points of stubs.
8645 ADD_NON_CANONICAL_NEXT(kCodeCid)
8646 // The function cluster should be deserialized before any closures, as
8647 // PostLoad for closures caches the entry point found in the function.
8648 ADD_NON_CANONICAL_NEXT(kFunctionCid)
8649 ADD_CANONICAL_NEXT(kClosureCid)
8650#undef ADD_CANONICAL_NEXT
8651#undef ADD_NON_CANONICAL_NEXT
8652 const intptr_t out_of_order_clusters = clusters.length();
8653 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8654 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8655 clusters.Add(cluster);
8656 }
8657 }
8658 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8659 if (auto const cluster = clusters_by_cid_[cid]) {
8660 clusters.Add(clusters_by_cid_[cid]);
8661 }
8662 }
8663 // Put back any taken out temporarily to avoid re-adding them during the loop.
8664 for (intptr_t i = 0; i < out_of_order_clusters; i++) {
8665 const auto& cluster = clusters.At(i);
8666 const intptr_t cid = cluster->cid();
8667 auto const cid_clusters =
8668 cluster->is_canonical() ? canonical_clusters_by_cid_ : clusters_by_cid_;
8669 ASSERT(cid_clusters[cid] == nullptr);
8670 cid_clusters[cid] = cluster;
8671 }
8672
8673 PrepareInstructions(roots->canonicalized_stack_map_entries());
8674
8675 intptr_t num_objects = num_base_objects_ + num_written_objects_;
8676#if defined(ARCH_IS_64_BIT)
8677 if (!Utils::IsInt(32, num_objects)) {
8678 FATAL("Ref overflow");
8679 }
8680#endif
8681
8682 WriteUnsigned(num_base_objects_);
8683 WriteUnsigned(num_objects);
8684 WriteUnsigned(clusters.length());
8685 ASSERT((instructions_table_len_ == 0) || FLAG_precompiled_mode);
8686 WriteUnsigned(instructions_table_len_);
8687 WriteUnsigned(instructions_table_rodata_offset_);
8688
8689 for (SerializationCluster* cluster : clusters) {
8690 cluster->WriteAndMeasureAlloc(this);
8691 bytes_heap_allocated_ += cluster->target_memory_size();
8692#if defined(DEBUG)
8693 Write<int32_t>(next_ref_index_);
8694#endif
8695 }
8696
8697 // We should have assigned a ref to every object we pushed.
8698 ASSERT((next_ref_index_ - 1) == num_objects);
8699 // And recorded them all in [objects_].
8700 ASSERT(objects_->length() == num_objects);
8701
8702#if defined(DART_PRECOMPILER)
8703 if (profile_writer_ != nullptr && wsr_cluster != nullptr) {
8704 // Post-WriteAlloc, we eagerly create artificial nodes for any unreachable
8705 // targets in reachable WSRs if writing a v8 snapshot profile, since they
8706 // will be used in AttributeReference().
8707 //
8708 // Unreachable WSRs may also need artificial nodes, as they may be members
8709 // of other unreachable objects that have artificial nodes in the profile,
8710 // but they are instead lazily handled in CreateArtificialNodeIfNeeded().
8711 wsr_cluster->CreateArtificialTargetNodesIfNeeded(this);
8712 }
8713#endif
8714
8715 for (SerializationCluster* cluster : clusters) {
8716 cluster->WriteAndMeasureFill(this);
8717#if defined(DEBUG)
8718 Write<int32_t>(kSectionMarker);
8719#endif
8720 }
8721
8722 roots->WriteRoots(this);
8723
8724#if defined(DEBUG)
8725 Write<int32_t>(kSectionMarker);
8726#endif
8727
8729
8731
8732 return objects_;
8733}
8734#endif // !defined(DART_PRECOMPILED_RUNTIME)
8735
8736#if defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
8737// The serialized format of the dispatch table is a sequence of variable-length
8738// integers (the built-in variable-length integer encoding/decoding of
8739// the stream). Each encoded integer e is interpreted thus:
8740// -kRecentCount .. -1 Pick value from the recent values buffer at index -1-e.
8741// 0 Empty (unused) entry.
8742// 1 .. kMaxRepeat Repeat previous entry e times.
8743// kIndexBase or higher Pick entry point from the object at index e-kIndexBase
8744// in the snapshot code cluster. Also put it in the recent
8745// values buffer at the next round-robin index.
8746
8747// Constants for serialization format. Chosen such that repeats and recent
8748// values are encoded as single bytes in SLEB128 encoding.
8749static constexpr intptr_t kDispatchTableSpecialEncodingBits = 6;
8750static constexpr intptr_t kDispatchTableRecentCount =
8751 1 << kDispatchTableSpecialEncodingBits;
8752static constexpr intptr_t kDispatchTableRecentMask =
8753 (1 << kDispatchTableSpecialEncodingBits) - 1;
8754static constexpr intptr_t kDispatchTableMaxRepeat =
8755 (1 << kDispatchTableSpecialEncodingBits) - 1;
8756static constexpr intptr_t kDispatchTableIndexBase = kDispatchTableMaxRepeat + 1;
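// Editorial worked example (not part of the original source; indices are
// hypothetical): a table [null, A, A, A, B, A], where Code A has code index 2
// and Code B has code index 9, is written as: 0 (the empty entry),
// kDispatchTableIndexBase + 2 (A, also stored in recent slot 0), 2 (repeat
// the previous entry twice), kDispatchTableIndexBase + 9 (B, recent slot 1),
// and ~0 == -1 (A again, found in recent slot 0).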
8757#endif // defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
8758
8759void Serializer::WriteDispatchTable(const Array& entries) {
8760#if defined(DART_PRECOMPILER)
8761 if (kind() != Snapshot::kFullAOT) return;
8762
8763 // Create an artificial node to which the bytes should be attributed. We
8764 // don't attribute them to entries.ptr(), as we don't want to attribute the
8765 // bytes for printing out a length of 0 to Object::null() when the dispatch
8766 // table is empty.
8767 const intptr_t profile_ref = AssignArtificialRef();
8768 const auto& dispatch_table_profile_id = GetProfileId(profile_ref);
8769 if (profile_writer_ != nullptr) {
8770 profile_writer_->SetObjectTypeAndName(dispatch_table_profile_id,
8771 "DispatchTable", "dispatch_table");
8772 profile_writer_->AddRoot(dispatch_table_profile_id);
8773 }
8774 WritingObjectScope scope(this, dispatch_table_profile_id);
8775 if (profile_writer_ != nullptr) {
8776 // We'll write the Array object as a property of the artificial dispatch
8777 // table node, so Code objects otherwise unreferenced will have it as an
8778 // ancestor.
8780 AttributePropertyRef(entries.ptr(), "<code entries>");
8781 }
8782
8783 const intptr_t bytes_before = bytes_written();
8784 const intptr_t table_length = entries.IsNull() ? 0 : entries.Length();
8785
8786 ASSERT(table_length <= compiler::target::kWordMax);
8787 WriteUnsigned(table_length);
8788 if (table_length == 0) {
8789 dispatch_table_size_ = bytes_written() - bytes_before;
8790 return;
8791 }
8792
8793 ASSERT(code_cluster_ != nullptr);
8794 // If instructions can be deduped, the code order table in the deserializer
8795 // may not contain all Code objects in the snapshot. Thus, we write the ID
8796 // for the first code object here so we can retrieve it during deserialization
8797 // and calculate the snapshot ID for Code objects from the cluster index.
8798 //
8799 // We could just use the snapshot reference ID of the Code object itself
8800 // instead of the cluster index and avoid this. However, since entries are
8801 // SLEB128 encoded, the size delta for serializing the first ID once is less
8802 // than the size delta of serializing the ID plus kIndexBase for each entry,
8803 // even when Code objects are allocated before all other non-base objects.
8804 //
8805 // We could also map Code objects to the first Code object in the cluster with
8806 // the same entry point and serialize that ID instead, but that loses
8807 // information about which Code object was originally referenced.
8808 WriteUnsigned(code_cluster_->first_ref());
8809
8810 CodePtr previous_code = nullptr;
8811 CodePtr recent[kDispatchTableRecentCount] = {nullptr};
8812 intptr_t recent_index = 0;
8813 intptr_t repeat_count = 0;
8814 for (intptr_t i = 0; i < table_length; i++) {
8815 auto const code = Code::RawCast(entries.At(i));
8816 // First, see if we're repeating the previous entry (invalid, recent, or
8817 // encoded).
8818 if (code == previous_code) {
8819 if (++repeat_count == kDispatchTableMaxRepeat) {
8820 Write(kDispatchTableMaxRepeat);
8821 repeat_count = 0;
8822 }
8823 continue;
8824 }
8825 // Emit any outstanding repeat count before handling the new code value.
8826 if (repeat_count > 0) {
8827 Write(repeat_count);
8828 repeat_count = 0;
8829 }
8830 previous_code = code;
8831 // The invalid entry can be repeated, but is never part of the recent list
8832 // since it already encodes to a single byte.
8833 if (code == Code::null()) {
8834 Write(0);
8835 continue;
8836 }
8837 // Check against the recent entries, and write an encoded reference to
8838 // the recent entry if found.
8839 intptr_t found_index = 0;
8840 for (; found_index < kDispatchTableRecentCount; found_index++) {
8841 if (recent[found_index] == code) break;
8842 }
8843 if (found_index < kDispatchTableRecentCount) {
8844 Write(~found_index);
8845 continue;
8846 }
8847 // We have a non-repeated, non-recent entry, so encode the reference ID of
8848 // the code object and emit that.
8849 auto const code_index = GetCodeIndex(code);
8850 // Use the index in the code cluster, not in the snapshot.
8851 auto const encoded = kDispatchTableIndexBase + code_index;
8853 Write(encoded);
8854 recent[recent_index] = code;
8855 recent_index = (recent_index + 1) & kDispatchTableRecentMask;
8856 }
8857 if (repeat_count > 0) {
8858 Write(repeat_count);
8859 }
8860 dispatch_table_size_ = bytes_written() - bytes_before;
8861#endif // defined(DART_PRECOMPILER)
8862}
8863
8864void Serializer::PrintSnapshotSizes() {
8865#if !defined(DART_PRECOMPILED_RUNTIME)
8866 if (FLAG_print_snapshot_sizes_verbose) {
8867 TextBuffer buffer(1024);
8868 // Header, using format sizes matching those below to ensure alignment.
8869 buffer.Printf("%25s", "Cluster");
8870 buffer.Printf(" %6s", "Objs");
8871 buffer.Printf(" %8s", "Size");
8872 buffer.Printf(" %8s", "Fraction");
8873 buffer.Printf(" %10s", "Cumulative");
8874 buffer.Printf(" %8s", "HeapSize");
8875 buffer.Printf(" %5s", "Cid");
8876 buffer.Printf(" %9s", "Canonical");
8877 buffer.AddString("\n");
8878 GrowableArray<SerializationCluster*> clusters_by_size;
8879 for (intptr_t cid = 1; cid < num_cids_; cid++) {
8880 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8881 clusters_by_size.Add(cluster);
8882 }
8883 if (auto const cluster = clusters_by_cid_[cid]) {
8884 clusters_by_size.Add(cluster);
8885 }
8886 }
8887 intptr_t text_size = 0;
8888 if (image_writer_ != nullptr) {
8889 auto const text_object_count = image_writer_->GetTextObjectCount();
8890 text_size = image_writer_->text_size();
8891 intptr_t trampoline_count, trampoline_size;
8892 image_writer_->GetTrampolineInfo(&trampoline_count, &trampoline_size);
8893 auto const instructions_count = text_object_count - trampoline_count;
8894 auto const instructions_size = text_size - trampoline_size;
8895 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8896 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Instructions"),
8897 instructions_count, instructions_size));
8898 if (trampoline_size > 0) {
8899 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8900 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Trampoline"),
8901 trampoline_count, trampoline_size));
8902 }
8903 }
8904 // The dispatch_table_size_ will be 0 if the snapshot did not include a
8905 // dispatch table (i.e., the VM snapshot). For a precompiled isolate
8906 // snapshot, we always serialize at least _one_ byte for the DispatchTable.
8907 if (dispatch_table_size_ > 0) {
8908 const auto& dispatch_table_entries = Array::Handle(
8909 zone_,
8910 isolate_group()->object_store()->dispatch_table_code_entries());
8911 auto const entry_count =
8912 dispatch_table_entries.IsNull() ? 0 : dispatch_table_entries.Length();
8913 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8914 "DispatchTable", entry_count, dispatch_table_size_));
8915 }
8916 if (instructions_table_len_ > 0) {
8917 const intptr_t memory_size =
8919 compiler::target::Array::InstanceSize(instructions_table_len_);
8920 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8921 "InstructionsTable", instructions_table_len_, 0, memory_size));
8922 }
8923 clusters_by_size.Sort(CompareClusters);
8924 double total_size =
8925 static_cast<double>(bytes_written() + GetDataSize() + text_size);
8926 double cumulative_fraction = 0.0;
8927 for (intptr_t i = 0; i < clusters_by_size.length(); i++) {
8928 SerializationCluster* cluster = clusters_by_size[i];
8929 double fraction = static_cast<double>(cluster->size()) / total_size;
8930 cumulative_fraction += fraction;
8931 buffer.Printf("%25s", cluster->name());
8932 buffer.Printf(" %6" Pd "", cluster->num_objects());
8933 buffer.Printf(" %8" Pd "", cluster->size());
8934 buffer.Printf(" %1.6lf", fraction);
8935 buffer.Printf(" %1.8lf", cumulative_fraction);
8936 buffer.Printf(" %8" Pd "", cluster->target_memory_size());
8937 if (cluster->cid() != -1) {
8938 buffer.Printf(" %5" Pd "", cluster->cid());
8939 } else {
8940 buffer.Printf(" %5s", "");
8941 }
8942 if (cluster->is_canonical()) {
8943 buffer.Printf(" %9s", "canonical");
8944 } else {
8945 buffer.Printf(" %9s", "");
8946 }
8947 buffer.AddString("\n");
8948 }
8949 OS::PrintErr("%s", buffer.buffer());
8950 }
8951#endif // !defined(DART_PRECOMPILED_RUNTIME)
8952}
8953
8954Deserializer::Deserializer(Thread* thread,
8955 Snapshot::Kind kind,
8956 const uint8_t* buffer,
8957 intptr_t size,
8958 const uint8_t* data_buffer,
8959 const uint8_t* instructions_buffer,
8960 bool is_non_root_unit,
8961 intptr_t offset)
8962 : ThreadStackResource(thread),
8963 heap_(thread->isolate_group()->heap()),
8964 old_space_(heap_->old_space()),
8965 freelist_(old_space_->DataFreeList()),
8966 zone_(thread->zone()),
8967 kind_(kind),
8968 stream_(buffer, size),
8969 image_reader_(nullptr),
8970 refs_(nullptr),
8971 next_ref_index_(kFirstReference),
8972 clusters_(nullptr),
8973 is_non_root_unit_(is_non_root_unit),
8974 instructions_table_(InstructionsTable::Handle(thread->zone())) {
8975 if (Snapshot::IncludesCode(kind)) {
8976 ASSERT(instructions_buffer != nullptr);
8977 ASSERT(data_buffer != nullptr);
8978 image_reader_ = new (zone_) ImageReader(data_buffer, instructions_buffer);
8979 }
8980 stream_.SetPosition(offset);
8981}
8982
8983Deserializer::~Deserializer() {
8984 delete[] clusters_;
8985}
8986
8987DeserializationCluster* Deserializer::ReadCluster() {
8988 const uint32_t tags = Read<uint32_t>();
8989 const intptr_t cid = UntaggedObject::ClassIdTag::decode(tags);
8990 const bool is_canonical = UntaggedObject::CanonicalBit::decode(tags);
8991 const bool is_immutable = UntaggedObject::ImmutableBit::decode(tags);
8992 Zone* Z = zone_;
8993 if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
8994 return new (Z) InstanceDeserializationCluster(
8995 cid, is_canonical, is_immutable, !is_non_root_unit_);
8996 }
8997 if (IsTypedDataViewClassId(cid)) {
8998 ASSERT(!is_canonical);
8999 return new (Z) TypedDataViewDeserializationCluster(cid);
9000 }
9001 if (IsExternalTypedDataClassId(cid)) {
9002 ASSERT(!is_canonical);
9003 return new (Z) ExternalTypedDataDeserializationCluster(cid);
9004 }
9005 if (IsTypedDataClassId(cid)) {
9006 ASSERT(!is_canonical);
9007 return new (Z) TypedDataDeserializationCluster(cid);
9008 }
9009
9010#if !defined(DART_COMPRESSED_POINTERS)
9011 if (Snapshot::IncludesCode(kind_)) {
9012 switch (cid) {
9013 case kPcDescriptorsCid:
9014 case kCodeSourceMapCid:
9015 case kCompressedStackMapsCid:
9016 return new (Z)
9017 RODataDeserializationCluster(cid, is_canonical, !is_non_root_unit_);
9018 case kOneByteStringCid:
9019 case kTwoByteStringCid:
9020 case kStringCid:
9021 if (!is_non_root_unit_) {
9022 return new (Z) RODataDeserializationCluster(cid, is_canonical,
9023 !is_non_root_unit_);
9024 }
9025 break;
9026 }
9027 }
9028#endif
9029
9030 switch (cid) {
9031 case kClassCid:
9032 ASSERT(!is_canonical);
9033 return new (Z) ClassDeserializationCluster();
9034 case kTypeParametersCid:
9035 return new (Z) TypeParametersDeserializationCluster();
9036 case kTypeArgumentsCid:
9037 return new (Z)
9038 TypeArgumentsDeserializationCluster(is_canonical, !is_non_root_unit_);
9039 case kPatchClassCid:
9040 ASSERT(!is_canonical);
9041 return new (Z) PatchClassDeserializationCluster();
9042 case kFunctionCid:
9043 ASSERT(!is_canonical);
9044 return new (Z) FunctionDeserializationCluster();
9045 case kClosureDataCid:
9046 ASSERT(!is_canonical);
9047 return new (Z) ClosureDataDeserializationCluster();
9048 case kFfiTrampolineDataCid:
9049 ASSERT(!is_canonical);
9050 return new (Z) FfiTrampolineDataDeserializationCluster();
9051 case kFieldCid:
9052 ASSERT(!is_canonical);
9053 return new (Z) FieldDeserializationCluster();
9054 case kScriptCid:
9055 ASSERT(!is_canonical);
9056 return new (Z) ScriptDeserializationCluster();
9057 case kLibraryCid:
9058 ASSERT(!is_canonical);
9059 return new (Z) LibraryDeserializationCluster();
9060 case kNamespaceCid:
9061 ASSERT(!is_canonical);
9062 return new (Z) NamespaceDeserializationCluster();
9063#if !defined(DART_PRECOMPILED_RUNTIME)
9064 case kKernelProgramInfoCid:
9065 ASSERT(!is_canonical);
9066 return new (Z) KernelProgramInfoDeserializationCluster();
9067#endif // !DART_PRECOMPILED_RUNTIME
9068 case kCodeCid:
9069 ASSERT(!is_canonical);
9070 return new (Z) CodeDeserializationCluster();
9071 case kObjectPoolCid:
9072 ASSERT(!is_canonical);
9073 return new (Z) ObjectPoolDeserializationCluster();
9074 case kPcDescriptorsCid:
9075 ASSERT(!is_canonical);
9076 return new (Z) PcDescriptorsDeserializationCluster();
9077 case kCodeSourceMapCid:
9078 ASSERT(!is_canonical);
9079 return new (Z) CodeSourceMapDeserializationCluster();
9080 case kCompressedStackMapsCid:
9081 ASSERT(!is_canonical);
9082 return new (Z) CompressedStackMapsDeserializationCluster();
9083 case kExceptionHandlersCid:
9084 ASSERT(!is_canonical);
9085 return new (Z) ExceptionHandlersDeserializationCluster();
9086 case kContextCid:
9087 ASSERT(!is_canonical);
9088 return new (Z) ContextDeserializationCluster();
9089 case kContextScopeCid:
9090 ASSERT(!is_canonical);
9091 return new (Z) ContextScopeDeserializationCluster();
9092 case kUnlinkedCallCid:
9093 ASSERT(!is_canonical);
9094 return new (Z) UnlinkedCallDeserializationCluster();
9095 case kICDataCid:
9096 ASSERT(!is_canonical);
9097 return new (Z) ICDataDeserializationCluster();
9098 case kMegamorphicCacheCid:
9099 ASSERT(!is_canonical);
9100 return new (Z) MegamorphicCacheDeserializationCluster();
9101 case kSubtypeTestCacheCid:
9102 ASSERT(!is_canonical);
9103 return new (Z) SubtypeTestCacheDeserializationCluster();
9104 case kLoadingUnitCid:
9105 ASSERT(!is_canonical);
9106 return new (Z) LoadingUnitDeserializationCluster();
9107 case kLanguageErrorCid:
9108 ASSERT(!is_canonical);
9109 return new (Z) LanguageErrorDeserializationCluster();
9110 case kUnhandledExceptionCid:
9111 ASSERT(!is_canonical);
9112 return new (Z) UnhandledExceptionDeserializationCluster();
9113 case kLibraryPrefixCid:
9114 ASSERT(!is_canonical);
9115 return new (Z) LibraryPrefixDeserializationCluster();
9116 case kTypeCid:
9117 return new (Z)
9118 TypeDeserializationCluster(is_canonical, !is_non_root_unit_);
9119 case kFunctionTypeCid:
9120 return new (Z)
9121 FunctionTypeDeserializationCluster(is_canonical, !is_non_root_unit_);
9122 case kRecordTypeCid:
9123 return new (Z)
9124 RecordTypeDeserializationCluster(is_canonical, !is_non_root_unit_);
9125 case kTypeParameterCid:
9126 return new (Z)
9127 TypeParameterDeserializationCluster(is_canonical, !is_non_root_unit_);
9128 case kClosureCid:
9129 return new (Z)
9130 ClosureDeserializationCluster(is_canonical, !is_non_root_unit_);
9131 case kMintCid:
9132 return new (Z)
9133 MintDeserializationCluster(is_canonical, !is_non_root_unit_);
9134 case kDoubleCid:
9135 return new (Z)
9136 DoubleDeserializationCluster(is_canonical, !is_non_root_unit_);
9137 case kInt32x4Cid:
9138 case kFloat32x4Cid:
9139 case kFloat64x2Cid:
9140 return new (Z)
9141 Simd128DeserializationCluster(cid, is_canonical, !is_non_root_unit_);
9142 case kGrowableObjectArrayCid:
9143 ASSERT(!is_canonical);
9144 return new (Z) GrowableObjectArrayDeserializationCluster();
9145 case kRecordCid:
9146 return new (Z)
9147 RecordDeserializationCluster(is_canonical, !is_non_root_unit_);
9148 case kStackTraceCid:
9149 ASSERT(!is_canonical);
9150 return new (Z) StackTraceDeserializationCluster();
9151 case kRegExpCid:
9152 ASSERT(!is_canonical);
9153 return new (Z) RegExpDeserializationCluster();
9154 case kWeakPropertyCid:
9155 ASSERT(!is_canonical);
9156 return new (Z) WeakPropertyDeserializationCluster();
9157 case kMapCid:
9158 // We do not have mutable hash maps in snapshots.
9159 UNREACHABLE();
9160 case kConstMapCid:
9161 return new (Z) MapDeserializationCluster(kConstMapCid, is_canonical,
9162 !is_non_root_unit_);
9163 case kSetCid:
9164 // We do not have mutable hash sets in snapshots.
9165 UNREACHABLE();
9166 case kConstSetCid:
9167 return new (Z) SetDeserializationCluster(kConstSetCid, is_canonical,
9168 !is_non_root_unit_);
9169 case kArrayCid:
9170 return new (Z) ArrayDeserializationCluster(kArrayCid, is_canonical,
9171 !is_non_root_unit_);
9172 case kImmutableArrayCid:
9173 return new (Z) ArrayDeserializationCluster(
9174 kImmutableArrayCid, is_canonical, !is_non_root_unit_);
9175 case kWeakArrayCid:
9176 return new (Z) WeakArrayDeserializationCluster();
9177 case kStringCid:
9178 return new (Z) StringDeserializationCluster(
9179 is_canonical,
9180 !is_non_root_unit_ && isolate_group() != Dart::vm_isolate_group());
9181#define CASE_FFI_CID(name) case kFfi##name##Cid:
9183#undef CASE_FFI_CID
9184 return new (Z) InstanceDeserializationCluster(
9185 cid, is_canonical, is_immutable, !is_non_root_unit_);
9186 case kDeltaEncodedTypedDataCid:
9187 return new (Z) DeltaEncodedTypedDataDeserializationCluster();
9188 default:
9189 break;
9190 }
9191 FATAL("No cluster defined for cid %" Pd, cid);
9192 return nullptr;
9193}
9194
9195void Deserializer::ReadDispatchTable(
9196 ReadStream* stream,
9197 bool deferred,
9198 const InstructionsTable& root_instruction_table,
9199 intptr_t deferred_code_start_index,
9200 intptr_t deferred_code_end_index) {
9201#if defined(DART_PRECOMPILED_RUNTIME)
9202 const uint8_t* table_snapshot_start = stream->AddressOfCurrentPosition();
9203 const intptr_t length = stream->ReadUnsigned();
9204 if (length == 0) return;
9205
9206 const intptr_t first_code_id = stream->ReadUnsigned();
9207 deferred_code_start_index -= first_code_id;
9208 deferred_code_end_index -= first_code_id;
9209
9210 auto const IG = isolate_group();
9211 auto code = IG->object_store()->dispatch_table_null_error_stub();
9212 ASSERT(code != Code::null());
9213 uword null_entry = Code::EntryPointOf(code);
9214
9215 DispatchTable* table;
9216 if (deferred) {
9217 table = IG->dispatch_table();
9218 ASSERT(table != nullptr && table->length() == length);
9219 } else {
9220 ASSERT(IG->dispatch_table() == nullptr);
9221 table = new DispatchTable(length);
9222 }
9223 auto const array = table->array();
9224 uword value = 0;
9225 uword recent[kDispatchTableRecentCount] = {0};
9226 intptr_t recent_index = 0;
9227 intptr_t repeat_count = 0;
9228 for (intptr_t i = 0; i < length; i++) {
9229 if (repeat_count > 0) {
9230 array[i] = value;
9231 repeat_count--;
9232 continue;
9233 }
9234 auto const encoded = stream->Read<intptr_t>();
9235 if (encoded == 0) {
9236 value = null_entry;
9237 } else if (encoded < 0) {
9238 intptr_t r = ~encoded;
9239 ASSERT(r < kDispatchTableRecentCount);
9240 value = recent[r];
9241 } else if (encoded <= kDispatchTableMaxRepeat) {
9242 repeat_count = encoded - 1;
9243 } else {
9244 const intptr_t code_index = encoded - kDispatchTableIndexBase;
9245 if (deferred) {
9246 const intptr_t code_id =
9247 CodeIndexToClusterIndex(root_instruction_table, code_index);
9248 if ((deferred_code_start_index <= code_id) &&
9249 (code_id < deferred_code_end_index)) {
9250 auto code = static_cast<CodePtr>(Ref(first_code_id + code_id));
9251 value = Code::EntryPointOf(code);
9252 } else {
9253 // Reuse old value from the dispatch table.
9254 value = array[i];
9255 }
9256 } else {
9257 value = GetEntryPointByCodeIndex(code_index);
9258 }
9259 recent[recent_index] = value;
9260 recent_index = (recent_index + 1) & kDispatchTableRecentMask;
9261 }
9262 array[i] = value;
9263 }
9264 ASSERT(repeat_count == 0);
9265
9266 if (!deferred) {
9267 IG->set_dispatch_table(table);
9268 intptr_t table_snapshot_size =
9269 stream->AddressOfCurrentPosition() - table_snapshot_start;
9270 IG->set_dispatch_table_snapshot(table_snapshot_start);
9271 IG->set_dispatch_table_snapshot_size(table_snapshot_size);
9272 }
9273#endif
9274}
9275
9276ApiErrorPtr Deserializer::VerifyImageAlignment() {
9277 if (image_reader_ != nullptr) {
9278 return image_reader_->VerifyAlignment();
9279 }
9280 return ApiError::null();
9281}
9282
9284 IsolateGroup* isolate_group) {
9285 auto prev_position = stream_.Position();
9286 char* error = VerifyVersion();
9287 if (error == nullptr) {
9288 const char* features = nullptr;
9289 intptr_t features_length = 0;
9290 char* error = ReadFeatures(&features, &features_length);
9291 if (error == nullptr) {
9292 if (strstr(features, " no-coverage") != nullptr) {
9293 isolate_group->set_coverage(false);
9294 } else if (strstr(features, " coverage") != nullptr) {
9295 isolate_group->set_coverage(true);
9296 }
9297 }
9298 }
9299
9300 stream_.SetPosition(prev_position);
9301}
9302
9303char* SnapshotHeaderReader::VerifyVersionAndFeatures(
9304 IsolateGroup* isolate_group,
9305 intptr_t* offset) {
9306 char* error = VerifyVersion();
9307 if (error == nullptr) {
9308 error = VerifyFeatures(isolate_group);
9309 }
9310 if (error == nullptr) {
9311 *offset = stream_.Position();
9312 }
9313 return error;
9314}
9315
9316char* SnapshotHeaderReader::VerifyVersion() {
9317 // If the version string doesn't match, return an error.
9318 // Note: New things are allocated only if we're going to return an error.
9319
9320 const char* expected_version = Version::SnapshotString();
9321 ASSERT(expected_version != nullptr);
9322 const intptr_t version_len = strlen(expected_version);
9323 if (stream_.PendingBytes() < version_len) {
9324 const intptr_t kMessageBufferSize = 128;
9325 char message_buffer[kMessageBufferSize];
9326 Utils::SNPrint(message_buffer, kMessageBufferSize,
9327 "No full snapshot version found, expected '%s'",
9328 expected_version);
9329 return BuildError(message_buffer);
9330 }
9331
9332 const char* version =
9333 reinterpret_cast<const char*>(stream_.AddressOfCurrentPosition());
9334 ASSERT(version != nullptr);
9335 if (strncmp(version, expected_version, version_len) != 0) {
9336 const intptr_t kMessageBufferSize = 256;
9337 char message_buffer[kMessageBufferSize];
9338 char* actual_version = Utils::StrNDup(version, version_len);
9339 Utils::SNPrint(message_buffer, kMessageBufferSize,
9340 "Wrong %s snapshot version, expected '%s' found '%s'",
9341 (Snapshot::IsFull(kind_)) ? "full" : "script",
9342 expected_version, actual_version);
9343 free(actual_version);
9344 return BuildError(message_buffer);
9345 }
9346 stream_.Advance(version_len);
9347
9348 return nullptr;
9349}
9350
9351char* SnapshotHeaderReader::VerifyFeatures(IsolateGroup* isolate_group) {
9352 const char* expected_features =
9353 Dart::FeaturesString(isolate_group, (isolate_group == nullptr), kind_);
9354 ASSERT(expected_features != nullptr);
9355 const intptr_t expected_len = strlen(expected_features);
9356
9357 const char* features = nullptr;
9358 intptr_t features_length = 0;
9359
9360 auto error = ReadFeatures(&features, &features_length);
9361 if (error != nullptr) {
9362 return error;
9363 }
9364
9365 if (features_length != expected_len ||
9366 (strncmp(features, expected_features, expected_len) != 0)) {
9367 const intptr_t kMessageBufferSize = 1024;
9368 char message_buffer[kMessageBufferSize];
9369 char* actual_features = Utils::StrNDup(
9370 features, features_length < 1024 ? features_length : 1024);
9371 Utils::SNPrint(message_buffer, kMessageBufferSize,
9372 "Snapshot not compatible with the current VM configuration: "
9373 "the snapshot requires '%s' but the VM has '%s'",
9374 actual_features, expected_features);
9375 free(const_cast<char*>(expected_features));
9376 free(actual_features);
9377 return BuildError(message_buffer);
9378 }
9379 free(const_cast<char*>(expected_features));
9380 return nullptr;
9381}
9382
9383char* SnapshotHeaderReader::ReadFeatures(const char** features,
9384 intptr_t* features_length) {
9385 const char* cursor =
9386 reinterpret_cast<const char*>(stream_.AddressOfCurrentPosition());
9387 const intptr_t length = Utils::StrNLen(cursor, stream_.PendingBytes());
9388 if (length == stream_.PendingBytes()) {
9389 return BuildError(
9390 "The features string in the snapshot was not '\\0'-terminated.");
9391 }
9392 *features = cursor;
9393 *features_length = length;
9394 stream_.Advance(length + 1);
9395 return nullptr;
9396}
9397
9398char* SnapshotHeaderReader::BuildError(const char* message) {
9399 return Utils::StrDup(message);
9400}
9401
9402ApiErrorPtr FullSnapshotReader::ConvertToApiError(char* message) {
9403 // This can also fail while bringing up the VM isolate, so make sure to
9404 // allocate the error message in old space.
9405 const String& msg = String::Handle(String::New(message, Heap::kOld));
9406
9407 // The [message] was constructed with [BuildError] and needs to be freed.
9408 free(message);
9409
9410 return ApiError::New(msg, Heap::kOld);
9411}
9412
9413void Deserializer::ReadInstructions(CodePtr code, bool deferred) {
9414#if defined(DART_PRECOMPILED_RUNTIME)
9415 if (deferred) {
9416 uword entry_point = StubCode::NotLoaded().EntryPoint();
9417 code->untag()->entry_point_ = entry_point;
9418 code->untag()->unchecked_entry_point_ = entry_point;
9419 code->untag()->monomorphic_entry_point_ = entry_point;
9420 code->untag()->monomorphic_unchecked_entry_point_ = entry_point;
9421 code->untag()->instructions_length_ = 0;
9422 return;
9423 }
9424
9425 const uword payload_start = instructions_table_.EntryPointAt(
9426 instructions_table_.rodata()->first_entry_with_code +
9427 instructions_index_);
9428 const uint32_t payload_info = ReadUnsigned();
9429 const uint32_t unchecked_offset = payload_info >> 1;
9430 const bool has_monomorphic_entrypoint = (payload_info & 0x1) == 0x1;
9431
9432 const uword entry_offset =
9433 has_monomorphic_entrypoint ? Instructions::kPolymorphicEntryOffsetAOT : 0;
9434 const uword monomorphic_entry_offset =
9435 has_monomorphic_entrypoint ? Instructions::kMonomorphicEntryOffsetAOT : 0;
9436
9437 const uword entry_point = payload_start + entry_offset;
9438 const uword monomorphic_entry_point =
9439 payload_start + monomorphic_entry_offset;
9440
9441 instructions_table_.SetCodeAt(instructions_index_++, code);
9442
9443 // There are no serialized RawInstructions objects in this mode.
9444 code->untag()->instructions_ = Instructions::null();
9445 code->untag()->entry_point_ = entry_point;
9446 code->untag()->unchecked_entry_point_ = entry_point + unchecked_offset;
9447 code->untag()->monomorphic_entry_point_ = monomorphic_entry_point;
9448 code->untag()->monomorphic_unchecked_entry_point_ =
9449 monomorphic_entry_point + unchecked_offset;
9450#else
9451 ASSERT(!deferred);
9452 InstructionsPtr instr = image_reader_->GetInstructionsAt(Read<uint32_t>());
9453 uint32_t unchecked_offset = ReadUnsigned();
9454 code->untag()->instructions_ = instr;
9455 code->untag()->unchecked_offset_ = unchecked_offset;
9457 const uint32_t active_offset = Read<uint32_t>();
9458 instr = image_reader_->GetInstructionsAt(active_offset);
9459 unchecked_offset = ReadUnsigned();
9460 code->untag()->active_instructions_ = instr;
9461 Code::InitializeCachedEntryPointsFrom(code, instr, unchecked_offset);
9462#endif // defined(DART_PRECOMPILED_RUNTIME)
9463}
9464
9465void Deserializer::EndInstructions() {
9466#if defined(DART_PRECOMPILED_RUNTIME)
9467 if (instructions_table_.IsNull()) {
9468 ASSERT(instructions_index_ == 0);
9469 return;
9470 }
9471
9472 const auto& code_objects =
9473 Array::Handle(instructions_table_.ptr()->untag()->code_objects());
9474 ASSERT(code_objects.Length() == instructions_index_);
9475
9476 uword previous_end = image_reader_->GetBareInstructionsEnd();
9477 for (intptr_t i = instructions_index_ - 1; i >= 0; --i) {
9478 CodePtr code = Code::RawCast(code_objects.At(i));
9480 ASSERT(start <= previous_end);
9481 code->untag()->instructions_length_ = previous_end - start;
9482 previous_end = start;
9483 }
9484
9485 ObjectStore* object_store = IsolateGroup::Current()->object_store();
9486 GrowableObjectArray& tables =
9487 GrowableObjectArray::Handle(zone_, object_store->instructions_tables());
9488 if (tables.IsNull()) {
9489 tables = GrowableObjectArray::New(Heap::kOld);
9490 object_store->set_instructions_tables(tables);
9491 }
9492 if ((tables.Length() == 0) ||
9493 (tables.At(tables.Length() - 1) != instructions_table_.ptr())) {
9494 ASSERT((!is_non_root_unit_ && tables.Length() == 0) ||
9495 (is_non_root_unit_ && tables.Length() > 0));
9496 tables.Add(instructions_table_, Heap::kOld);
9497 }
9498#endif
9499}
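EndInstructions() above never reads instruction lengths from the snapshot; it reconstructs them by walking the code objects from last to first and taking the distance from each payload start to the start of the following one, with the final entry running up to the end of the bare-instructions image. A standalone sketch of the same back-to-front computation over plain addresses, assuming the starts are sorted ascending (the function name is illustrative):

#include <cstddef>
#include <cstdint>
#include <vector>

// Walks payload start addresses back to front: each length is the gap to
// the next payload, and the last payload runs to the image end.
inline std::vector<uintptr_t> ComputePayloadLengths(
    const std::vector<uintptr_t>& starts, uintptr_t image_end) {
  std::vector<uintptr_t> lengths(starts.size());
  uintptr_t previous_end = image_end;
  for (std::ptrdiff_t i = static_cast<std::ptrdiff_t>(starts.size()) - 1;
       i >= 0; --i) {
    lengths[i] = previous_end - starts[i];
    previous_end = starts[i];
  }
  return lengths;
}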
9500
9501ObjectPtr Deserializer::GetObjectAt(uint32_t offset) const {
9502 return image_reader_->GetObjectAt(offset);
9503}
9504
9505class HeapLocker : public StackResource {
9506 public:
9507 HeapLocker(Thread* thread, PageSpace* page_space)
9508 : StackResource(thread),
9509 page_space_(page_space),
9510 freelist_(page_space->DataFreeList()) {
9511 page_space_->AcquireLock(freelist_);
9512 }
9513 ~HeapLocker() { page_space_->ReleaseLock(freelist_); }
9514
9515 private:
9516 PageSpace* page_space_;
9517 FreeList* freelist_;
9518};
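HeapLocker above is a plain RAII guard: the old-space freelist lock is acquired in the constructor and released in the destructor, so bump-pointer snapshot allocation stays exclusive for exactly the scope of the deserialization block. The same shape in generic form, using std::mutex purely for illustration:

#include <mutex>

// Generic RAII guard in the spirit of HeapLocker: the lock is held for
// exactly the lifetime of the guard, so early returns cannot leak it.
class ScopedLock {
 public:
  explicit ScopedLock(std::mutex& mu) : mu_(mu) { mu_.lock(); }
  ~ScopedLock() { mu_.unlock(); }

  ScopedLock(const ScopedLock&) = delete;
  ScopedLock& operator=(const ScopedLock&) = delete;

 private:
  std::mutex& mu_;
};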
9519
9520void Deserializer::Deserialize(DeserializationRoots* roots) {
9521 const void* clustered_start = AddressOfCurrentPosition();
9522
9523 Array& refs = Array::Handle(zone_);
9524 num_base_objects_ = ReadUnsigned();
9525 num_objects_ = ReadUnsigned();
9526 num_clusters_ = ReadUnsigned();
9527 const intptr_t instructions_table_len = ReadUnsigned();
9528 const uint32_t instruction_table_data_offset = ReadUnsigned();
9529 USE(instruction_table_data_offset);
9530
9531 clusters_ = new DeserializationCluster*[num_clusters_];
9532 refs = Array::New(num_objects_ + kFirstReference, Heap::kOld);
9533
9534#if defined(DART_PRECOMPILED_RUNTIME)
9535 if (instructions_table_len > 0) {
9536 ASSERT(FLAG_precompiled_mode);
9537 const uword start_pc = image_reader_->GetBareInstructionsAt(0);
9538 const uword end_pc = image_reader_->GetBareInstructionsEnd();
9539 uword instruction_table_data = 0;
9540 if (instruction_table_data_offset != 0) {
9541 // NoSafepointScope to satisfy assertion in DataStart. InstructionsTable
9542 // data resides in RO memory and is immovable and immortal making it
9543 // safe to use DataStart result outside of NoSafepointScope.
9544 NoSafepointScope no_safepoint;
9545 instruction_table_data = reinterpret_cast<uword>(
9546 OneByteString::DataStart(String::Handle(static_cast<StringPtr>(
9547 image_reader_->GetObjectAt(instruction_table_data_offset)))));
9548 }
9549 instructions_table_ = InstructionsTable::New(
9550 instructions_table_len, start_pc, end_pc, instruction_table_data);
9551 }
9552#else
9553 ASSERT(instructions_table_len == 0);
9554#endif // defined(DART_PRECOMPILED_RUNTIME)
9555
9556 {
9557 // The deserializer initializes objects without using the write barrier,
9558 // partly for speed since we know all the deserialized objects will be
9559 // long-lived and partly because the target objects may not yet be
9560 // initialized at the time of the write. To make this safe, we must ensure
9561 // there are no other threads mutating this heap, and that incremental
9562 // marking is not in progress. This is normally the case anyway for the
9563 // main snapshot being deserialized at isolate load, but checks are needed
9564 // when loading secondary snapshots as part of deferred loading.
9565 HeapIterationScope iter(thread());
9566 // For bump-pointer allocation in old-space.
9567 HeapLocker hl(thread(), heap_->old_space());
9568 // Must not perform any other type of allocation, which might trigger GC
9569 // while there are still uninitialized objects.
9570 NoSafepointScope no_safepoint;
9571 refs_ = refs.ptr();
9572
9573 roots->AddBaseObjects(this);
9574
9575 if (num_base_objects_ != (next_ref_index_ - kFirstReference)) {
9576 FATAL("Snapshot expects %" Pd
9577 " base objects, but deserializer provided %" Pd,
9578 num_base_objects_, next_ref_index_ - kFirstReference);
9579 }
9580
9581 {
9582 TIMELINE_DURATION(thread(), Isolate, "ReadAlloc");
9583 for (intptr_t i = 0; i < num_clusters_; i++) {
9584 clusters_[i] = ReadCluster();
9585 clusters_[i]->ReadAlloc(this);
9586#if defined(DEBUG)
9587 intptr_t serializers_next_ref_index_ = Read<int32_t>();
9588 ASSERT_EQUAL(serializers_next_ref_index_, next_ref_index_);
9589#endif
9590 }
9591 }
9592
9593 // We should have completely filled the ref array.
9594 ASSERT_EQUAL(next_ref_index_ - kFirstReference, num_objects_);
9595
9596 {
9597 TIMELINE_DURATION(thread(), Isolate, "ReadFill");
9598 for (intptr_t i = 0; i < num_clusters_; i++) {
9599 clusters_[i]->ReadFill(this);
9600#if defined(DEBUG)
9601 int32_t section_marker = Read<int32_t>();
9602 ASSERT(section_marker == kSectionMarker);
9603#endif
9604 }
9605 }
9606
9607 roots->ReadRoots(this);
9608
9609#if defined(DEBUG)
9610 int32_t section_marker = Read<int32_t>();
9611 ASSERT(section_marker == kSectionMarker);
9612#endif
9613
9614 refs_ = nullptr;
9615 }
9616
9617 roots->PostLoad(this, refs);
9618
9619 auto isolate_group = thread()->isolate_group();
9620#if defined(DEBUG)
9621 isolate_group->ValidateClassTable();
9622 if (isolate_group != Dart::vm_isolate()->group()) {
9623 isolate_group->heap()->Verify("Deserializer::Deserialize");
9624 }
9625#endif
9626
9627 {
9628 TIMELINE_DURATION(thread(), Isolate, "PostLoad");
9629 for (intptr_t i = 0; i < num_clusters_; i++) {
9630 clusters_[i]->PostLoad(this, refs);
9631 }
9632 }
9633
9634 if (isolate_group->snapshot_is_dontneed_safe()) {
9635 size_t clustered_length =
9636 reinterpret_cast<uword>(AddressOfCurrentPosition()) -
9637 reinterpret_cast<uword>(clustered_start);
9638 VirtualMemory::DontNeed(const_cast<void*>(clustered_start),
9639 clustered_length);
9640 }
9641}
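Deserialize() above drives every cluster through two passes: ReadAlloc allocates each object and assigns it a ref index, and only after the whole ref array is populated does ReadFill patch object fields, which is why forward references never need fixup lists. A stripped-down sketch of that two-pass shape, with hypothetical stand-in types:

#include <memory>
#include <vector>

// Hypothetical stand-ins for DeserializationCluster/Deserializer, showing
// only the alloc-then-fill control flow.
struct SketchDeserializer;

struct SketchCluster {
  virtual ~SketchCluster() = default;
  virtual void ReadAlloc(SketchDeserializer* d) = 0;  // create objects, assign refs
  virtual void ReadFill(SketchDeserializer* d) = 0;   // patch fields via refs
};

struct SketchDeserializer {
  std::vector<std::unique_ptr<SketchCluster>> clusters;

  void Deserialize() {
    // Pass 1: every object exists and has a ref index before any field is
    // written, so pass 2 can resolve references in either direction.
    for (auto& c : clusters) c->ReadAlloc(this);
    // Pass 2: fill in object contents now that all targets are allocated.
    for (auto& c : clusters) c->ReadFill(this);
  }
};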
9642
9643#if !defined(DART_PRECOMPILED_RUNTIME)
9644FullSnapshotWriter::FullSnapshotWriter(
9645 Snapshot::Kind kind,
9646 NonStreamingWriteStream* vm_snapshot_data,
9647 NonStreamingWriteStream* isolate_snapshot_data,
9648 ImageWriter* vm_image_writer,
9649 ImageWriter* isolate_image_writer)
9650 : thread_(Thread::Current()),
9651 kind_(kind),
9652 vm_snapshot_data_(vm_snapshot_data),
9653 isolate_snapshot_data_(isolate_snapshot_data),
9654 vm_isolate_snapshot_size_(0),
9655 isolate_snapshot_size_(0),
9656 vm_image_writer_(vm_image_writer),
9657 isolate_image_writer_(isolate_image_writer) {
9658 ASSERT(isolate_group() != nullptr);
9659 ASSERT(heap() != nullptr);
9660 ObjectStore* object_store = isolate_group()->object_store();
9661 ASSERT(object_store != nullptr);
9662
9663#if defined(DEBUG)
9664 isolate_group()->ValidateClassTable();
9665#endif // DEBUG
9666
9667#if defined(DART_PRECOMPILER)
9668 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
9669 profile_writer_ = new (zone()) V8SnapshotProfileWriter(zone());
9670 }
9671#endif
9672}
9673
9674FullSnapshotWriter::~FullSnapshotWriter() {}
9675
9676ZoneGrowableArray<Object*>* FullSnapshotWriter::WriteVMSnapshot() {
9677 TIMELINE_DURATION(thread(), Isolate, "WriteVMSnapshot");
9678
9679 ASSERT(vm_snapshot_data_ != nullptr);
9680 Serializer serializer(thread(), kind_, vm_snapshot_data_, vm_image_writer_,
9681 /*vm=*/true, profile_writer_);
9682
9683 serializer.ReserveHeader();
9684 serializer.WriteVersionAndFeatures(true);
9685 VMSerializationRoots roots(
9686 WeakArray::Handle(zone(),
9687 Dart::vm_isolate_group()->object_store()->symbol_table()),
9688 /*should_write_symbols=*/!Snapshot::IncludesStringsInROData(kind_));
9689 ZoneGrowableArray<Object*>* objects = serializer.Serialize(&roots);
9690 serializer.FillHeader(serializer.kind());
9691 clustered_vm_size_ = serializer.bytes_written();
9692 heap_vm_size_ = serializer.bytes_heap_allocated();
9693
9694 if (Snapshot::IncludesCode(kind_)) {
9695 vm_image_writer_->SetProfileWriter(profile_writer_);
9696 vm_image_writer_->Write(serializer.stream(), true);
9697 mapped_data_size_ += vm_image_writer_->data_size();
9698 mapped_text_size_ += vm_image_writer_->text_size();
9699 vm_image_writer_->ResetOffsets();
9700 vm_image_writer_->ClearProfileWriter();
9701 }
9702
9703 // The clustered part + the direct mapped data part.
9704 vm_isolate_snapshot_size_ = serializer.bytes_written();
9705 return objects;
9706}
9707
9708void FullSnapshotWriter::WriteProgramSnapshot(
9709 ZoneGrowableArray<Object*>* objects,
9710 GrowableArray<LoadingUnitSerializationData*>* units) {
9711 TIMELINE_DURATION(thread(), Isolate, "WriteProgramSnapshot");
9712
9713 ASSERT(isolate_snapshot_data_ != nullptr);
9714 Serializer serializer(thread(), kind_, isolate_snapshot_data_,
9715 isolate_image_writer_, /*vm=*/false, profile_writer_);
9716 serializer.set_loading_units(units);
9717 serializer.set_current_loading_unit_id(LoadingUnit::kRootId);
9718 ObjectStore* object_store = isolate_group()->object_store();
9719 ASSERT(object_store != nullptr);
9720
9721 // These type arguments must always be retained.
9722 ASSERT(object_store->type_argument_int()->untag()->IsCanonical());
9723 ASSERT(object_store->type_argument_double()->untag()->IsCanonical());
9724 ASSERT(object_store->type_argument_string()->untag()->IsCanonical());
9725 ASSERT(object_store->type_argument_string_dynamic()->untag()->IsCanonical());
9726 ASSERT(object_store->type_argument_string_string()->untag()->IsCanonical());
9727
9728 serializer.ReserveHeader();
9729 serializer.WriteVersionAndFeatures(false);
9730 ProgramSerializationRoots roots(objects, object_store, kind_);
9731 objects = serializer.Serialize(&roots);
9732 if (units != nullptr) {
9733 (*units)[LoadingUnit::kRootId]->set_objects(objects);
9734 }
9735 serializer.FillHeader(serializer.kind());
9736 clustered_isolate_size_ = serializer.bytes_written();
9737 heap_isolate_size_ = serializer.bytes_heap_allocated();
9738
9739 if (Snapshot::IncludesCode(kind_)) {
9740 isolate_image_writer_->SetProfileWriter(profile_writer_);
9741 isolate_image_writer_->Write(serializer.stream(), false);
9742#if defined(DART_PRECOMPILER)
9743 isolate_image_writer_->DumpStatistics();
9744#endif
9745
9746 mapped_data_size_ += isolate_image_writer_->data_size();
9747 mapped_text_size_ += isolate_image_writer_->text_size();
9748 isolate_image_writer_->ResetOffsets();
9749 isolate_image_writer_->ClearProfileWriter();
9750 }
9751
9752 // The clustered part + the direct mapped data part.
9753 isolate_snapshot_size_ = serializer.bytes_written();
9754}
9755
9756void FullSnapshotWriter::WriteUnitSnapshot(
9757 GrowableArray<LoadingUnitSerializationData*>* units,
9758 LoadingUnitSerializationData* unit,
9759 uint32_t program_hash) {
9760 TIMELINE_DURATION(thread(), Isolate, "WriteUnitSnapshot");
9761
9762 Serializer serializer(thread(), kind_, isolate_snapshot_data_,
9763 isolate_image_writer_, /*vm=*/false, profile_writer_);
9764 serializer.set_loading_units(units);
9765 serializer.set_current_loading_unit_id(unit->id());
9766
9767 serializer.ReserveHeader();
9768 serializer.WriteVersionAndFeatures(false);
9769 serializer.Write(program_hash);
9770
9771 UnitSerializationRoots roots(unit);
9772 unit->set_objects(serializer.Serialize(&roots));
9773
9774 serializer.FillHeader(serializer.kind());
9775 clustered_isolate_size_ = serializer.bytes_written();
9776
9777 if (Snapshot::IncludesCode(kind_)) {
9778 isolate_image_writer_->SetProfileWriter(profile_writer_);
9779 isolate_image_writer_->Write(serializer.stream(), false);
9780#if defined(DART_PRECOMPILER)
9781 isolate_image_writer_->DumpStatistics();
9782#endif
9783
9784 mapped_data_size_ += isolate_image_writer_->data_size();
9785 mapped_text_size_ += isolate_image_writer_->text_size();
9786 isolate_image_writer_->ResetOffsets();
9787 isolate_image_writer_->ClearProfileWriter();
9788 }
9789
9790 // The clustered part + the direct mapped data part.
9791 isolate_snapshot_size_ = serializer.bytes_written();
9792}
9793
9794void FullSnapshotWriter::WriteFullSnapshot(
9795 GrowableArray<LoadingUnitSerializationData*>* data) {
9796 ZoneGrowableArray<Object*>* objects;
9797 if (vm_snapshot_data_ != nullptr) {
9798 objects = WriteVMSnapshot();
9799 } else {
9800 objects = nullptr;
9801 }
9802
9803 if (isolate_snapshot_data_ != nullptr) {
9804 WriteProgramSnapshot(objects, data);
9805 }
9806
9807 if (FLAG_print_snapshot_sizes) {
9808 OS::Print("VMIsolate(CodeSize): %" Pd "\n", clustered_vm_size_);
9809 OS::Print("Isolate(CodeSize): %" Pd "\n", clustered_isolate_size_);
9810 OS::Print("ReadOnlyData(CodeSize): %" Pd "\n", mapped_data_size_);
9811 OS::Print("Instructions(CodeSize): %" Pd "\n", mapped_text_size_);
9812 OS::Print("Total(CodeSize): %" Pd "\n",
9813 clustered_vm_size_ + clustered_isolate_size_ + mapped_data_size_ +
9814 mapped_text_size_);
9815 OS::Print("VMIsolate(HeapSize): %" Pd "\n", heap_vm_size_);
9816 OS::Print("Isolate(HeapSize): %" Pd "\n", heap_isolate_size_);
9817 OS::Print("Total(HeapSize): %" Pd "\n", heap_vm_size_ + heap_isolate_size_);
9818 }
9819
9820#if defined(DART_PRECOMPILER)
9821 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
9822 profile_writer_->Write(FLAG_write_v8_snapshot_profile_to);
9823 }
9824#endif
9825}
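The FLAG_print_snapshot_sizes output above separates the clustered (serialized object graph) bytes from the directly mapped read-only data and instructions; Total(CodeSize) is simply their sum. A worked example with made-up byte counts, only to show how the printed figures relate:

#include <cstdio>

int main() {
  // Hypothetical byte counts; the real numbers come from the serializer and
  // the image writers.
  long clustered_vm = 100 * 1024;       // VMIsolate(CodeSize)
  long clustered_isolate = 900 * 1024;  // Isolate(CodeSize)
  long mapped_data = 300 * 1024;        // ReadOnlyData(CodeSize)
  long mapped_text = 2 * 1024 * 1024;   // Instructions(CodeSize)

  long total = clustered_vm + clustered_isolate + mapped_data + mapped_text;
  std::printf("Total(CodeSize): %ld\n", total);  // prints 3428352
  return 0;
}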
9826#endif // defined(DART_PRECOMPILED_RUNTIME)
9827
9828FullSnapshotReader::FullSnapshotReader(const Snapshot* snapshot,
9829 const uint8_t* instructions_buffer,
9830 Thread* thread)
9831 : kind_(snapshot->kind()),
9832 thread_(thread),
9833 buffer_(snapshot->Addr()),
9834 size_(snapshot->length()),
9835 data_image_(snapshot->DataImage()),
9836 instructions_image_(instructions_buffer) {}
9837
9838char* SnapshotHeaderReader::InitializeGlobalVMFlagsFromSnapshot(
9839 const Snapshot* snapshot) {
9840 SnapshotHeaderReader header_reader(snapshot);
9841
9842 char* error = header_reader.VerifyVersion();
9843 if (error != nullptr) {
9844 return error;
9845 }
9846
9847 const char* features = nullptr;
9848 intptr_t features_length = 0;
9849 error = header_reader.ReadFeatures(&features, &features_length);
9850 if (error != nullptr) {
9851 return error;
9852 }
9853
9854 ASSERT(features[features_length] == '\0');
9855 const char* cursor = features;
9856 while (*cursor != '\0') {
9857 while (*cursor == ' ') {
9858 cursor++;
9859 }
9860
9861 const char* end = strstr(cursor, " ");
9862 if (end == nullptr) {
9863 end = features + features_length;
9864 }
9865
9866#define SET_FLAG(name) \
9867 if (strncmp(cursor, #name, end - cursor) == 0) { \
9868 FLAG_##name = true; \
9869 cursor = end; \
9870 continue; \
9871 } \
9872 if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \
9873 FLAG_##name = false; \
9874 cursor = end; \
9875 continue; \
9876 }
9877
9878#define CHECK_FLAG(name, mode) \
9879 if (strncmp(cursor, #name, end - cursor) == 0) { \
9880 if (!FLAG_##name) { \
9881 return header_reader.BuildError("Flag " #name \
9882 " is true in snapshot, " \
9883 "but " #name \
9884 " is always false in " mode); \
9885 } \
9886 cursor = end; \
9887 continue; \
9888 } \
9889 if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \
9890 if (FLAG_##name) { \
9891 return header_reader.BuildError("Flag " #name \
9892 " is false in snapshot, " \
9893 "but " #name \
9894 " is always true in " mode); \
9895 } \
9896 cursor = end; \
9897 continue; \
9898 }
9899
9900#define SET_P(name, T, DV, C) SET_FLAG(name)
9901
9902#if defined(PRODUCT)
9903#define SET_OR_CHECK_R(name, PV, T, DV, C) CHECK_FLAG(name, "product mode")
9904#else
9905#define SET_OR_CHECK_R(name, PV, T, DV, C) SET_FLAG(name)
9906#endif
9907
9908#if defined(PRODUCT)
9909#define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) CHECK_FLAG(name, "product mode")
9910#elif defined(DART_PRECOMPILED_RUNTIME)
9911#define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) \
9912 CHECK_FLAG(name, "the precompiled runtime")
9913#else
9914#define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) SET_FLAG(name)
9915#endif
9916
9917#if !defined(DEBUG)
9918#define SET_OR_CHECK_D(name, T, DV, C) CHECK_FLAG(name, "non-debug mode")
9919#else
9920#define SET_OR_CHECK_D(name, T, DV, C) SET_FLAG(name)
9921#endif
9922
9923 VM_GLOBAL_FLAG_LIST(SET_P, SET_OR_CHECK_R, SET_OR_CHECK_C, SET_OR_CHECK_D)
9924
9925#undef SET_OR_CHECK_D
9926#undef SET_OR_CHECK_C
9927#undef SET_OR_CHECK_R
9928#undef SET_P
9929#undef CHECK_FLAG
9930#undef SET_FLAG
9931
9932 cursor = end;
9933 }
9934
9935 return nullptr;
9936}
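InitializeGlobalVMFlagsFromSnapshot() above tokenizes the space-separated feature string and, for each token, either flips a boolean VM flag ("foo" / "no-foo") via SET_FLAG or rejects the snapshot via CHECK_FLAG when the token contradicts a flag that is fixed in this build configuration. A self-contained sketch of the same tokenize-and-toggle idea, using a plain map in place of the VM's FLAG_ globals (names are illustrative):

#include <cstring>
#include <map>
#include <string>

// Applies "name" / "no-name" tokens from a space-separated feature string to
// a map of boolean flags: a "no-" prefix clears the flag, otherwise it is set.
inline void ApplyFeatureString(const char* features,
                               std::map<std::string, bool>* flags) {
  const char* cursor = features;
  while (*cursor != '\0') {
    while (*cursor == ' ') cursor++;       // skip separators
    if (*cursor == '\0') break;            // nothing but trailing spaces left
    const char* end = std::strchr(cursor, ' ');
    if (end == nullptr) end = cursor + std::strlen(cursor);
    std::string token(cursor, static_cast<size_t>(end - cursor));
    if (token.rfind("no-", 0) == 0) {
      (*flags)[token.substr(3)] = false;   // "no-foo" clears foo
    } else {
      (*flags)[token] = true;              // "foo" sets foo
    }
    cursor = end;
  }
}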
9937
9938ApiErrorPtr FullSnapshotReader::ReadVMSnapshot() {
9939 SnapshotHeaderReader header_reader(kind_, buffer_, size_);
9940
9941 intptr_t offset = 0;
9942 char* error = header_reader.VerifyVersionAndFeatures(
9943 /*isolate_group=*/nullptr, &offset);
9944 if (error != nullptr) {
9945 return ConvertToApiError(error);
9946 }
9947
9948 // Even though there are no concurrent threads to guard against, some
9949 // logic we do in deserialization triggers common code that asserts the
9950 // program lock is held.
9951 SafepointWriteRwLocker ml(thread_, isolate_group()->program_lock());
9952
9953 Deserializer deserializer(thread_, kind_, buffer_, size_, data_image_,
9954 instructions_image_, /*is_non_root_unit=*/false,
9955 offset);
9956 ApiErrorPtr api_error = deserializer.VerifyImageAlignment();
9957 if (api_error != ApiError::null()) {
9958 return api_error;
9959 }
9960
9961 if (Snapshot::IncludesCode(kind_)) {
9962 ASSERT(data_image_ != nullptr);
9963 thread_->isolate_group()->SetupImagePage(data_image_,
9964 /* is_executable */ false);
9965 ASSERT(instructions_image_ != nullptr);
9966 thread_->isolate_group()->SetupImagePage(instructions_image_,
9967 /* is_executable */ true);
9968 }
9969
9970 VMDeserializationRoots roots;
9971 deserializer.Deserialize(&roots);
9972
9973#if defined(DART_PRECOMPILED_RUNTIME)
9974 // Initialize entries in the VM portion of the BSS segment.
9976 Image image(instructions_image_);
9977 if (auto const bss = image.bss()) {
9978 BSS::Initialize(thread_, bss, /*vm=*/true);
9979 }
9980#endif // defined(DART_PRECOMPILED_RUNTIME)
9981
9982 return ApiError::null();
9983}
9984
9985ApiErrorPtr FullSnapshotReader::ReadProgramSnapshot() {
9986 SnapshotHeaderReader header_reader(kind_, buffer_, size_);
9987 header_reader.SetCoverageFromSnapshotFeatures(thread_->isolate_group());
9988 intptr_t offset = 0;
9989 char* error =
9990 header_reader.VerifyVersionAndFeatures(thread_->isolate_group(), &offset);
9991 if (error != nullptr) {
9992 return ConvertToApiError(error);
9993 }
9994
9995 // Even though there are no concurrent threads to guard against, some
9996 // logic we do in deserialization triggers common code that asserts the
9997 // program lock is held.
9998 SafepointWriteRwLocker ml(thread_, isolate_group()->program_lock());
9999
10000 Deserializer deserializer(thread_, kind_, buffer_, size_, data_image_,
10001 instructions_image_, /*is_non_root_unit=*/false,
10002 offset);
10003 ApiErrorPtr api_error = deserializer.VerifyImageAlignment();
10004 if (api_error != ApiError::null()) {
10005 return api_error;
10006 }
10007
10008 if (Snapshot::IncludesCode(kind_)) {
10009 ASSERT(data_image_ != nullptr);
10010 thread_->isolate_group()->SetupImagePage(data_image_,
10011 /* is_executable */ false);
10012 ASSERT(instructions_image_ != nullptr);
10013 thread_->isolate_group()->SetupImagePage(instructions_image_,
10014 /* is_executable */ true);
10015 }
10016
10017 ProgramDeserializationRoots roots(thread_->isolate_group()->object_store());
10018 deserializer.Deserialize(&roots);
10019
10020 if (Snapshot::IncludesCode(kind_)) {
10021 const auto& units = Array::Handle(
10022 thread_->isolate_group()->object_store()->loading_units());
10023 if (!units.IsNull()) {
10024 const auto& unit = LoadingUnit::Handle(
10025 LoadingUnit::RawCast(units.At(LoadingUnit::kRootId)));
10026 // Unlike other units, we don't explicitly load the root loading unit,
10027 // so we mark it as loaded here, setting the instructions image as well.
10028 unit.set_load_outstanding();
10029 unit.set_instructions_image(instructions_image_);
10030 unit.set_loaded(true);
10031 }
10032 }
10033
10034 InitializeBSS();
10035
10036 return ApiError::null();
10037}
10038
10039ApiErrorPtr FullSnapshotReader::ReadUnitSnapshot(const LoadingUnit& unit) {
10040 SnapshotHeaderReader header_reader(kind_, buffer_, size_);
10041 intptr_t offset = 0;
10042 char* error =
10043 header_reader.VerifyVersionAndFeatures(thread_->isolate_group(), &offset);
10044 if (error != nullptr) {
10045 return ConvertToApiError(error);
10046 }
10047
10048 Deserializer deserializer(
10049 thread_, kind_, buffer_, size_, data_image_, instructions_image_,
10050 /*is_non_root_unit=*/unit.id() != LoadingUnit::kRootId, offset);
10051 ApiErrorPtr api_error = deserializer.VerifyImageAlignment();
10052 if (api_error != ApiError::null()) {
10053 return api_error;
10054 }
10055 {
10056 Array& units =
10057 Array::Handle(isolate_group()->object_store()->loading_units());
10058 uint32_t main_program_hash = Smi::Value(Smi::RawCast(units.At(0)));
10059 uint32_t unit_program_hash = deserializer.Read<uint32_t>();
10060 if (main_program_hash != unit_program_hash) {
10061 return ApiError::New(String::Handle(
10062 String::New("Deferred loading unit is from a different "
10063 "program than the main loading unit")));
10064 }
10065 }
10066
10067 if (Snapshot::IncludesCode(kind_)) {
10068 ASSERT(data_image_ != nullptr);
10069 thread_->isolate_group()->SetupImagePage(data_image_,
10070 /* is_executable */ false);
10071 ASSERT(instructions_image_ != nullptr);
10072 thread_->isolate_group()->SetupImagePage(instructions_image_,
10073 /* is_executable */ true);
10074 unit.set_instructions_image(instructions_image_);
10075 }
10076
10077 UnitDeserializationRoots roots(unit);
10078 deserializer.Deserialize(&roots);
10079
10080 InitializeBSS();
10081
10082 return ApiError::null();
10083}
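ReadUnitSnapshot() above compares the program hash stored with the deferred unit against the hash recorded for the main program (slot 0 of the loading_units array) and refuses to load a unit produced by a different compile. A tiny sketch of that guard as a free function (hypothetical helper, not VM API):

#include <cstdint>
#include <string>

// Empty result means the deferred unit and the main program come from the
// same compile; otherwise the returned message describes the mismatch.
inline std::string CheckDeferredUnitHash(uint32_t main_program_hash,
                                         uint32_t unit_program_hash) {
  if (main_program_hash != unit_program_hash) {
    return "Deferred loading unit is from a different program than the "
           "main loading unit";
  }
  return std::string();
}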
10084
10085void FullSnapshotReader::InitializeBSS() {
10086#if defined(DART_PRECOMPILED_RUNTIME)
10087 // Initialize entries in the isolate portion of the BSS segment.
10089 Image image(instructions_image_);
10090 if (auto const bss = image.bss()) {
10091 BSS::Initialize(thread_, bss, /*vm=*/false);
10092 }
10093#endif // defined(DART_PRECOMPILED_RUNTIME)
10094}
10095
10096} // namespace dart
AutoreleasePool pool
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
Definition: DM.cpp:213
int count
Definition: FontMgrTest.cpp:50
SkPoint pos
static float prev(float f)
static size_t total_size(SkSBlockAllocator< N > &pool)
static bool skip(SkStream *stream, size_t amount)
static uint32_t hash(const SkShaderBase::GradientInfo &v)
SI F table(const skcms_Curve *curve, F v)
static size_t element_size(Layout layout, SkSLType type)
#define IG
#define SET_OR_CHECK_R(name, PV, T, DV, C)
#define SAVE_AND_RESET_ROOT(name, Type, init)
#define AutoTraceObject(obj)
#define PushFromTo(obj,...)
#define RESET_ROOT_LIST(V)
#define DECLARE_OBJECT_STORE_FIELD(Type, Name)
#define CID_CLUSTER(Type)
#define SET_P(name, T, DV, C)
#define ADD_CANONICAL_NEXT(cid)
#define CASE_FFI_CID(name)
#define WriteFromTo(obj,...)
#define SET_OR_CHECK_C(name, PV, T, DV, C)
#define AutoTraceObjectName(obj, str)
#define RESTORE_ROOT(name, Type, init)
#define SET_OR_CHECK_D(name, T, DV, C)
#define WriteCompressedField(obj, name)
#define ADD_NON_CANONICAL_NEXT(cid)
#define DECLARE_FIELD(name, Type, init)
#define WriteFieldValue(field, value)
#define WriteField(obj, field)
#define UNREACHABLE()
Definition: assert.h:248
#define ASSERT_EQUAL(expected, actual)
Definition: assert.h:309
#define RELEASE_ASSERT(cond)
Definition: assert.h:327
#define Z
GLenum type
#define CLASS_LIST_FFI_TYPE_MARKER(V)
Definition: class_id.h:165
AbstractInstanceDeserializationCluster(const char *name, bool is_canonical, bool is_root_unit)
void UpdateTypeTestingStubEntryPoint() const
Definition: object.h:9322
void InitializeTypeTestingStubNonAtomic(const Code &stub) const
Definition: object.cc:21787
void ReadFill(Deserializer *d_) override
ArrayDeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
void WriteAlloc(Serializer *s)
void Trace(Serializer *s, ObjectPtr object)
ArraySerializationCluster(bool is_canonical, intptr_t cid)
static intptr_t InstanceSize()
Definition: object.h:10936
static ArrayPtr New(intptr_t len, Heap::Space space=Heap::kNew)
Definition: object.h:10959
static constexpr bool UseCardMarkingForAllocation(const intptr_t array_length)
Definition: object.h:10818
ObjectPtr At(intptr_t index) const
Definition: object.h:10875
intptr_t Length() const
Definition: object.h:10829
void SetAt(intptr_t index, const Object &value) const
Definition: object.h:10880
static void Initialize(Thread *current, uword *bss, bool vm)
Definition: bss_relocs.cc:30
intptr_t Length() const
Definition: hash_map.h:27
bool HasKey(typename KeyValueTrait::Key key) const
Definition: hash_map.h:52
void Add(const T &value)
const T & At(intptr_t index) const
void Sort(int compare(const T *, const T *))
intptr_t length() const
char * buffer() const
Definition: text_buffer.h:35
void WriteBytes(const void *addr, intptr_t len)
Definition: datastream.h:424
void WriteFixed(T value)
Definition: datastream.h:473
void WriteWordWith32BitWrites(uword value)
Definition: datastream.h:389
intptr_t Align(intptr_t alignment, intptr_t offset=0)
Definition: datastream.h:341
void WriteUnsigned(T value)
Definition: datastream.h:400
DART_FORCE_INLINE intptr_t bytes_written() const
Definition: datastream.h:338
virtual intptr_t Position() const
Definition: datastream.h:339
void WriteRefId(intptr_t value)
Definition: datastream.h:409
static constexpr CallKind decode(intptr_t value)
Definition: bitfield.h:171
static constexpr uword update(ClassIdTagType value, uword original)
Definition: bitfield.h:188
static constexpr uword encode(ClassIdTagType value)
Definition: bitfield.h:165
static const Bool & False()
Definition: object.h:10799
static const Bool & True()
Definition: object.h:10797
static void SetupNativeResolver()
void BuildCanonicalSetFromLayout(Deserializer *d)
CanonicalSetDeserializationCluster(bool is_canonical, bool is_root_unit, const char *name)
void VerifyCanonicalSet(Deserializer *d, const Array &refs, const typename SetType::ArrayHandle &current_table)
CanonicalSetSerializationCluster(intptr_t cid, bool is_canonical, bool represents_canonical_set, const char *name, intptr_t target_instance_size=0)
GrowableArray< PointerType > objects_
virtual bool IsInCanonicalSet(Serializer *s, PointerType ptr)
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
void WriteAlloc(Serializer *s)
ClassSerializationCluster(intptr_t num_cids)
ClassPtr At(intptr_t cid) const
Definition: class_table.h:362
intptr_t NumTopLevelCids() const
Definition: class_table.h:450
intptr_t NumCids() const
Definition: class_table.h:447
static bool IsTopLevelCid(intptr_t cid)
Definition: class_table.h:496
static int32_t target_next_field_offset_in_words(const ClassPtr cls)
Definition: object.h:1959
static intptr_t InstanceSize()
Definition: object.h:1685
static int32_t target_type_arguments_field_offset_in_words(const ClassPtr cls)
Definition: object.h:1971
static int32_t target_instance_size_in_words(const ClassPtr cls)
Definition: object.h:1947
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:4316
void ReadFill(Deserializer *d_) override
ClosureDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
ClosureSerializationCluster(bool is_canonical)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:12383
void ReadFill(Deserializer *d, intptr_t start_index, intptr_t stop_index, bool deferred)
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAllocOneCode(Deserializer *d)
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d) override
static bool AreActive()
static void Sort(Serializer *s, GrowableArray< CodePtr > *codes)
static void Sort(Serializer *s, GrowableArray< Code * > *codes)
void WriteFill(Serializer *s, Snapshot::Kind kind, CodePtr code, bool deferred)
void WriteAlloc(Serializer *s, CodePtr code)
static const char * MakeDisambiguatedCodeName(Serializer *s, CodePtr c)
static void Insert(Serializer *s, GrowableArray< CodeOrderInfo > *order_list, IntMap< intptr_t > *order_map, CodePtr code)
void Trace(Serializer *s, ObjectPtr object)
GrowableArray< CodePtr > * objects()
void TracePool(Serializer *s, ObjectPoolPtr pool, bool only_call_targets)
void WriteAlloc(Serializer *s)
static int CompareCodeOrderInfo(CodeOrderInfo const *a, CodeOrderInfo const *b)
void WriteFill(Serializer *s)
GrowableArray< CodePtr > * deferred_objects()
intptr_t first_deferred_ref() const
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:6229
static uword EntryPointOf(const CodePtr code)
Definition: object.h:6865
static intptr_t InstanceSize()
Definition: object.h:7163
@ kPcRelativeCall
Definition: object.h:6969
@ kPcRelativeTTSCall
Definition: object.h:6970
@ kCallViaCode
Definition: object.h:6972
@ kPcRelativeTailCall
Definition: object.h:6971
static InstructionsPtr InstructionsOf(const CodePtr code)
Definition: object.h:6775
bool IsDisabled() const
Definition: object.h:7257
static uword PayloadStartOf(const CodePtr code)
Definition: object.h:6851
bool HasMonomorphicEntry() const
Definition: object.h:6839
static bool IsDiscarded(const CodePtr code)
Definition: object.h:6834
static void NotifyCodeObservers(const Code &code, bool optimized)
Definition: object.cc:18141
@ kSCallTableCodeOrTypeTarget
Definition: object.h:6982
@ kSCallTableKindAndOffset
Definition: object.h:6981
bool IsUnknownDartCode() const
Definition: object.h:7245
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:6299
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:7535
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:7448
static IsolateGroup * vm_isolate_group()
Definition: dart.h:69
static Isolate * vm_isolate()
Definition: dart.h:68
static char * FeaturesString(IsolateGroup *isolate_group, bool is_vm_snapshot, Snapshot::Kind kind)
Definition: dart.cc:1004
void Trace(Serializer *s, ObjectPtr object)
virtual void ReadFill(Deserializer *deserializer)=0
DeserializationCluster(const char *name, bool is_canonical=false, bool is_immutable=false)
virtual void PostLoad(Deserializer *deserializer, const Array &refs)
void ReadAllocFixedSize(Deserializer *deserializer, intptr_t instance_size)
virtual void ReadAlloc(Deserializer *deserializer)=0
const char * name() const
virtual void ReadRoots(Deserializer *deserializer)=0
virtual void AddBaseObjects(Deserializer *deserializer)=0
virtual void PostLoad(Deserializer *deserializer, const Array &refs)=0
void ReadFromTo(T obj, P &&... params)
ObjectPtr Ref(intptr_t index) const
Local(Deserializer *d)
TokenPosition ReadTokenPosition()
uint64_t ReadUnsigned64()
ObjectPtr Allocate(intptr_t size)
void ReadInstructions(CodePtr code, bool deferred)
intptr_t ReadUnsigned()
void set_code_start_index(intptr_t value)
void AssignRef(ObjectPtr object)
ApiErrorPtr VerifyImageAlignment()
bool is_non_root_unit() const
const InstructionsTable & instructions_table() const
intptr_t next_index() const
const uint8_t * AddressOfCurrentPosition() const
Zone * zone() const
static void InitializeHeader(ObjectPtr raw, intptr_t cid, intptr_t size, bool is_canonical=false)
ObjectPtr ReadRef()
void AddBaseObject(ObjectPtr base_object)
intptr_t num_base_objects() const
ObjectPtr Ref(intptr_t index) const
void Advance(intptr_t value)
CodePtr GetCodeByIndex(intptr_t code_index, uword *entry_point) const
uword GetEntryPointByCodeIndex(intptr_t code_index) const
intptr_t position() const
TokenPosition ReadTokenPosition()
ObjectPtr GetObjectAt(uint32_t offset) const
intptr_t code_start_index() const
Heap * heap() const
static intptr_t CodeIndexToClusterIndex(const InstructionsTable &table, intptr_t code_index)
Snapshot::Kind kind() const
DeserializationCluster * ReadCluster()
uword ReadWordWith32BitReads()
void Align(intptr_t alignment, intptr_t offset=0)
void ReadBytes(uint8_t *addr, intptr_t len)
intptr_t ReadRefId()
Deserializer(Thread *thread, Snapshot::Kind kind, const uint8_t *buffer, intptr_t size, const uint8_t *data_buffer, const uint8_t *instructions_buffer, bool is_non_root_unit, intptr_t offset=0)
intptr_t code_stop_index() const
void set_position(intptr_t p)
void Deserialize(DeserializationRoots *roots)
void set_code_stop_index(intptr_t value)
static void DisassembleStub(const char *name, const Code &code)
static void DisassembleCode(const Function &function, const Code &code, bool optimized)
void ReadFill(Deserializer *d_) override
DoubleDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
DoubleSerializationCluster(bool is_canonical)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:10135
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:6606
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:11740
static constexpr int kDataSerializationAlignment
Definition: object.h:11734
void WriteFill(Serializer *s)
void WriteAlloc(Serializer *s)
FakeSerializationCluster(const char *name, intptr_t num_objects, intptr_t size, intptr_t target_memory_size=0)
void Trace(Serializer *s, ObjectPtr object)
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:4376
void ReadFill(Deserializer *d_) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAlloc(Deserializer *d) override
void WriteAlloc(Serializer *s)
void Trace(Serializer *s, ObjectPtr object)
void SetAt(intptr_t index, ObjectPtr raw_instance, bool concurrent_use=false)
Definition: field_table.h:76
ObjectPtr At(intptr_t index, bool concurrent_use=false) const
Definition: field_table.h:62
void AllocateIndex(intptr_t index)
Definition: field_table.cc:91
intptr_t NumFieldIds() const
Definition: field_table.h:40
void set_is_nullable_unsafe(bool val) const
Definition: object.h:4762
@ kUnknownLengthOffset
Definition: object.h:4727
@ kNoFixedLength
Definition: object.h:4729
void InitializeGuardedListLengthInObjectOffset(bool unsafe=false) const
Definition: object.cc:12540
static intptr_t InstanceSize()
Definition: object.h:4558
void set_guarded_list_length_in_object_offset_unsafe(intptr_t offset) const
Definition: object.cc:12114
void set_guarded_cid_unsafe(intptr_t cid) const
Definition: object.h:4665
static intptr_t TargetOffsetOf(FieldPtr field)
Definition: object.h:13255
void set_guarded_list_length_unsafe(intptr_t list_length) const
Definition: object.cc:12105
void set_static_type_exactness_state_unsafe(StaticTypeExactnessState state) const
Definition: object.h:4645
static intptr_t value_offset()
Definition: object.h:11197
static intptr_t InstanceSize()
Definition: object.h:11193
static intptr_t InstanceSize()
Definition: object.h:11262
static intptr_t value_offset()
Definition: object.h:11266
ApiErrorPtr ReadUnitSnapshot(const LoadingUnit &unit)
ApiErrorPtr ReadProgramSnapshot()
FullSnapshotReader(const Snapshot *snapshot, const uint8_t *instructions_buffer, Thread *thread)
ApiErrorPtr ReadVMSnapshot()
FullSnapshotWriter(Snapshot::Kind kind, NonStreamingWriteStream *vm_snapshot_data, NonStreamingWriteStream *isolate_snapshot_data, ImageWriter *vm_image_writer, ImageWriter *iso_image_writer)
void WriteFullSnapshot(GrowableArray< LoadingUnitSerializationData * > *data=nullptr)
void WriteUnitSnapshot(GrowableArray< LoadingUnitSerializationData * > *units, LoadingUnitSerializationData *unit, uint32_t program_hash)
void ReadFill(Deserializer *d_) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static const char * MakeDisambiguatedFunctionName(Serializer *s, FunctionPtr f)
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
FunctionTypeDeserializationCluster(bool is_canonical, bool is_root_unit)
void PostLoad(Deserializer *d, const Array &refs) override
void Trace(Serializer *s, ObjectPtr object)
FunctionTypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
static intptr_t InstanceSize()
Definition: object.h:9776
CodePtr CurrentCode() const
Definition: object.h:3177
static intptr_t InstanceSize()
Definition: object.h:3985
bool HasCode() const
Definition: object.cc:7936
void ClearCodeSafe() const
Definition: object.cc:7958
void PrintName(const NameFormattingParams &params, BaseTextBuffer *printer) const
Definition: object.cc:11109
void SetInstructionsSafe(const Code &value) const
Definition: object.cc:7920
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
void Add(const Object &value, Heap::Space space=Heap::kNew) const
Definition: object.cc:24991
static GrowableObjectArrayPtr New(Heap::Space space=Heap::kNew)
Definition: object.h:11144
static intptr_t InstanceSize()
Definition: object.h:11140
intptr_t Length() const
Definition: object.h:11072
ObjectPtr At(intptr_t index) const
Definition: object.h:11085
static constexpr double kMaxLoadFactor
Definition: hash_table.h:617
HeapLocker(Thread *thread, PageSpace *page_space)
@ kOld
Definition: heap.h:39
intptr_t GetLoadingUnit(ObjectPtr raw_obj) const
Definition: heap.h:207
PageSpace * old_space()
Definition: heap.h:63
void ResetObjectIdTable()
Definition: heap.cc:899
bool Verify(const char *msg, MarkExpectation mark_expectation=kForbidMarked)
Definition: heap.cc:771
intptr_t GetObjectId(ObjectPtr raw_obj) const
Definition: heap.h:197
void SetObjectId(ObjectPtr raw_obj, intptr_t object_id)
Definition: heap.h:193
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
@ kCachedICDataArrayCount
Definition: object.h:2776
static intptr_t InstanceSize()
Definition: object.h:2576
ObjectPtr GetObjectAt(uint32_t offset) const
InstructionsPtr GetInstructionsAt(uint32_t offset) const
ApiErrorPtr VerifyAlignment() const
intptr_t GetTextObjectCount() const
void Write(NonStreamingWriteStream *clustered_stream, bool vm)
void SetProfileWriter(V8SnapshotProfileWriter *profile_writer)
static const char * TagObjectTypeAsReadOnly(Zone *zone, const char *type)
intptr_t text_size() const
void GetTrampolineInfo(intptr_t *count, intptr_t *size) const
int32_t GetTextOffsetFor(InstructionsPtr instructions, CodePtr code)
uint32_t GetDataOffsetFor(ObjectPtr raw_object)
intptr_t data_size() const
void PrepareForSerialization(GrowableArray< ImageWriterCommand > *commands)
uint32_t AddBytesToData(uint8_t *bytes, intptr_t length)
void ReadAlloc(Deserializer *d) override
InstanceDeserializationCluster(intptr_t cid, bool is_canonical, bool is_immutable, bool is_root_unit)
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
InstanceSerializationCluster(bool is_canonical, intptr_t cid)
static intptr_t NextFieldOffset()
Definition: object.h:8355
static InstructionsTablePtr New(intptr_t length, uword start_pc, uword end_pc, uword rodata)
Definition: object.cc:15518
void SetCodeAt(intptr_t index, CodePtr code) const
Definition: object.cc:15539
const UntaggedInstructionsTable::Data * rodata() const
Definition: object.h:5991
uword EntryPointAt(intptr_t index) const
Definition: object.cc:15641
static intptr_t InstanceSize()
Definition: object.h:11231
static intptr_t value_offset()
Definition: object.h:11235
V Lookup(const Key &key) const
Definition: hash_map.h:548
void Insert(const Key &key, const Value &value)
Definition: hash_map.h:543
Heap * heap() const
Definition: isolate.h:296
ObjectStore * object_store() const
Definition: isolate.h:510
static IsolateGroup * Current()
Definition: isolate.h:539
ClassTable * class_table() const
Definition: isolate.h:496
void SetupImagePage(const uint8_t *snapshot_buffer, bool is_executable)
Definition: isolate.cc:1953
void set_coverage(bool value)
Definition: isolate.h:463
IsolateGroup * group() const
Definition: isolate.h:1037
void ReadAlloc(Deserializer *d) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:5487
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:8087
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:8463
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:5119
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
void set_objects(ZoneGrowableArray< Object * > *objects)
Definition: app_snapshot.h:58
ZoneGrowableArray< Object * > * objects()
Definition: app_snapshot.h:54
LoadingUnitSerializationData * parent() const
Definition: app_snapshot.h:49
GrowableArray< Code * > * deferred_objects()
Definition: app_snapshot.h:53
intptr_t id() const
Definition: object.h:7985
LoadingUnitPtr parent() const
Definition: object.h:7980
static intptr_t InstanceSize()
Definition: object.h:7973
static constexpr intptr_t kRootId
Definition: object.h:7969
void set_base_objects(const Array &value) const
Definition: object.cc:19700
void set_instructions_image(const uint8_t *value) const
Definition: object.h:8032
uint8_t * Steal(intptr_t *length)
Definition: datastream.h:633
void ReadAlloc(Deserializer *d) override
MapDeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void ReadFill(Deserializer *d_) override
MapSerializationCluster(bool is_canonical, intptr_t cid)
void Trace(Serializer *s, ObjectPtr object)
void WriteAlloc(Serializer *s)
void WriteFill(Serializer *s)
static intptr_t InstanceSize()
Definition: object.h:12111
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:7634
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
MintDeserializationCluster(bool is_canonical, bool is_root_unit)
void WriteFill(Serializer *s)
MintSerializationCluster(bool is_canonical)
void WriteAlloc(Serializer *s)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:10090
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:5453
static uword LinkNativeCallEntry()
DART_FORCE_INLINE void SetPosition(intptr_t value)
Definition: datastream.h:618
uint8_t * buffer() const
Definition: datastream.h:615
static void static void PrintErr(const char *format,...) PRINTF_ATTRIBUTE(1
static void Print(const char *format,...) PRINTF_ATTRIBUTE(1
static DART_NORETURN void Abort()
static char * SCreate(Zone *zone, const char *format,...) PRINTF_ATTRIBUTE(2
void ReadAlloc(Deserializer *d) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static uint8_t EncodeBits(EntryType type, Patchability patchable, SnapshotBehavior snapshot_behavior)
Definition: object.h:5611
static intptr_t InstanceSize()
Definition: object.h:5649
ObjectPtr Decompress(uword heap_base) const
UntaggedObject * untag() const
uword heap_base() const
intptr_t GetClassIdMayBeSmi() const
static ObjectPtr null()
Definition: object.h:433
ObjectPtr ptr() const
Definition: object.h:332
static Object * ReadOnlyHandle()
Definition: object.h:431
static void set_vm_isolate_snapshot_object_table(const Array &table)
Definition: object.cc:1601
static void FinalizeReadOnlyObject(ObjectPtr object)
Definition: object.cc:1556
virtual const char * ToCString() const
Definition: object.h:366
static constexpr intptr_t RoundedAllocationSize(intptr_t size)
Definition: object.h:758
bool IsNull() const
Definition: object.h:363
static Object & Handle()
Definition: object.h:407
static ObjectPtr RawCast(ObjectPtr obj)
Definition: object.h:325
static Object & ZoneHandle()
Definition: object.h:419
const char * FieldNameForOffset(intptr_t cid, intptr_t offset)
static intptr_t InstanceSize()
Definition: object.h:10564
void AcquireLock(FreeList *freelist)
Definition: pages.cc:432
void ReleaseLock(FreeList *freelist)
Definition: pages.cc:436
DART_FORCE_INLINE uword AllocateSnapshotLocked(FreeList *freelist, intptr_t size)
Definition: pages.h:161
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:2291
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:6089
ProgramDeserializationRoots(ObjectStore *object_store)
void PostLoad(Deserializer *d, const Array &refs) override
void ReadRoots(Deserializer *d) override
void AddBaseObjects(Deserializer *d) override
void AddBaseObjects(Serializer *s)
ProgramSerializationRoots(ZoneGrowableArray< Object * > *base_objects, ObjectStore *object_store, Snapshot::Kind snapshot_kind)
virtual const CompressedStackMaps & canonicalized_stack_map_entries() const
void WriteRoots(Serializer *s)
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
RODataDeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void PostLoad(Deserializer *d, const Array &refs) override
RODataSerializationCluster(Zone *zone, const char *type, intptr_t cid, bool is_canonical)
void Trace(Serializer *s, ObjectPtr object)
uword ReadWordWith32BitReads()
Definition: datastream.h:157
void Align(intptr_t alignment, intptr_t offset=0)
Definition: datastream.h:133
intptr_t ReadRefId()
Definition: datastream.h:103
intptr_t Position() const
Definition: datastream.h:127
intptr_t PendingBytes() const
Definition: datastream.h:147
const uint8_t * AddressOfCurrentPosition() const
Definition: datastream.h:140
void Advance(intptr_t value)
Definition: datastream.h:142
void SetPosition(intptr_t value)
Definition: datastream.h:128
void ReadBytes(void *addr, intptr_t len)
Definition: datastream.h:90
void ReadAlloc(Deserializer *d) override
RecordDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadFill(Deserializer *d_) override
RecordSerializationCluster(bool is_canonical)
void Trace(Serializer *s, ObjectPtr object)
intptr_t AsInt() const
Definition: object.h:11322
intptr_t num_fields() const
Definition: object.h:11314
void ReadFill(Deserializer *d_) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAlloc(Deserializer *d) override
RecordTypeDeserializationCluster(bool is_canonical, bool is_root_unit)
void Trace(Serializer *s, ObjectPtr object)
RecordTypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
static intptr_t InstanceSize()
Definition: object.h:11402
static intptr_t InstanceSize()
Definition: object.h:11460
static intptr_t NumFields(RecordPtr ptr)
Definition: object.h:11426
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:12901
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t line_starts_offset()
Definition: object.h:4964
static intptr_t InstanceSize()
Definition: object.h:5003
const char * name() const
void WriteAndMeasureAlloc(Serializer *serializer)
void WriteAndMeasureFill(Serializer *serializer)
static constexpr intptr_t kSizeVaries
intptr_t target_memory_size() const
const intptr_t target_instance_size_
intptr_t num_objects() const
const char *const name_
virtual void Trace(Serializer *serializer, ObjectPtr object)=0
SerializationCluster(const char *name, intptr_t cid, intptr_t target_instance_size=kSizeVaries, bool is_canonical=false)
virtual void WriteAlloc(Serializer *serializer)=0
virtual void WriteFill(Serializer *serializer)=0
virtual void AddBaseObjects(Serializer *serializer)=0
virtual const CompressedStackMaps & canonicalized_stack_map_entries() const
virtual void WriteRoots(Serializer *serializer)=0
virtual void PushRoots(Serializer *serializer)=0
WritingObjectScope(Serializer *serializer, const char *type, ObjectPtr object, const char *name)
WritingObjectScope(Serializer *serializer, const char *type, ObjectPtr object, StringPtr name)
WritingObjectScope(Serializer *serializer, ObjectPtr object)
intptr_t current_loading_unit_id() const
void WriteCid(intptr_t cid)
void WritePropertyRef(ObjectPtr object, const char *property)
void WriteWordWith32BitWrites(uword value)
NonStreamingWriteStream * stream()
void DumpCombinedCodeStatistics()
DART_NOINLINE void WriteRange(ObjectPtr obj, T from, T to)
void TraceDataOffset(uint32_t offset)
void AddBaseObject(ObjectPtr base_object, const char *type=nullptr, const char *name=nullptr)
void WriteVersionAndFeatures(bool is_vm_snapshot)
bool InCurrentLoadingUnitOrRoot(ObjectPtr obj)
Zone * zone() const
void Write(T value)
void WriteRootRef(ObjectPtr object, const char *name=nullptr)
Serializer(Thread *thread, Snapshot::Kind kind, NonStreamingWriteStream *stream, ImageWriter *image_writer_, bool vm_, V8SnapshotProfileWriter *profile_writer=nullptr)
GrowableArray< LoadingUnitSerializationData * > * loading_units() const
bool HasArtificialRef(ObjectPtr object) const
void set_loading_units(GrowableArray< LoadingUnitSerializationData * > *units)
void set_current_loading_unit_id(intptr_t id)
bool HasProfileNode(ObjectPtr object) const
Heap * heap() const
void WriteFromTo(T obj, P &&... args)
void WriteElementRef(ObjectPtr object, intptr_t index)
void FillHeader(Snapshot::Kind kind)
uint32_t GetDataOffset(ObjectPtr object) const
void AttributeReference(ObjectPtr object, const V8SnapshotProfileWriter::Reference &reference)
bool HasRef(ObjectPtr object) const
bool IsWritten(ObjectPtr object) const
intptr_t AssignArtificialRef(ObjectPtr object=nullptr)
DART_NOINLINE void PushRange(ObjectPtr obj, T from, T to)
void PushWeak(ObjectPtr object)
Snapshot::Kind kind() const
intptr_t RefId(ObjectPtr object) const
intptr_t GetCodeIndex(CodePtr code)
void PushFromTo(T obj, P &&... args)
SerializationCluster * NewClusterForClass(intptr_t cid, bool is_canonical)
bool IsReachable(ObjectPtr object) const
void Trace(ObjectPtr object, intptr_t cid_override)
ZoneGrowableArray< Object * > * Serialize(SerializationRoots *roots)
intptr_t bytes_written()
void RecordDeferredCode(CodePtr ptr)
void WriteBytes(const void *addr, intptr_t len)
void WriteOffsetRef(ObjectPtr object, intptr_t offset)
intptr_t UnsafeRefId(ObjectPtr object) const
void AttributePropertyRef(ObjectPtr object, const char *property)
void WriteUnsigned(intptr_t value)
intptr_t AssignRef(ObjectPtr object)
void WriteTokenPosition(TokenPosition pos)
void Align(intptr_t alignment, intptr_t offset=0)
void PrepareInstructions(const CompressedStackMaps &canonical_smap)
V8SnapshotProfileWriter::ObjectId GetProfileId(ObjectPtr object) const
void WriteDispatchTable(const Array &entries)
V8SnapshotProfileWriter * profile_writer() const
intptr_t GetDataSize() const
void UnexpectedObject(ObjectPtr object, const char *message)
bool CreateArtificialNodeIfNeeded(ObjectPtr obj)
intptr_t bytes_heap_allocated()
void Push(ObjectPtr object, intptr_t cid_override=kIllegalCid)
void WriteRefId(intptr_t value)
void WriteInstructions(InstructionsPtr instr, uint32_t unchecked_offset, CodePtr code, bool deferred)
intptr_t next_ref_index() const
void AttributeElementRef(ObjectPtr object, intptr_t index)
void WriteUnsigned64(uint64_t value)
void ReadAlloc(Deserializer *d) override
SetDeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void ReadFill(Deserializer *d_) override
SetSerializationCluster(bool is_canonical, intptr_t cid)
void Trace(Serializer *s, ObjectPtr object)
void WriteAlloc(Serializer *s)
void WriteFill(Serializer *s)
static intptr_t InstanceSize()
Definition: object.h:12214
void ReadFill(Deserializer *d_) override
Simd128DeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
Simd128SerializationCluster(intptr_t cid, bool is_canonical)
static SmiPtr New(intptr_t value)
Definition: object.h:10006
intptr_t Value() const
Definition: object.h:9990
static bool IsValid(int64_t value)
Definition: object.h:10026
void SetCoverageFromSnapshotFeatures(IsolateGroup *isolate_group)
char * VerifyVersionAndFeatures(IsolateGroup *isolate_group, intptr_t *offset)
static char * InitializeGlobalVMFlagsFromSnapshot(const Snapshot *snapshot)
static bool IsFull(Kind kind)
Definition: snapshot.h:63
static const char * KindToCString(Kind kind)
Definition: snapshot.cc:12
static bool IncludesStringsInROData(Kind kind)
Definition: snapshot.h:71
static bool IncludesCode(Kind kind)
Definition: snapshot.h:67
static constexpr intptr_t kHeaderSize
Definition: snapshot.h:43
ThreadState * thread() const
Definition: allocation.h:33
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:12591
static StaticTypeExactnessState NotTracking()
static intptr_t DecodeLengthAndCid(intptr_t encoded, intptr_t *out_cid)
StringDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
static intptr_t InstanceSize(intptr_t length, intptr_t cid)
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
void Add(uint16_t code_unit)
Definition: object.h:10501
intptr_t Finalize()
Definition: object.h:10517
StringSerializationCluster(bool is_canonical, bool represents_canonical_set)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t EncodeLengthAndCid(intptr_t length, intptr_t cid)
static StringPtr New(const char *cstr, Heap::Space space=Heap::kNew)
Definition: object.cc:23698
static uint32_t SetCachedHash(StringPtr obj, uint32_t hash)
Definition: object.h:10454
static const Code & EntryAt(intptr_t index)
Definition: stub_code.h:101
static const char * NameAt(intptr_t index)
Definition: stub_code.h:99
static void InitializationDone()
Definition: stub_code.h:44
static intptr_t NumEntries()
Definition: stub_code.h:107
static void EntryAtPut(intptr_t index, Code *entry)
Definition: stub_code.h:102
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:7807
static void InitFromSnapshot(IsolateGroup *isolate_group)
Definition: symbols.cc:127
static StringPtr New(Thread *thread, const char *cstr)
Definition: symbols.h:723
IsolateGroup * isolate_group() const
void DecrementNoSafepointScopeDepth()
Definition: thread.h:733
static Thread * Current()
Definition: thread.h:362
IsolateGroup * isolate_group() const
Definition: thread.h:541
static TokenPosition Deserialize(int32_t value)
static intptr_t InstanceSize()
Definition: object.h:10704
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
TypeArgumentsDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
TypeArgumentsSerializationCluster(bool is_canonical, bool represents_canonical_set)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:8988
TypeArgumentsPtr Canonicalize(Thread *thread) const
Definition: object.cc:7703
void ReadAlloc(Deserializer *d) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
TypeDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadFill(Deserializer *d_) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAlloc(Deserializer *d) override
TypeParameterDeserializationCluster(bool is_canonical, bool is_root_unit)
TypeParameterSerializationCluster(bool is_canonical, bool cluster_represents_canonical_set)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:9877
virtual AbstractTypePtr Canonicalize(Thread *thread) const
Definition: object.cc:22859
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:8540
virtual bool IsInCanonicalSet(Serializer *s, TypePtr type)
TypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
void WriteAlloc(Serializer *s)
void WriteFill(Serializer *s)
void Trace(Serializer *s, ObjectPtr object)
static CodePtr DefaultCodeForType(const AbstractType &type, bool lazy_specialize=true)
bool IsDeclarationTypeOf(const Class &cls) const
static intptr_t InstanceSize()
Definition: object.h:9422
intptr_t ElementSizeInBytes() const
Definition: object.h:11531
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void PostLoad(Deserializer *d, const Array &refs) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:11793
static intptr_t InstanceSize()
Definition: object.h:11673
static DART_FORCE_INLINE constexpr intptr_t Length()
Definition: class_table.h:67
DART_FORCE_INLINE bool Get(intptr_t position) const
Definition: class_table.h:51
DART_FORCE_INLINE void Reset()
Definition: class_table.h:65
DART_FORCE_INLINE void Set(intptr_t position)
Definition: class_table.h:55
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:8156
void ReadRoots(Deserializer *d) override
void AddBaseObjects(Deserializer *d) override
void PostLoad(Deserializer *d, const Array &refs) override
UnitDeserializationRoots(const LoadingUnit &unit)
void PushRoots(Serializer *s)
UnitSerializationRoots(LoadingUnitSerializationData *unit)
void WriteRoots(Serializer *s)
void AddBaseObjects(Serializer *s)
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:2409
static constexpr uword update(intptr_t size, uword tag)
Definition: raw_object.h:212
static ObjectPtr FromAddr(uword addr)
Definition: raw_object.h:516
bool InVMIsolateHeap() const
Definition: raw_object.cc:20
static bool IsInt(intptr_t N, T value)
Definition: utils.h:313
static int SNPrint(char *str, size_t size, const char *format,...) PRINTF_ATTRIBUTE(3, 4)
static char * StrDup(const char *s)
static intptr_t StrNLen(const char *s, intptr_t n)
static bool IsUint(intptr_t N, T value)
Definition: utils.h:328
static constexpr bool IsAligned(T x, uintptr_t alignment, uintptr_t offset=0)
Definition: utils.h:92
static char * StrNDup(const char *s, intptr_t n)
static const ObjectId kArtificialRootId
void AttributeReferenceTo(const ObjectId &from_object_id, const Reference &reference, const ObjectId &to_object_id)
void SetObjectTypeAndName(const ObjectId &object_id, const char *type, const char *name)
void AddRoot(const ObjectId &object_id, const char *name=nullptr)
void AttributeBytesTo(const ObjectId &object_id, size_t num_bytes)
bool HasId(const ObjectId &object_id)
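The profile-writer entries above (AddRoot, SetObjectTypeAndName, AttributeBytesTo, AttributeReferenceTo, together with the Reference::Property/Element factories listed further down) form the API the serializer uses to emit a V8-format heap-snapshot profile. The sketch below shows how one node and one outgoing edge might be recorded; the enclosing V8SnapshotProfileWriter class name and the DescribeArrayForProfile helper are assumptions for illustration, and only the calls whose declarations appear on this page are taken as given.

// Hedged sketch: record one node and one indexed edge in the snapshot profile.
// DescribeArrayForProfile and the way the ObjectIds are obtained are hypothetical;
// the profile-writer calls themselves match the declarations listed above.
void DescribeArrayForProfile(V8SnapshotProfileWriter* profile,
                             const V8SnapshotProfileWriter::ObjectId& array_id,
                             const V8SnapshotProfileWriter::ObjectId& element_id,
                             intptr_t element_offset,
                             size_t written_bytes) {
  // Label the node so heap-snapshot viewers can display a type and name.
  profile->SetObjectTypeAndName(array_id, "Array", /*name=*/nullptr);
  // Charge the bytes written for this object to its node.
  profile->AttributeBytesTo(array_id, written_bytes);
  // Add an element edge from the array's node to the referenced object's node.
  profile->AttributeReferenceTo(
      array_id, V8SnapshotProfileWriter::Reference::Element(element_offset),
      element_id);
}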
void ReadRoots(Deserializer *d) override
void PostLoad(Deserializer *d, const Array &refs) override
void AddBaseObjects(Deserializer *d) override
void WriteRoots(Serializer *s)
void PushRoots(Serializer *s)
void AddBaseObjects(Serializer *s)
VMSerializationRoots(const WeakArray &symbols, bool should_write_symbols)
Definition: il.h:75
static const char * SnapshotString()
static void DontNeed(void *address, intptr_t size)
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition: object.h:6742
intptr_t Length() const
Definition: object.h:6697
ObjectPtr At(intptr_t index) const
Definition: object.h:6722
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t key_offset()
Definition: object.h:12922
static intptr_t value_offset()
Definition: object.h:12926
static intptr_t InstanceSize()
Definition: object.h:12932
static constexpr intptr_t kNoValue
Definition: weak_table.h:18
char * PrintToString(const char *format,...) PRINTF_ATTRIBUTE(2, 3)
Definition: zone.cc:313
#define THR_Print(format,...)
Definition: log.h:20
#define ASSERT(E)
#define FATAL(error)
#define VM_GLOBAL_FLAG_LIST(P, R, C, D)
Definition: flag_list.h:58
const char * charp
Definition: flags.h:12
const uint8_t * isolate_snapshot_data
Definition: gen_snapshot.cc:69
const uint8_t * vm_snapshot_data
Definition: main_impl.cc:52
static SnapshotKind snapshot_kind
Definition: gen_snapshot.cc:83
static constexpr intptr_t kWordSize
Definition: runtime_api.h:274
static constexpr intptr_t kCompressedWordSize
Definition: runtime_api.h:286
constexpr word kSmiMax
Definition: runtime_api.h:305
constexpr word kWordMax
Definition: runtime_api.h:295
intptr_t RoundedAllocationSize(intptr_t size)
Definition: runtime_api.h:333
Definition: dart_vm.cc:33
bool IsTypedDataViewClassId(intptr_t index)
Definition: class_id.h:439
bool IsTypedDataClassId(intptr_t index)
Definition: class_id.h:433
static const char *const kObjectStoreFieldNames[]
static constexpr bool IsReachableReference(intptr_t ref)
static void Finish(Thread *thread)
Definition: bootstrap.cc:44
const char *const name
static constexpr intptr_t kCompressedWordSizeLog2
Definition: globals.h:43
static constexpr intptr_t kUnreachableReference
DART_EXPORT bool IsNull(Dart_Handle object)
int32_t classid_t
Definition: globals.h:524
@ kIllegalCid
Definition: class_id.h:214
@ kNumPredefinedCids
Definition: class_id.h:257
@ kNativePointer
Definition: class_id.h:218
@ kVoidCid
Definition: class_id.h:254
@ kDynamicCid
Definition: class_id.h:253
static constexpr intptr_t kUnallocatedReference
static constexpr bool IsArtificialReference(intptr_t ref)
static constexpr bool IsAllocatedReference(intptr_t ref)
constexpr intptr_t KB
Definition: globals.h:528
uintptr_t uword
Definition: globals.h:501
intptr_t word
Definition: globals.h:500
uintptr_t compressed_uword
Definition: globals.h:44
static UnboxedFieldBitmap CalculateTargetUnboxedFieldsBitmap(Serializer *s, intptr_t class_id)
bool ShouldHaveImmutabilityBitSetCid(intptr_t predefined_cid)
Definition: class_id.h:507
static void USE(T &&)
Definition: globals.h:618
constexpr intptr_t kFirstInternalOnlyCid
Definition: class_id.h:288
DEFINE_FLAG(bool, print_cluster_information, false, "Print information about clusters written to snapshot")
bool IsInternalVMdefinedClassId(intptr_t index)
Definition: class_id.h:549
const intptr_t cid
static constexpr intptr_t kCompressedWordSize
Definition: globals.h:42
intptr_t UntaggedField::VisitFieldPointers(FieldPtr raw_obj, ObjectPointerVisitor *visitor)
Definition: raw_object.cc:558
static constexpr intptr_t kFirstReference
static DART_FORCE_INLINE CodePtr GetCodeAndEntryPointByIndex(const Deserializer *d, intptr_t code_index, uword *entry_point)
constexpr intptr_t kWordSize
Definition: globals.h:509
static constexpr intptr_t kObjectAlignment
ArrayOfTuplesView< Code::SCallTableEntry, std::tuple< Smi, Object, Function > > StaticCallsTable
Definition: object.h:13546
static int CompareClusters(SerializationCluster *const *a, SerializationCluster *const *b)
static constexpr intptr_t kObjectAlignmentLog2
bool IsExternalTypedDataClassId(intptr_t index)
Definition: class_id.h:447
COMPILE_ASSERT(kUnreachableReference==WeakTable::kNoValue)
constexpr intptr_t kLastInternalOnlyCid
Definition: class_id.h:289
bool IsStringClassId(intptr_t index)
Definition: class_id.h:350
#define OBJECT_STORE_FIELD_LIST(R_, RW, ARW_RELAXED, ARW_AR, LAZY_CORE, LAZY_ASYNC, LAZY_ISOLATE, LAZY_INTERNAL, LAZY_FFI)
Definition: object_store.h:41
#define Pp
Definition: globals.h:425
#define Px
Definition: globals.h:410
#define Pd64
Definition: globals.h:416
#define Pd
Definition: globals.h:408
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:581
#define T
Definition: precompiler.cc:65
#define REUSABLE_FUNCTION_HANDLESCOPE(thread)
#define REUSABLE_OBJECT_HANDLESCOPE(thread)
#define REUSABLE_CODE_HANDLESCOPE(thread)
static NameFormattingParams DisambiguatedUnqualified(Object::NameVisibility visibility)
Definition: object.h:2975
static NameFormattingParams DisambiguatedWithoutClassName(Object::NameVisibility visibility)
Definition: object.h:2968
static constexpr intptr_t kObjectAlignmentLog2
static constexpr intptr_t kObjectAlignment
static Reference Element(intptr_t offset)
static Reference Property(const char *name)
#define TIMELINE_DURATION(thread, stream, name)
Definition: timeline.h:39
#define NOT_IN_PRECOMPILED(code)
Definition: globals.h:100
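Taken together, the Trace/WriteAlloc/WriteFill hooks on the serialization clusters and the ReadAlloc/ReadFill/PostLoad overrides on the deserialization clusters listed above imply a two-phase snapshot protocol: first every cluster assigns reference ids (and, on the read side, allocates uninitialized objects), then every cluster fills in object contents, so forward references and cycles always resolve. The drivers below are only an illustrative sketch, not the actual loops in app_snapshot.cc; the per-cluster hook names come from this listing, while the driver function names and the GrowableArray-based plumbing are assumptions.

// Illustrative two-phase drivers (hypothetical; not the engine's real code).
void WriteAllClusters(Serializer* s,
                      const GrowableArray<SerializationCluster*>& clusters) {
  // Phase 1: each cluster assigns refs / reserves space for its objects.
  for (intptr_t i = 0; i < clusters.length(); i++) {
    clusters[i]->WriteAlloc(s);
  }
  // Phase 2: each cluster writes its objects' fields; references written here
  // are just the ref ids assigned in phase 1, so cycles are unproblematic.
  for (intptr_t i = 0; i < clusters.length(); i++) {
    clusters[i]->WriteFill(s);
  }
}

void ReadAllClusters(Deserializer* d,
                     const GrowableArray<DeserializationCluster*>& clusters,
                     const Array& refs) {
  // Phase 1: allocate uninitialized instances and record them in the ref table.
  for (intptr_t i = 0; i < clusters.length(); i++) {
    clusters[i]->ReadAlloc(d);
  }
  // Phase 2: fill fields, turning ref ids back into object pointers.
  for (intptr_t i = 0; i < clusters.length(); i++) {
    clusters[i]->ReadFill(d);
  }
  // Phase 3: per-cluster fix-ups that need fully-formed objects,
  // e.g. rebuilding canonical sets.
  for (intptr_t i = 0; i < clusters.length(); i++) {
    clusters[i]->PostLoad(d, refs);
  }
}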