Flutter Engine
app_snapshot.cc
1// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include <memory>
6#include <utility>
7
8#include "vm/app_snapshot.h"
9
10#include "platform/assert.h"
11#include "vm/bootstrap.h"
12#include "vm/bss_relocs.h"
13#include "vm/canonical_tables.h"
14#include "vm/class_id.h"
15#include "vm/code_observers.h"
18#include "vm/dart.h"
19#include "vm/dart_entry.h"
20#include "vm/dispatch_table.h"
21#include "vm/flag_list.h"
22#include "vm/growable_array.h"
23#include "vm/heap/heap.h"
24#include "vm/image_snapshot.h"
25#include "vm/native_entry.h"
26#include "vm/object.h"
27#include "vm/object_store.h"
28#include "vm/program_visitor.h"
30#include "vm/stub_code.h"
31#include "vm/symbols.h"
32#include "vm/timeline.h"
34#include "vm/version.h"
35#include "vm/zone_text_buffer.h"
36
37#if !defined(DART_PRECOMPILED_RUNTIME)
41#endif // !defined(DART_PRECOMPILED_RUNTIME)
42
43namespace dart {
44
45#if !defined(DART_PRECOMPILED_RUNTIME)
46DEFINE_FLAG(bool,
47            print_cluster_information,
48 false,
49 "Print information about clusters written to snapshot");
50#endif
51
52#if defined(DART_PRECOMPILER)
53DEFINE_FLAG(charp,
54            write_v8_snapshot_profile_to,
55 nullptr,
56 "Write a snapshot profile in V8 format to a file.");
57DEFINE_FLAG(bool,
58 print_array_optimization_candidates,
59 false,
60            "Print information about how many arrays are candidates for Smi and "
61 "ROData optimizations.");
62#endif // defined(DART_PRECOMPILER)
63
64// Forward declarations.
65class Serializer;
66class Deserializer;
67
68namespace {
69
70// Serialized clusters are identified by their CID. So to insert custom clusters
71// we need to assign them a CID that is otherwise never serialized.
72static constexpr intptr_t kDeltaEncodedTypedDataCid = kNativePointer;
73
74// StorageTraits for HashTable which allows creating hash tables backed by
75// zone memory. Used to compute cluster order for canonical clusters.
76struct GrowableArrayStorageTraits {
77 class Array : public ZoneAllocated {
78 public:
79 explicit Array(Zone* zone, intptr_t length)
80 : length_(length), array_(zone->Alloc<ObjectPtr>(length)) {}
81
82 intptr_t Length() const { return length_; }
83 void SetAt(intptr_t index, const Object& value) const {
84 array_[index] = value.ptr();
85 }
86 ObjectPtr At(intptr_t index) const { return array_[index]; }
87
88 private:
89 intptr_t length_ = 0;
90 ObjectPtr* array_ = nullptr;
92 };
93
94 using ArrayPtr = Array*;
95 class ArrayHandle : public ZoneAllocated {
96 public:
97 explicit ArrayHandle(ArrayPtr ptr) : ptr_(ptr) {}
98 ArrayHandle() {}
99
100 void SetFrom(const ArrayHandle& other) { ptr_ = other.ptr_; }
101 void Clear() { ptr_ = nullptr; }
102 bool IsNull() const { return ptr_ == nullptr; }
103 ArrayPtr ptr() { return ptr_; }
104
105 intptr_t Length() const { return ptr_->Length(); }
106 void SetAt(intptr_t index, const Object& value) const {
107 ptr_->SetAt(index, value);
108 }
109 ObjectPtr At(intptr_t index) const { return ptr_->At(index); }
110
111 private:
112 ArrayPtr ptr_ = nullptr;
113 DISALLOW_COPY_AND_ASSIGN(ArrayHandle);
114 };
115
116 static ArrayHandle& PtrToHandle(ArrayPtr ptr) {
117 return *new ArrayHandle(ptr);
118 }
119
120 static void SetHandle(ArrayHandle& dst, const ArrayHandle& src) { // NOLINT
121 dst.SetFrom(src);
122 }
123
124 static void ClearHandle(ArrayHandle& dst) { // NOLINT
125 dst.Clear();
126 }
127
128 static ArrayPtr New(Zone* zone, intptr_t length, Heap::Space space) {
129 return new (zone) Array(zone, length);
130 }
131
132 static bool IsImmutable(const ArrayHandle& handle) { return false; }
133
134 static ObjectPtr At(ArrayHandle* array, intptr_t index) {
135 return array->At(index);
136 }
137
138 static void SetAt(ArrayHandle* array, intptr_t index, const Object& value) {
139 array->SetAt(index, value);
140 }
141};
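// Backing the table with zone-allocated storage lets the serializer build
// temporary hash tables (used later to compute the canonical cluster order)
// without allocating ordinary Array objects in the Dart heap.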
142} // namespace
143
144#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
145
146static void RelocateCodeObjects(
147 bool is_vm,
148 GrowableArray<CodePtr>* code_objects,
149 GrowableArray<ImageWriterCommand>* image_writer_commands) {
150 auto thread = Thread::Current();
151 auto isolate_group =
152 is_vm ? Dart::vm_isolate()->group() : thread->isolate_group();
153
154 WritableCodePages writable_code_pages(thread, isolate_group);
155 CodeRelocator::Relocate(thread, code_objects, image_writer_commands, is_vm);
156}
157
158#endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
159
160class SerializationCluster : public ZoneAllocated {
161 public:
162 static constexpr intptr_t kSizeVaries = -1;
163 explicit SerializationCluster(const char* name,
164 intptr_t cid,
165 intptr_t target_instance_size = kSizeVaries,
166 bool is_canonical = false)
167 : name_(name),
168 cid_(cid),
169 target_instance_size_(target_instance_size),
171 is_immutable_(Object::ShouldHaveImmutabilityBitSet(cid)) {
172 ASSERT(target_instance_size == kSizeVaries || target_instance_size >= 0);
173 }
175
176 // Add [object] to the cluster and push its outgoing references.
177 virtual void Trace(Serializer* serializer, ObjectPtr object) = 0;
178
179 // Write the cluster type and information needed to allocate the cluster's
180  // objects. For fixed-size objects, this is just the object count. For
181  // variable-size objects, this is the object count and length of each object.
182 virtual void WriteAlloc(Serializer* serializer) = 0;
183
184 // Write the byte and reference data of the cluster's objects.
185 virtual void WriteFill(Serializer* serializer) = 0;
186
187 void WriteAndMeasureAlloc(Serializer* serializer);
188 void WriteAndMeasureFill(Serializer* serializer);
189
190 const char* name() const { return name_; }
191 intptr_t cid() const { return cid_; }
192 bool is_canonical() const { return is_canonical_; }
193 bool is_immutable() const { return is_immutable_; }
194 intptr_t size() const { return size_; }
195 intptr_t num_objects() const { return num_objects_; }
196
197 // Returns number of bytes needed for deserialized objects in
198 // this cluster. Printed in --print_snapshot_sizes_verbose statistics.
199 //
200 // In order to calculate this size, clusters of fixed-size objects
201 // can pass instance size as [target_instance_size] constructor parameter.
202 // Otherwise clusters should count [target_memory_size] in
203 // their [WriteAlloc] methods.
204 intptr_t target_memory_size() const { return target_memory_size_; }
205
206 protected:
207 const char* const name_;
208 const intptr_t cid_;
209 const intptr_t target_instance_size_;
210 const bool is_canonical_;
211 const bool is_immutable_;
212 intptr_t size_ = 0;
213 intptr_t num_objects_ = 0;
215};
216
217class DeserializationCluster : public ZoneAllocated {
218 public:
219 explicit DeserializationCluster(const char* name,
220 bool is_canonical = false,
221 bool is_immutable = false)
222 : name_(name),
224 is_immutable_(is_immutable),
225 start_index_(-1),
226 stop_index_(-1) {}
228
229 // Allocate memory for all objects in the cluster and write their addresses
230 // into the ref array. Do not touch this memory.
231 virtual void ReadAlloc(Deserializer* deserializer) = 0;
232
233 // Initialize the cluster's objects. Do not touch the memory of other objects.
234 virtual void ReadFill(Deserializer* deserializer) = 0;
235
236 // Complete any action that requires the full graph to be deserialized, such
237 // as rehashing.
238 virtual void PostLoad(Deserializer* deserializer, const Array& refs) {
239 // We only need to worry about how canonical values are handled during
240 // deserialization if there may be multiple loading units, which only
241 // happens in the precompiled runtime.
242#if defined(DART_PRECOMPILED_RUNTIME)
243 if (is_canonical()) {
244 FATAL("%s needs canonicalization but doesn't define PostLoad", name());
245 }
246#endif
247 }
248
249 const char* name() const { return name_; }
250 bool is_canonical() const { return is_canonical_; }
251
252 protected:
253 void ReadAllocFixedSize(Deserializer* deserializer, intptr_t instance_size);
254
255 const char* const name_;
256 const bool is_canonical_;
257 const bool is_immutable_;
258 // The range of the ref array that belongs to this cluster.
259 intptr_t start_index_;
260 intptr_t stop_index_;
261};
262
263class SerializationRoots {
264 public:
266 virtual void AddBaseObjects(Serializer* serializer) = 0;
267 virtual void PushRoots(Serializer* serializer) = 0;
268 virtual void WriteRoots(Serializer* serializer) = 0;
269
271};
272
273class DeserializationRoots {
274 public:
276 virtual void AddBaseObjects(Deserializer* deserializer) = 0;
277 virtual void ReadRoots(Deserializer* deserializer) = 0;
278 virtual void PostLoad(Deserializer* deserializer, const Array& refs) = 0;
279};
280
281// Reference value for objects that either are not reachable from the roots or
282// should never have a reference in the snapshot (because they are dropped,
283// for example). Should be the default value for Heap::GetObjectId.
284static constexpr intptr_t kUnreachableReference = 0;
286static constexpr intptr_t kFirstReference = 1;
287
288// Reference value for traced objects that have not been allocated their final
289// reference ID.
290static constexpr intptr_t kUnallocatedReference = -1;
291
292static constexpr bool IsAllocatedReference(intptr_t ref) {
293 return ref > kUnreachableReference;
294}
295
296static constexpr bool IsArtificialReference(intptr_t ref) {
297 return ref < kUnallocatedReference;
298}
299
300static constexpr bool IsReachableReference(intptr_t ref) {
301 return ref == kUnallocatedReference || IsAllocatedReference(ref);
302}
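// Taken together, the constants and predicates above partition reference ids
// as follows:
//   ref < -1   artificial reference (present only in the V8 snapshot profile)
//   ref == -1  traced but not yet assigned a final id (kUnallocatedReference)
//   ref == 0   unreachable / no reference (kUnreachableReference)
//   ref >= 1   allocated reference (kFirstReference and up)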
303
304class CodeSerializationCluster;
305
306class Serializer : public ThreadStackResource {
307 public:
311 ImageWriter* image_writer_,
312 bool vm_,
314 ~Serializer();
315
316 void AddBaseObject(ObjectPtr base_object,
317 const char* type = nullptr,
318 const char* name = nullptr);
319
320 intptr_t AssignRef(ObjectPtr object);
321 intptr_t AssignArtificialRef(ObjectPtr object = nullptr);
322
323 intptr_t GetCodeIndex(CodePtr code);
324
325 void Push(ObjectPtr object, intptr_t cid_override = kIllegalCid);
326 void PushWeak(ObjectPtr object);
327
328 void AddUntracedRef() { num_written_objects_++; }
329
330 void Trace(ObjectPtr object, intptr_t cid_override);
331
332 void UnexpectedObject(ObjectPtr object, const char* message);
333#if defined(SNAPSHOT_BACKTRACE)
334 ObjectPtr ParentOf(ObjectPtr object) const;
335 ObjectPtr ParentOf(const Object& object) const;
336#endif
337
338 SerializationCluster* NewClusterForClass(intptr_t cid, bool is_canonical);
339
341 // Make room for recording snapshot buffer size.
343 }
344
346 Snapshot* header = reinterpret_cast<Snapshot*>(stream_->buffer());
347 header->set_magic();
348 header->set_length(stream_->bytes_written());
349 header->set_kind(kind);
350 }
351
352 void WriteVersionAndFeatures(bool is_vm_snapshot);
353
355 void PrintSnapshotSizes();
356
357 NonStreamingWriteStream* stream() { return stream_; }
358 intptr_t bytes_written() { return stream_->bytes_written(); }
359 intptr_t bytes_heap_allocated() { return bytes_heap_allocated_; }
360
362 public:
364 const char* type,
365 ObjectPtr object,
366 StringPtr name)
368 serializer,
369 ReserveId(serializer,
370 type,
371 object,
372 String::ToCString(serializer->thread(), name)),
373 object) {}
374
376 const char* type,
377 ObjectPtr object,
378 const char* name)
379 : WritingObjectScope(serializer,
380 ReserveId(serializer, type, object, name),
381 object) {}
382
383 WritingObjectScope(Serializer* serializer,
385 ObjectPtr object = nullptr);
386
388 : WritingObjectScope(serializer,
389 serializer->GetProfileId(object),
390 object) {}
391
393
394 private:
395 static V8SnapshotProfileWriter::ObjectId ReserveId(Serializer* serializer,
396 const char* type,
397 ObjectPtr object,
398 const char* name);
399
400 private:
401 Serializer* const serializer_;
402 const ObjectPtr old_object_;
404 const classid_t old_cid_;
405 };
406
407 // Writes raw data to the stream (basic type).
408 // sizeof(T) must be in {1,2,4,8}.
409 template <typename T>
410 void Write(T value) {
411 BaseWriteStream::Raw<sizeof(T), T>::Write(stream_, value);
412 }
413 void WriteRefId(intptr_t value) { stream_->WriteRefId(value); }
414 void WriteUnsigned(intptr_t value) { stream_->WriteUnsigned(value); }
415 void WriteUnsigned64(uint64_t value) { stream_->WriteUnsigned(value); }
416
420
421 void WriteBytes(const void* addr, intptr_t len) {
422 stream_->WriteBytes(addr, len);
423 }
424 void Align(intptr_t alignment, intptr_t offset = 0) {
425 stream_->Align(alignment, offset);
426 }
427
430
431 void WriteRootRef(ObjectPtr object, const char* name = nullptr) {
432 intptr_t id = RefId(object);
433 WriteRefId(id);
434 if (profile_writer_ != nullptr) {
435 profile_writer_->AddRoot(GetProfileId(object), name);
436 }
437 }
438
439 // Record a reference from the currently written object to the given object
440 // and return reference id for the given object.
441 void AttributeReference(ObjectPtr object,
442 const V8SnapshotProfileWriter::Reference& reference);
443
444 void AttributeElementRef(ObjectPtr object, intptr_t index) {
445 AttributeReference(object,
447 }
448
449 void WriteElementRef(ObjectPtr object, intptr_t index) {
450 AttributeElementRef(object, index);
451 WriteRefId(RefId(object));
452 }
453
454 void AttributePropertyRef(ObjectPtr object, const char* property) {
455 AttributeReference(object,
457 }
458
459 void WritePropertyRef(ObjectPtr object, const char* property) {
460 AttributePropertyRef(object, property);
461 WriteRefId(RefId(object));
462 }
463
464 void WriteOffsetRef(ObjectPtr object, intptr_t offset) {
465 intptr_t id = RefId(object);
466 WriteRefId(id);
467 if (profile_writer_ != nullptr) {
468 if (auto const property = offsets_table_->FieldNameForOffset(
469 object_currently_writing_.cid_, offset)) {
470 AttributePropertyRef(object, property);
471 } else {
473 }
474 }
475 }
476
477 template <typename T, typename... P>
478 void WriteFromTo(T obj, P&&... args) {
479 auto* from = obj->untag()->from();
480 auto* to = obj->untag()->to_snapshot(kind(), args...);
481 WriteRange(obj, from, to);
482 }
483
484 template <typename T>
485 DART_NOINLINE void WriteRange(ObjectPtr obj, T from, T to) {
486 for (auto* p = from; p <= to; p++) {
488 p->Decompress(obj->heap_base()),
489 reinterpret_cast<uword>(p) - reinterpret_cast<uword>(obj->untag()));
490 }
491 }
492
493 template <typename T, typename... P>
494 void PushFromTo(T obj, P&&... args) {
495 auto* from = obj->untag()->from();
496 auto* to = obj->untag()->to_snapshot(kind(), args...);
497 PushRange(obj, from, to);
498 }
499
500 template <typename T>
501 DART_NOINLINE void PushRange(ObjectPtr obj, T from, T to) {
502 for (auto* p = from; p <= to; p++) {
503 Push(p->Decompress(obj->heap_base()));
504 }
505 }
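  // WriteFromTo/PushFromTo rely on the convention that each untagged object
  // layout exposes a from()/to_snapshot() range covering the pointer fields
  // that belong in a snapshot of the given kind; the loops above visit every
  // compressed pointer in that range, decompressing it against the object's
  // heap base.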
506
508
509 void WriteCid(intptr_t cid) {
511 Write<int32_t>(cid);
512 }
513
514 // Sorts Code objects and reorders instructions before writing snapshot.
515 // Builds binary search table for stack maps.
516 void PrepareInstructions(const CompressedStackMaps& canonical_smap);
517
518 void WriteInstructions(InstructionsPtr instr,
519 uint32_t unchecked_offset,
520 CodePtr code,
521 bool deferred);
522 uint32_t GetDataOffset(ObjectPtr object) const;
523 void TraceDataOffset(uint32_t offset);
524 intptr_t GetDataSize() const;
525
526 void WriteDispatchTable(const Array& entries);
527
528 Heap* heap() const { return heap_; }
529 Zone* zone() const { return zone_; }
530 Snapshot::Kind kind() const { return kind_; }
531 intptr_t next_ref_index() const { return next_ref_index_; }
532
534
535 V8SnapshotProfileWriter* profile_writer() const { return profile_writer_; }
536
537  // If the given [obj] was not included in the snapshot and does not yet
538  // have an artificial node created for it, create an artificial node in
539  // the profile representing this object.
540 // Returns true if [obj] has an artificial profile node associated with it.
542
544 void RecordDeferredCode(CodePtr ptr);
546 return loading_units_;
547 }
549 loading_units_ = units;
550 }
551 intptr_t current_loading_unit_id() const { return current_loading_unit_id_; }
552 void set_current_loading_unit_id(intptr_t id) {
553 current_loading_unit_id_ = id;
554 }
555
556 // Returns the reference ID for the object. Fails for objects that have not
557 // been allocated a reference ID yet, so should be used only after all
558 // WriteAlloc calls.
559 intptr_t RefId(ObjectPtr object) const;
560
561 // Same as RefId, but allows artificial and unreachable references. Still
562 // fails for unallocated references.
563 intptr_t UnsafeRefId(ObjectPtr object) const;
564
565 // Whether the object is reachable.
566 bool IsReachable(ObjectPtr object) const {
567 return IsReachableReference(heap_->GetObjectId(object));
568 }
569 // Whether the object has an allocated reference.
570 bool HasRef(ObjectPtr object) const {
571 return IsAllocatedReference(heap_->GetObjectId(object));
572 }
573 // Whether the object only appears in the V8 snapshot profile.
574 bool HasArtificialRef(ObjectPtr object) const {
575 return IsArtificialReference(heap_->GetObjectId(object));
576 }
577 // Whether a node for the object already has been added to the V8 snapshot
578 // profile.
579 bool HasProfileNode(ObjectPtr object) const {
580 ASSERT(profile_writer_ != nullptr);
581 return profile_writer_->HasId(GetProfileId(object));
582 }
583 bool IsWritten(ObjectPtr object) const {
584 return heap_->GetObjectId(object) > num_base_objects_;
585 }
586
587 private:
588 const char* ReadOnlyObjectType(intptr_t cid);
589 void FlushProfile();
590
591 Heap* heap_;
592 Zone* zone_;
593 Snapshot::Kind kind_;
595 ImageWriter* image_writer_;
596 SerializationCluster** canonical_clusters_by_cid_;
597 SerializationCluster** clusters_by_cid_;
598 CodeSerializationCluster* code_cluster_ = nullptr;
599
600 struct StackEntry {
601 ObjectPtr obj;
602 intptr_t cid_override;
603 };
604 GrowableArray<StackEntry> stack_;
605
606 intptr_t num_cids_;
607 intptr_t num_tlc_cids_;
608 intptr_t num_base_objects_;
609 intptr_t num_written_objects_;
610 intptr_t next_ref_index_;
611
612 intptr_t dispatch_table_size_ = 0;
613 intptr_t bytes_heap_allocated_ = 0;
614 intptr_t instructions_table_len_ = 0;
615 intptr_t instructions_table_rodata_offset_ = 0;
616
617 // True if writing VM snapshot, false for Isolate snapshot.
618 bool vm_;
619
620 V8SnapshotProfileWriter* profile_writer_ = nullptr;
621 struct ProfilingObject {
622 ObjectPtr object_ = nullptr;
623 // Unless within a WritingObjectScope, any bytes written are attributed to
624 // the artificial root.
625 V8SnapshotProfileWriter::ObjectId id_ =
627 intptr_t last_stream_position_ = 0;
628 intptr_t cid_ = -1;
629 } object_currently_writing_;
630 OffsetsTable* offsets_table_ = nullptr;
631
632#if defined(SNAPSHOT_BACKTRACE)
633 ObjectPtr current_parent_;
634 GrowableArray<Object*> parent_pairs_;
635#endif
636
637#if defined(DART_PRECOMPILER)
638 IntMap<intptr_t> deduped_instructions_sources_;
639 IntMap<intptr_t> code_index_;
640#endif
641
642 intptr_t current_loading_unit_id_ = 0;
643 GrowableArray<LoadingUnitSerializationData*>* loading_units_ = nullptr;
644 ZoneGrowableArray<Object*>* objects_ = new ZoneGrowableArray<Object*>();
645
647};
648
649#define AutoTraceObject(obj) \
650 Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, nullptr)
651
652#define AutoTraceObjectName(obj, str) \
653 Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, str)
654
655#define WriteFieldValue(field, value) s->WritePropertyRef(value, #field);
656
657#define WriteFromTo(obj, ...) s->WriteFromTo(obj, ##__VA_ARGS__);
658
659#define PushFromTo(obj, ...) s->PushFromTo(obj, ##__VA_ARGS__);
660
661#define WriteField(obj, field) s->WritePropertyRef(obj->untag()->field, #field)
662#define WriteCompressedField(obj, name) \
663 s->WritePropertyRef(obj->untag()->name(), #name "_")
664
665class Deserializer : public ThreadStackResource {
666 public:
669 const uint8_t* buffer,
670 intptr_t size,
671 const uint8_t* data_buffer,
672 const uint8_t* instructions_buffer,
673 bool is_non_root_unit,
674 intptr_t offset = 0);
676
677 // Verifies the image alignment.
678 //
679 // Returns ApiError::null() on success and an ApiError with an an appropriate
680 // message otherwise.
681 ApiErrorPtr VerifyImageAlignment();
682
683 ObjectPtr Allocate(intptr_t size);
685 intptr_t cid,
686 intptr_t size,
687 bool is_canonical = false) {
688 InitializeHeader(raw, cid, size, is_canonical,
690 }
691 static void InitializeHeader(ObjectPtr raw,
692 intptr_t cid,
693 intptr_t size,
694 bool is_canonical,
695 bool is_immutable);
696
697 // Reads raw data (for basic types).
698 // sizeof(T) must be in {1,2,4,8}.
699 template <typename T>
700 T Read() {
701 return ReadStream::Raw<sizeof(T), T>::Read(&stream_);
702 }
703 intptr_t ReadRefId() { return stream_.ReadRefId(); }
704 intptr_t ReadUnsigned() { return stream_.ReadUnsigned(); }
705 uint64_t ReadUnsigned64() { return stream_.ReadUnsigned<uint64_t>(); }
706 void ReadBytes(uint8_t* addr, intptr_t len) { stream_.ReadBytes(addr, len); }
707
709
710 intptr_t position() const { return stream_.Position(); }
711 void set_position(intptr_t p) { stream_.SetPosition(p); }
712 const uint8_t* AddressOfCurrentPosition() const {
713 return stream_.AddressOfCurrentPosition();
714 }
715
716 void Advance(intptr_t value) { stream_.Advance(value); }
717 void Align(intptr_t alignment, intptr_t offset = 0) {
718 stream_.Align(alignment, offset);
719 }
720
721 void AddBaseObject(ObjectPtr base_object) { AssignRef(base_object); }
722
723 void AssignRef(ObjectPtr object) {
724 ASSERT(next_ref_index_ <= num_objects_);
725 refs_->untag()->data()[next_ref_index_] = object;
726 next_ref_index_++;
727 }
728
729 ObjectPtr Ref(intptr_t index) const {
730 ASSERT(index > 0);
731 ASSERT(index <= num_objects_);
732 return refs_->untag()->element(index);
733 }
734
735 CodePtr GetCodeByIndex(intptr_t code_index, uword* entry_point) const;
736 uword GetEntryPointByCodeIndex(intptr_t code_index) const;
737
738  // If |code_index| corresponds to a non-discarded Code object, returns the
739  // index within the code cluster that corresponds to this Code object.
740  // Otherwise, if |code_index| corresponds to a discarded Code object,
741  // returns -1.
742 static intptr_t CodeIndexToClusterIndex(const InstructionsTable& table,
743 intptr_t code_index);
744
746
748 return TokenPosition::Deserialize(Read<int32_t>());
749 }
750
751 intptr_t ReadCid() {
753 return Read<int32_t>();
754 }
755
756 void ReadInstructions(CodePtr code, bool deferred);
757 void EndInstructions();
758 ObjectPtr GetObjectAt(uint32_t offset) const;
759
761
763
765 ReadDispatchTable(&stream_, /*deferred=*/false, InstructionsTable::Handle(),
766 -1, -1);
767 }
768 void ReadDispatchTable(ReadStream* stream,
769 bool deferred,
770 const InstructionsTable& root_instruction_table,
771 intptr_t deferred_code_start_index,
772 intptr_t deferred_code_end_index);
773
774 intptr_t next_index() const { return next_ref_index_; }
775 Heap* heap() const { return heap_; }
776 Zone* zone() const { return zone_; }
778#if defined(DART_PRECOMPILED_RUNTIME)
779 return Snapshot::kFullAOT;
780#else
781 return kind_;
782#endif
783 }
784 bool is_non_root_unit() const { return is_non_root_unit_; }
785 void set_code_start_index(intptr_t value) { code_start_index_ = value; }
786 intptr_t code_start_index() const { return code_start_index_; }
787 void set_code_stop_index(intptr_t value) { code_stop_index_ = value; }
788 intptr_t code_stop_index() const { return code_stop_index_; }
790 return instructions_table_;
791 }
792 intptr_t num_base_objects() const { return num_base_objects_; }
793
794 // This serves to make the snapshot cursor, ref table and null be locals
795 // during ReadFill, which allows the C compiler to see they are not aliased
796 // and can be kept in registers.
797 class Local : public ReadStream {
798 public:
800 : ReadStream(d->stream_.buffer_, d->stream_.current_, d->stream_.end_),
801 d_(d),
802 refs_(d->refs_),
803 null_(Object::null()) {
804#if defined(DEBUG)
805 // Can't mix use of Deserializer::Read*.
806 d->stream_.current_ = nullptr;
807#endif
808 }
809 ~Local() { d_->stream_.current_ = current_; }
810
811 ObjectPtr Ref(intptr_t index) const {
812 ASSERT(index > 0);
813 ASSERT(index <= d_->num_objects_);
814 return refs_->untag()->element(index);
815 }
816
817 template <typename T>
818 T Read() {
819 return ReadStream::Raw<sizeof(T), T>::Read(this);
820 }
821 uint64_t ReadUnsigned64() { return ReadUnsigned<uint64_t>(); }
822
825 return TokenPosition::Deserialize(Read<int32_t>());
826 }
827
828 intptr_t ReadCid() {
830 return Read<int32_t>();
831 }
832
833 template <typename T, typename... P>
834 void ReadFromTo(T obj, P&&... params) {
835 auto* from = obj->untag()->from();
836 auto* to_snapshot = obj->untag()->to_snapshot(d_->kind(), params...);
837 auto* to = obj->untag()->to(params...);
838 for (auto* p = from; p <= to_snapshot; p++) {
839 *p = ReadRef();
840 }
841 // This is necessary because, unlike Object::Allocate, the clustered
842      // deserializer allocates objects without null-initializing them. Instead,
843 // each deserialization cluster is responsible for initializing every
844 // field, ensuring that every field is written to exactly once.
845 for (auto* p = to_snapshot + 1; p <= to; p++) {
846 *p = null_;
847 }
848 }
849
850 private:
851 Deserializer* const d_;
852 const ArrayPtr refs_;
853 const ObjectPtr null_;
854 };
855
856 private:
857 Heap* heap_;
858 PageSpace* old_space_;
859 FreeList* freelist_;
860 Zone* zone_;
861 Snapshot::Kind kind_;
862 ReadStream stream_;
863 ImageReader* image_reader_;
864 intptr_t num_base_objects_;
865 intptr_t num_objects_;
866 intptr_t num_clusters_;
867 ArrayPtr refs_;
868 intptr_t next_ref_index_;
869 intptr_t code_start_index_ = 0;
870 intptr_t code_stop_index_ = 0;
871 intptr_t instructions_index_ = 0;
872 DeserializationCluster** clusters_;
873 const bool is_non_root_unit_;
874 InstructionsTable& instructions_table_;
875};
876
877DART_FORCE_INLINE
880 old_space_->AllocateSnapshotLocked(freelist_, size));
881}
882
884 intptr_t class_id,
885 intptr_t size,
886 bool is_canonical,
887 bool is_immutable) {
889 uword tags = 0;
890 tags = UntaggedObject::ClassIdTag::update(class_id, tags);
891 tags = UntaggedObject::SizeTag::update(size, tags);
892 tags = UntaggedObject::CanonicalBit::update(is_canonical, tags);
893 tags = UntaggedObject::AlwaysSetBit::update(true, tags);
894 tags = UntaggedObject::NotMarkedBit::update(true, tags);
896 tags = UntaggedObject::NewBit::update(false, tags);
897 tags = UntaggedObject::ImmutableBit::update(is_immutable, tags);
898 raw->untag()->tags_ = tags;
899}
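// The tag word assembled above gives freshly deserialized objects the state
// expected of old-space snapshot objects: class id and size filled in,
// canonical and immutable bits as requested, not marked and not in new space.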
900
901#if !defined(DART_PRECOMPILED_RUNTIME)
903 intptr_t start_size = serializer->bytes_written();
904 intptr_t start_data = serializer->GetDataSize();
905 intptr_t start_objects = serializer->next_ref_index();
909 serializer->Write<uint32_t>(tags);
910 WriteAlloc(serializer);
911 intptr_t stop_size = serializer->bytes_written();
912 intptr_t stop_data = serializer->GetDataSize();
913 intptr_t stop_objects = serializer->next_ref_index();
914 if (FLAG_print_cluster_information) {
915 OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "), ", start_size,
916 stop_size - start_size);
917 OS::PrintErr("Data 0x%" Pp " (%" Pd "): ", start_data,
918 stop_data - start_data);
919 OS::PrintErr("Alloc %s (%" Pd ")\n", name(), stop_objects - start_objects);
920 }
921 size_ += (stop_size - start_size) + (stop_data - start_data);
922 num_objects_ += (stop_objects - start_objects);
925 }
926}
927
929 intptr_t start = serializer->bytes_written();
930 WriteFill(serializer);
931 intptr_t stop = serializer->bytes_written();
932 if (FLAG_print_cluster_information) {
933 OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "): Fill %s\n", start, stop - start,
934 name());
935 }
936 size_ += (stop - start);
937}
938#endif // !DART_PRECOMPILED_RUNTIME
939
940DART_NOINLINE
942 intptr_t instance_size) {
943 start_index_ = d->next_index();
944 intptr_t count = d->ReadUnsigned();
945 for (intptr_t i = 0; i < count; i++) {
946 d->AssignRef(d->Allocate(instance_size));
947 }
948 stop_index_ = d->next_index();
949}
950
951#if !defined(DART_PRECOMPILED_RUNTIME)
953 Serializer* s,
954 intptr_t class_id) {
955 const auto unboxed_fields_bitmap_host =
956 s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(class_id);
957
958 UnboxedFieldBitmap unboxed_fields_bitmap;
959 if (unboxed_fields_bitmap_host.IsEmpty() ||
960 kWordSize == compiler::target::kWordSize) {
961 unboxed_fields_bitmap = unboxed_fields_bitmap_host;
962 } else {
963 ASSERT(kWordSize == 8 && compiler::target::kWordSize == 4);
964    // A new bitmap is built if the word sizes in the target and
965    // host are different.
966 unboxed_fields_bitmap.Reset();
967 intptr_t target_i = 0, host_i = 0;
968
969 while (host_i < UnboxedFieldBitmap::Length()) {
970 // Each unboxed field has constant length, therefore the number of
971 // words used by it should double when compiling from 64-bit to 32-bit.
972 if (unboxed_fields_bitmap_host.Get(host_i++)) {
973 unboxed_fields_bitmap.Set(target_i++);
974 unboxed_fields_bitmap.Set(target_i++);
975 } else {
976        // For object pointers, the field is always one word long.
977 target_i++;
978 }
979 }
980 }
981
982 return unboxed_fields_bitmap;
983}
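// Example: an unboxed field that occupies one 64-bit word on the host
// occupies two 32-bit words on the target, so each set bit in the host
// bitmap expands into two consecutive set bits in the rebuilt target bitmap.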
984
985class ClassSerializationCluster : public SerializationCluster {
986 public:
987 explicit ClassSerializationCluster(intptr_t num_cids)
988 : SerializationCluster("Class",
989 kClassCid,
990 compiler::target::Class::InstanceSize()),
991 predefined_(kNumPredefinedCids),
992 objects_(num_cids) {}
994
995 void Trace(Serializer* s, ObjectPtr object) {
996 ClassPtr cls = Class::RawCast(object);
997 intptr_t class_id = cls->untag()->id_;
998
999 if (class_id == kIllegalCid) {
1000 // Classes expected to be dropped by the precompiler should not be traced.
1001 s->UnexpectedObject(cls, "Class with illegal cid");
1002 }
1003 if (class_id < kNumPredefinedCids) {
1004 // These classes are allocated by Object::Init or Object::InitOnce, so the
1005 // deserializer must find them in the class table instead of allocating
1006 // them.
1007 predefined_.Add(cls);
1008 } else {
1009 objects_.Add(cls);
1010 }
1011
1012 PushFromTo(cls);
1013 }
1014
1016 intptr_t count = predefined_.length();
1017 s->WriteUnsigned(count);
1018 for (intptr_t i = 0; i < count; i++) {
1019 ClassPtr cls = predefined_[i];
1020 s->AssignRef(cls);
1021 AutoTraceObject(cls);
1022 intptr_t class_id = cls->untag()->id_;
1023 s->WriteCid(class_id);
1024 }
1025 count = objects_.length();
1026 s->WriteUnsigned(count);
1027 for (intptr_t i = 0; i < count; i++) {
1028 ClassPtr cls = objects_[i];
1029 s->AssignRef(cls);
1030 }
1031 }
1032
1034 intptr_t count = predefined_.length();
1035 for (intptr_t i = 0; i < count; i++) {
1036 WriteClass(s, predefined_[i]);
1037 }
1038 count = objects_.length();
1039 for (intptr_t i = 0; i < count; i++) {
1040 WriteClass(s, objects_[i]);
1041 }
1042 }
1043
1044 private:
1045 void WriteClass(Serializer* s, ClassPtr cls) {
1046 AutoTraceObjectName(cls, cls->untag()->name());
1047 WriteFromTo(cls);
1048 intptr_t class_id = cls->untag()->id_;
1049 if (class_id == kIllegalCid) {
1050 s->UnexpectedObject(cls, "Class with illegal cid");
1051 }
1052 s->WriteCid(class_id);
1053 if (s->kind() != Snapshot::kFullAOT) {
1054 s->Write<uint32_t>(cls->untag()->kernel_offset_);
1055 }
1056 s->Write<int32_t>(Class::target_instance_size_in_words(cls));
1057 s->Write<int32_t>(Class::target_next_field_offset_in_words(cls));
1059 s->Write<int16_t>(cls->untag()->num_type_arguments_);
1060 s->Write<uint16_t>(cls->untag()->num_native_fields_);
1061 if (s->kind() != Snapshot::kFullAOT) {
1062 s->WriteTokenPosition(cls->untag()->token_pos_);
1063 s->WriteTokenPosition(cls->untag()->end_token_pos_);
1064 s->WriteCid(cls->untag()->implementor_cid_);
1065 }
1066 s->Write<uint32_t>(cls->untag()->state_bits_);
1067
1068 if (!ClassTable::IsTopLevelCid(class_id)) {
1069 const auto unboxed_fields_map =
1071 s->WriteUnsigned64(unboxed_fields_map.Value());
1072 }
1073 }
1074
1075 GrowableArray<ClassPtr> predefined_;
1076 GrowableArray<ClassPtr> objects_;
1077};
1078#endif // !DART_PRECOMPILED_RUNTIME
1079
1080class ClassDeserializationCluster : public DeserializationCluster {
1081 public:
1084
1085 void ReadAlloc(Deserializer* d) override {
1086 predefined_start_index_ = d->next_index();
1087 intptr_t count = d->ReadUnsigned();
1088 ClassTable* table = d->isolate_group()->class_table();
1089 for (intptr_t i = 0; i < count; i++) {
1090 intptr_t class_id = d->ReadCid();
1091 ASSERT(table->HasValidClassAt(class_id));
1092 ClassPtr cls = table->At(class_id);
1093 ASSERT(cls != nullptr);
1094 d->AssignRef(cls);
1095 }
1096 predefined_stop_index_ = d->next_index();
1097
1098 start_index_ = d->next_index();
1099 count = d->ReadUnsigned();
1100 for (intptr_t i = 0; i < count; i++) {
1101 d->AssignRef(d->Allocate(Class::InstanceSize()));
1102 }
1103 stop_index_ = d->next_index();
1104 }
1105
1106  void ReadFill(Deserializer* d_) override {
1107    Deserializer::Local d(d_);
1108
1109 for (intptr_t id = predefined_start_index_; id < predefined_stop_index_;
1110 id++) {
1111 ClassPtr cls = static_cast<ClassPtr>(d.Ref(id));
1112 d.ReadFromTo(cls);
1113 intptr_t class_id = d.ReadCid();
1114 cls->untag()->id_ = class_id;
1115#if !defined(DART_PRECOMPILED_RUNTIME)
1116 ASSERT(d_->kind() != Snapshot::kFullAOT);
1117 cls->untag()->kernel_offset_ = d.Read<uint32_t>();
1118#endif
1119 if (!IsInternalVMdefinedClassId(class_id)) {
1120 cls->untag()->host_instance_size_in_words_ = d.Read<int32_t>();
1121 cls->untag()->host_next_field_offset_in_words_ = d.Read<int32_t>();
1122#if defined(DART_PRECOMPILER)
1123 // Only one pair is serialized. The target field only exists when
1124      // DART_PRECOMPILER is defined.
1125 cls->untag()->target_instance_size_in_words_ =
1126 cls->untag()->host_instance_size_in_words_;
1127 cls->untag()->target_next_field_offset_in_words_ =
1128 cls->untag()->host_next_field_offset_in_words_;
1129#endif // defined(DART_PRECOMPILER)
1130 } else {
1131 d.Read<int32_t>(); // Skip.
1132 d.Read<int32_t>(); // Skip.
1133 }
1134 cls->untag()->host_type_arguments_field_offset_in_words_ =
1135 d.Read<int32_t>();
1136#if defined(DART_PRECOMPILER)
1137 cls->untag()->target_type_arguments_field_offset_in_words_ =
1138 cls->untag()->host_type_arguments_field_offset_in_words_;
1139#endif // defined(DART_PRECOMPILER)
1140 cls->untag()->num_type_arguments_ = d.Read<int16_t>();
1141 cls->untag()->num_native_fields_ = d.Read<uint16_t>();
1142#if !defined(DART_PRECOMPILED_RUNTIME)
1143 ASSERT(d_->kind() != Snapshot::kFullAOT);
1144 cls->untag()->token_pos_ = d.ReadTokenPosition();
1145 cls->untag()->end_token_pos_ = d.ReadTokenPosition();
1146 cls->untag()->implementor_cid_ = d.ReadCid();
1147#endif // !defined(DART_PRECOMPILED_RUNTIME)
1148 cls->untag()->state_bits_ = d.Read<uint32_t>();
1149 d.ReadUnsigned64(); // Skip unboxed fields bitmap.
1150 }
1151
1153 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1154 ClassPtr cls = static_cast<ClassPtr>(d.Ref(id));
1156 d.ReadFromTo(cls);
1157
1158 intptr_t class_id = d.ReadCid();
1159 ASSERT(class_id >= kNumPredefinedCids);
1160 cls->untag()->id_ = class_id;
1161
1162#if !defined(DART_PRECOMPILED_RUNTIME)
1163 ASSERT(d_->kind() != Snapshot::kFullAOT);
1164 cls->untag()->kernel_offset_ = d.Read<uint32_t>();
1165#endif
1166 cls->untag()->host_instance_size_in_words_ = d.Read<int32_t>();
1167 cls->untag()->host_next_field_offset_in_words_ = d.Read<int32_t>();
1168 cls->untag()->host_type_arguments_field_offset_in_words_ =
1169 d.Read<int32_t>();
1170#if defined(DART_PRECOMPILER)
1171 cls->untag()->target_instance_size_in_words_ =
1172 cls->untag()->host_instance_size_in_words_;
1173 cls->untag()->target_next_field_offset_in_words_ =
1174 cls->untag()->host_next_field_offset_in_words_;
1175 cls->untag()->target_type_arguments_field_offset_in_words_ =
1176 cls->untag()->host_type_arguments_field_offset_in_words_;
1177#endif // defined(DART_PRECOMPILER)
1178 cls->untag()->num_type_arguments_ = d.Read<int16_t>();
1179 cls->untag()->num_native_fields_ = d.Read<uint16_t>();
1180#if !defined(DART_PRECOMPILED_RUNTIME)
1181 ASSERT(d_->kind() != Snapshot::kFullAOT);
1182 cls->untag()->token_pos_ = d.ReadTokenPosition();
1183 cls->untag()->end_token_pos_ = d.ReadTokenPosition();
1184 cls->untag()->implementor_cid_ = d.ReadCid();
1185#endif // !defined(DART_PRECOMPILED_RUNTIME)
1186 cls->untag()->state_bits_ = d.Read<uint32_t>();
1187
1188 table->AllocateIndex(class_id);
1189 table->SetAt(class_id, cls);
1190
1191 if (!ClassTable::IsTopLevelCid(class_id)) {
1192 const UnboxedFieldBitmap unboxed_fields_map(d.ReadUnsigned64());
1193 table->SetUnboxedFieldsMapAt(class_id, unboxed_fields_map);
1194 }
1195 }
1196 }
1197
1198 private:
1199 intptr_t predefined_start_index_;
1200 intptr_t predefined_stop_index_;
1201};
1202
1203// Super classes for writing out clusters which contain objects grouped into
1204// a canonical set (e.g. String, Type, TypeArguments, etc).
1205// To save space in the snapshot we avoid writing such canonical sets
1206// explicitly as Array objects into the snapshot and instead utilize a different
1207// encoding: objects in a cluster representing a canonical set are sorted
1208// to appear in the same order they appear in the Array representing the set,
1209// and we additionally write out an array of values describing gaps between
1210// objects.
1211//
1212// In some situations not all canonical objects of some type need to
1213// be added to the resulting canonical set because they are cached in some
1214// special way (see Type::Canonicalize as an example, which caches declaration
1215// types in a special way). In this case the subclass can set
1216// kAllCanonicalObjectsAreIncludedIntoSet to |false| and override
1217// IsInCanonicalSet filter.
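//
// Rough example of the gap encoding: if the canonical set's hash table has
// its first three occupied slots preceded by 3, 0 and 2 unused slots
// respectively, the cluster writes those three objects in table order plus
// the gap list [3, 0, 2]; on deserialization the table shape is rebuilt by
// filling each gap with the unused marker and then writing the next object.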
1218#if !defined(DART_PRECOMPILED_RUNTIME)
1219template <typename SetType,
1220 typename HandleType,
1221 typename PointerType,
1222 bool kAllCanonicalObjectsAreIncludedIntoSet = true>
1224 protected:
1226 bool is_canonical,
1227 bool represents_canonical_set,
1228 const char* name,
1229 intptr_t target_instance_size = 0)
1230 : SerializationCluster(name, cid, target_instance_size, is_canonical),
1231 represents_canonical_set_(represents_canonical_set) {}
1232
1233 virtual bool IsInCanonicalSet(Serializer* s, PointerType ptr) {
1234 // Must override this function if kAllCanonicalObjectsAreIncludedIntoSet
1235 // is set to |false|.
1236 ASSERT(kAllCanonicalObjectsAreIncludedIntoSet);
1237 return true;
1238 }
1239
1241 if (!represents_canonical_set_) {
1242 return;
1243 }
1244
1245 // Sort objects before writing them out so that they appear in the same
1246 // order as they would appear in a CanonicalStringSet.
1247 using ZoneCanonicalSet =
1249
1250 // Compute required capacity for the hashtable (to avoid overallocating).
1251 intptr_t required_capacity = 0;
1252 for (auto ptr : objects_) {
1253 if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) {
1254 required_capacity++;
1255 }
1256 }
1257 // Over-allocate capacity so a few inserts can happen at startup without
1258 // causing a rehash.
1259 const intptr_t kSpareCapacity = 32;
1260 required_capacity = static_cast<intptr_t>(
1261 static_cast<double>(required_capacity + kSpareCapacity) /
1263
1264 intptr_t num_occupied = 0;
1265
1266 // Build canonical set out of objects that should belong to it.
1267 // Objects that don't belong to it are copied to the prefix of objects_.
1268 ZoneCanonicalSet table(
1269 s->zone(), HashTables::New<ZoneCanonicalSet>(required_capacity));
1270 HandleType& element = HandleType::Handle(s->zone());
1271 for (auto ptr : objects_) {
1272 if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) {
1273 element ^= ptr;
1274 intptr_t entry = -1;
1275 const bool present = table.FindKeyOrDeletedOrUnused(element, &entry);
1276 ASSERT(!present);
1277 table.InsertKey(entry, element);
1278 } else {
1279 objects_[num_occupied++] = ptr;
1280 }
1281 }
1282
1283 const auto prefix_length = num_occupied;
1284
1285 // Compute objects_ order and gaps based on canonical set layout.
1286 auto& arr = table.Release();
1287 intptr_t last_occupied = ZoneCanonicalSet::kFirstKeyIndex - 1;
1288 for (intptr_t i = ZoneCanonicalSet::kFirstKeyIndex, length = arr.Length();
1289 i < length; i++) {
1290 ObjectPtr v = arr.At(i);
1291 ASSERT(v != ZoneCanonicalSet::DeletedMarker().ptr());
1292 if (v != ZoneCanonicalSet::UnusedMarker().ptr()) {
1293 const intptr_t unused_run_length = (i - 1) - last_occupied;
1294 gaps_.Add(unused_run_length);
1295 objects_[num_occupied++] = static_cast<PointerType>(v);
1296 last_occupied = i;
1297 }
1298 }
1299 ASSERT(num_occupied == objects_.length());
1300 ASSERT(prefix_length == (objects_.length() - gaps_.length()));
1301 table_length_ = arr.Length();
1302 }
1303
1305 if (represents_canonical_set_) {
1306 s->WriteUnsigned(table_length_);
1307 s->WriteUnsigned(objects_.length() - gaps_.length());
1308 for (auto gap : gaps_) {
1309 s->WriteUnsigned(gap);
1310 }
1312 compiler::target::Array::InstanceSize(table_length_);
1313 }
1314 }
1315
1317
1318 private:
1319 const bool represents_canonical_set_;
1321 intptr_t table_length_ = 0;
1322};
1323#endif
1324
1325template <typename SetType, bool kAllCanonicalObjectsAreIncludedIntoSet = true>
1327 public:
1329 bool is_root_unit,
1330 const char* name)
1332 is_root_unit_(is_root_unit),
1333 table_(SetType::ArrayHandle::Handle()) {}
1334
1336 if (!is_root_unit_ || !is_canonical()) {
1337 return;
1338 }
1339
1340 const auto table_length = d->ReadUnsigned();
1341 first_element_ = d->ReadUnsigned();
1342 const intptr_t count = stop_index_ - (start_index_ + first_element_);
1343 auto table = StartDeserialization(d, table_length, count);
1344 for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) {
1345 table.FillGap(d->ReadUnsigned());
1346 table.WriteElement(d, d->Ref(i));
1347 }
1348 table_ = table.Finish();
1349 }
1350
1351 protected:
1352 const bool is_root_unit_;
1354 typename SetType::ArrayHandle& table_;
1355
1357 const Array& refs,
1358 const typename SetType::ArrayHandle& current_table) {
1359#if defined(DEBUG)
1360    // First check that we are not overwriting a table and losing information.
1361 if (!current_table.IsNull()) {
1362 SetType current_set(d->zone(), current_table.ptr());
1363 ASSERT(current_set.NumOccupied() == 0);
1364 current_set.Release();
1365 }
1366
1367    // Now check that the manually created table behaves correctly as a canonical
1368 // set.
1369 SetType canonical_set(d->zone(), table_.ptr());
1371 for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) {
1372 key = refs.At(i);
1373 ASSERT(canonical_set.GetOrNull(key) != Object::null());
1374 }
1375 canonical_set.Release();
1376#endif // defined(DEBUG)
1377 }
1378
1379 private:
1380 struct DeserializationFinger {
1381 typename SetType::ArrayPtr table;
1382 intptr_t current_index;
1383 ObjectPtr gap_element;
1384
1385 void FillGap(int length) {
1386 for (intptr_t j = 0; j < length; j++) {
1387 table->untag()->data()[current_index + j] = gap_element;
1388 }
1389 current_index += length;
1390 }
1391
1392 void WriteElement(Deserializer* d, ObjectPtr object) {
1393 table->untag()->data()[current_index++] = object;
1394 }
1395
1396 typename SetType::ArrayPtr Finish() {
1397 if (table != SetType::ArrayHandle::null()) {
1398 FillGap(Smi::Value(table->untag()->length()) - current_index);
1399 }
1400 auto result = table;
1401 table = SetType::ArrayHandle::null();
1402 return result;
1403 }
1404 };
1405
1406 static DeserializationFinger StartDeserialization(Deserializer* d,
1407 intptr_t length,
1408 intptr_t count) {
1409 const intptr_t instance_size = SetType::ArrayHandle::InstanceSize(length);
1410 typename SetType::ArrayPtr table =
1411 static_cast<typename SetType::ArrayPtr>(d->Allocate(instance_size));
1412 Deserializer::InitializeHeader(table, SetType::Storage::ArrayCid,
1413 instance_size);
1414 if ((SetType::Storage::ArrayCid == kArrayCid) &&
1416 table->untag()->SetCardRememberedBitUnsynchronized();
1417 }
1418 InitTypeArgsOrNext(table);
1419 table->untag()->length_ = Smi::New(length);
1420 for (intptr_t i = 0; i < SetType::kFirstKeyIndex; i++) {
1421 table->untag()->data()[i] = Smi::New(0);
1422 }
1423 table->untag()->data()[SetType::kOccupiedEntriesIndex] = Smi::New(count);
1424 return {table, SetType::kFirstKeyIndex, SetType::UnusedMarker().ptr()};
1425 }
1426
1427 static void InitTypeArgsOrNext(ArrayPtr table) {
1428 table->untag()->type_arguments_ = TypeArguments::null();
1429 }
1430 static void InitTypeArgsOrNext(WeakArrayPtr table) {
1431 table->untag()->next_seen_by_gc_ = WeakArray::null();
1432 }
1433};
1434
1435#if !defined(DART_PRECOMPILED_RUNTIME)
1437 public:
1439 : SerializationCluster("TypeParameters",
1440 kTypeParametersCid,
1441 compiler::target::TypeParameters::InstanceSize()) {
1442 }
1444
1445 void Trace(Serializer* s, ObjectPtr object) {
1446 TypeParametersPtr type_params = TypeParameters::RawCast(object);
1447 objects_.Add(type_params);
1448 PushFromTo(type_params);
1449 }
1450
1452 const intptr_t count = objects_.length();
1453 s->WriteUnsigned(count);
1454 for (intptr_t i = 0; i < count; i++) {
1455 TypeParametersPtr type_params = objects_[i];
1456 s->AssignRef(type_params);
1457 }
1458 }
1459
1461 const intptr_t count = objects_.length();
1462 for (intptr_t i = 0; i < count; i++) {
1463 TypeParametersPtr type_params = objects_[i];
1464 AutoTraceObject(type_params);
1465 WriteFromTo(type_params);
1466 }
1467 }
1468
1469 private:
1471};
1472#endif // !DART_PRECOMPILED_RUNTIME
1473
1475 public:
1479
1483
1484  void ReadFill(Deserializer* d_) override {
1485    Deserializer::Local d(d_);
1486
1487 ASSERT(!is_canonical()); // Never canonical.
1488 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1489 TypeParametersPtr type_params = static_cast<TypeParametersPtr>(d.Ref(id));
1490 Deserializer::InitializeHeader(type_params, kTypeParametersCid,
1492 d.ReadFromTo(type_params);
1493 }
1494 }
1495};
1496
1497#if !defined(DART_PRECOMPILED_RUNTIME)
1499 : public CanonicalSetSerializationCluster<CanonicalTypeArgumentsSet,
1500 TypeArguments,
1501 TypeArgumentsPtr> {
1502 public:
1504 bool represents_canonical_set)
1505 : CanonicalSetSerializationCluster(kTypeArgumentsCid,
1507 represents_canonical_set,
1508 "TypeArguments") {}
1510
1511 void Trace(Serializer* s, ObjectPtr object) {
1512 TypeArgumentsPtr type_args = TypeArguments::RawCast(object);
1513 objects_.Add(type_args);
1514
1515 s->Push(type_args->untag()->instantiations());
1516 const intptr_t length = Smi::Value(type_args->untag()->length());
1517 for (intptr_t i = 0; i < length; i++) {
1518 s->Push(type_args->untag()->element(i));
1519 }
1520 }
1521
1523 const intptr_t count = objects_.length();
1524 s->WriteUnsigned(count);
1526 for (intptr_t i = 0; i < count; i++) {
1527 TypeArgumentsPtr type_args = objects_[i];
1528 s->AssignRef(type_args);
1529 AutoTraceObject(type_args);
1530 const intptr_t length = Smi::Value(type_args->untag()->length());
1531 s->WriteUnsigned(length);
1533 compiler::target::TypeArguments::InstanceSize(length);
1534 }
1536 }
1537
1539 const intptr_t count = objects_.length();
1540 for (intptr_t i = 0; i < count; i++) {
1541 TypeArgumentsPtr type_args = objects_[i];
1542 AutoTraceObject(type_args);
1543 const intptr_t length = Smi::Value(type_args->untag()->length());
1544 s->WriteUnsigned(length);
1545 intptr_t hash = Smi::Value(type_args->untag()->hash());
1546 s->Write<int32_t>(hash);
1547 const intptr_t nullability =
1548 Smi::Value(type_args->untag()->nullability());
1549 s->WriteUnsigned(nullability);
1550 WriteField(type_args, instantiations());
1551 for (intptr_t j = 0; j < length; j++) {
1552 s->WriteElementRef(type_args->untag()->element(j), j);
1553 }
1554 }
1555 }
1556};
1557#endif // !DART_PRECOMPILED_RUNTIME
1558
1560 : public CanonicalSetDeserializationCluster<CanonicalTypeArgumentsSet> {
1561 public:
1563 bool is_root_unit)
1565 is_root_unit,
1566 "TypeArguments") {}
1568
1569 void ReadAlloc(Deserializer* d) override {
1570 start_index_ = d->next_index();
1571 const intptr_t count = d->ReadUnsigned();
1572 for (intptr_t i = 0; i < count; i++) {
1573 const intptr_t length = d->ReadUnsigned();
1574 d->AssignRef(d->Allocate(TypeArguments::InstanceSize(length)));
1575 }
1576 stop_index_ = d->next_index();
1578 }
1579
1580  void ReadFill(Deserializer* d_) override {
1581    Deserializer::Local d(d_);
1582
1583 const bool mark_canonical = is_root_unit_ && is_canonical();
1584 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1585 TypeArgumentsPtr type_args = static_cast<TypeArgumentsPtr>(d.Ref(id));
1586 const intptr_t length = d.ReadUnsigned();
1587 Deserializer::InitializeHeader(type_args, kTypeArgumentsCid,
1589 mark_canonical);
1590 type_args->untag()->length_ = Smi::New(length);
1591 type_args->untag()->hash_ = Smi::New(d.Read<int32_t>());
1592 type_args->untag()->nullability_ = Smi::New(d.ReadUnsigned());
1593 type_args->untag()->instantiations_ = static_cast<ArrayPtr>(d.ReadRef());
1594 for (intptr_t j = 0; j < length; j++) {
1595 type_args->untag()->types()[j] =
1596 static_cast<AbstractTypePtr>(d.ReadRef());
1597 }
1598 }
1599 }
1600
1601 void PostLoad(Deserializer* d, const Array& refs) override {
1602 if (!table_.IsNull()) {
1603 auto object_store = d->isolate_group()->object_store();
1605 d, refs, Array::Handle(object_store->canonical_type_arguments()));
1606 object_store->set_canonical_type_arguments(table_);
1607 } else if (!is_root_unit_ && is_canonical()) {
1608 TypeArguments& type_arg = TypeArguments::Handle(d->zone());
1609 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1610 type_arg ^= refs.At(i);
1611 type_arg = type_arg.Canonicalize(d->thread());
1612 refs.SetAt(i, type_arg);
1613 }
1614 }
1615 }
1616};
1617
1618#if !defined(DART_PRECOMPILED_RUNTIME)
1620 public:
1622 : SerializationCluster("PatchClass",
1623 kPatchClassCid,
1624 compiler::target::PatchClass::InstanceSize()) {}
1626
1627 void Trace(Serializer* s, ObjectPtr object) {
1628 PatchClassPtr cls = PatchClass::RawCast(object);
1629 objects_.Add(cls);
1630 PushFromTo(cls);
1631 }
1632
1634 const intptr_t count = objects_.length();
1635 s->WriteUnsigned(count);
1636 for (intptr_t i = 0; i < count; i++) {
1637 PatchClassPtr cls = objects_[i];
1638 s->AssignRef(cls);
1639 }
1640 }
1641
1643 const intptr_t count = objects_.length();
1644 for (intptr_t i = 0; i < count; i++) {
1645 PatchClassPtr cls = objects_[i];
1646 AutoTraceObject(cls);
1647 WriteFromTo(cls);
1648 if (s->kind() != Snapshot::kFullAOT) {
1649 s->Write<int32_t>(cls->untag()->kernel_library_index_);
1650 }
1651 }
1652 }
1653
1654 private:
1656};
1657#endif // !DART_PRECOMPILED_RUNTIME
1658
1660 public:
1663
1667
1668  void ReadFill(Deserializer* d_) override {
1669    Deserializer::Local d(d_);
1670
1671 ASSERT(!is_canonical()); // Never canonical.
1672 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1673 PatchClassPtr cls = static_cast<PatchClassPtr>(d.Ref(id));
1674 Deserializer::InitializeHeader(cls, kPatchClassCid,
1676 d.ReadFromTo(cls);
1677#if !defined(DART_PRECOMPILED_RUNTIME)
1678 ASSERT(d_->kind() != Snapshot::kFullAOT);
1679 cls->untag()->kernel_library_index_ = d.Read<int32_t>();
1680#endif
1681 }
1682 }
1683};
1684
1685#if !defined(DART_PRECOMPILED_RUNTIME)
1687 public:
1689 : SerializationCluster("Function",
1690 kFunctionCid,
1691 compiler::target::Function::InstanceSize()) {}
1693
1694 void Trace(Serializer* s, ObjectPtr object) {
1695 Snapshot::Kind kind = s->kind();
1696 FunctionPtr func = Function::RawCast(object);
1697 objects_.Add(func);
1698
1699 PushFromTo(func);
1700 if (kind == Snapshot::kFullAOT) {
1701 s->Push(func->untag()->code());
1702 } else if (kind == Snapshot::kFullJIT) {
1703 NOT_IN_PRECOMPILED(s->Push(func->untag()->unoptimized_code()));
1704 s->Push(func->untag()->code());
1705 s->Push(func->untag()->ic_data_array());
1706 }
1707 if (kind != Snapshot::kFullAOT) {
1708 NOT_IN_PRECOMPILED(s->Push(func->untag()->positional_parameter_names()));
1709 }
1710 }
1711
1713 const intptr_t count = objects_.length();
1714 s->WriteUnsigned(count);
1715 for (intptr_t i = 0; i < count; i++) {
1716 FunctionPtr func = objects_[i];
1717 s->AssignRef(func);
1718 }
1719 }
1720
1722 Snapshot::Kind kind = s->kind();
1723 const intptr_t count = objects_.length();
1724 for (intptr_t i = 0; i < count; i++) {
1725 FunctionPtr func = objects_[i];
1727 WriteFromTo(func);
1728 if (kind == Snapshot::kFullAOT) {
1729#if defined(DART_PRECOMPILER)
1730 CodePtr code = func->untag()->code();
1731 const auto code_index = s->GetCodeIndex(code);
1732 s->WriteUnsigned(code_index);
1733 s->AttributePropertyRef(code, "code_");
1734#else
1735 UNREACHABLE();
1736#endif
1737 } else if (s->kind() == Snapshot::kFullJIT) {
1738 NOT_IN_PRECOMPILED(WriteCompressedField(func, unoptimized_code));
1739 WriteCompressedField(func, code);
1740 WriteCompressedField(func, ic_data_array);
1741 }
1742
1743 if (kind != Snapshot::kFullAOT) {
1745 WriteCompressedField(func, positional_parameter_names));
1746 }
1747
1748#if defined(DART_PRECOMPILER) && !defined(PRODUCT)
1749 TokenPosition token_pos = func->untag()->token_pos_;
1750 if (kind == Snapshot::kFullAOT) {
1751        // We use the token_pos property to store the line number
1752 // in AOT snapshots.
1753 intptr_t line = -1;
1754 const Function& function = Function::Handle(func);
1755 const Script& script = Script::Handle(function.script());
1756 if (!script.IsNull()) {
1757 script.GetTokenLocation(token_pos, &line, nullptr);
1758 }
1759 token_pos = line == -1 ? TokenPosition::kNoSource
1761 }
1762 s->WriteTokenPosition(token_pos);
1763#else
1764 if (kind != Snapshot::kFullAOT) {
1765 s->WriteTokenPosition(func->untag()->token_pos_);
1766 }
1767#endif
1768 if (kind != Snapshot::kFullAOT) {
1769 s->WriteTokenPosition(func->untag()->end_token_pos_);
1770 s->Write<uint32_t>(func->untag()->kernel_offset_);
1771 s->Write<uint32_t>(func->untag()->packed_fields_);
1772 }
1773 s->Write<uint32_t>(func->untag()->kind_tag_);
1774 }
1775 }
1776
1778 FunctionPtr f) {
1779 if (s->profile_writer() == nullptr) {
1780 return nullptr;
1781 }
1782
1784 Function& fun = reused_function_handle.Handle();
1785 fun = f;
1786 ZoneTextBuffer printer(s->thread()->zone());
1789 &printer);
1790 return printer.buffer();
1791 }
1792
1793 private:
1795};
1796#endif // !DART_PRECOMPILED_RUNTIME
1797
1798template <bool need_entry_point_for_non_discarded>
1799DART_FORCE_INLINE static CodePtr GetCodeAndEntryPointByIndex(
1800 const Deserializer* d,
1801 intptr_t code_index,
1802 uword* entry_point) {
1803 code_index -= 1; // 0 is reserved for LazyCompile stub.
1804
1805  // In the root unit and VM isolate snapshot, code_indices are self-contained:
1806  // they point into the instruction table and/or into the code cluster.
1807  // In non-root units we might also refer to code objects from the
1808  // parent unit, which means code_index is biased by num_base_objects_.
1809 const intptr_t base = d->is_non_root_unit() ? d->num_base_objects() : 0;
1810 if (code_index < base) {
1811 CodePtr code = static_cast<CodePtr>(d->Ref(code_index));
1812 if (need_entry_point_for_non_discarded) {
1813 *entry_point = Code::EntryPointOf(code);
1814 }
1815 return code;
1816 }
1817 code_index -= base;
1818
1819 // At this point code_index is referring to a code object which is either
1820 // discarded or exists in the Code cluster. Non-discarded Code objects
1821 // are associated with the tail of the instruction table and have the
1822 // same order there and in the Code cluster. This means that
1823 // subtracting first_entry_with_code yields index into the Code cluster.
1824 // This also works for deferred code objects in root unit's snapshot
1825 // due to the choice of encoding (see Serializer::GetCodeIndex).
1826 const intptr_t first_entry_with_code =
1827 d->instructions_table().rodata()->first_entry_with_code;
1828 if (code_index < first_entry_with_code) {
1829 *entry_point = d->instructions_table().EntryPointAt(code_index);
1830 return StubCode::UnknownDartCode().ptr();
1831 } else {
1832 const intptr_t cluster_index = code_index - first_entry_with_code;
1833 CodePtr code =
1834 static_cast<CodePtr>(d->Ref(d->code_start_index() + cluster_index));
1835 if (need_entry_point_for_non_discarded) {
1836 *entry_point = Code::EntryPointOf(code);
1837 }
1838 return code;
1839 }
1840}
1841
1842CodePtr Deserializer::GetCodeByIndex(intptr_t code_index,
1843 uword* entry_point) const {
1844 // See Serializer::GetCodeIndex for how code_index is encoded.
1845 if (code_index == 0) {
1846 return StubCode::LazyCompile().ptr();
1847 } else if (FLAG_precompiled_mode) {
1849 /*need_entry_point_for_non_discarded=*/false>(this, code_index,
1850 entry_point);
1851 } else {
1852 // -1 below because 0 is reserved for LazyCompile stub.
1853 const intptr_t ref = code_start_index_ + code_index - 1;
1854 ASSERT(code_start_index_ <= ref && ref < code_stop_index_);
1855 return static_cast<CodePtr>(Ref(ref));
1856 }
1857}
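// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original app_snapshot.cc source): a
// worked example of how an AOT code_index is decoded by the two functions
// above, assuming a root unit (base == 0) and a hypothetical
// first_entry_with_code of 10. The concrete numbers are made up.
//
//   code_index == 0   -> the LazyCompile stub (handled before decoding).
//   code_index == 5   -> 5 - 1 == 4 < 10: a discarded Code object; the entry
//                        point is taken from the instructions table at
//                        index 4 and UnknownDartCode is returned.
//   code_index == 12  -> 12 - 1 - 10 == 1: the second object in the Code
//                        cluster, i.e. Ref(code_start_index() + 1).
// ---------------------------------------------------------------------------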
1858
1860 intptr_t code_index) {
1861 // Note: the code indices we are interpreting here originate from the root
1862 // loading unit, which means base is equal to 0.
1863 // See the comments above which clarify the connection between code_index
1864 // and the index into the Code cluster.
1865 ASSERT(FLAG_precompiled_mode);
1866 const intptr_t first_entry_with_code = table.rodata()->first_entry_with_code;
1867 return code_index - 1 - first_entry_with_code;
1868}
1869
1871 // See Deserializer::GetCodeByIndex which this code repeats.
1872 ASSERT(FLAG_precompiled_mode);
1873 uword entry_point = 0;
1874 GetCodeAndEntryPointByIndex</*need_entry_point_for_non_discarded=*/true>(
1875 this, code_index, &entry_point);
1876 return entry_point;
1877}
1878
1880 public:
1883
1887
1888 void ReadFill(Deserializer* d_) override {
1890
1891 ASSERT(!is_canonical()); // Never canonical.
1892 Snapshot::Kind kind = d_->kind();
1893
1894 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1895 FunctionPtr func = static_cast<FunctionPtr>(d.Ref(id));
1896 Deserializer::InitializeHeader(func, kFunctionCid,
1898 d.ReadFromTo(func);
1899
1900#if defined(DEBUG)
1901 func->untag()->entry_point_ = 0;
1902 func->untag()->unchecked_entry_point_ = 0;
1903#endif
1904
1905#if defined(DART_PRECOMPILED_RUNTIME)
1906 ASSERT(kind == Snapshot::kFullAOT);
1907 const intptr_t code_index = d.ReadUnsigned();
1908 uword entry_point = 0;
1909 CodePtr code = d_->GetCodeByIndex(code_index, &entry_point);
1910 func->untag()->code_ = code;
1911 if (entry_point != 0) {
1912 func->untag()->entry_point_ = entry_point;
1913 func->untag()->unchecked_entry_point_ = entry_point;
1914 }
1915#else
1916 ASSERT(kind != Snapshot::kFullAOT);
1917 if (kind == Snapshot::kFullJIT) {
1918 func->untag()->unoptimized_code_ = static_cast<CodePtr>(d.ReadRef());
1919 func->untag()->code_ = static_cast<CodePtr>(d.ReadRef());
1920 func->untag()->ic_data_array_ = static_cast<ArrayPtr>(d.ReadRef());
1921 }
1922#endif
1923
1924#if !defined(DART_PRECOMPILED_RUNTIME)
1925 ASSERT(kind != Snapshot::kFullAOT);
1926 func->untag()->positional_parameter_names_ =
1927 static_cast<ArrayPtr>(d.ReadRef());
1928#endif
1929#if !defined(DART_PRECOMPILED_RUNTIME) || \
1930 (defined(DART_PRECOMPILED_RUNTIME) && !defined(PRODUCT))
1931 func->untag()->token_pos_ = d.ReadTokenPosition();
1932#endif
1933#if !defined(DART_PRECOMPILED_RUNTIME)
1934 func->untag()->end_token_pos_ = d.ReadTokenPosition();
1935 func->untag()->kernel_offset_ = d.Read<uint32_t>();
1936 func->untag()->unboxed_parameters_info_.Reset();
1937 func->untag()->packed_fields_ = d.Read<uint32_t>();
1938#endif
1939
1940 func->untag()->kind_tag_ = d.Read<uint32_t>();
1941#if !defined(DART_PRECOMPILED_RUNTIME)
1942 func->untag()->usage_counter_ = 0;
1943 func->untag()->optimized_instruction_count_ = 0;
1944 func->untag()->optimized_call_site_count_ = 0;
1945 func->untag()->deoptimization_counter_ = 0;
1946 func->untag()->state_bits_ = 0;
1947 func->untag()->inlining_depth_ = 0;
1948#endif
1949 }
1950 }
1951
1952 void PostLoad(Deserializer* d, const Array& refs) override {
1953 if (d->kind() == Snapshot::kFullAOT) {
1954 Function& func = Function::Handle(d->zone());
1955 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1956 func ^= refs.At(i);
1957 auto const code = func.ptr()->untag()->code();
1958 ASSERT(code->IsCode());
1959 if (!Code::IsUnknownDartCode(code)) {
1960 uword entry_point = code->untag()->entry_point_;
1961 ASSERT(entry_point != 0);
1962 func.ptr()->untag()->entry_point_ = entry_point;
1963 uword unchecked_entry_point = code->untag()->unchecked_entry_point_;
1964 ASSERT(unchecked_entry_point != 0);
1965 func.ptr()->untag()->unchecked_entry_point_ = unchecked_entry_point;
1966 }
1967 }
1968 } else if (d->kind() == Snapshot::kFullJIT) {
1969 Function& func = Function::Handle(d->zone());
1970 Code& code = Code::Handle(d->zone());
1971 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1972 func ^= refs.At(i);
1973 code = func.CurrentCode();
1974 if (func.HasCode() && !code.IsDisabled()) {
1975 func.SetInstructionsSafe(code); // Set entrypoint.
1976 func.SetWasCompiled(true);
1977 } else {
1978 func.ClearCodeSafe(); // Set code and entrypoint to lazy compile stub.
1979 }
1980 }
1981 } else {
1982 Function& func = Function::Handle(d->zone());
1983 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1984 func ^= refs.At(i);
1985 func.ClearCodeSafe(); // Set code and entrypoint to lazy compile stub.
1986 }
1987 }
1988 }
1989};
1990
1991#if !defined(DART_PRECOMPILED_RUNTIME)
1993 public:
1995 : SerializationCluster("ClosureData",
1996 kClosureDataCid,
1997 compiler::target::ClosureData::InstanceSize()) {}
1999
2000 void Trace(Serializer* s, ObjectPtr object) {
2001 ClosureDataPtr data = ClosureData::RawCast(object);
2002 objects_.Add(data);
2003
2004 if (s->kind() != Snapshot::kFullAOT) {
2005 s->Push(data->untag()->context_scope());
2006 }
2007 s->Push(data->untag()->parent_function());
2008 s->Push(data->untag()->closure());
2009 }
2010
2012 const intptr_t count = objects_.length();
2013 s->WriteUnsigned(count);
2014 for (intptr_t i = 0; i < count; i++) {
2015 ClosureDataPtr data = objects_[i];
2016 s->AssignRef(data);
2017 }
2018 }
2019
2021 const intptr_t count = objects_.length();
2022 for (intptr_t i = 0; i < count; i++) {
2023 ClosureDataPtr data = objects_[i];
2025 if (s->kind() != Snapshot::kFullAOT) {
2026 WriteCompressedField(data, context_scope);
2027 }
2028 WriteCompressedField(data, parent_function);
2029 WriteCompressedField(data, closure);
2030 s->WriteUnsigned(static_cast<uint32_t>(data->untag()->packed_fields_));
2031 }
2032 }
2033
2034 private:
2036};
2037#endif // !DART_PRECOMPILED_RUNTIME
2038
2040 public:
2043
2047
2048 void ReadFill(Deserializer* d_) override {
2050
2051 ASSERT(!is_canonical()); // Never canonical.
2052 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2053 ClosureDataPtr data = static_cast<ClosureDataPtr>(d.Ref(id));
2054 Deserializer::InitializeHeader(data, kClosureDataCid,
2056 if (d_->kind() == Snapshot::kFullAOT) {
2057 data->untag()->context_scope_ = ContextScope::null();
2058 } else {
2059 data->untag()->context_scope_ =
2060 static_cast<ContextScopePtr>(d.ReadRef());
2061 }
2062 data->untag()->parent_function_ = static_cast<FunctionPtr>(d.ReadRef());
2063 data->untag()->closure_ = static_cast<ClosurePtr>(d.ReadRef());
2064 data->untag()->packed_fields_ = d.ReadUnsigned<uint32_t>();
2065 }
2066 }
2067};
2068
2069#if !defined(DART_PRECOMPILED_RUNTIME)
2071 public:
2074 "FfiTrampolineData",
2075 kFfiTrampolineDataCid,
2076 compiler::target::FfiTrampolineData::InstanceSize()) {}
2078
2079 void Trace(Serializer* s, ObjectPtr object) {
2080 FfiTrampolineDataPtr data = FfiTrampolineData::RawCast(object);
2081 objects_.Add(data);
2083 }
2084
2086 const intptr_t count = objects_.length();
2087 s->WriteUnsigned(count);
2088 for (intptr_t i = 0; i < count; i++) {
2089 s->AssignRef(objects_[i]);
2090 }
2091 }
2092
2094 const intptr_t count = objects_.length();
2095 for (intptr_t i = 0; i < count; i++) {
2096 FfiTrampolineDataPtr const data = objects_[i];
2099 s->Write<int32_t>(data->untag()->callback_id_);
2100 s->Write<uint8_t>(data->untag()->ffi_function_kind_);
2101 }
2102 }
2103
2104 private:
2106};
2107#endif // !DART_PRECOMPILED_RUNTIME
2108
2110 public:
2114
2118
2119 void ReadFill(Deserializer* d_) override {
2121
2122 ASSERT(!is_canonical()); // Never canonical.
2123 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2124 FfiTrampolineDataPtr data = static_cast<FfiTrampolineDataPtr>(d.Ref(id));
2125 Deserializer::InitializeHeader(data, kFfiTrampolineDataCid,
2127 d.ReadFromTo(data);
2128 data->untag()->callback_id_ = d.Read<int32_t>();
2129 data->untag()->ffi_function_kind_ = d.Read<uint8_t>();
2130 }
2131 }
2132};
2133
2134#if !defined(DART_PRECOMPILED_RUNTIME)
2136 public:
2138 : SerializationCluster("Field",
2139 kFieldCid,
2140 compiler::target::Field::InstanceSize()) {}
2142
2143 void Trace(Serializer* s, ObjectPtr object) {
2144 FieldPtr field = Field::RawCast(object);
2145 objects_.Add(field);
2146
2147 Snapshot::Kind kind = s->kind();
2148
2149 s->Push(field->untag()->name());
2150 s->Push(field->untag()->owner());
2151 s->Push(field->untag()->type());
2152 // Write out the initializer function
2153 s->Push(field->untag()->initializer_function());
2154
2155 if (kind != Snapshot::kFullAOT) {
2156 s->Push(field->untag()->guarded_list_length());
2157 }
2158 if (kind == Snapshot::kFullJIT) {
2159 s->Push(field->untag()->dependent_code());
2160 }
2161 // Write out either the initial static value or field offset.
2162 if (Field::StaticBit::decode(field->untag()->kind_bits_)) {
2163 s->Push(field->untag()->host_offset_or_field_id());
2164 } else {
2165 s->Push(Smi::New(Field::TargetOffsetOf(field)));
2166 }
2167 }
2168
2170 const intptr_t count = objects_.length();
2171 s->WriteUnsigned(count);
2172 for (intptr_t i = 0; i < count; i++) {
2173 FieldPtr field = objects_[i];
2174 s->AssignRef(field);
2175 }
2176 }
2177
2179 Snapshot::Kind kind = s->kind();
2180 const intptr_t count = objects_.length();
2181 for (intptr_t i = 0; i < count; i++) {
2182 FieldPtr field = objects_[i];
2183 AutoTraceObjectName(field, field->untag()->name());
2184
2185 WriteCompressedField(field, name);
2186 WriteCompressedField(field, owner);
2187 WriteCompressedField(field, type);
2188 // Write out the initializer function and initial value if not in AOT.
2189 WriteCompressedField(field, initializer_function);
2190 if (kind != Snapshot::kFullAOT) {
2191 WriteCompressedField(field, guarded_list_length);
2192 }
2193 if (kind == Snapshot::kFullJIT) {
2194 WriteCompressedField(field, dependent_code);
2195 }
2196
2197 if (kind != Snapshot::kFullAOT) {
2198 s->WriteTokenPosition(field->untag()->token_pos_);
2199 s->WriteTokenPosition(field->untag()->end_token_pos_);
2200 s->WriteCid(field->untag()->guarded_cid_);
2201 s->WriteCid(field->untag()->is_nullable_);
2202 s->Write<int8_t>(field->untag()->static_type_exactness_state_);
2203 s->Write<uint32_t>(field->untag()->kernel_offset_);
2204 }
2205 s->Write<uint16_t>(field->untag()->kind_bits_);
2206
2207 // Write out either the initial static value or field offset.
2208 if (Field::StaticBit::decode(field->untag()->kind_bits_)) {
2209 WriteFieldValue("id", field->untag()->host_offset_or_field_id());
2210 } else {
2212 }
2213 }
2214 }
2215
2216 private:
2217 GrowableArray<FieldPtr> objects_;
2218};
2219#endif // !DART_PRECOMPILED_RUNTIME
2220
2222 public:
2225
2229
2230 void ReadFill(Deserializer* d_) override {
2232
2233 ASSERT(!is_canonical()); // Never canonical.
2234#if !defined(DART_PRECOMPILED_RUNTIME)
2235 Snapshot::Kind kind = d_->kind();
2236#endif
2237 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2238 FieldPtr field = static_cast<FieldPtr>(d.Ref(id));
2240 d.ReadFromTo(field);
2241#if !defined(DART_PRECOMPILED_RUNTIME)
2242 ASSERT(d_->kind() != Snapshot::kFullAOT);
2243 field->untag()->guarded_list_length_ = static_cast<SmiPtr>(d.ReadRef());
2244 if (kind == Snapshot::kFullJIT) {
2245 field->untag()->dependent_code_ =
2246 static_cast<WeakArrayPtr>(d.ReadRef());
2247 }
2248 field->untag()->token_pos_ = d.ReadTokenPosition();
2249 field->untag()->end_token_pos_ = d.ReadTokenPosition();
2250 field->untag()->guarded_cid_ = d.ReadCid();
2251 field->untag()->is_nullable_ = d.ReadCid();
2252 const int8_t static_type_exactness_state = d.Read<int8_t>();
2253#if defined(TARGET_ARCH_X64)
2254 field->untag()->static_type_exactness_state_ =
2255 static_type_exactness_state;
2256#else
2257 // We might produce core snapshots using an X64 VM and then consume
2258 // them in an IA32 or ARM VM, in which case we need to simply ignore
2259 // the static type exactness state written into the snapshot because
2260 // non-X64 builds don't have this feature enabled.
2261 // TODO(dartbug.com/34170) Support other architectures.
2262 USE(static_type_exactness_state);
2263 field->untag()->static_type_exactness_state_ =
2265#endif // defined(TARGET_ARCH_X64)
2266 field->untag()->kernel_offset_ = d.Read<uint32_t>();
2267#endif
2268 field->untag()->kind_bits_ = d.Read<uint16_t>();
2269
2270 field->untag()->host_offset_or_field_id_ =
2271 static_cast<SmiPtr>(d.ReadRef());
2272#if !defined(DART_PRECOMPILED_RUNTIME)
2273 field->untag()->target_offset_ =
2274 Smi::Value(field->untag()->host_offset_or_field_id());
2275#endif // !defined(DART_PRECOMPILED_RUNTIME)
2276 }
2277 }
2278
2279 void PostLoad(Deserializer* d, const Array& refs) override {
2280 Field& field = Field::Handle(d->zone());
2281 if (!IsolateGroup::Current()->use_field_guards()) {
2282 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
2283 field ^= refs.At(i);
2285 field.set_is_nullable_unsafe(true);
2291 }
2292 } else {
2293 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
2294 field ^= refs.At(i);
2295 field.InitializeGuardedListLengthInObjectOffset(/*unsafe=*/true);
2296 }
2297 }
2298 }
2299};
2300
2301#if !defined(DART_PRECOMPILED_RUNTIME)
2303 public:
2305 : SerializationCluster("Script",
2306 kScriptCid,
2307 compiler::target::Script::InstanceSize()) {}
2309
2310 void Trace(Serializer* s, ObjectPtr object) {
2311 ScriptPtr script = Script::RawCast(object);
2312 objects_.Add(script);
2313 auto* from = script->untag()->from();
2314 auto* to = script->untag()->to_snapshot(s->kind());
2315 for (auto* p = from; p <= to; p++) {
2316 const intptr_t offset =
2317 reinterpret_cast<uword>(p) - reinterpret_cast<uword>(script->untag());
2318 const ObjectPtr obj = p->Decompress(script->heap_base());
2320 // Line starts are delta encoded.
2321 s->Push(obj, kDeltaEncodedTypedDataCid);
2322 } else {
2323 s->Push(obj);
2324 }
2325 }
2326 }
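// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original app_snapshot.cc source): the
// "delta encoded" typed data pushed above stores a script's monotonically
// increasing line-start offsets as differences between consecutive values, so
// each entry stays small when written with a variable-length unsigned
// encoding. The helper below is hypothetical and only shows the idea.
//
//   static void DeltaEncode(const GrowableArray<uint32_t>& line_starts,
//                           GrowableArray<uint32_t>* deltas) {
//     uint32_t previous = 0;
//     for (intptr_t i = 0; i < line_starts.length(); i++) {
//       deltas->Add(line_starts[i] - previous);  // e.g. 0, 14, 32 -> 0, 14, 18
//       previous = line_starts[i];
//     }
//   }
// ---------------------------------------------------------------------------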
2327
2329 const intptr_t count = objects_.length();
2330 s->WriteUnsigned(count);
2331 for (intptr_t i = 0; i < count; i++) {
2332 ScriptPtr script = objects_[i];
2333 s->AssignRef(script);
2334 }
2335 }
2336
2338 const intptr_t count = objects_.length();
2339 for (intptr_t i = 0; i < count; i++) {
2340 ScriptPtr script = objects_[i];
2341 AutoTraceObjectName(script, script->untag()->url());
2342 WriteFromTo(script);
2343 if (s->kind() != Snapshot::kFullAOT) {
2344 // Clear out the max position cache in snapshots to ensure no
2345 // differences in the snapshot due to triggering caching vs. not.
2346 int32_t written_flags =
2348 0, script->untag()->flags_and_max_position_);
2350 false, written_flags);
2351 s->Write<int32_t>(written_flags);
2352 }
2353 s->Write<int32_t>(script->untag()->kernel_script_index_);
2354 }
2355 }
2356
2357 private:
2358 GrowableArray<ScriptPtr> objects_;
2359};
2360#endif // !DART_PRECOMPILED_RUNTIME
2361
2363 public:
2366
2370
2371 void ReadFill(Deserializer* d_) override {
2373
2374 ASSERT(!is_canonical()); // Never canonical.
2375 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2376 ScriptPtr script = static_cast<ScriptPtr>(d.Ref(id));
2377 Deserializer::InitializeHeader(script, kScriptCid,
2379 d.ReadFromTo(script);
2380#if !defined(DART_PRECOMPILED_RUNTIME)
2381 script->untag()->flags_and_max_position_ = d.Read<int32_t>();
2382#endif
2383 script->untag()->kernel_script_index_ = d.Read<int32_t>();
2384 script->untag()->load_timestamp_ = 0;
2385 }
2386 }
2387};
2388
2389#if !defined(DART_PRECOMPILED_RUNTIME)
2391 public:
2393 : SerializationCluster("Library",
2394 kLibraryCid,
2395 compiler::target::Library::InstanceSize()) {}
2397
2398 void Trace(Serializer* s, ObjectPtr object) {
2399 LibraryPtr lib = Library::RawCast(object);
2400 objects_.Add(lib);
2401 PushFromTo(lib);
2402 }
2403
2405 const intptr_t count = objects_.length();
2406 s->WriteUnsigned(count);
2407 for (intptr_t i = 0; i < count; i++) {
2408 LibraryPtr lib = objects_[i];
2409 s->AssignRef(lib);
2410 }
2411 }
2412
2414 const intptr_t count = objects_.length();
2415 for (intptr_t i = 0; i < count; i++) {
2416 LibraryPtr lib = objects_[i];
2417 AutoTraceObjectName(lib, lib->untag()->url());
2418 WriteFromTo(lib);
2419 s->Write<int32_t>(lib->untag()->index_);
2420 s->Write<uint16_t>(lib->untag()->num_imports_);
2421 s->Write<int8_t>(lib->untag()->load_state_);
2422 s->Write<uint8_t>(lib->untag()->flags_);
2423 if (s->kind() != Snapshot::kFullAOT) {
2424 s->Write<uint32_t>(lib->untag()->kernel_library_index_);
2425 }
2426 }
2427 }
2428
2429 private:
2431};
2432#endif // !DART_PRECOMPILED_RUNTIME
2433
2435 public:
2438
2442
2443 void ReadFill(Deserializer* d_) override {
2445
2446 ASSERT(!is_canonical()); // Never canonical.
2447 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2448 LibraryPtr lib = static_cast<LibraryPtr>(d.Ref(id));
2450 d.ReadFromTo(lib);
2451 lib->untag()->native_entry_resolver_ = nullptr;
2452 lib->untag()->native_entry_symbol_resolver_ = nullptr;
2453 lib->untag()->ffi_native_resolver_ = nullptr;
2454 lib->untag()->index_ = d.Read<int32_t>();
2455 lib->untag()->num_imports_ = d.Read<uint16_t>();
2456 lib->untag()->load_state_ = d.Read<int8_t>();
2457 lib->untag()->flags_ =
2458 UntaggedLibrary::InFullSnapshotBit::update(true, d.Read<uint8_t>());
2459#if !defined(DART_PRECOMPILED_RUNTIME)
2460 ASSERT(d_->kind() != Snapshot::kFullAOT);
2461 lib->untag()->kernel_library_index_ = d.Read<uint32_t>();
2462#endif
2463 }
2464 }
2465};
2466
2467#if !defined(DART_PRECOMPILED_RUNTIME)
2469 public:
2471 : SerializationCluster("Namespace",
2472 kNamespaceCid,
2473 compiler::target::Namespace::InstanceSize()) {}
2475
2476 void Trace(Serializer* s, ObjectPtr object) {
2477 NamespacePtr ns = Namespace::RawCast(object);
2478 objects_.Add(ns);
2479 PushFromTo(ns);
2480 }
2481
2483 const intptr_t count = objects_.length();
2484 s->WriteUnsigned(count);
2485 for (intptr_t i = 0; i < count; i++) {
2486 NamespacePtr ns = objects_[i];
2487 s->AssignRef(ns);
2488 }
2489 }
2490
2492 const intptr_t count = objects_.length();
2493 for (intptr_t i = 0; i < count; i++) {
2494 NamespacePtr ns = objects_[i];
2495 AutoTraceObject(ns);
2496 WriteFromTo(ns);
2497 }
2498 }
2499
2500 private:
2502};
2503#endif // !DART_PRECOMPILED_RUNTIME
2504
2506 public:
2509
2513
2514 void ReadFill(Deserializer* d_) override {
2516
2517 ASSERT(!is_canonical()); // Never canonical.
2518 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2519 NamespacePtr ns = static_cast<NamespacePtr>(d.Ref(id));
2520 Deserializer::InitializeHeader(ns, kNamespaceCid,
2522 d.ReadFromTo(ns);
2523 }
2524 }
2525};
2526
2527#if !defined(DART_PRECOMPILED_RUNTIME)
2528// KernelProgramInfo objects are not written into a full AOT snapshot.
2530 public:
2533 "KernelProgramInfo",
2534 kKernelProgramInfoCid,
2535 compiler::target::KernelProgramInfo::InstanceSize()) {}
2537
2538 void Trace(Serializer* s, ObjectPtr object) {
2539 KernelProgramInfoPtr info = KernelProgramInfo::RawCast(object);
2540 objects_.Add(info);
2542 }
2543
2545 const intptr_t count = objects_.length();
2546 s->WriteUnsigned(count);
2547 for (intptr_t i = 0; i < count; i++) {
2548 KernelProgramInfoPtr info = objects_[i];
2549 s->AssignRef(info);
2550 }
2551 }
2552
2554 const intptr_t count = objects_.length();
2555 for (intptr_t i = 0; i < count; i++) {
2556 KernelProgramInfoPtr info = objects_[i];
2559 }
2560 }
2561
2562 private:
2564};
2565
2566// Since KernelProgramInfo objects are not written into full AOT snapshots,
2567// one will never need to read them from a full AOT snapshot.
2569 public:
2573
2577
2578 void ReadFill(Deserializer* d_) override {
2580
2581 ASSERT(!is_canonical()); // Never canonical.
2582 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2583 KernelProgramInfoPtr info = static_cast<KernelProgramInfoPtr>(d.Ref(id));
2584 Deserializer::InitializeHeader(info, kKernelProgramInfoCid,
2586 d.ReadFromTo(info);
2587 }
2588 }
2589
2590 void PostLoad(Deserializer* d, const Array& refs) override {
2591 Array& array = Array::Handle(d->zone());
2593 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2594 info ^= refs.At(id);
2595 array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld);
2596 info.set_libraries_cache(array);
2597 array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld);
2598 info.set_classes_cache(array);
2599 }
2600 }
2601};
2602
2604 public:
2606 : SerializationCluster("Code", kCodeCid), array_(Array::Handle()) {}
2608
2609 void Trace(Serializer* s, ObjectPtr object) {
2610 CodePtr code = Code::RawCast(object);
2611
2612 const bool is_deferred = !s->InCurrentLoadingUnitOrRoot(code);
2613 if (is_deferred) {
2614 s->RecordDeferredCode(code);
2615 } else {
2616 objects_.Add(code);
2617 }
2618
2619 // Even if this code object is itself deferred, we still need to scan
2620 // the pool for references to other code objects (which might reside
2621 // in the current loading unit).
2622 ObjectPoolPtr pool = code->untag()->object_pool_;
2623 if (s->kind() == Snapshot::kFullAOT) {
2624 TracePool(s, pool, /*only_call_targets=*/is_deferred);
2625 } else {
2626 if (s->InCurrentLoadingUnitOrRoot(pool)) {
2627 s->Push(pool);
2628 } else {
2629 TracePool(s, pool, /*only_call_targets=*/true);
2630 }
2631 }
2632
2633 if (s->kind() == Snapshot::kFullJIT) {
2634 s->Push(code->untag()->deopt_info_array_);
2635 s->Push(code->untag()->static_calls_target_table_);
2636 s->Push(code->untag()->compressed_stackmaps_);
2637 } else if (s->kind() == Snapshot::kFullAOT) {
2638 // Note: we don't trace compressed_stackmaps_ because we are going to emit
2639 // a separate mapping table into RO data, which is not going to be a real
2640 // heap object.
2641#if defined(DART_PRECOMPILER)
2642 auto const calls_array = code->untag()->static_calls_target_table_;
2643 if (calls_array != Array::null()) {
2644 // Some Code entries in the static calls target table may only be
2645 // reachable from here, so push those Code objects.
2646 array_ = calls_array;
2647 for (auto entry : StaticCallsTable(array_)) {
2648 auto kind = Code::KindField::decode(
2650 switch (kind) {
2651 case Code::kCallViaCode:
2652 // Code object in the pool.
2653 continue;
2655 // The TTS will be reachable through the type object, which is itself
2656 // in the pool.
2657 continue;
2660 auto destination = entry.Get<Code::kSCallTableCodeOrTypeTarget>();
2661 ASSERT(destination->IsHeapObject() && destination->IsCode());
2662 s->Push(destination);
2663 }
2664 }
2665 }
2666#else
2667 UNREACHABLE();
2668#endif
2669 }
2670
2671 if (Code::IsDiscarded(code)) {
2672 ASSERT(s->kind() == Snapshot::kFullAOT && FLAG_dwarf_stack_traces_mode &&
2673 !FLAG_retain_code_objects);
2674 // Only object pool entries, static call table entries and the
2675 // compressed stack maps should be pushed.
2676 return;
2677 }
2678
2679 s->Push(code->untag()->owner_);
2680 s->Push(code->untag()->exception_handlers_);
2681 s->Push(code->untag()->pc_descriptors_);
2682 s->Push(code->untag()->catch_entry_);
2683 if (!FLAG_precompiled_mode || !FLAG_dwarf_stack_traces_mode) {
2684 s->Push(code->untag()->inlined_id_to_function_);
2685 if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) {
2686 s->Push(code->untag()->code_source_map_);
2687 }
2688 }
2689#if !defined(PRODUCT)
2690 s->Push(code->untag()->return_address_metadata_);
2691 if (FLAG_code_comments) {
2692 s->Push(code->untag()->comments_);
2693 }
2694#endif
2695 }
2696
2697 void TracePool(Serializer* s, ObjectPoolPtr pool, bool only_call_targets) {
2698 if (pool == ObjectPool::null()) {
2699 return;
2700 }
2701
2702 const intptr_t length = pool->untag()->length_;
2703 uint8_t* entry_bits = pool->untag()->entry_bits();
2704 for (intptr_t i = 0; i < length; i++) {
2705 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
2706 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
2707 const ObjectPtr target = pool->untag()->data()[i].raw_obj_;
2708 // A field is a call target because its initializer may be called
2709 // indirectly by passing the field to the runtime. A const closure
2710 // is a call target because its function may be called indirectly
2711 // via a closure call.
2712 intptr_t cid = target->GetClassIdMayBeSmi();
2713 if (!only_call_targets || (cid == kCodeCid) || (cid == kFunctionCid) ||
2714 (cid == kFieldCid) || (cid == kClosureCid)) {
2715 s->Push(target);
2716 } else if (cid >= kNumPredefinedCids) {
2717 s->Push(s->isolate_group()->class_table()->At(cid));
2718 }
2719 }
2720 }
2721 }
2722
2724 CodePtr code;
2725 intptr_t not_discarded; // 1 if this code was not discarded and
2726 // 0 otherwise.
2728 };
2729
2730 // We sort code objects so that those with the same instructions are
2731 // grouped together, and so that all instructions without an associated
2732 // (non-discarded) Code object are grouped together at the beginning of
2733 // the code section. The InstructionsTable encoding assumes that all
2734 // instructions with non-discarded Code objects are grouped at the end.
2735 //
2736 // Note that in AOT mode we expect all Code objects pointing to
2737 // the same instructions to be deduplicated, because in bare instructions
2738 // mode there is no way to identify which specific Code object (out of
2739 // those which point to the given instructions range) actually corresponds
2740 // to a particular frame.
2742 CodeOrderInfo const* b) {
2743 if (a->not_discarded < b->not_discarded) return -1;
2744 if (a->not_discarded > b->not_discarded) return 1;
2745 if (a->instructions_id < b->instructions_id) return -1;
2746 if (a->instructions_id > b->instructions_id) return 1;
2747 return 0;
2748 }
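// ---------------------------------------------------------------------------
// Illustrative ordering example (not part of the original app_snapshot.cc
// source): given three entries A (discarded, instructions_id 2),
// B (not discarded, instructions_id 1) and C (not discarded,
// instructions_id 1), CompareCodeOrderInfo above yields the order [A, B, C]:
// discarded code first, then non-discarded code grouped by instructions_id,
// which is the layout the InstructionsTable encoding expects.
// ---------------------------------------------------------------------------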
2749
2750 static void Insert(Serializer* s,
2751 GrowableArray<CodeOrderInfo>* order_list,
2752 IntMap<intptr_t>* order_map,
2753 CodePtr code) {
2754 InstructionsPtr instr = code->untag()->instructions_;
2755 intptr_t key = static_cast<intptr_t>(instr);
2756 intptr_t instructions_id = 0;
2757
2758 if (order_map->HasKey(key)) {
2759 // In precompiled mode, code objects which point to the same
2760 // instructions are expected to have been merged.
2761 RELEASE_ASSERT(!FLAG_precompiled_mode);
2762 instructions_id = order_map->Lookup(key);
2763 } else {
2764 instructions_id = order_map->Length() + 1;
2765 order_map->Insert(key, instructions_id);
2766 }
2768 info.code = code;
2769 info.instructions_id = instructions_id;
2770 info.not_discarded = Code::IsDiscarded(code) ? 0 : 1;
2771 order_list->Add(info);
2772 }
2773
2774 static void Sort(Serializer* s, GrowableArray<CodePtr>* codes) {
2776 IntMap<intptr_t> order_map;
2777 for (intptr_t i = 0; i < codes->length(); i++) {
2778 Insert(s, &order_list, &order_map, (*codes)[i]);
2779 }
2780 order_list.Sort(CompareCodeOrderInfo);
2781 ASSERT(order_list.length() == codes->length());
2782 for (intptr_t i = 0; i < order_list.length(); i++) {
2783 (*codes)[i] = order_list[i].code;
2784 }
2785 }
2786
2787 static void Sort(Serializer* s, GrowableArray<Code*>* codes) {
2789 IntMap<intptr_t> order_map;
2790 for (intptr_t i = 0; i < codes->length(); i++) {
2791 Insert(s, &order_list, &order_map, (*codes)[i]->ptr());
2792 }
2793 order_list.Sort(CompareCodeOrderInfo);
2794 ASSERT(order_list.length() == codes->length());
2795 for (intptr_t i = 0; i < order_list.length(); i++) {
2796 *(*codes)[i] = order_list[i].code;
2797 }
2798 }
2799
2801 intptr_t count = 0;
2802 for (auto code : objects_) {
2803 if (!Code::IsDiscarded(code)) {
2804 count++;
2805 }
2806 }
2807 return count;
2808 }
2809
2811 const intptr_t non_discarded_count = NonDiscardedCodeCount();
2812 const intptr_t count = objects_.length();
2813 ASSERT(count == non_discarded_count || (s->kind() == Snapshot::kFullAOT));
2814
2815 first_ref_ = s->next_ref_index();
2816 s->WriteUnsigned(non_discarded_count);
2817 for (auto code : objects_) {
2818 if (!Code::IsDiscarded(code)) {
2819 WriteAlloc(s, code);
2820 } else {
2821 // Mark discarded code unreachable, so that we can later
2822 // assign artificial references to it.
2823 s->heap()->SetObjectId(code, kUnreachableReference);
2824 }
2825 }
2826
2827 s->WriteUnsigned(deferred_objects_.length());
2828 first_deferred_ref_ = s->next_ref_index();
2829 for (auto code : deferred_objects_) {
2830 ASSERT(!Code::IsDiscarded(code));
2831 WriteAlloc(s, code);
2832 }
2833 last_ref_ = s->next_ref_index() - 1;
2834 }
2835
2836 void WriteAlloc(Serializer* s, CodePtr code) {
2837 ASSERT(!Code::IsDiscarded(code));
2838 s->AssignRef(code);
2840 const int32_t state_bits = code->untag()->state_bits_;
2841 s->Write<int32_t>(state_bits);
2842 target_memory_size_ += compiler::target::Code::InstanceSize(0);
2843 }
2844
2846 Snapshot::Kind kind = s->kind();
2847 const intptr_t count = objects_.length();
2848 for (intptr_t i = 0; i < count; i++) {
2849 CodePtr code = objects_[i];
2850#if defined(DART_PRECOMPILER)
2851 if (FLAG_write_v8_snapshot_profile_to != nullptr &&
2852 Code::IsDiscarded(code)) {
2853 s->CreateArtificialNodeIfNeeded(code);
2854 }
2855#endif
2856 // Note: for discarded code this function will not write anything out;
2857 // it is only called to produce information for the snapshot profile.
2858 WriteFill(s, kind, code, /*deferred=*/false);
2859 }
2860 const intptr_t deferred_count = deferred_objects_.length();
2861 for (intptr_t i = 0; i < deferred_count; i++) {
2862 CodePtr code = deferred_objects_[i];
2863 WriteFill(s, kind, code, /*deferred=*/true);
2864 }
2865 }
2866
2868 Snapshot::Kind kind,
2869 CodePtr code,
2870 bool deferred) {
2871 const intptr_t bytes_written = s->bytes_written();
2873
2874 intptr_t pointer_offsets_length =
2875 Code::PtrOffBits::decode(code->untag()->state_bits_);
2876 if (pointer_offsets_length != 0) {
2877 FATAL("Cannot serialize code with embedded pointers");
2878 }
2879 if (kind == Snapshot::kFullAOT && Code::IsDisabled(code)) {
2880 // Disabled code is fatal in AOT since we cannot recompile.
2881 s->UnexpectedObject(code, "Disabled code");
2882 }
2883
2884 s->WriteInstructions(code->untag()->instructions_,
2885 code->untag()->unchecked_offset_, code, deferred);
2886 if (kind == Snapshot::kFullJIT) {
2887 // TODO(rmacnak): Fix references to disabled code before serializing.
2888 // For now, we may write the FixCallersTarget or equivalent stub. This
2889 // will cause a fixup if this code is called.
2890 const uint32_t active_unchecked_offset =
2891 code->untag()->unchecked_entry_point_ - code->untag()->entry_point_;
2892 s->WriteInstructions(code->untag()->active_instructions_,
2893 active_unchecked_offset, code, deferred);
2894 }
2895
2896#if defined(DART_PRECOMPILER)
2897 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
2898 // If we are writing a V8 snapshot profile, then attribute references going
2899 // through the object pool and static calls to the code object itself.
2900 if (kind == Snapshot::kFullAOT &&
2901 code->untag()->object_pool_ != ObjectPool::null()) {
2902 ObjectPoolPtr pool = code->untag()->object_pool_;
2903 // Non-empty per-code object pools should not be reachable in this mode.
2904 ASSERT(!s->HasRef(pool) || pool == Object::empty_object_pool().ptr());
2905 s->CreateArtificialNodeIfNeeded(pool);
2906 s->AttributePropertyRef(pool, "object_pool_");
2907 }
2908 if (kind != Snapshot::kFullJIT &&
2909 code->untag()->static_calls_target_table_ != Array::null()) {
2910 auto const table = code->untag()->static_calls_target_table_;
2911 // Non-empty static call target tables shouldn't be reachable in this
2912 // mode.
2913 ASSERT(!s->HasRef(table) || table == Object::empty_array().ptr());
2914 s->CreateArtificialNodeIfNeeded(table);
2915 s->AttributePropertyRef(table, "static_calls_target_table_");
2916 }
2917 }
2918#endif // defined(DART_PRECOMPILER)
2919
2920 if (Code::IsDiscarded(code)) {
2921 // No bytes should be written to represent this code.
2922 ASSERT(s->bytes_written() == bytes_written);
2923 // Only write instructions, compressed stackmaps and state bits
2924 // for the discarded Code objects.
2925 ASSERT(kind == Snapshot::kFullAOT && FLAG_dwarf_stack_traces_mode &&
2926 !FLAG_retain_code_objects);
2927#if defined(DART_PRECOMPILER)
2928 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
2929 // Keep the owner as a (possibly artificial) node for snapshot analysis.
2930 const auto& owner = code->untag()->owner_;
2931 s->CreateArtificialNodeIfNeeded(owner);
2932 s->AttributePropertyRef(owner, "owner_");
2933 }
2934#endif
2935 return;
2936 }
2937
2938 // No need to write the object pool out if we are producing a full AOT
2939 // snapshot with bare instructions.
2940 if (kind != Snapshot::kFullAOT) {
2941 if (s->InCurrentLoadingUnitOrRoot(code->untag()->object_pool_)) {
2942 WriteField(code, object_pool_);
2943 } else {
2944 WriteFieldValue(object_pool_, ObjectPool::null());
2945 }
2946 }
2947 WriteField(code, owner_);
2948 WriteField(code, exception_handlers_);
2949 WriteField(code, pc_descriptors_);
2950 WriteField(code, catch_entry_);
2951 if (s->kind() == Snapshot::kFullJIT) {
2952 WriteField(code, compressed_stackmaps_);
2953 }
2954 if (FLAG_precompiled_mode && FLAG_dwarf_stack_traces_mode) {
2955 WriteFieldValue(inlined_id_to_function_, Array::null());
2956 WriteFieldValue(code_source_map_, CodeSourceMap::null());
2957 } else {
2958 WriteField(code, inlined_id_to_function_);
2959 if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) {
2960 WriteField(code, code_source_map_);
2961 } else {
2962 WriteFieldValue(code_source_map_, CodeSourceMap::null());
2963 }
2964 }
2965 if (kind == Snapshot::kFullJIT) {
2966 WriteField(code, deopt_info_array_);
2967 WriteField(code, static_calls_target_table_);
2968 }
2969
2970#if !defined(PRODUCT)
2971 WriteField(code, return_address_metadata_);
2972 if (FLAG_code_comments) {
2973 WriteField(code, comments_);
2974 }
2975#endif
2976 }
2977
2978 GrowableArray<CodePtr>* objects() { return &objects_; }
2979 GrowableArray<CodePtr>* deferred_objects() { return &deferred_objects_; }
2980
2981 static const char* MakeDisambiguatedCodeName(Serializer* s, CodePtr c) {
2982 if (s->profile_writer() == nullptr) {
2983 return nullptr;
2984 }
2985
2986 REUSABLE_CODE_HANDLESCOPE(s->thread());
2987 Code& code = reused_code_handle.Handle();
2988 code = c;
2989 return code.QualifiedName(
2992 }
2993
2994 intptr_t first_ref() const { return first_ref_; }
2995 intptr_t first_deferred_ref() const { return first_deferred_ref_; }
2996 intptr_t last_ref() const { return last_ref_; }
2997
2998 private:
2999 intptr_t first_ref_;
3000 intptr_t first_deferred_ref_;
3001 intptr_t last_ref_;
3002 GrowableArray<CodePtr> objects_;
3003 GrowableArray<CodePtr> deferred_objects_;
3004 Array& array_;
3005};
3006#endif // !DART_PRECOMPILED_RUNTIME
3007
3009 public:
3012
3013 void ReadAlloc(Deserializer* d) override {
3014 start_index_ = d->next_index();
3015 d->set_code_start_index(start_index_);
3016 const intptr_t count = d->ReadUnsigned();
3017 for (intptr_t i = 0; i < count; i++) {
3019 }
3020 stop_index_ = d->next_index();
3021 d->set_code_stop_index(stop_index_);
3022 deferred_start_index_ = d->next_index();
3023 const intptr_t deferred_count = d->ReadUnsigned();
3024 for (intptr_t i = 0; i < deferred_count; i++) {
3026 }
3027 deferred_stop_index_ = d->next_index();
3028 }
3029
3031 const int32_t state_bits = d->Read<int32_t>();
3032 ASSERT(!Code::DiscardedBit::decode(state_bits));
3033 auto code = static_cast<CodePtr>(d->Allocate(Code::InstanceSize(0)));
3034 d->AssignRef(code);
3035 code->untag()->state_bits_ = state_bits;
3036 }
3037
3038 void ReadFill(Deserializer* d) override {
3039 ASSERT(!is_canonical()); // Never canonical.
3041#if defined(DART_PRECOMPILED_RUNTIME)
3042 ReadFill(d, deferred_start_index_, deferred_stop_index_, true);
3043#else
3044 ASSERT(deferred_start_index_ == deferred_stop_index_);
3045#endif
3046 }
3047
3049 intptr_t start_index,
3050 intptr_t stop_index,
3051 bool deferred) {
3052 for (intptr_t id = start_index, n = stop_index; id < n; id++) {
3053 auto const code = static_cast<CodePtr>(d->Ref(id));
3054
3056
3058 ASSERT(!Code::IsDiscarded(code));
3059
3060 d->ReadInstructions(code, deferred);
3061
3062#if !defined(DART_PRECOMPILED_RUNTIME)
3063 ASSERT(d->kind() == Snapshot::kFullJIT);
3064 code->untag()->object_pool_ = static_cast<ObjectPoolPtr>(d->ReadRef());
3065#else
3066 ASSERT(d->kind() == Snapshot::kFullAOT);
3067 // There is a single global pool.
3068 code->untag()->object_pool_ = ObjectPool::null();
3069#endif
3070 code->untag()->owner_ = d->ReadRef();
3071 code->untag()->exception_handlers_ =
3072 static_cast<ExceptionHandlersPtr>(d->ReadRef());
3073 code->untag()->pc_descriptors_ =
3074 static_cast<PcDescriptorsPtr>(d->ReadRef());
3075 code->untag()->catch_entry_ = d->ReadRef();
3076#if !defined(DART_PRECOMPILED_RUNTIME)
3077 ASSERT(d->kind() == Snapshot::kFullJIT);
3078 code->untag()->compressed_stackmaps_ =
3079 static_cast<CompressedStackMapsPtr>(d->ReadRef());
3080#else
3081 ASSERT(d->kind() == Snapshot::kFullAOT);
3082 code->untag()->compressed_stackmaps_ = CompressedStackMaps::null();
3083#endif
3084 code->untag()->inlined_id_to_function_ =
3085 static_cast<ArrayPtr>(d->ReadRef());
3086 code->untag()->code_source_map_ =
3087 static_cast<CodeSourceMapPtr>(d->ReadRef());
3088
3089#if !defined(DART_PRECOMPILED_RUNTIME)
3090 ASSERT(d->kind() == Snapshot::kFullJIT);
3091 code->untag()->deopt_info_array_ = static_cast<ArrayPtr>(d->ReadRef());
3092 code->untag()->static_calls_target_table_ =
3093 static_cast<ArrayPtr>(d->ReadRef());
3094#endif // !DART_PRECOMPILED_RUNTIME
3095
3096#if !defined(PRODUCT)
3097 code->untag()->return_address_metadata_ = d->ReadRef();
3098 code->untag()->var_descriptors_ = LocalVarDescriptors::null();
3099 code->untag()->comments_ = FLAG_code_comments
3100 ? static_cast<ArrayPtr>(d->ReadRef())
3101 : Array::null();
3102 code->untag()->compile_timestamp_ = 0;
3103#endif
3104 }
3105 }
3106
3107 void PostLoad(Deserializer* d, const Array& refs) override {
3108 d->EndInstructions();
3109
3110#if !defined(PRODUCT)
3111 if (!CodeObservers::AreActive() && !FLAG_support_disassembler) return;
3112#endif
3113 Code& code = Code::Handle(d->zone());
3114#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
3115 Object& owner = Object::Handle(d->zone());
3116#endif
3117 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3118 code ^= refs.At(id);
3119#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(PRODUCT)
3121 Code::NotifyCodeObservers(code, code.is_optimized());
3122 }
3123#endif
3124#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
3125 owner = code.owner();
3126 if (owner.IsFunction()) {
3127 if ((FLAG_disassemble ||
3128 (code.is_optimized() && FLAG_disassemble_optimized)) &&
3129 compiler::PrintFilter::ShouldPrint(Function::Cast(owner))) {
3130 Disassembler::DisassembleCode(Function::Cast(owner), code,
3131 code.is_optimized());
3132 }
3133 } else if (FLAG_disassemble_stubs) {
3134 Disassembler::DisassembleStub(code.Name(), code);
3135 }
3136#endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
3137 }
3138 }
3139
3140 private:
3141 intptr_t deferred_start_index_;
3142 intptr_t deferred_stop_index_;
3143};
3144
3145#if !defined(DART_PRECOMPILED_RUNTIME)
3147 public:
3149 : SerializationCluster("ObjectPool", kObjectPoolCid) {}
3151
3152 void Trace(Serializer* s, ObjectPtr object) {
3153 ObjectPoolPtr pool = ObjectPool::RawCast(object);
3154 objects_.Add(pool);
3155
3156 if (s->kind() != Snapshot::kFullAOT) {
3157 const intptr_t length = pool->untag()->length_;
3158 uint8_t* entry_bits = pool->untag()->entry_bits();
3159 for (intptr_t i = 0; i < length; i++) {
3160 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
3161 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
3162 s->Push(pool->untag()->data()[i].raw_obj_);
3163 }
3164 }
3165 }
3166 }
3167
3169 const intptr_t count = objects_.length();
3170 s->WriteUnsigned(count);
3171 for (intptr_t i = 0; i < count; i++) {
3172 ObjectPoolPtr pool = objects_[i];
3173 s->AssignRef(pool);
3175 const intptr_t length = pool->untag()->length_;
3176 s->WriteUnsigned(length);
3177 target_memory_size_ += compiler::target::ObjectPool::InstanceSize(length);
3178 }
3179 }
3180
3182 bool weak = s->kind() == Snapshot::kFullAOT;
3183
3184 const intptr_t count = objects_.length();
3185 for (intptr_t i = 0; i < count; i++) {
3186 ObjectPoolPtr pool = objects_[i];
3188 const intptr_t length = pool->untag()->length_;
3189 s->WriteUnsigned(length);
3190 uint8_t* entry_bits = pool->untag()->entry_bits();
3191 for (intptr_t j = 0; j < length; j++) {
3192 UntaggedObjectPool::Entry& entry = pool->untag()->data()[j];
3193 uint8_t bits = entry_bits[j];
3195 auto snapshot_behavior = ObjectPool::SnapshotBehaviorBits::decode(bits);
3196 ASSERT(snapshot_behavior !=
3197 ObjectPool::SnapshotBehavior::kNotSnapshotable);
3198 s->Write<uint8_t>(bits);
3199 if (snapshot_behavior != ObjectPool::SnapshotBehavior::kSnapshotable) {
3200 // The deserializer will reset this to a specific value, so there is
3201 // no need to write anything.
3202 continue;
3203 }
3204 switch (type) {
3205 case ObjectPool::EntryType::kTaggedObject: {
3206 if (weak && !s->HasRef(entry.raw_obj_)) {
3207 // Any value will do, but null has the shortest id.
3208 s->WriteElementRef(Object::null(), j);
3209 } else {
3210 s->WriteElementRef(entry.raw_obj_, j);
3211 }
3212 break;
3213 }
3214 case ObjectPool::EntryType::kImmediate: {
3215 s->Write<intptr_t>(entry.raw_value_);
3216 break;
3217 }
3218 case ObjectPool::EntryType::kNativeFunction: {
3219 // Write nothing. Will initialize with the lazy link entry.
3220 break;
3221 }
3222 default:
3223 UNREACHABLE();
3224 }
3225 }
3226 }
3227 }
3228
3229 private:
3231};
3232#endif // !DART_PRECOMPILED_RUNTIME
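// ---------------------------------------------------------------------------
// Summary sketch (not part of the original app_snapshot.cc source): on the
// wire, each object pool entry is one entry_bits byte followed by an optional
// payload, as the serializer above and the deserializer below agree on:
//   - kTaggedObject + kSnapshotable: entry_bits, then a ref to the object
//     (or a ref to null when the target is weak and unreachable in AOT);
//   - kImmediate + kSnapshotable: entry_bits, then a raw intptr_t value;
//   - kNativeFunction: entry_bits only; the entry is re-initialized with the
//     lazy link entry at load time;
//   - any other snapshot behavior: entry_bits only; the deserializer resets
//     the entry to a behavior-specific value.
// ---------------------------------------------------------------------------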
3233
3235 public:
3238
3239 void ReadAlloc(Deserializer* d) override {
3240 start_index_ = d->next_index();
3241 const intptr_t count = d->ReadUnsigned();
3242 for (intptr_t i = 0; i < count; i++) {
3243 const intptr_t length = d->ReadUnsigned();
3244 d->AssignRef(d->Allocate(ObjectPool::InstanceSize(length)));
3245 }
3246 stop_index_ = d->next_index();
3247 }
3248
3249 void ReadFill(Deserializer* d_) override {
3251
3252 ASSERT(!is_canonical()); // Never canonical.
3253 fill_position_ = d.Position();
3254#if defined(DART_PRECOMPILED_RUNTIME)
3255 const uint8_t immediate_bits = ObjectPool::EncodeBits(
3256 ObjectPool::EntryType::kImmediate, ObjectPool::Patchability::kPatchable,
3257 ObjectPool::SnapshotBehavior::kSnapshotable);
3258 uword switchable_call_miss_entry_point =
3259 StubCode::SwitchableCallMiss().MonomorphicEntryPoint();
3260#endif // defined(DART_PRECOMPILED_RUNTIME)
3261
3262 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3263 const intptr_t length = d.ReadUnsigned();
3264 ObjectPoolPtr pool = static_cast<ObjectPoolPtr>(d.Ref(id));
3265 Deserializer::InitializeHeader(pool, kObjectPoolCid,
3267 pool->untag()->length_ = length;
3268 for (intptr_t j = 0; j < length; j++) {
3269 const uint8_t entry_bits = d.Read<uint8_t>();
3270 pool->untag()->entry_bits()[j] = entry_bits;
3271 UntaggedObjectPool::Entry& entry = pool->untag()->data()[j];
3272 const auto snapshot_behavior =
3274 ASSERT(snapshot_behavior !=
3275 ObjectPool::SnapshotBehavior::kNotSnapshotable);
3276 switch (snapshot_behavior) {
3277 case ObjectPool::SnapshotBehavior::kSnapshotable:
3278 // Handled below.
3279 break;
3280 case ObjectPool::SnapshotBehavior::kResetToBootstrapNative:
3281 entry.raw_obj_ = StubCode::CallBootstrapNative().ptr();
3282 continue;
3283#if defined(DART_PRECOMPILED_RUNTIME)
3284 case ObjectPool::SnapshotBehavior::
3285 kResetToSwitchableCallMissEntryPoint:
3286 pool->untag()->entry_bits()[j] = immediate_bits;
3287 entry.raw_value_ =
3288 static_cast<intptr_t>(switchable_call_miss_entry_point);
3289 continue;
3290#endif // defined(DART_PRECOMPILED_RUNTIME)
3291 case ObjectPool::SnapshotBehavior::kSetToZero:
3292 entry.raw_value_ = 0;
3293 continue;
3294 default:
3295 FATAL("Unexpected snapshot behavior: %d\n", snapshot_behavior);
3296 }
3297 switch (ObjectPool::TypeBits::decode(entry_bits)) {
3298 case ObjectPool::EntryType::kTaggedObject:
3299 entry.raw_obj_ = d.ReadRef();
3300 break;
3301 case ObjectPool::EntryType::kImmediate:
3302 entry.raw_value_ = d.Read<intptr_t>();
3303 break;
3304 case ObjectPool::EntryType::kNativeFunction: {
3305 // Read nothing. Initialize with the lazy link entry.
3307 entry.raw_value_ = static_cast<intptr_t>(new_entry);
3308 break;
3309 }
3310 default:
3311 UNREACHABLE();
3312 }
3313 }
3314 }
3315 }
3316
3317 void PostLoad(Deserializer* d, const Array& refs) override {
3318#if defined(DART_PRECOMPILED_RUNTIME) && \
3319 (!defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER))
3320 if (FLAG_disassemble) {
3322 d->isolate_group()->object_store()->global_object_pool());
3323 THR_Print("Global object pool:\n");
3324 pool.DebugPrint();
3325 }
3326#endif
3327 }
3328
3329 private:
3330 intptr_t fill_position_ = 0;
3331};
3332
3333#if defined(DART_PRECOMPILER)
3334class WeakSerializationReferenceSerializationCluster
3335 : public SerializationCluster {
3336 public:
3337 WeakSerializationReferenceSerializationCluster()
3338 : SerializationCluster(
3339 "WeakSerializationReference",
3340 compiler::target::WeakSerializationReference::InstanceSize()) {}
3341 ~WeakSerializationReferenceSerializationCluster() {}
3342
3343 void Trace(Serializer* s, ObjectPtr object) {
3344 ASSERT(s->kind() == Snapshot::kFullAOT);
3345 objects_.Add(WeakSerializationReference::RawCast(object));
3346 }
3347
3348 void RetraceEphemerons(Serializer* s) {
3349 for (intptr_t i = 0; i < objects_.length(); i++) {
3350 WeakSerializationReferencePtr weak = objects_[i];
3351 if (!s->IsReachable(weak->untag()->target())) {
3352 s->Push(weak->untag()->replacement());
3353 }
3354 }
3355 }
3356
3357 intptr_t Count(Serializer* s) { return objects_.length(); }
3358
3359 void CreateArtificialTargetNodesIfNeeded(Serializer* s) {
3360 for (intptr_t i = 0; i < objects_.length(); i++) {
3361 WeakSerializationReferencePtr weak = objects_[i];
3362 s->CreateArtificialNodeIfNeeded(weak->untag()->target());
3363 }
3364 }
3365
3366 void WriteAlloc(Serializer* s) {
3367 UNREACHABLE(); // No WSRs are serialized, and so this cluster is not added.
3368 }
3369
3370 void WriteFill(Serializer* s) {
3371 UNREACHABLE(); // No WSRs are serialized, and so this cluster is not added.
3372 }
3373
3374 private:
3375 GrowableArray<WeakSerializationReferencePtr> objects_;
3376};
3377#endif
3378
3379#if !defined(DART_PRECOMPILED_RUNTIME)
3381 public:
3383 : SerializationCluster("PcDescriptors", kPcDescriptorsCid) {}
3385
3386 void Trace(Serializer* s, ObjectPtr object) {
3387 PcDescriptorsPtr desc = PcDescriptors::RawCast(object);
3388 objects_.Add(desc);
3389 }
3390
3392 const intptr_t count = objects_.length();
3393 s->WriteUnsigned(count);
3394 for (intptr_t i = 0; i < count; i++) {
3395 PcDescriptorsPtr desc = objects_[i];
3396 s->AssignRef(desc);
3397 AutoTraceObject(desc);
3398 const intptr_t length = desc->untag()->length_;
3399 s->WriteUnsigned(length);
3401 compiler::target::PcDescriptors::InstanceSize(length);
3402 }
3403 }
3404
3406 const intptr_t count = objects_.length();
3407 for (intptr_t i = 0; i < count; i++) {
3408 PcDescriptorsPtr desc = objects_[i];
3409 AutoTraceObject(desc);
3410 const intptr_t length = desc->untag()->length_;
3411 s->WriteUnsigned(length);
3412 uint8_t* cdata = reinterpret_cast<uint8_t*>(desc->untag()->data());
3413 s->WriteBytes(cdata, length);
3414 }
3415 }
3416
3417 private:
3419};
3420#endif // !DART_PRECOMPILED_RUNTIME
3421
3423 public:
3427
3428 void ReadAlloc(Deserializer* d) override {
3429 start_index_ = d->next_index();
3430 const intptr_t count = d->ReadUnsigned();
3431 for (intptr_t i = 0; i < count; i++) {
3432 const intptr_t length = d->ReadUnsigned();
3433 d->AssignRef(d->Allocate(PcDescriptors::InstanceSize(length)));
3434 }
3435 stop_index_ = d->next_index();
3436 }
3437
3438 void ReadFill(Deserializer* d_) override {
3440
3441 ASSERT(!is_canonical()); // Never canonical.
3442 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3443 const intptr_t length = d.ReadUnsigned();
3444 PcDescriptorsPtr desc = static_cast<PcDescriptorsPtr>(d.Ref(id));
3445 Deserializer::InitializeHeader(desc, kPcDescriptorsCid,
3447 desc->untag()->length_ = length;
3448 uint8_t* cdata = reinterpret_cast<uint8_t*>(desc->untag()->data());
3449 d.ReadBytes(cdata, length);
3450 }
3451 }
3452};
3453
3454#if !defined(DART_PRECOMPILED_RUNTIME)
3456 public:
3458 : SerializationCluster("CodeSourceMap", kCodeSourceMapCid) {}
3460
3461 void Trace(Serializer* s, ObjectPtr object) {
3462 CodeSourceMapPtr map = CodeSourceMap::RawCast(object);
3463 objects_.Add(map);
3464 }
3465
3467 const intptr_t count = objects_.length();
3468 s->WriteUnsigned(count);
3469 for (intptr_t i = 0; i < count; i++) {
3470 CodeSourceMapPtr map = objects_[i];
3471 s->AssignRef(map);
3472 AutoTraceObject(map);
3473 const intptr_t length = map->untag()->length_;
3474 s->WriteUnsigned(length);
3476 compiler::target::PcDescriptors::InstanceSize(length);
3477 }
3478 }
3479
3481 const intptr_t count = objects_.length();
3482 for (intptr_t i = 0; i < count; i++) {
3483 CodeSourceMapPtr map = objects_[i];
3484 AutoTraceObject(map);
3485 const intptr_t length = map->untag()->length_;
3486 s->WriteUnsigned(length);
3487 uint8_t* cdata = reinterpret_cast<uint8_t*>(map->untag()->data());
3488 s->WriteBytes(cdata, length);
3489 }
3490 }
3491
3492 private:
3494};
3495#endif // !DART_PRECOMPILED_RUNTIME
3496
3498 public:
3502
3503 void ReadAlloc(Deserializer* d) override {
3504 start_index_ = d->next_index();
3505 const intptr_t count = d->ReadUnsigned();
3506 for (intptr_t i = 0; i < count; i++) {
3507 const intptr_t length = d->ReadUnsigned();
3508 d->AssignRef(d->Allocate(CodeSourceMap::InstanceSize(length)));
3509 }
3510 stop_index_ = d->next_index();
3511 }
3512
3513 void ReadFill(Deserializer* d_) override {
3515
3516 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3517 const intptr_t length = d.ReadUnsigned();
3518 CodeSourceMapPtr map = static_cast<CodeSourceMapPtr>(d.Ref(id));
3519 Deserializer::InitializeHeader(map, kPcDescriptorsCid,
3521 map->untag()->length_ = length;
3522 uint8_t* cdata = reinterpret_cast<uint8_t*>(map->untag()->data());
3523 d.ReadBytes(cdata, length);
3524 }
3525 }
3526};
3527
3528#if !defined(DART_PRECOMPILED_RUNTIME)
3530 public:
3532 : SerializationCluster("CompressedStackMaps", kCompressedStackMapsCid) {}
3534
3535 void Trace(Serializer* s, ObjectPtr object) {
3536 CompressedStackMapsPtr desc = CompressedStackMaps::RawCast(object);
3537 objects_.Add(desc);
3538 }
3539
3541 const intptr_t count = objects_.length();
3542 s->WriteUnsigned(count);
3543 for (intptr_t i = 0; i < count; i++) {
3544 CompressedStackMapsPtr map = objects_[i];
3545 s->AssignRef(map);
3546 AutoTraceObject(map);
3548 map->untag()->payload()->flags_and_size());
3549 s->WriteUnsigned(length);
3551 compiler::target::CompressedStackMaps::InstanceSize(length);
3552 }
3553 }
3554
3556 const intptr_t count = objects_.length();
3557 for (intptr_t i = 0; i < count; i++) {
3558 CompressedStackMapsPtr map = objects_[i];
3559 AutoTraceObject(map);
3560 s->WriteUnsigned(map->untag()->payload()->flags_and_size());
3562 map->untag()->payload()->flags_and_size());
3563 uint8_t* cdata =
3564 reinterpret_cast<uint8_t*>(map->untag()->payload()->data());
3565 s->WriteBytes(cdata, length);
3566 }
3567 }
3568
3569 private:
3571};
3572#endif // !DART_PRECOMPILED_RUNTIME
3573
3575 : public DeserializationCluster {
3576 public:
3580
3581 void ReadAlloc(Deserializer* d) override {
3582 start_index_ = d->next_index();
3583 const intptr_t count = d->ReadUnsigned();
3584 for (intptr_t i = 0; i < count; i++) {
3585 const intptr_t length = d->ReadUnsigned();
3586 d->AssignRef(d->Allocate(CompressedStackMaps::InstanceSize(length)));
3587 }
3588 stop_index_ = d->next_index();
3589 }
3590
3591 void ReadFill(Deserializer* d_) override {
3593
3594 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3595 const intptr_t flags_and_size = d.ReadUnsigned();
3596 const intptr_t length =
3598 CompressedStackMapsPtr map =
3599 static_cast<CompressedStackMapsPtr>(d.Ref(id));
3600 Deserializer::InitializeHeader(map, kCompressedStackMapsCid,
3602 map->untag()->payload()->set_flags_and_size(flags_and_size);
3603 uint8_t* cdata =
3604 reinterpret_cast<uint8_t*>(map->untag()->payload()->data());
3605 d.ReadBytes(cdata, length);
3606 }
3607 }
3608};
3609
3610#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_COMPRESSED_POINTERS)
3611// PcDescriptor, CompressedStackMaps, OneByteString, TwoByteString
3613 : public CanonicalSetSerializationCluster<CanonicalStringSet,
3614 String,
3615 ObjectPtr> {
3616 public:
3618 const char* type,
3619 intptr_t cid,
3620 bool is_canonical)
3622 cid,
3625 ImageWriter::TagObjectTypeAsReadOnly(zone, type)),
3626 zone_(zone),
3627 cid_(cid),
3628 type_(type) {}
3630
3631 void Trace(Serializer* s, ObjectPtr object) {
3632 // A string's hash must already be computed when we write it because it
3633 // will be loaded into read-only memory. Extra bytes due to allocation
3634 // rounding need to be deterministically set for reliable deduplication in
3635 // shared images.
3636 if (object->untag()->InVMIsolateHeap() ||
3637 s->heap()->old_space()->IsObjectFromImagePages(object)) {
3638 // This object is already read-only.
3639 } else {
3641 }
3642
3643 objects_.Add(object);
3644 }
3645
3647 const bool is_string_cluster = IsStringClassId(cid_);
3648
3649 intptr_t count = objects_.length();
3650 s->WriteUnsigned(count);
3652
3653 uint32_t running_offset = 0;
3654 for (intptr_t i = 0; i < count; i++) {
3655 ObjectPtr object = objects_[i];
3656 s->AssignRef(object);
3657 const StringPtr name =
3658 is_string_cluster ? String::RawCast(object) : nullptr;
3659 Serializer::WritingObjectScope scope(s, type_, object, name);
3660 uint32_t offset = s->GetDataOffset(object);
3661 s->TraceDataOffset(offset);
3664 ASSERT(offset > running_offset);
3665 s->WriteUnsigned((offset - running_offset) >>
3667 running_offset = offset;
3668 }
3670 }
3671
3673 // No-op.
3674 }
3675
3676 private:
3677 Zone* zone_;
3678 const intptr_t cid_;
3679 const char* const type_;
3680};
3681#endif // !DART_PRECOMPILED_RUNTIME && !DART_COMPRESSED_POINTERS
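// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original app_snapshot.cc source): the
// RO data cluster above does not serialize object contents at all; it only
// records where each object lives in the read-only image, as a sequence of
// offset deltas scaled down by the object alignment. Assuming hypothetical
// data offsets of 32, 96 and 144 bytes and 16-byte object alignment
// (kObjectAlignmentLog2 == 4), the writer emits the unsigned values
//
//   (32 - 0)   >> 4 == 2
//   (96 - 32)  >> 4 == 4
//   (144 - 96) >> 4 == 3
//
// and the deserializer below reconstructs the offsets by accumulating
// delta << kObjectAlignmentLog2 before calling GetObjectAt().
// ---------------------------------------------------------------------------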
3682
3683#if !defined(DART_COMPRESSED_POINTERS)
3685 : public CanonicalSetDeserializationCluster<CanonicalStringSet> {
3686 public:
3688 bool is_canonical,
3689 bool is_root_unit)
3691 is_root_unit,
3692 "ROData"),
3693 cid_(cid) {}
3695
3696 void ReadAlloc(Deserializer* d) override {
3697 start_index_ = d->next_index();
3698 intptr_t count = d->ReadUnsigned();
3699 uint32_t running_offset = 0;
3700 for (intptr_t i = 0; i < count; i++) {
3701 running_offset += d->ReadUnsigned() << kObjectAlignmentLog2;
3702 ObjectPtr object = d->GetObjectAt(running_offset);
3703 d->AssignRef(object);
3704 }
3705 stop_index_ = d->next_index();
3706 if (cid_ == kStringCid) {
3708 }
3709 }
3710
3711 void ReadFill(Deserializer* d_) override {
3713
3714 // No-op.
3715 }
3716
3717 void PostLoad(Deserializer* d, const Array& refs) override {
3718 if (!table_.IsNull()) {
3719 auto object_store = d->isolate_group()->object_store();
3720 VerifyCanonicalSet(d, refs,
3721 WeakArray::Handle(object_store->symbol_table()));
3722 object_store->set_symbol_table(table_);
3723 if (d->isolate_group() == Dart::vm_isolate_group()) {
3724 Symbols::InitFromSnapshot(d->isolate_group());
3725 }
3726 } else if (!is_root_unit_ && is_canonical()) {
3727 FATAL("Cannot recanonicalize RO objects.");
3728 }
3729 }
3730
3731 private:
3732 const intptr_t cid_;
3733};
3734#endif // !DART_COMPRESSED_POINTERS
3735
3736#if !defined(DART_PRECOMPILED_RUNTIME)
3738 public:
3740 : SerializationCluster("ExceptionHandlers", kExceptionHandlersCid) {}
3742
3743 void Trace(Serializer* s, ObjectPtr object) {
3744 ExceptionHandlersPtr handlers = ExceptionHandlers::RawCast(object);
3745 objects_.Add(handlers);
3746
3747 s->Push(handlers->untag()->handled_types_data());
3748 }
3749
3751 const intptr_t count = objects_.length();
3752 s->WriteUnsigned(count);
3753 for (intptr_t i = 0; i < count; i++) {
3754 ExceptionHandlersPtr handlers = objects_[i];
3755 s->AssignRef(handlers);
3756 AutoTraceObject(handlers);
3757 const intptr_t length = handlers->untag()->num_entries();
3758 s->WriteUnsigned(length);
3760 compiler::target::ExceptionHandlers::InstanceSize(length);
3761 }
3762 }
3763
3765 const intptr_t count = objects_.length();
3766 for (intptr_t i = 0; i < count; i++) {
3767 ExceptionHandlersPtr handlers = objects_[i];
3768 AutoTraceObject(handlers);
3769 const intptr_t packed_fields = handlers->untag()->packed_fields_;
3770 const intptr_t length =
3772 s->WriteUnsigned(packed_fields);
3773 WriteCompressedField(handlers, handled_types_data);
3774 for (intptr_t j = 0; j < length; j++) {
3775 const ExceptionHandlerInfo& info = handlers->untag()->data()[j];
3776 s->Write<uint32_t>(info.handler_pc_offset);
3777 s->Write<int16_t>(info.outer_try_index);
3778 s->Write<int8_t>(info.needs_stacktrace);
3779 s->Write<int8_t>(info.has_catch_all);
3780 s->Write<int8_t>(info.is_generated);
3781 }
3782 }
3783 }
3784
3785 private:
3787};
3788#endif // !DART_PRECOMPILED_RUNTIME
3789
3791 public:
3795
3796 void ReadAlloc(Deserializer* d) override {
3797 start_index_ = d->next_index();
3798 const intptr_t count = d->ReadUnsigned();
3799 for (intptr_t i = 0; i < count; i++) {
3800 const intptr_t length = d->ReadUnsigned();
3801 d->AssignRef(d->Allocate(ExceptionHandlers::InstanceSize(length)));
3802 }
3803 stop_index_ = d->next_index();
3804 }
3805
3806 void ReadFill(Deserializer* d_) override {
3808
3809 ASSERT(!is_canonical()); // Never canonical.
3810 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3811 ExceptionHandlersPtr handlers =
3812 static_cast<ExceptionHandlersPtr>(d.Ref(id));
3813 const intptr_t packed_fields = d.ReadUnsigned();
3814 const intptr_t length =
3816 Deserializer::InitializeHeader(handlers, kExceptionHandlersCid,
3818 handlers->untag()->packed_fields_ = packed_fields;
3819 handlers->untag()->handled_types_data_ =
3820 static_cast<ArrayPtr>(d.ReadRef());
3821 for (intptr_t j = 0; j < length; j++) {
3822 ExceptionHandlerInfo& info = handlers->untag()->data()[j];
3823 info.handler_pc_offset = d.Read<uint32_t>();
3824 info.outer_try_index = d.Read<int16_t>();
3825 info.needs_stacktrace = d.Read<int8_t>();
3826 info.has_catch_all = d.Read<int8_t>();
3827 info.is_generated = d.Read<int8_t>();
3828 }
3829 }
3830 }
3831};
3832
3833#if !defined(DART_PRECOMPILED_RUNTIME)
3835 public:
3837 : SerializationCluster("Context", kContextCid) {}
3839
3840 void Trace(Serializer* s, ObjectPtr object) {
3841 ContextPtr context = Context::RawCast(object);
3842 objects_.Add(context);
3843
3844 s->Push(context->untag()->parent());
3845 const intptr_t length = context->untag()->num_variables_;
3846 for (intptr_t i = 0; i < length; i++) {
3847 s->Push(context->untag()->element(i));
3848 }
3849 }
3850
3852 const intptr_t count = objects_.length();
3853 s->WriteUnsigned(count);
3854 for (intptr_t i = 0; i < count; i++) {
3855 ContextPtr context = objects_[i];
3856 s->AssignRef(context);
3857 AutoTraceObject(context);
3858 const intptr_t length = context->untag()->num_variables_;
3859 s->WriteUnsigned(length);
3860 target_memory_size_ += compiler::target::Context::InstanceSize(length);
3861 }
3862 }
3863
3865 const intptr_t count = objects_.length();
3866 for (intptr_t i = 0; i < count; i++) {
3867 ContextPtr context = objects_[i];
3868 AutoTraceObject(context);
3869 const intptr_t length = context->untag()->num_variables_;
3870 s->WriteUnsigned(length);
3871 WriteField(context, parent());
3872 for (intptr_t j = 0; j < length; j++) {
3873 s->WriteElementRef(context->untag()->element(j), j);
3874 }
3875 }
3876 }
3877
3878 private:
3880};
3881#endif // !DART_PRECOMPILED_RUNTIME
3882
3884 public:
3887
3888 void ReadAlloc(Deserializer* d) override {
3889 start_index_ = d->next_index();
3890 const intptr_t count = d->ReadUnsigned();
3891 for (intptr_t i = 0; i < count; i++) {
3892 const intptr_t length = d->ReadUnsigned();
3893 d->AssignRef(d->Allocate(Context::InstanceSize(length)));
3894 }
3895 stop_index_ = d->next_index();
3896 }
3897
3898 void ReadFill(Deserializer* d_) override {
3900
3901 ASSERT(!is_canonical()); // Never canonical.
3902 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3903 ContextPtr context = static_cast<ContextPtr>(d.Ref(id));
3904 const intptr_t length = d.ReadUnsigned();
3905 Deserializer::InitializeHeader(context, kContextCid,
3907 context->untag()->num_variables_ = length;
3908 context->untag()->parent_ = static_cast<ContextPtr>(d.ReadRef());
3909 for (intptr_t j = 0; j < length; j++) {
3910 context->untag()->data()[j] = d.ReadRef();
3911 }
3912 }
3913 }
3914};
3915
3916#if !defined(DART_PRECOMPILED_RUNTIME)
3918 public:
3920 : SerializationCluster("ContextScope", kContextScopeCid) {}
3922
3923 void Trace(Serializer* s, ObjectPtr object) {
3924 ContextScopePtr scope = ContextScope::RawCast(object);
3925 objects_.Add(scope);
3926
3927 const intptr_t length = scope->untag()->num_variables_;
3928 PushFromTo(scope, length);
3929 }
3930
3932 const intptr_t count = objects_.length();
3933 s->WriteUnsigned(count);
3934 for (intptr_t i = 0; i < count; i++) {
3935 ContextScopePtr scope = objects_[i];
3936 s->AssignRef(scope);
3937 AutoTraceObject(scope);
3938 const intptr_t length = scope->untag()->num_variables_;
3939 s->WriteUnsigned(length);
3940 target_memory_size_ +=
3941 compiler::target::ContextScope::InstanceSize(length);
3942 }
3943 }
3944
3946 const intptr_t count = objects_.length();
3947 for (intptr_t i = 0; i < count; i++) {
3948 ContextScopePtr scope = objects_[i];
3949 AutoTraceObject(scope);
3950 const intptr_t length = scope->untag()->num_variables_;
3951 s->WriteUnsigned(length);
3952 s->Write<bool>(scope->untag()->is_implicit_);
3953 WriteFromTo(scope, length);
3954 }
3955 }
3956
3957 private:
3959};
3960#endif // !DART_PRECOMPILED_RUNTIME
3961
3963 public:
3967
3968 void ReadAlloc(Deserializer* d) override {
3969 start_index_ = d->next_index();
3970 const intptr_t count = d->ReadUnsigned();
3971 for (intptr_t i = 0; i < count; i++) {
3972 const intptr_t length = d->ReadUnsigned();
3973 d->AssignRef(d->Allocate(ContextScope::InstanceSize(length)));
3974 }
3975 stop_index_ = d->next_index();
3976 }
3977
3978 void ReadFill(Deserializer* d_) override {
3980
3981 ASSERT(!is_canonical()); // Never canonical.
3982 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3983 ContextScopePtr scope = static_cast<ContextScopePtr>(d.Ref(id));
3984 const intptr_t length = d.ReadUnsigned();
3985 Deserializer::InitializeHeader(scope, kContextScopeCid,
3987 scope->untag()->num_variables_ = length;
3988 scope->untag()->is_implicit_ = d.Read<bool>();
3989 d.ReadFromTo(scope, length);
3990 }
3991 }
3992};
3993
3994#if !defined(DART_PRECOMPILED_RUNTIME)
3996 public:
3998 : SerializationCluster("UnlinkedCall",
3999 kUnlinkedCallCid,
4000 compiler::target::UnlinkedCall::InstanceSize()) {}
4002
4003 void Trace(Serializer* s, ObjectPtr object) {
4004 UnlinkedCallPtr unlinked = UnlinkedCall::RawCast(object);
4005 objects_.Add(unlinked);
4006 PushFromTo(unlinked);
4007 }
4008
4010 const intptr_t count = objects_.length();
4011 s->WriteUnsigned(count);
4012 for (intptr_t i = 0; i < count; i++) {
4013 UnlinkedCallPtr unlinked = objects_[i];
4014 s->AssignRef(unlinked);
4015 }
4016 }
4017
4019 const intptr_t count = objects_.length();
4020 for (intptr_t i = 0; i < count; i++) {
4021 UnlinkedCallPtr unlinked = objects_[i];
4022 AutoTraceObjectName(unlinked, unlinked->untag()->target_name_);
4023 WriteFromTo(unlinked);
4024 s->Write<bool>(unlinked->untag()->can_patch_to_monomorphic_);
4025 }
4026 }
4027
4028 private:
4030};
4031#endif // !DART_PRECOMPILED_RUNTIME
4032
4034 public:
4038
4042
4043 void ReadFill(Deserializer* d_) override {
4045
4046 ASSERT(!is_canonical()); // Never canonical.
4047 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4048 UnlinkedCallPtr unlinked = static_cast<UnlinkedCallPtr>(d.Ref(id));
4049 Deserializer::InitializeHeader(unlinked, kUnlinkedCallCid,
4051 d.ReadFromTo(unlinked);
4052 unlinked->untag()->can_patch_to_monomorphic_ = d.Read<bool>();
4053 }
4054 }
4055};
4056
4057#if !defined(DART_PRECOMPILED_RUNTIME)
4059 public:
4061 : SerializationCluster("ICData",
4062 kICDataCid,
4063 compiler::target::ICData::InstanceSize()) {}
4065
4066 void Trace(Serializer* s, ObjectPtr object) {
4067 ICDataPtr ic = ICData::RawCast(object);
4068 objects_.Add(ic);
4069 PushFromTo(ic);
4070 }
4071
4073 const intptr_t count = objects_.length();
4074 s->WriteUnsigned(count);
4075 for (intptr_t i = 0; i < count; i++) {
4076 ICDataPtr ic = objects_[i];
4077 s->AssignRef(ic);
4078 }
4079 }
4080
4082 Snapshot::Kind kind = s->kind();
4083 const intptr_t count = objects_.length();
4084 for (intptr_t i = 0; i < count; i++) {
4085 ICDataPtr ic = objects_[i];
4086 AutoTraceObjectName(ic, ic->untag()->target_name_);
4087 WriteFromTo(ic);
4088 if (kind != Snapshot::kFullAOT) {
4089 NOT_IN_PRECOMPILED(s->Write<int32_t>(ic->untag()->deopt_id_));
4090 }
4091 s->Write<uint32_t>(ic->untag()->state_bits_);
4092 }
4093 }
4094
4095 private:
4096 GrowableArray<ICDataPtr> objects_;
4097};
4098#endif // !DART_PRECOMPILED_RUNTIME
4099
4101 public:
4104
4108
4109 void ReadFill(Deserializer* d_) override {
4111
4112 ASSERT(!is_canonical()); // Never canonical.
4113 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4114 ICDataPtr ic = static_cast<ICDataPtr>(d.Ref(id));
4116 d.ReadFromTo(ic);
4117 NOT_IN_PRECOMPILED(ic->untag()->deopt_id_ = d.Read<int32_t>());
4118 ic->untag()->state_bits_ = d.Read<int32_t>();
4119 }
4120 }
4121};
4122
4123#if !defined(DART_PRECOMPILED_RUNTIME)
4125 public:
4128 "MegamorphicCache",
4129 kMegamorphicCacheCid,
4130 compiler::target::MegamorphicCache::InstanceSize()) {}
4132
4133 void Trace(Serializer* s, ObjectPtr object) {
4134 MegamorphicCachePtr cache = MegamorphicCache::RawCast(object);
4135 objects_.Add(cache);
4136 PushFromTo(cache);
4137 }
4138
4140 const intptr_t count = objects_.length();
4141 s->WriteUnsigned(count);
4142 for (intptr_t i = 0; i < count; i++) {
4143 MegamorphicCachePtr cache = objects_[i];
4144 s->AssignRef(cache);
4145 }
4146 }
4147
4149 const intptr_t count = objects_.length();
4150 for (intptr_t i = 0; i < count; i++) {
4151 MegamorphicCachePtr cache = objects_[i];
4152 AutoTraceObjectName(cache, cache->untag()->target_name_);
4153 WriteFromTo(cache);
4154 s->Write<int32_t>(cache->untag()->filled_entry_count_);
4155 }
4156 }
4157
4158 private:
4160};
4161#endif // !DART_PRECOMPILED_RUNTIME
4162
4164 public:
4168
4172
4173 void ReadFill(Deserializer* d_) override {
4175
4176 ASSERT(!is_canonical()); // Never canonical.
4177 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4178 MegamorphicCachePtr cache = static_cast<MegamorphicCachePtr>(d.Ref(id));
4179 Deserializer::InitializeHeader(cache, kMegamorphicCacheCid,
4181 d.ReadFromTo(cache);
4182 cache->untag()->filled_entry_count_ = d.Read<int32_t>();
4183 }
4184 }
4185};
4186
4187#if !defined(DART_PRECOMPILED_RUNTIME)
4189 public:
4192 "SubtypeTestCache",
4193 kSubtypeTestCacheCid,
4194 compiler::target::SubtypeTestCache::InstanceSize()) {}
4196
4197 void Trace(Serializer* s, ObjectPtr object) {
4198 SubtypeTestCachePtr cache = SubtypeTestCache::RawCast(object);
4199 objects_.Add(cache);
4200 s->Push(cache->untag()->cache_);
4201 }
4202
4204 const intptr_t count = objects_.length();
4205 s->WriteUnsigned(count);
4206 for (intptr_t i = 0; i < count; i++) {
4207 SubtypeTestCachePtr cache = objects_[i];
4208 s->AssignRef(cache);
4209 }
4210 }
4211
4213 const intptr_t count = objects_.length();
4214 for (intptr_t i = 0; i < count; i++) {
4215 SubtypeTestCachePtr cache = objects_[i];
4216 AutoTraceObject(cache);
4217 WriteField(cache, cache_);
4218 s->Write<uint32_t>(cache->untag()->num_inputs_);
4219 s->Write<uint32_t>(cache->untag()->num_occupied_);
4220 }
4221 }
4222
4223 private:
4225};
4226#endif // !DART_PRECOMPILED_RUNTIME
4227
4229 public:
4233
4237
4238 void ReadFill(Deserializer* d_) override {
4240
4241 ASSERT(!is_canonical()); // Never canonical.
4242 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4243 SubtypeTestCachePtr cache = static_cast<SubtypeTestCachePtr>(d.Ref(id));
4244 Deserializer::InitializeHeader(cache, kSubtypeTestCacheCid,
4246 cache->untag()->cache_ = static_cast<ArrayPtr>(d.ReadRef());
4247 cache->untag()->num_inputs_ = d.Read<uint32_t>();
4248 cache->untag()->num_occupied_ = d.Read<uint32_t>();
4249 }
4250 }
4251};
4252
4253#if !defined(DART_PRECOMPILED_RUNTIME)
4255 public:
4257 : SerializationCluster("LoadingUnit",
4258 kLoadingUnitCid,
4259 compiler::target::LoadingUnit::InstanceSize()) {}
4261
4262 void Trace(Serializer* s, ObjectPtr object) {
4263 LoadingUnitPtr unit = LoadingUnit::RawCast(object);
4264 objects_.Add(unit);
4265 s->Push(unit->untag()->parent());
4266 }
4267
4269 const intptr_t count = objects_.length();
4270 s->WriteUnsigned(count);
4271 for (intptr_t i = 0; i < count; i++) {
4272 LoadingUnitPtr unit = objects_[i];
4273 s->AssignRef(unit);
4274 }
4275 }
4276
4278 const intptr_t count = objects_.length();
4279 for (intptr_t i = 0; i < count; i++) {
4280 LoadingUnitPtr unit = objects_[i];
4281 AutoTraceObject(unit);
4282 WriteCompressedField(unit, parent);
4283 s->Write<intptr_t>(
4284 unit->untag()->packed_fields_.Read<UntaggedLoadingUnit::IdBits>());
4285 }
4286 }
4287
4288 private:
4290};
4291#endif // !DART_PRECOMPILED_RUNTIME
4292
4294 public:
4297
4301
4302 void ReadFill(Deserializer* d_) override {
4304
4305 ASSERT(!is_canonical()); // Never canonical.
4306 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4307 LoadingUnitPtr unit = static_cast<LoadingUnitPtr>(d.Ref(id));
4308 Deserializer::InitializeHeader(unit, kLoadingUnitCid,
4310 unit->untag()->parent_ = static_cast<LoadingUnitPtr>(d.ReadRef());
4311 unit->untag()->base_objects_ = Array::null();
4312 unit->untag()->instructions_image_ = nullptr;
4313 unit->untag()->packed_fields_ =
4315 UntaggedLoadingUnit::kNotLoaded) |
4316 UntaggedLoadingUnit::IdBits::encode(d.Read<intptr_t>());
4317 }
4318 }
4319};
4320
4321#if !defined(DART_PRECOMPILED_RUNTIME)
4323 public:
4325 : SerializationCluster("LanguageError",
4326 kLanguageErrorCid,
4327 compiler::target::LanguageError::InstanceSize()) {}
4329
4330 void Trace(Serializer* s, ObjectPtr object) {
4331 LanguageErrorPtr error = LanguageError::RawCast(object);
4332 objects_.Add(error);
4334 }
4335
4337 const intptr_t count = objects_.length();
4338 s->WriteUnsigned(count);
4339 for (intptr_t i = 0; i < count; i++) {
4340 LanguageErrorPtr error = objects_[i];
4341 s->AssignRef(error);
4342 }
4343 }
4344
4346 const intptr_t count = objects_.length();
4347 for (intptr_t i = 0; i < count; i++) {
4348 LanguageErrorPtr error = objects_[i];
4351 s->WriteTokenPosition(error->untag()->token_pos_);
4352 s->Write<bool>(error->untag()->report_after_token_);
4353 s->Write<int8_t>(error->untag()->kind_);
4354 }
4355 }
4356
4357 private:
4359};
4360#endif // !DART_PRECOMPILED_RUNTIME
4361
4363 public:
4367
4371
4372 void ReadFill(Deserializer* d_) override {
4374
4375 ASSERT(!is_canonical()); // Never canonical.
4376 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4377 LanguageErrorPtr error = static_cast<LanguageErrorPtr>(d.Ref(id));
4378 Deserializer::InitializeHeader(error, kLanguageErrorCid,
4380 d.ReadFromTo(error);
4381 error->untag()->token_pos_ = d.ReadTokenPosition();
4382 error->untag()->report_after_token_ = d.Read<bool>();
4383 error->untag()->kind_ = d.Read<int8_t>();
4384 }
4385 }
4386};
4387
4388#if !defined(DART_PRECOMPILED_RUNTIME)
4390 public:
4393 "UnhandledException",
4394 kUnhandledExceptionCid,
4395 compiler::target::UnhandledException::InstanceSize()) {}
4397
4398 void Trace(Serializer* s, ObjectPtr object) {
4399 UnhandledExceptionPtr exception = UnhandledException::RawCast(object);
4400 objects_.Add(exception);
4401 PushFromTo(exception);
4402 }
4403
4405 const intptr_t count = objects_.length();
4406 s->WriteUnsigned(count);
4407 for (intptr_t i = 0; i < count; i++) {
4408 UnhandledExceptionPtr exception = objects_[i];
4409 s->AssignRef(exception);
4410 }
4411 }
4412
4414 const intptr_t count = objects_.length();
4415 for (intptr_t i = 0; i < count; i++) {
4416 UnhandledExceptionPtr exception = objects_[i];
4417 AutoTraceObject(exception);
4418 WriteFromTo(exception);
4419 }
4420 }
4421
4422 private:
4424};
4425#endif // !DART_PRECOMPILED_RUNTIME
4426
4428 public:
4432
4436
4437 void ReadFill(Deserializer* d_) override {
4439
4440 ASSERT(!is_canonical()); // Never canonical.
4441 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4442 UnhandledExceptionPtr exception =
4443 static_cast<UnhandledExceptionPtr>(d.Ref(id));
4444 Deserializer::InitializeHeader(exception, kUnhandledExceptionCid,
4446 d.ReadFromTo(exception);
4447 }
4448 }
4449};
4450
4451#if !defined(DART_PRECOMPILED_RUNTIME)
4453 public:
4456 ClassPtr cls = IsolateGroup::Current()->class_table()->At(cid);
4457 host_next_field_offset_in_words_ =
4458 cls->untag()->host_next_field_offset_in_words_;
4459 ASSERT(host_next_field_offset_in_words_ > 0);
4460#if defined(DART_PRECOMPILER)
4461 target_next_field_offset_in_words_ =
4462 cls->untag()->target_next_field_offset_in_words_;
4463 target_instance_size_in_words_ =
4464 cls->untag()->target_instance_size_in_words_;
4465#else
4466 target_next_field_offset_in_words_ =
4467 cls->untag()->host_next_field_offset_in_words_;
4468 target_instance_size_in_words_ = cls->untag()->host_instance_size_in_words_;
4469#endif // defined(DART_PRECOMPILER)
4470 ASSERT(target_next_field_offset_in_words_ > 0);
4471 ASSERT(target_instance_size_in_words_ > 0);
4472 }
4474
4475 void Trace(Serializer* s, ObjectPtr object) {
4476 InstancePtr instance = Instance::RawCast(object);
4477 objects_.Add(instance);
4478 const intptr_t next_field_offset = host_next_field_offset_in_words_
4479 << kCompressedWordSizeLog2;
4480 const auto unboxed_fields_bitmap =
4481 s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(cid_);
4482 intptr_t offset = Instance::NextFieldOffset();
4483 while (offset < next_field_offset) {
4484 // Skip unboxed fields: they hold raw data, not object references to trace.
4485 if (!unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
4486 ObjectPtr raw_obj =
4487 reinterpret_cast<CompressedObjectPtr*>(
4488 reinterpret_cast<uword>(instance->untag()) + offset)
4489 ->Decompress(instance->untag()->heap_base());
4490 s->Push(raw_obj);
4491 }
4493 }
4494 }
4495
4497 const intptr_t count = objects_.length();
4498 s->WriteUnsigned(count);
4499
4500 s->Write<int32_t>(target_next_field_offset_in_words_);
4501 s->Write<int32_t>(target_instance_size_in_words_);
4502
4503 for (intptr_t i = 0; i < count; i++) {
4504 InstancePtr instance = objects_[i];
4505 s->AssignRef(instance);
4506 }
4507
4508 const intptr_t instance_size = compiler::target::RoundedAllocationSize(
4509 target_instance_size_in_words_ * compiler::target::kCompressedWordSize);
4510 target_memory_size_ += instance_size * count;
4511 }
4512
4514 intptr_t next_field_offset = host_next_field_offset_in_words_
4515 << kCompressedWordSizeLog2;
4516 const intptr_t count = objects_.length();
4517 s->WriteUnsigned64(CalculateTargetUnboxedFieldsBitmap(s, cid_).Value());
4518 const auto unboxed_fields_bitmap =
4519 s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(cid_);
4520
4521 for (intptr_t i = 0; i < count; i++) {
4522 InstancePtr instance = objects_[i];
4524#if defined(DART_PRECOMPILER)
4525 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
4526 ClassPtr cls = s->isolate_group()->class_table()->At(cid_);
4527 s->AttributePropertyRef(cls, "<class>");
4528 }
4529#endif
4530 intptr_t offset = Instance::NextFieldOffset();
4531 while (offset < next_field_offset) {
4532 if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
4533 // Writes 32 bits of the unboxed value at a time.
4534 const compressed_uword value = *reinterpret_cast<compressed_uword*>(
4535 reinterpret_cast<uword>(instance->untag()) + offset);
4536 s->WriteWordWith32BitWrites(value);
4537 } else {
4538 ObjectPtr raw_obj =
4539 reinterpret_cast<CompressedObjectPtr*>(
4540 reinterpret_cast<uword>(instance->untag()) + offset)
4541 ->Decompress(instance->untag()->heap_base());
4542 s->WriteElementRef(raw_obj, offset);
4543 }
4545 }
4546 }
4547 }
4548
4549 private:
4550 intptr_t host_next_field_offset_in_words_;
4551 intptr_t target_next_field_offset_in_words_;
4552 intptr_t target_instance_size_in_words_;
4554};
4555#endif // !DART_PRECOMPILED_RUNTIME
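// Note on the instance clusters above and below: fields marked in the class's
// unboxed-fields bitmap carry raw (non-pointer) data and are written with
// WriteWordWith32BitWrites(), while all other fields are written as object
// references. The deserializer mirrors this and null-fills any remaining slots
// between the last declared field and the full instance size.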
4556
4558 protected:
4560 bool is_canonical,
4561 bool is_root_unit)
4563 is_root_unit_(is_root_unit) {}
4564
4565 const bool is_root_unit_;
4566
4567 public:
4568#if defined(DART_PRECOMPILED_RUNTIME)
4569 void PostLoad(Deserializer* d, const Array& refs) override {
4570 if (!is_root_unit_ && is_canonical()) {
4572 d->isolate_group()->constant_canonicalization_mutex());
4573 Instance& instance = Instance::Handle(d->zone());
4574 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
4575 instance ^= refs.At(i);
4576 instance = instance.CanonicalizeLocked(d->thread());
4577 refs.SetAt(i, instance);
4578 }
4579 }
4580 }
4581#endif
4582};
4583
4586 public:
4588 bool is_canonical,
4589 bool is_immutable,
4590 bool is_root_unit)
4593 is_root_unit),
4594 cid_(cid),
4595 is_immutable_(is_immutable) {}
4597
4598 void ReadAlloc(Deserializer* d) override {
4599 start_index_ = d->next_index();
4600 const intptr_t count = d->ReadUnsigned();
4601 next_field_offset_in_words_ = d->Read<int32_t>();
4602 instance_size_in_words_ = d->Read<int32_t>();
4603 intptr_t instance_size = Object::RoundedAllocationSize(
4604 instance_size_in_words_ * kCompressedWordSize);
4605 for (intptr_t i = 0; i < count; i++) {
4606 d->AssignRef(d->Allocate(instance_size));
4607 }
4608 stop_index_ = d->next_index();
4609 }
4610
4611 void ReadFill(Deserializer* d_) override {
4613
4614 const intptr_t cid = cid_;
4615 const bool mark_canonical = is_root_unit_ && is_canonical();
4616 const bool is_immutable = is_immutable_;
4617 intptr_t next_field_offset = next_field_offset_in_words_
4618 << kCompressedWordSizeLog2;
4619 intptr_t instance_size = Object::RoundedAllocationSize(
4620 instance_size_in_words_ * kCompressedWordSize);
4621 const UnboxedFieldBitmap unboxed_fields_bitmap(d.ReadUnsigned64());
4622
4623 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4624 InstancePtr instance = static_cast<InstancePtr>(d.Ref(id));
4626 mark_canonical, is_immutable);
4627 intptr_t offset = Instance::NextFieldOffset();
4628 while (offset < next_field_offset) {
4629 if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
4630 compressed_uword* p = reinterpret_cast<compressed_uword*>(
4631 reinterpret_cast<uword>(instance->untag()) + offset);
4632 // Reads 32 bits of the unboxed value at a time.
4633 *p = d.ReadWordWith32BitReads();
4634 } else {
4635 CompressedObjectPtr* p = reinterpret_cast<CompressedObjectPtr*>(
4636 reinterpret_cast<uword>(instance->untag()) + offset);
4637 *p = d.ReadRef();
4638 }
4640 }
4641 while (offset < instance_size) {
4642 CompressedObjectPtr* p = reinterpret_cast<CompressedObjectPtr*>(
4643 reinterpret_cast<uword>(instance->untag()) + offset);
4644 *p = Object::null();
4646 }
4647 ASSERT(offset == instance_size);
4648 }
4649 }
4650
4651 private:
4652 const intptr_t cid_;
4653 const bool is_immutable_;
4654 intptr_t next_field_offset_in_words_;
4655 intptr_t instance_size_in_words_;
4656};
4657
4658#if !defined(DART_PRECOMPILED_RUNTIME)
4660 public:
4662 : SerializationCluster("LibraryPrefix",
4663 kLibraryPrefixCid,
4664 compiler::target::LibraryPrefix::InstanceSize()) {}
4666
4667 void Trace(Serializer* s, ObjectPtr object) {
4668 LibraryPrefixPtr prefix = LibraryPrefix::RawCast(object);
4669 objects_.Add(prefix);
4670 PushFromTo(prefix);
4671 }
4672
4674 const intptr_t count = objects_.length();
4675 s->WriteUnsigned(count);
4676 for (intptr_t i = 0; i < count; i++) {
4677 LibraryPrefixPtr prefix = objects_[i];
4678 s->AssignRef(prefix);
4679 }
4680 }
4681
4683 const intptr_t count = objects_.length();
4684 for (intptr_t i = 0; i < count; i++) {
4685 LibraryPrefixPtr prefix = objects_[i];
4686 AutoTraceObject(prefix);
4687 WriteFromTo(prefix);
4688 s->Write<uint16_t>(prefix->untag()->num_imports_);
4689 s->Write<bool>(prefix->untag()->is_deferred_load_);
4690 }
4691 }
4692
4693 private:
4695};
4696#endif // !DART_PRECOMPILED_RUNTIME
4697
4699 public:
4703
4707
4708 void ReadFill(Deserializer* d_) override {
4710
4711 ASSERT(!is_canonical()); // Never canonical.
4712 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4713 LibraryPrefixPtr prefix = static_cast<LibraryPrefixPtr>(d.Ref(id));
4714 Deserializer::InitializeHeader(prefix, kLibraryPrefixCid,
4716 d.ReadFromTo(prefix);
4717 prefix->untag()->num_imports_ = d.Read<uint16_t>();
4718 prefix->untag()->is_deferred_load_ = d.Read<bool>();
4719 }
4720 }
4721};
4722
4723#if !defined(DART_PRECOMPILED_RUNTIME)
4726 CanonicalTypeSet,
4727 Type,
4728 TypePtr,
4729 /*kAllCanonicalObjectsAreIncludedIntoSet=*/false> {
4730 public:
4731 TypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
4733 kTypeCid,
4735 represents_canonical_set,
4736 "Type",
4737 compiler::target::Type::InstanceSize()) {}
4739
4740 void Trace(Serializer* s, ObjectPtr object) {
4741 TypePtr type = Type::RawCast(object);
4742 objects_.Add(type);
4743
4745
4746 ASSERT(type->untag()->type_class_id() != kIllegalCid);
4747 ClassPtr type_class =
4748 s->isolate_group()->class_table()->At(type->untag()->type_class_id());
4749 s->Push(type_class);
4750 }
4751
4753 intptr_t count = objects_.length();
4754 s->WriteUnsigned(count);
4756 for (intptr_t i = 0; i < count; i++) {
4757 TypePtr type = objects_[i];
4758 s->AssignRef(type);
4759 }
4761 }
4762
4764 intptr_t count = objects_.length();
4765 for (intptr_t i = 0; i < count; i++) {
4766 WriteType(s, objects_[i]);
4767 }
4768 }
4769
4770 private:
4771 Type& type_ = Type::Handle();
4772 Class& cls_ = Class::Handle();
4773
4774 // Type::Canonicalize does not actually put all canonical Type objects into
4775 // the canonical_types set. Some of the canonical declaration types (but not all
4776 // of them) are simply cached in UntaggedClass::declaration_type_ and are not
4777 // inserted into the canonical_types set.
4778 // Keep in sync with Type::Canonicalize.
4779 virtual bool IsInCanonicalSet(Serializer* s, TypePtr type) {
4780 ClassPtr type_class =
4781 s->isolate_group()->class_table()->At(type->untag()->type_class_id());
4782 if (type_class->untag()->declaration_type() != type) {
4783 return true;
4784 }
4785
4786 type_ = type;
4787 cls_ = type_class;
4788 return !type_.IsDeclarationTypeOf(cls_);
4789 }
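// Illustrative example (hypothetical): for a non-generic `class C {}`, the
// canonical type `C` is usually cached in C's declaration_type_ slot rather
// than inserted into canonical_types, so the check above reports it as not
// being in the canonical set; other canonical Type instances are expected to
// be present in the set.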
4790
4791 void WriteType(Serializer* s, TypePtr type) {
4793#if defined(DART_PRECOMPILER)
4794 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
4795 ClassPtr type_class =
4796 s->isolate_group()->class_table()->At(type->untag()->type_class_id());
4797 s->AttributePropertyRef(type_class, "<type_class>");
4798 }
4799#endif
4801 s->WriteUnsigned(type->untag()->flags());
4802 }
4803};
4804#endif // !DART_PRECOMPILED_RUNTIME
4805
4808 CanonicalTypeSet,
4809 /*kAllCanonicalObjectsAreIncludedIntoSet=*/false> {
4810 public:
4811 explicit TypeDeserializationCluster(bool is_canonical, bool is_root_unit)
4812 : CanonicalSetDeserializationCluster(is_canonical, is_root_unit, "Type") {
4813 }
4815
4820
4821 void ReadFill(Deserializer* d_) override {
4823
4824 const bool mark_canonical = is_root_unit_ && is_canonical();
4825 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4826 TypePtr type = static_cast<TypePtr>(d.Ref(id));
4828 mark_canonical);
4829 d.ReadFromTo(type);
4830 type->untag()->set_flags(d.ReadUnsigned());
4831 }
4832 }
4833
4834 void PostLoad(Deserializer* d, const Array& refs) override {
4835 if (!table_.IsNull()) {
4836 auto object_store = d->isolate_group()->object_store();
4837 VerifyCanonicalSet(d, refs,
4838 Array::Handle(object_store->canonical_types()));
4839 object_store->set_canonical_types(table_);
4840 } else if (!is_root_unit_ && is_canonical()) {
4842 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
4843 type ^= refs.At(i);
4844 type = type.Canonicalize(d->thread());
4845 refs.SetAt(i, type);
4846 }
4847 }
4848
4849 Type& type = Type::Handle(d->zone());
4850 Code& stub = Code::Handle(d->zone());
4851
4852 if (Snapshot::IncludesCode(d->kind())) {
4853 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4854 type ^= refs.At(id);
4855 type.UpdateTypeTestingStubEntryPoint();
4856 }
4857 } else {
4858 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4859 type ^= refs.At(id);
4861 type.InitializeTypeTestingStubNonAtomic(stub);
4862 }
4863 }
4864 }
4865};
4866
4867#if !defined(DART_PRECOMPILED_RUNTIME)
4869 : public CanonicalSetSerializationCluster<CanonicalFunctionTypeSet,
4870 FunctionType,
4871 FunctionTypePtr> {
4872 public:
4874 bool represents_canonical_set)
4876 kFunctionTypeCid,
4878 represents_canonical_set,
4879 "FunctionType",
4880 compiler::target::FunctionType::InstanceSize()) {}
4882
4883 void Trace(Serializer* s, ObjectPtr object) {
4884 FunctionTypePtr type = FunctionType::RawCast(object);
4885 objects_.Add(type);
4887 }
4888
4890 intptr_t count = objects_.length();
4891 s->WriteUnsigned(count);
4893
4894 for (intptr_t i = 0; i < count; i++) {
4895 FunctionTypePtr type = objects_[i];
4896 s->AssignRef(type);
4897 }
4899 }
4900
4902 intptr_t count = objects_.length();
4903 for (intptr_t i = 0; i < count; i++) {
4904 WriteFunctionType(s, objects_[i]);
4905 }
4906 }
4907
4908 private:
4909 void WriteFunctionType(Serializer* s, FunctionTypePtr type) {
4912 ASSERT(Utils::IsUint(8, type->untag()->flags()));
4913 s->Write<uint8_t>(type->untag()->flags());
4914 s->Write<uint32_t>(type->untag()->packed_parameter_counts_);
4915 s->Write<uint16_t>(type->untag()->packed_type_parameter_counts_);
4916 }
4917};
4918#endif // !DART_PRECOMPILED_RUNTIME
4919
4921 : public CanonicalSetDeserializationCluster<CanonicalFunctionTypeSet> {
4922 public:
4924 bool is_root_unit)
4926 is_root_unit,
4927 "FunctionType") {}
4929
4934
4935 void ReadFill(Deserializer* d_) override {
4937
4938 const bool mark_canonical = is_root_unit_ && is_canonical();
4939 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4940 FunctionTypePtr type = static_cast<FunctionTypePtr>(d.Ref(id));
4942 type, kFunctionTypeCid, FunctionType::InstanceSize(), mark_canonical);
4943 d.ReadFromTo(type);
4944 type->untag()->set_flags(d.Read<uint8_t>());
4945 type->untag()->packed_parameter_counts_ = d.Read<uint32_t>();
4946 type->untag()->packed_type_parameter_counts_ = d.Read<uint16_t>();
4947 }
4948 }
4949
4950 void PostLoad(Deserializer* d, const Array& refs) override {
4951 if (!table_.IsNull()) {
4952 auto object_store = d->isolate_group()->object_store();
4954 d, refs, Array::Handle(object_store->canonical_function_types()));
4955 object_store->set_canonical_function_types(table_);
4956 } else if (!is_root_unit_ && is_canonical()) {
4958 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
4959 type ^= refs.At(i);
4960 type = type.Canonicalize(d->thread());
4961 refs.SetAt(i, type);
4962 }
4963 }
4964
4966 Code& stub = Code::Handle(d->zone());
4967
4968 if (Snapshot::IncludesCode(d->kind())) {
4969 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4970 type ^= refs.At(id);
4971 type.UpdateTypeTestingStubEntryPoint();
4972 }
4973 } else {
4974 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4975 type ^= refs.At(id);
4977 type.InitializeTypeTestingStubNonAtomic(stub);
4978 }
4979 }
4980 }
4981};
4982
4983#if !defined(DART_PRECOMPILED_RUNTIME)
4985 : public CanonicalSetSerializationCluster<CanonicalRecordTypeSet,
4986 RecordType,
4987 RecordTypePtr> {
4988 public:
4990 bool represents_canonical_set)
4992 kRecordTypeCid,
4994 represents_canonical_set,
4995 "RecordType",
4996 compiler::target::RecordType::InstanceSize()) {}
4998
4999 void Trace(Serializer* s, ObjectPtr object) {
5000 RecordTypePtr type = RecordType::RawCast(object);
5001 objects_.Add(type);
5003 }
5004
5006 intptr_t count = objects_.length();
5007 s->WriteUnsigned(count);
5009
5010 for (intptr_t i = 0; i < count; i++) {
5011 RecordTypePtr type = objects_[i];
5012 s->AssignRef(type);
5013 }
5015 }
5016
5018 intptr_t count = objects_.length();
5019 for (intptr_t i = 0; i < count; i++) {
5020 WriteRecordType(s, objects_[i]);
5021 }
5022 }
5023
5024 private:
5025 void WriteRecordType(Serializer* s, RecordTypePtr type) {
5028 ASSERT(Utils::IsUint(8, type->untag()->flags()));
5029 s->Write<uint8_t>(type->untag()->flags());
5030 }
5031};
5032#endif // !DART_PRECOMPILED_RUNTIME
5033
5035 : public CanonicalSetDeserializationCluster<CanonicalRecordTypeSet> {
5036 public:
5039 is_root_unit,
5040 "RecordType") {}
5042
5047
5048 void ReadFill(Deserializer* d_) override {
5050
5051 const bool mark_canonical = is_root_unit_ && is_canonical();
5052 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5053 RecordTypePtr type = static_cast<RecordTypePtr>(d.Ref(id));
5055 type, kRecordTypeCid, RecordType::InstanceSize(), mark_canonical);
5056 d.ReadFromTo(type);
5057 type->untag()->set_flags(d.Read<uint8_t>());
5058 }
5059 }
5060
5061 void PostLoad(Deserializer* d, const Array& refs) override {
5062 if (!table_.IsNull()) {
5063 auto object_store = d->isolate_group()->object_store();
5064 VerifyCanonicalSet(d, refs,
5065 Array::Handle(object_store->canonical_record_types()));
5066 object_store->set_canonical_record_types(table_);
5067 } else if (!is_root_unit_ && is_canonical()) {
5069 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
5070 type ^= refs.At(i);
5071 type = type.Canonicalize(d->thread());
5072 refs.SetAt(i, type);
5073 }
5074 }
5075
5076 RecordType& type = RecordType::Handle(d->zone());
5077 Code& stub = Code::Handle(d->zone());
5078
5079 if (Snapshot::IncludesCode(d->kind())) {
5080 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5081 type ^= refs.At(id);
5082 type.UpdateTypeTestingStubEntryPoint();
5083 }
5084 } else {
5085 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5086 type ^= refs.At(id);
5088 type.InitializeTypeTestingStubNonAtomic(stub);
5089 }
5090 }
5091 }
5092};
5093
5094#if !defined(DART_PRECOMPILED_RUNTIME)
5096 : public CanonicalSetSerializationCluster<CanonicalTypeParameterSet,
5097 TypeParameter,
5098 TypeParameterPtr> {
5099 public:
5101 bool cluster_represents_canonical_set)
5103 kTypeParameterCid,
5105 cluster_represents_canonical_set,
5106 "TypeParameter",
5107 compiler::target::TypeParameter::InstanceSize()) {}
5109
5110 void Trace(Serializer* s, ObjectPtr object) {
5111 TypeParameterPtr type = TypeParameter::RawCast(object);
5112 objects_.Add(type);
5113
5115 }
5116
5118 intptr_t count = objects_.length();
5119 s->WriteUnsigned(count);
5121 for (intptr_t i = 0; i < count; i++) {
5122 TypeParameterPtr type = objects_[i];
5123 s->AssignRef(type);
5124 }
5126 }
5127
5129 intptr_t count = objects_.length();
5130 for (intptr_t i = 0; i < count; i++) {
5131 WriteTypeParameter(s, objects_[i]);
5132 }
5133 }
5134
5135 private:
5136 void WriteTypeParameter(Serializer* s, TypeParameterPtr type) {
5139 s->Write<uint16_t>(type->untag()->base_);
5140 s->Write<uint16_t>(type->untag()->index_);
5141 ASSERT(Utils::IsUint(8, type->untag()->flags()));
5142 s->Write<uint8_t>(type->untag()->flags());
5143 }
5144};
5145#endif // !DART_PRECOMPILED_RUNTIME
5146
5148 : public CanonicalSetDeserializationCluster<CanonicalTypeParameterSet> {
5149 public:
5151 bool is_root_unit)
5153 is_root_unit,
5154 "TypeParameter") {}
5156
5161
5162 void ReadFill(Deserializer* d_) override {
5164
5165 const bool mark_canonical = is_root_unit_ && is_canonical();
5166 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5167 TypeParameterPtr type = static_cast<TypeParameterPtr>(d.Ref(id));
5168 Deserializer::InitializeHeader(type, kTypeParameterCid,
5170 mark_canonical);
5171 d.ReadFromTo(type);
5172 type->untag()->base_ = d.Read<uint16_t>();
5173 type->untag()->index_ = d.Read<uint16_t>();
5174 type->untag()->set_flags(d.Read<uint8_t>());
5175 }
5176 }
5177
5178 void PostLoad(Deserializer* d, const Array& refs) override {
5179 if (!table_.IsNull()) {
5180 auto object_store = d->isolate_group()->object_store();
5182 d, refs, Array::Handle(object_store->canonical_type_parameters()));
5183 object_store->set_canonical_type_parameters(table_);
5184 } else if (!is_root_unit_ && is_canonical()) {
5185 TypeParameter& type_param = TypeParameter::Handle(d->zone());
5186 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
5187 type_param ^= refs.At(i);
5188 type_param ^= type_param.Canonicalize(d->thread());
5189 refs.SetAt(i, type_param);
5190 }
5191 }
5192
5193 TypeParameter& type_param = TypeParameter::Handle(d->zone());
5194 Code& stub = Code::Handle(d->zone());
5195
5196 if (Snapshot::IncludesCode(d->kind())) {
5197 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5198 type_param ^= refs.At(id);
5200 }
5201 } else {
5202 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5203 type_param ^= refs.At(id);
5205 type_param.InitializeTypeTestingStubNonAtomic(stub);
5206 }
5207 }
5208 }
5209};
5210
5211#if !defined(DART_PRECOMPILED_RUNTIME)
5213 public:
5215 : SerializationCluster("Closure",
5216 kClosureCid,
5217 compiler::target::Closure::InstanceSize(),
5218 is_canonical) {}
5220
5221 void Trace(Serializer* s, ObjectPtr object) {
5222 ClosurePtr closure = Closure::RawCast(object);
5223 objects_.Add(closure);
5224 PushFromTo(closure);
5225 }
5226
5228 const intptr_t count = objects_.length();
5229 s->WriteUnsigned(count);
5230 for (intptr_t i = 0; i < count; i++) {
5231 ClosurePtr closure = objects_[i];
5232 s->AssignRef(closure);
5233 }
5234 }
5235
5237 const intptr_t count = objects_.length();
5238 for (intptr_t i = 0; i < count; i++) {
5239 ClosurePtr closure = objects_[i];
5240 AutoTraceObject(closure);
5241 WriteFromTo(closure);
5242 }
5243 }
5244
5245 private:
5247};
5248#endif // !DART_PRECOMPILED_RUNTIME
5249
5252 public:
5253 explicit ClosureDeserializationCluster(bool is_canonical, bool is_root_unit)
5256 is_root_unit) {}
5258
5262
5263 void ReadFill(Deserializer* d_) override {
5265
5266 const bool mark_canonical = is_root_unit_ && is_canonical();
5267 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5268 ClosurePtr closure = static_cast<ClosurePtr>(d.Ref(id));
5269 Deserializer::InitializeHeader(closure, kClosureCid,
5270 Closure::InstanceSize(), mark_canonical);
5271 d.ReadFromTo(closure);
5272#if defined(DART_PRECOMPILED_RUNTIME)
5273 closure->untag()->entry_point_ = 0;
5274#endif
5275 }
5276 }
5277
5278#if defined(DART_PRECOMPILED_RUNTIME)
5279 void PostLoad(Deserializer* d, const Array& refs) override {
5280 // We only cache the entry point in bare instructions mode (as we need
5281 // to load the function anyway otherwise).
5282 ASSERT(d->kind() == Snapshot::kFullAOT);
5283 auto& closure = Closure::Handle(d->zone());
5284 auto& func = Function::Handle(d->zone());
5285 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
5286 closure ^= refs.At(i);
5287 func = closure.function();
5288 uword entry_point = func.entry_point();
5289 ASSERT(entry_point != 0);
5290 closure.ptr()->untag()->entry_point_ = entry_point;
5291 }
5292 }
5293#endif
5294};
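// Note: in the precompiled runtime the closure's entry_point_ is cleared during
// ReadFill and then re-derived in PostLoad from the closure's function, so AOT
// closure calls can jump through the cached entry point without first loading
// the Function object.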
5295
5296#if !defined(DART_PRECOMPILED_RUNTIME)
5298 public:
5302
5303 void Trace(Serializer* s, ObjectPtr object) {
5304 if (!object->IsHeapObject()) {
5305 SmiPtr smi = Smi::RawCast(object);
5306 smis_.Add(smi);
5307 } else {
5308 MintPtr mint = Mint::RawCast(object);
5309 mints_.Add(mint);
5310 }
5311 }
5312
5314 s->WriteUnsigned(smis_.length() + mints_.length());
5315 for (intptr_t i = 0; i < smis_.length(); i++) {
5316 SmiPtr smi = smis_[i];
5317 s->AssignRef(smi);
5318 AutoTraceObject(smi);
5319 const int64_t value = Smi::Value(smi);
5320 s->Write<int64_t>(value);
5321 if (!Smi::IsValid(value)) {
5322 // This Smi will become a Mint when loaded.
5323 target_memory_size_ += compiler::target::Mint::InstanceSize();
5324 }
5325 }
5326 for (intptr_t i = 0; i < mints_.length(); i++) {
5327 MintPtr mint = mints_[i];
5328 s->AssignRef(mint);
5329 AutoTraceObject(mint);
5330 s->Write<int64_t>(mint->untag()->value_);
5331 // All Mints on the host should be Mints on the target.
5332 ASSERT(!Smi::IsValid(mint->untag()->value_));
5333 target_memory_size_ += compiler::target::Mint::InstanceSize();
5334 }
5335 }
5336
5338
5339 private:
5342};
5343#endif // !DART_PRECOMPILED_RUNTIME
5344
5347 public:
5348 explicit MintDeserializationCluster(bool is_canonical, bool is_root_unit)
5351 is_root_unit) {}
5353
5354 void ReadAlloc(Deserializer* d) override {
5355 start_index_ = d->next_index();
5356 const intptr_t count = d->ReadUnsigned();
5357 const bool mark_canonical = is_canonical();
5358 for (intptr_t i = 0; i < count; i++) {
5359 int64_t value = d->Read<int64_t>();
5360 if (Smi::IsValid(value)) {
5361 d->AssignRef(Smi::New(value));
5362 } else {
5363 MintPtr mint = static_cast<MintPtr>(d->Allocate(Mint::InstanceSize()));
5365 mark_canonical);
5366 mint->untag()->value_ = value;
5367 d->AssignRef(mint);
5368 }
5369 }
5370 stop_index_ = d->next_index();
5371 }
5372
5373 void ReadFill(Deserializer* d_) override { Deserializer::Local d(d_); }
5374};
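// Note: Smis and Mints share one cluster and are written uniformly as int64
// values. On load, a value that satisfies Smi::IsValid() is materialized as a
// Smi and anything else as a heap-allocated Mint, so a value written from a
// host Smi may still be rehydrated as a Mint when the target's Smi range is
// smaller.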
5375
5376#if !defined(DART_PRECOMPILED_RUNTIME)
5378 public:
5380 : SerializationCluster("double",
5381 kDoubleCid,
5382 compiler::target::Double::InstanceSize(),
5383 is_canonical) {}
5385
5386 void Trace(Serializer* s, ObjectPtr object) {
5387 DoublePtr dbl = Double::RawCast(object);
5388 objects_.Add(dbl);
5389 }
5390
5392 const intptr_t count = objects_.length();
5393 s->WriteUnsigned(count);
5394 for (intptr_t i = 0; i < count; i++) {
5395 DoublePtr dbl = objects_[i];
5396 s->AssignRef(dbl);
5397 }
5398 }
5399
5401 const intptr_t count = objects_.length();
5402 for (intptr_t i = 0; i < count; i++) {
5403 DoublePtr dbl = objects_[i];
5404 AutoTraceObject(dbl);
5405 s->Write<double>(dbl->untag()->value_);
5406 }
5407 }
5408
5409 private:
5410 GrowableArray<DoublePtr> objects_;
5411};
5412#endif // !DART_PRECOMPILED_RUNTIME
5413
5416 public:
5417 explicit DoubleDeserializationCluster(bool is_canonical, bool is_root_unit)
5420 is_root_unit) {}
5422
5426
5427 void ReadFill(Deserializer* d_) override {
5429 const bool mark_canonical = is_root_unit_ && is_canonical();
5430 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5431 DoublePtr dbl = static_cast<DoublePtr>(d.Ref(id));
5433 mark_canonical);
5434 dbl->untag()->value_ = d.Read<double>();
5435 }
5436 }
5437};
5438
5439#if !defined(DART_PRECOMPILED_RUNTIME)
5441 public:
5443 : SerializationCluster("Simd128",
5444 cid,
5445 compiler::target::Int32x4::InstanceSize(),
5446 is_canonical) {
5447 ASSERT_EQUAL(compiler::target::Int32x4::InstanceSize(),
5448 compiler::target::Float32x4::InstanceSize());
5449 ASSERT_EQUAL(compiler::target::Int32x4::InstanceSize(),
5450 compiler::target::Float64x2::InstanceSize());
5451 }
5453
5454 void Trace(Serializer* s, ObjectPtr object) { objects_.Add(object); }
5455
5457 const intptr_t count = objects_.length();
5458 s->WriteUnsigned(count);
5459 for (intptr_t i = 0; i < count; i++) {
5460 ObjectPtr vector = objects_[i];
5461 s->AssignRef(vector);
5462 }
5463 }
5464
5466 const intptr_t count = objects_.length();
5467 for (intptr_t i = 0; i < count; i++) {
5468 ObjectPtr vector = objects_[i];
5469 AutoTraceObject(vector);
5472 s->WriteBytes(&(static_cast<Int32x4Ptr>(vector)->untag()->value_),
5473 sizeof(simd128_value_t));
5474 }
5475 }
5476
5477 private:
5478 GrowableArray<ObjectPtr> objects_;
5479};
5480#endif // !DART_PRECOMPILED_RUNTIME
5481
5484 public:
5486 bool is_canonical,
5487 bool is_root_unit)
5490 is_root_unit),
5491 cid_(cid) {}
5493
5499
5500 void ReadFill(Deserializer* d_) override {
5502 const intptr_t cid = cid_;
5503 const bool mark_canonical = is_root_unit_ && is_canonical();
5504 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5505 ObjectPtr vector = d.Ref(id);
5507 mark_canonical);
5508 d.ReadBytes(&(static_cast<Int32x4Ptr>(vector)->untag()->value_),
5509 sizeof(simd128_value_t));
5510 }
5511 }
5512
5513 private:
5514 intptr_t cid_;
5515};
5516
5517#if !defined(DART_PRECOMPILED_RUNTIME)
5519 public:
5522 "GrowableObjectArray",
5523 kGrowableObjectArrayCid,
5524 compiler::target::GrowableObjectArray::InstanceSize()) {}
5526
5527 void Trace(Serializer* s, ObjectPtr object) {
5528 GrowableObjectArrayPtr array = GrowableObjectArray::RawCast(object);
5529 objects_.Add(array);
5530 PushFromTo(array);
5531 }
5532
5534 const intptr_t count = objects_.length();
5535 s->WriteUnsigned(count);
5536 for (intptr_t i = 0; i < count; i++) {
5537 GrowableObjectArrayPtr array = objects_[i];
5538 s->AssignRef(array);
5539 }
5540 }
5541
5543 const intptr_t count = objects_.length();
5544 for (intptr_t i = 0; i < count; i++) {
5545 GrowableObjectArrayPtr array = objects_[i];
5546 AutoTraceObject(array);
5547 WriteFromTo(array);
5548 }
5549 }
5550
5551 private:
5553};
5554#endif // !DART_PRECOMPILED_RUNTIME
5555
5557 : public DeserializationCluster {
5558 public:
5562
5566
5567 void ReadFill(Deserializer* d_) override {
5569
5570 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5571 GrowableObjectArrayPtr list =
5572 static_cast<GrowableObjectArrayPtr>(d.Ref(id));
5573 Deserializer::InitializeHeader(list, kGrowableObjectArrayCid,
5575 d.ReadFromTo(list);
5576 }
5577 }
5578};
5579
5580#if !defined(DART_PRECOMPILED_RUNTIME)
5582 public:
5586
5587 void Trace(Serializer* s, ObjectPtr object) {
5588 RecordPtr record = Record::RawCast(object);
5589 objects_.Add(record);
5590
5591 const intptr_t num_fields = Record::NumFields(record);
5592 for (intptr_t i = 0; i < num_fields; ++i) {
5593 s->Push(record->untag()->field(i));
5594 }
5595 }
5596
5598 const intptr_t count = objects_.length();
5599 s->WriteUnsigned(count);
5600 for (intptr_t i = 0; i < count; ++i) {
5601 RecordPtr record = objects_[i];
5602 s->AssignRef(record);
5603 AutoTraceObject(record);
5604 const intptr_t num_fields = Record::NumFields(record);
5605 s->WriteUnsigned(num_fields);
5606 target_memory_size_ += compiler::target::Record::InstanceSize(num_fields);
5607 }
5608 }
5609
5611 const intptr_t count = objects_.length();
5612 for (intptr_t i = 0; i < count; ++i) {
5613 RecordPtr record = objects_[i];
5614 AutoTraceObject(record);
5615 const RecordShape shape(record->untag()->shape());
5616 s->WriteUnsigned(shape.AsInt());
5617 const intptr_t num_fields = shape.num_fields();
5618 for (intptr_t j = 0; j < num_fields; ++j) {
5619 s->WriteElementRef(record->untag()->field(j), j);
5620 }
5621 }
5622 }
5623
5624 private:
5625 GrowableArray<RecordPtr> objects_;
5626};
5627#endif // !DART_PRECOMPILED_RUNTIME
5628
5631 public:
5632 explicit RecordDeserializationCluster(bool is_canonical, bool is_root_unit)
5635 is_root_unit) {}
5637
5638 void ReadAlloc(Deserializer* d) override {
5639 start_index_ = d->next_index();
5640 const intptr_t count = d->ReadUnsigned();
5641 for (intptr_t i = 0; i < count; i++) {
5642 const intptr_t num_fields = d->ReadUnsigned();
5643 d->AssignRef(d->Allocate(Record::InstanceSize(num_fields)));
5644 }
5645 stop_index_ = d->next_index();
5646 }
5647
5648 void ReadFill(Deserializer* d_) override {
5650
5651 const bool stamp_canonical = is_root_unit_ && is_canonical();
5652 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5653 RecordPtr record = static_cast<RecordPtr>(d.Ref(id));
5654 const intptr_t shape = d.ReadUnsigned();
5655 const intptr_t num_fields = RecordShape(shape).num_fields();
5656 Deserializer::InitializeHeader(record, kRecordCid,
5657 Record::InstanceSize(num_fields),
5658 stamp_canonical);
5659 record->untag()->shape_ = Smi::New(shape);
5660 for (intptr_t j = 0; j < num_fields; ++j) {
5661 record->untag()->data()[j] = d.ReadRef();
5662 }
5663 }
5664 }
5665};
5666
5667#if !defined(DART_PRECOMPILED_RUNTIME)
5669 public:
5671 : SerializationCluster("TypedData", cid) {}
5673
5674 void Trace(Serializer* s, ObjectPtr object) {
5675 TypedDataPtr data = TypedData::RawCast(object);
5676 objects_.Add(data);
5677 }
5678
5680 const intptr_t count = objects_.length();
5681 s->WriteUnsigned(count);
5683 for (intptr_t i = 0; i < count; i++) {
5684 TypedDataPtr data = objects_[i];
5685 s->AssignRef(data);
5687 const intptr_t length = Smi::Value(data->untag()->length());
5688 s->WriteUnsigned(length);
5689 target_memory_size_ +=
5690 compiler::target::TypedData::InstanceSize(length * element_size);
5691 }
5692 }
5693
5695 const intptr_t count = objects_.length();
5697 for (intptr_t i = 0; i < count; i++) {
5698 TypedDataPtr data = objects_[i];
5700 const intptr_t length = Smi::Value(data->untag()->length());
5701 s->WriteUnsigned(length);
5702 uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data());
5703 s->WriteBytes(cdata, length * element_size);
5704 }
5705 }
5706
5707 private:
5709};
5710#endif // !DART_PRECOMPILED_RUNTIME
5711
5713 public:
5715 : DeserializationCluster("TypedData"), cid_(cid) {}
5717
5718 void ReadAlloc(Deserializer* d) override {
5719 start_index_ = d->next_index();
5720 const intptr_t count = d->ReadUnsigned();
5722 for (intptr_t i = 0; i < count; i++) {
5723 const intptr_t length = d->ReadUnsigned();
5724 d->AssignRef(d->Allocate(TypedData::InstanceSize(length * element_size)));
5725 }
5726 stop_index_ = d->next_index();
5727 }
5728
5729 void ReadFill(Deserializer* d_) override {
5731
5732 ASSERT(!is_canonical()); // Never canonical.
5734
5735 const intptr_t cid = cid_;
5736 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5737 TypedDataPtr data = static_cast<TypedDataPtr>(d.Ref(id));
5738 const intptr_t length = d.ReadUnsigned();
5739 const intptr_t length_in_bytes = length * element_size;
5741 TypedData::InstanceSize(length_in_bytes));
5742 data->untag()->length_ = Smi::New(length);
5743 data->untag()->RecomputeDataField();
5744 uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data());
5745 d.ReadBytes(cdata, length_in_bytes);
5746 }
5747 }
5748
5749 private:
5750 const intptr_t cid_;
5751};
5752
5753#if !defined(DART_PRECOMPILED_RUNTIME)
5755 public:
5757 : SerializationCluster("TypedDataView",
5758 cid,
5759 compiler::target::TypedDataView::InstanceSize()) {}
5761
5762 void Trace(Serializer* s, ObjectPtr object) {
5763 TypedDataViewPtr view = TypedDataView::RawCast(object);
5764 objects_.Add(view);
5765
5766 PushFromTo(view);
5767 }
5768
5770 const intptr_t count = objects_.length();
5771 s->WriteUnsigned(count);
5772 for (intptr_t i = 0; i < count; i++) {
5773 TypedDataViewPtr view = objects_[i];
5774 s->AssignRef(view);
5775 }
5776 }
5777
5779 const intptr_t count = objects_.length();
5780 for (intptr_t i = 0; i < count; i++) {
5781 TypedDataViewPtr view = objects_[i];
5782 AutoTraceObject(view);
5783 WriteFromTo(view);
5784 }
5785 }
5786
5787 private:
5789};
5790#endif // !DART_PRECOMPILED_RUNTIME
5791
5793 public:
5795 : DeserializationCluster("TypedDataView"), cid_(cid) {}
5797
5801
5802 void ReadFill(Deserializer* d_) override {
5804
5805 const intptr_t cid = cid_;
5806 ASSERT(!is_canonical()); // Never canonical.
5807 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5808 TypedDataViewPtr view = static_cast<TypedDataViewPtr>(d.Ref(id));
5810 d.ReadFromTo(view);
5811 }
5812 }
5813
5814 void PostLoad(Deserializer* d, const Array& refs) override {
5815 auto& view = TypedDataView::Handle(d->zone());
5816 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5817 view ^= refs.At(id);
5818 view.RecomputeDataField();
5819 }
5820 }
5821
5822 private:
5823 const intptr_t cid_;
5824};
5825
5826#if !defined(DART_PRECOMPILED_RUNTIME)
5828 public:
5831 "ExternalTypedData",
5832 cid,
5833 compiler::target::ExternalTypedData::InstanceSize()) {}
5835
5836 void Trace(Serializer* s, ObjectPtr object) {
5837 ExternalTypedDataPtr data = ExternalTypedData::RawCast(object);
5838 objects_.Add(data);
5839 }
5840
5842 const intptr_t count = objects_.length();
5843 s->WriteUnsigned(count);
5844 for (intptr_t i = 0; i < count; i++) {
5845 ExternalTypedDataPtr data = objects_[i];
5846 s->AssignRef(data);
5847 }
5848 }
5849
5851 const intptr_t count = objects_.length();
5853 for (intptr_t i = 0; i < count; i++) {
5854 ExternalTypedDataPtr data = objects_[i];
5856 const intptr_t length = Smi::Value(data->untag()->length());
5857 s->WriteUnsigned(length);
5858 uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data_);
5860 s->WriteBytes(cdata, length * element_size);
5861 }
5862 }
5863
5864 private:
5866};
5867#endif // !DART_PRECOMPILED_RUNTIME
5868
5870 public:
5872 : DeserializationCluster("ExternalTypedData"), cid_(cid) {}
5874
5878
5879 void ReadFill(Deserializer* d_) override {
5881
5882 ASSERT(!is_canonical()); // Never canonical.
5883 const intptr_t cid = cid_;
5885 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5886 ExternalTypedDataPtr data = static_cast<ExternalTypedDataPtr>(d.Ref(id));
5887 const intptr_t length = d.ReadUnsigned();
5890 data->untag()->length_ = Smi::New(length);
5892 data->untag()->data_ = const_cast<uint8_t*>(d.AddressOfCurrentPosition());
5893 d.Advance(length * element_size);
5894 // No finalizer / external size 0.
5895 }
5896 }
5897
5898 private:
5899 const intptr_t cid_;
5900};
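// Note: the external typed data deserializer points data_ directly at the
// snapshot buffer and advances past the payload, making the object a zero-copy
// view of the snapshot; accordingly no finalizer is attached and no external
// size is accounted.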
5901
5902#if !defined(DART_PRECOMPILED_RUNTIME)
5904 public:
5906 : SerializationCluster("DeltaEncodedTypedData",
5907 kDeltaEncodedTypedDataCid) {}
5909
5910 void Trace(Serializer* s, ObjectPtr object) {
5911 TypedDataPtr data = TypedData::RawCast(object);
5912 objects_.Add(data);
5913 }
5914
5916 const intptr_t count = objects_.length();
5917 s->WriteUnsigned(count);
5918 for (intptr_t i = 0; i < count; i++) {
5919 const TypedDataPtr data = objects_[i];
5920 const intptr_t element_size =
5921 TypedData::ElementSizeInBytes(data->GetClassId());
5922 s->AssignRef(data);
5924 const intptr_t length_in_bytes =
5925 Smi::Value(data->untag()->length()) * element_size;
5926 s->WriteUnsigned(length_in_bytes);
5927 target_memory_size_ +=
5928 compiler::target::TypedData::InstanceSize(length_in_bytes);
5929 }
5930 }
5931
5933 const intptr_t count = objects_.length();
5934 TypedData& typed_data = TypedData::Handle(s->zone());
5935 for (intptr_t i = 0; i < count; i++) {
5936 const TypedDataPtr data = objects_[i];
5938 const intptr_t cid = data->GetClassId();
5939 // Only Uint16 and Uint32 typed data are supported at the moment, so encode
5940 // which one this is in the low bit of the length: Uint16 is 0, Uint32 is 1.
5941 ASSERT(cid == kTypedDataUint16ArrayCid ||
5942 cid == kTypedDataUint32ArrayCid);
5943 const intptr_t cid_flag = cid == kTypedDataUint16ArrayCid ? 0 : 1;
5944 const intptr_t length = Smi::Value(data->untag()->length());
5945 const intptr_t encoded_length = (length << 1) | cid_flag;
5946 s->WriteUnsigned(encoded_length);
5947 intptr_t prev = 0;
5948 typed_data = data;
5949 for (intptr_t j = 0; j < length; ++j) {
5950 const intptr_t value = (cid == kTypedDataUint16ArrayCid)
5951 ? typed_data.GetUint16(j << 1)
5952 : typed_data.GetUint32(j << 2);
5953 ASSERT(value >= prev);
5954 s->WriteUnsigned(value - prev);
5955 prev = value;
5956 }
5957 }
5958 }
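// Illustrative example (hypothetical values): a Uint16 list [3, 7, 7, 42] is
// written as the encoded length (4 << 1) | 0 = 8 (low bit 0 selects Uint16),
// followed by the deltas 3, 4, 0, 35. The deserialization cluster below sums
// the deltas back into the absolute values.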
5959
5960 private:
5962};
5963#endif // !DART_PRECOMPILED_RUNTIME
5964
5966 : public DeserializationCluster {
5967 public:
5971
5972 void ReadAlloc(Deserializer* d) override {
5973 start_index_ = d->next_index();
5974 const intptr_t count = d->ReadUnsigned();
5975 for (intptr_t i = 0; i < count; i++) {
5976 const intptr_t length_in_bytes = d->ReadUnsigned();
5977 d->AssignRef(d->Allocate(TypedData::InstanceSize(length_in_bytes)));
5978 }
5979 stop_index_ = d->next_index();
5980 }
5981
5982 void ReadFill(Deserializer* d_) override {
5984 TypedData& typed_data = TypedData::Handle(d_->zone());
5985
5986 ASSERT(!is_canonical()); // Never canonical.
5987
5988 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5989 TypedDataPtr data = static_cast<TypedDataPtr>(d.Ref(id));
5990 const intptr_t encoded_length = d.ReadUnsigned();
5991 const intptr_t length = encoded_length >> 1;
5992 const intptr_t cid = (encoded_length & 0x1) == 0
5993 ? kTypedDataUint16ArrayCid
5994 : kTypedDataUint32ArrayCid;
5996 const intptr_t length_in_bytes = length * element_size;
5998 TypedData::InstanceSize(length_in_bytes));
5999 data->untag()->length_ = Smi::New(length);
6000 data->untag()->RecomputeDataField();
6001 intptr_t value = 0;
6002 typed_data = data;
6003 for (intptr_t j = 0; j < length; ++j) {
6004 value += d.ReadUnsigned();
6005 if (cid == kTypedDataUint16ArrayCid) {
6006 typed_data.SetUint16(j << 1, static_cast<uint16_t>(value));
6007 } else {
6008 typed_data.SetUint32(j << 2, value);
6009 }
6010 }
6011 }
6012 }
6013};
6014
6015#if !defined(DART_PRECOMPILED_RUNTIME)
6017 public:
6019 : SerializationCluster("StackTrace",
6020 kStackTraceCid,
6021 compiler::target::StackTrace::InstanceSize()) {}
6023
6024 void Trace(Serializer* s, ObjectPtr object) {
6025 StackTracePtr trace = StackTrace::RawCast(object);
6026 objects_.Add(trace);
6027 PushFromTo(trace);
6028 }
6029
6031 const intptr_t count = objects_.length();
6032 s->WriteUnsigned(count);
6033 for (intptr_t i = 0; i < count; i++) {
6034 StackTracePtr trace = objects_[i];
6035 s->AssignRef(trace);
6036 }
6037 }
6038
6040 const intptr_t count = objects_.length();
6041 for (intptr_t i = 0; i < count; i++) {
6042 StackTracePtr trace = objects_[i];
6043 AutoTraceObject(trace);
6044 WriteFromTo(trace);
6045 }
6046 }
6047
6048 private:
6050};
6051#endif // !DART_PRECOMPILED_RUNTIME
6052
6054 public:
6057
6061
6062 void ReadFill(Deserializer* d_) override {
6064
6065 ASSERT(!is_canonical()); // Never canonical.
6066 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6067 StackTracePtr trace = static_cast<StackTracePtr>(d.Ref(id));
6068 Deserializer::InitializeHeader(trace, kStackTraceCid,
6069 StackTrace::InstanceSize());
6070 d.ReadFromTo(trace);
6071 }
6072 }
6073};
6074
6075#if !defined(DART_PRECOMPILED_RUNTIME)
6076 class RegExpSerializationCluster : public SerializationCluster {
6077 public:
6078 RegExpSerializationCluster()
6079 : SerializationCluster("RegExp",
6080 kRegExpCid,
6081 compiler::target::RegExp::InstanceSize()) {}
6083
6084 void Trace(Serializer* s, ObjectPtr object) {
6085 RegExpPtr regexp = RegExp::RawCast(object);
6086 objects_.Add(regexp);
6087 PushFromTo(regexp);
6088 }
6089
6090 void WriteAlloc(Serializer* s) {
6091 const intptr_t count = objects_.length();
6092 s->WriteUnsigned(count);
6093 for (intptr_t i = 0; i < count; i++) {
6094 RegExpPtr regexp = objects_[i];
6095 s->AssignRef(regexp);
6096 }
6097 }
6098
6099 void WriteFill(Serializer* s) {
6100 const intptr_t count = objects_.length();
6101 for (intptr_t i = 0; i < count; i++) {
6102 RegExpPtr regexp = objects_[i];
6103 AutoTraceObject(regexp);
6104 WriteFromTo(regexp);
6105 s->Write<int32_t>(regexp->untag()->num_one_byte_registers_);
6106 s->Write<int32_t>(regexp->untag()->num_two_byte_registers_);
6107 s->Write<int8_t>(regexp->untag()->type_flags_);
6108 }
6109 }
6110
6111 private:
6112 GrowableArray<RegExpPtr> objects_;
6113};
6114#endif // !DART_PRECOMPILED_RUNTIME
6115
6116 class RegExpDeserializationCluster : public DeserializationCluster {
6117 public:
6118 RegExpDeserializationCluster() : DeserializationCluster("RegExp") {}
6119 ~RegExpDeserializationCluster() {}
6120
6121 void ReadAlloc(Deserializer* d) override {
6122 ReadAllocFixedSize(d, RegExp::InstanceSize());
6123 }
6124
6125 void ReadFill(Deserializer* d_) override {
6126 Deserializer::Local d(d_);
6127
6128 ASSERT(!is_canonical()); // Never canonical.
6129 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6130 RegExpPtr regexp = static_cast<RegExpPtr>(d.Ref(id));
6131 Deserializer::InitializeHeader(regexp, kRegExpCid,
6132 RegExp::InstanceSize());
6133 d.ReadFromTo(regexp);
6134 regexp->untag()->num_one_byte_registers_ = d.Read<int32_t>();
6135 regexp->untag()->num_two_byte_registers_ = d.Read<int32_t>();
6136 regexp->untag()->type_flags_ = d.Read<int8_t>();
6137 }
6138 }
6139};
6140
6141#if !defined(DART_PRECOMPILED_RUNTIME)
6142 class WeakPropertySerializationCluster : public SerializationCluster {
6143 public:
6144 WeakPropertySerializationCluster()
6145 : SerializationCluster("WeakProperty",
6146 kWeakPropertyCid,
6147 compiler::target::WeakProperty::InstanceSize()) {}
6149
6150 void Trace(Serializer* s, ObjectPtr object) {
6151 WeakPropertyPtr property = WeakProperty::RawCast(object);
6152 objects_.Add(property);
6153
6154 s->PushWeak(property->untag()->key());
6155 }
6156
6157 void RetraceEphemerons(Serializer* s) {
6158 for (intptr_t i = 0; i < objects_.length(); i++) {
6159 WeakPropertyPtr property = objects_[i];
6160 if (s->IsReachable(property->untag()->key())) {
6161 s->Push(property->untag()->value());
6162 }
6163 }
6164 }
6165
6166 void WriteAlloc(Serializer* s) {
6167 const intptr_t count = objects_.length();
6168 s->WriteUnsigned(count);
6169 for (intptr_t i = 0; i < count; i++) {
6170 WeakPropertyPtr property = objects_[i];
6171 s->AssignRef(property);
6172 }
6173 }
6174
6175 void WriteFill(Serializer* s) {
6176 const intptr_t count = objects_.length();
6177 for (intptr_t i = 0; i < count; i++) {
6178 WeakPropertyPtr property = objects_[i];
6179 AutoTraceObject(property);
6180 if (s->HasRef(property->untag()->key())) {
6181 s->WriteOffsetRef(property->untag()->key(), WeakProperty::key_offset());
6182 s->WriteOffsetRef(property->untag()->value(),
6183 WeakProperty::value_offset());
6184 } else {
6185 s->WriteOffsetRef(Object::null(), WeakProperty::key_offset());
6186 s->WriteOffsetRef(Object::null(), WeakProperty::value_offset());
6187 }
6188 }
6189 }
6190
6191 private:
6192 GrowableArray<WeakPropertyPtr> objects_;
6193};
6194#endif // !DART_PRECOMPILED_RUNTIME
6195
6196 class WeakPropertyDeserializationCluster : public DeserializationCluster {
6197 public:
6198 WeakPropertyDeserializationCluster()
6199 : DeserializationCluster("WeakProperty") {}
6200 ~WeakPropertyDeserializationCluster() {}
6201
6202 void ReadAlloc(Deserializer* d) override {
6203 ReadAllocFixedSize(d, WeakProperty::InstanceSize());
6204 }
6205
6206 void ReadFill(Deserializer* d_) override {
6207 Deserializer::Local d(d_);
6208
6209 ASSERT(!is_canonical()); // Never canonical.
6210 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6211 WeakPropertyPtr property = static_cast<WeakPropertyPtr>(d.Ref(id));
6212 Deserializer::InitializeHeader(property, kWeakPropertyCid,
6213 WeakProperty::InstanceSize());
6214 d.ReadFromTo(property);
6215 property->untag()->next_seen_by_gc_ = WeakProperty::null();
6216 }
6217 }
6218};
6219
6220#if !defined(DART_PRECOMPILED_RUNTIME)
6221 class MapSerializationCluster : public SerializationCluster {
6222 public:
6223 MapSerializationCluster(bool is_canonical, intptr_t cid)
6224 : SerializationCluster("Map",
6225 cid,
6226 compiler::target::Map::InstanceSize(),
6227 is_canonical) {}
6229
6230 void Trace(Serializer* s, ObjectPtr object) {
6231 MapPtr map = Map::RawCast(object);
6232 // We never have mutable hashmaps in snapshots.
6233 ASSERT(map->untag()->IsCanonical());
6234 ASSERT_EQUAL(map.GetClassId(), kConstMapCid);
6235 objects_.Add(map);
6236 PushFromTo(map);
6237 }
6238
6239 void WriteAlloc(Serializer* s) {
6240 const intptr_t count = objects_.length();
6241 s->WriteUnsigned(count);
6242 for (intptr_t i = 0; i < count; i++) {
6243 MapPtr map = objects_[i];
6244 s->AssignRef(map);
6245 }
6246 }
6247
6248 void WriteFill(Serializer* s) {
6249 const intptr_t count = objects_.length();
6250 for (intptr_t i = 0; i < count; i++) {
6251 MapPtr map = objects_[i];
6252 AutoTraceObject(map);
6253 WriteFromTo(map);
6254 }
6255 }
6256
6257 private:
6258 GrowableArray<MapPtr> objects_;
6259};
6260#endif // !DART_PRECOMPILED_RUNTIME
6261
6262 class MapDeserializationCluster
6263 : public AbstractInstanceDeserializationCluster {
6264 public:
6265 MapDeserializationCluster(intptr_t cid,
6266 bool is_canonical,
6267 bool is_root_unit)
6270 is_root_unit),
6271 cid_(cid) {}
6273
6274 void ReadAlloc(Deserializer* d) override {
6276 }
6277
6278 void ReadFill(Deserializer* d_) override {
6279 Deserializer::Local d(d_);
6280
6281 const intptr_t cid = cid_;
6282 const bool mark_canonical = is_root_unit_ && is_canonical();
6283 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6284 MapPtr map = static_cast<MapPtr>(d.Ref(id));
6285 Deserializer::InitializeHeader(map, cid, Map::InstanceSize(),
6286 mark_canonical);
6287 d.ReadFromTo(map);
6288 }
6289 }
6290
6291 private:
6292 const intptr_t cid_;
6293};
6294
6295#if !defined(DART_PRECOMPILED_RUNTIME)
6296 class SetSerializationCluster : public SerializationCluster {
6297 public:
6298 SetSerializationCluster(bool is_canonical, intptr_t cid)
6299 : SerializationCluster("Set",
6300 cid,
6301 compiler::target::Set::InstanceSize(),
6302 is_canonical) {}
6304
6305 void Trace(Serializer* s, ObjectPtr object) {
6306 SetPtr set = Set::RawCast(object);
6307 // We never have mutable hashsets in snapshots.
6308 ASSERT(set->untag()->IsCanonical());
6309 ASSERT_EQUAL(set.GetClassId(), kConstSetCid);
6310 objects_.Add(set);
6311 PushFromTo(set);
6312 }
6313
6314 void WriteAlloc(Serializer* s) {
6315 const intptr_t count = objects_.length();
6316 s->WriteUnsigned(count);
6317 for (intptr_t i = 0; i < count; i++) {
6318 SetPtr set = objects_[i];
6319 s->AssignRef(set);
6320 }
6321 }
6322
6323 void WriteFill(Serializer* s) {
6324 const intptr_t count = objects_.length();
6325 for (intptr_t i = 0; i < count; i++) {
6326 SetPtr set = objects_[i];
6327 AutoTraceObject(set);
6328 WriteFromTo(set);
6329 }
6330 }
6331
6332 private:
6333 GrowableArray<SetPtr> objects_;
6334};
6335#endif // !DART_PRECOMPILED_RUNTIME
6336
6337 class SetDeserializationCluster
6338 : public AbstractInstanceDeserializationCluster {
6339 public:
6340 SetDeserializationCluster(intptr_t cid,
6341 bool is_canonical,
6342 bool is_root_unit)
6345 is_root_unit),
6346 cid_(cid) {}
6348
6349 void ReadAlloc(Deserializer* d) override {
6351 }
6352
6353 void ReadFill(Deserializer* d_) override {
6354 Deserializer::Local d(d_);
6355
6356 const intptr_t cid = cid_;
6357 const bool mark_canonical = is_root_unit_ && is_canonical();
6358 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6359 SetPtr set = static_cast<SetPtr>(d.Ref(id));
6360 Deserializer::InitializeHeader(set, cid, Set::InstanceSize(),
6361 mark_canonical);
6362 d.ReadFromTo(set);
6363 }
6364 }
6365
6366 private:
6367 const intptr_t cid_;
6368};
6369
6370#if !defined(DART_PRECOMPILED_RUNTIME)
6371 class ArraySerializationCluster : public SerializationCluster {
6372 public:
6373 ArraySerializationCluster(bool is_canonical, intptr_t cid)
6374 : SerializationCluster("Array", cid, kSizeVaries, is_canonical) {}
6375 ~ArraySerializationCluster() {}
6376
6377 void Trace(Serializer* s, ObjectPtr object) {
6378 ArrayPtr array = Array::RawCast(object);
6379 objects_.Add(array);
6380
6381 s->Push(array->untag()->type_arguments());
6382 const intptr_t length = Smi::Value(array->untag()->length());
6383 for (intptr_t i = 0; i < length; i++) {
6384 s->Push(array->untag()->element(i));
6385 }
6386 }
6387
6388#if defined(DART_PRECOMPILER)
6389 static bool IsReadOnlyCid(intptr_t cid) {
6390 switch (cid) {
6391 case kPcDescriptorsCid:
6392 case kCodeSourceMapCid:
6393 case kCompressedStackMapsCid:
6394 case kOneByteStringCid:
6395 case kTwoByteStringCid:
6396 return true;
6397 default:
6398 return false;
6399 }
6400 }
6401#endif // defined(DART_PRECOMPILER)
6402
6403 void WriteAlloc(Serializer* s) {
6404#if defined(DART_PRECOMPILER)
6405 if (FLAG_print_array_optimization_candidates) {
6406 intptr_t array_count = objects_.length();
6407 intptr_t array_count_allsmi = 0;
6408 intptr_t array_count_allro = 0;
6409 intptr_t array_count_empty = 0;
6410 intptr_t element_count = 0;
6411 intptr_t element_count_allsmi = 0;
6412 intptr_t element_count_allro = 0;
6413 for (intptr_t i = 0; i < array_count; i++) {
6414 ArrayPtr array = objects_[i];
6415 bool allsmi = true;
6416 bool allro = true;
6417 const intptr_t length = Smi::Value(array->untag()->length());
6418 for (intptr_t i = 0; i < length; i++) {
6419 ObjectPtr element = array->untag()->element(i);
6420 intptr_t cid = element->GetClassIdMayBeSmi();
6421 if (!IsReadOnlyCid(cid)) allro = false;
6422 if (cid != kSmiCid) allsmi = false;
6423 }
6424 element_count += length;
6425 if (length == 0) {
6426 array_count_empty++;
6427 } else if (allsmi) {
6428 array_count_allsmi++;
6429 element_count_allsmi += length;
6430 } else if (allro) {
6431 array_count_allro++;
6432 element_count_allro += length;
6433 }
6434 }
6435 OS::PrintErr("Arrays\n");
6436 OS::PrintErr(" total: %" Pd ", % " Pd " elements\n", array_count,
6437 element_count);
6438 OS::PrintErr(" smi-only:%" Pd ", % " Pd " elements\n",
6439 array_count_allsmi, element_count_allsmi);
6440 OS::PrintErr(" ro-only:%" Pd " , % " Pd " elements\n", array_count_allro,
6441 element_count_allro);
6442 OS::PrintErr(" empty:%" Pd "\n", array_count_empty);
6443 }
6444#endif // defined(DART_PRECOMPILER)
6445
6446 const intptr_t count = objects_.length();
6447 s->WriteUnsigned(count);
6448 for (intptr_t i = 0; i < count; i++) {
6449 ArrayPtr array = objects_[i];
6450 s->AssignRef(array);
6451 AutoTraceObject(array);
6452 const intptr_t length = Smi::Value(array->untag()->length());
6453 s->WriteUnsigned(length);
6454 target_memory_size_ += compiler::target::Array::InstanceSize(length);
6455 }
6456 }
6457
6458 void WriteFill(Serializer* s) {
6459 const intptr_t count = objects_.length();
6460 for (intptr_t i = 0; i < count; i++) {
6461 ArrayPtr array = objects_[i];
6462 AutoTraceObject(array);
6463 const intptr_t length = Smi::Value(array->untag()->length());
6464 s->WriteUnsigned(length);
6465 WriteCompressedField(array, type_arguments);
6466 for (intptr_t j = 0; j < length; j++) {
6467 s->WriteElementRef(array->untag()->element(j), j);
6468 }
6469 }
6470 }
6471
6472 private:
6473 GrowableArray<ArrayPtr> objects_;
6474};
6475#endif // !DART_PRECOMPILED_RUNTIME
6476
6477 class ArrayDeserializationCluster
6478 : public AbstractInstanceDeserializationCluster {
6479 public:
6480 ArrayDeserializationCluster(intptr_t cid,
6481 bool is_canonical,
6482 bool is_root_unit)
6485 is_root_unit),
6486 cid_(cid) {}
6488
6489 void ReadAlloc(Deserializer* d) override {
6490 start_index_ = d->next_index();
6491 const intptr_t count = d->ReadUnsigned();
6492 for (intptr_t i = 0; i < count; i++) {
6493 const intptr_t length = d->ReadUnsigned();
6494 d->AssignRef(d->Allocate(Array::InstanceSize(length)));
6495 }
6496 stop_index_ = d->next_index();
6497 }
6498
6499 void ReadFill(Deserializer* d_) override {
6500 Deserializer::Local d(d_);
6501
6502 const intptr_t cid = cid_;
6503 const bool stamp_canonical = is_root_unit_ && is_canonical();
6504 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6505 ArrayPtr array = static_cast<ArrayPtr>(d.Ref(id));
6506 const intptr_t length = d.ReadUnsigned();
6507 Deserializer::InitializeHeader(array, cid, Array::InstanceSize(length),
6508 stamp_canonical);
6509 if (Array::UseCardMarkingForAllocation(length)) {
6510 array->untag()->SetCardRememberedBitUnsynchronized();
6511 }
6512 array->untag()->type_arguments_ =
6513 static_cast<TypeArgumentsPtr>(d.ReadRef());
6514 array->untag()->length_ = Smi::New(length);
6515 for (intptr_t j = 0; j < length; j++) {
6516 array->untag()->data()[j] = d.ReadRef();
6517 }
6518 }
6519 }
6520
6521 private:
6522 const intptr_t cid_;
6523};
6524
6525#if !defined(DART_PRECOMPILED_RUNTIME)
6526 class WeakArraySerializationCluster : public SerializationCluster {
6527 public:
6528 WeakArraySerializationCluster()
6529 : SerializationCluster("WeakArray", kWeakArrayCid, kSizeVaries) {}
6531
6532 void Trace(Serializer* s, ObjectPtr object) {
6533 WeakArrayPtr array = WeakArray::RawCast(object);
6534 objects_.Add(array);
6535
6536 const intptr_t length = Smi::Value(array->untag()->length());
6537 for (intptr_t i = 0; i < length; i++) {
6538 s->PushWeak(array->untag()->element(i));
6539 }
6540 }
6541
6542 void WriteAlloc(Serializer* s) {
6543 const intptr_t count = objects_.length();
6544 s->WriteUnsigned(count);
6545 for (intptr_t i = 0; i < count; i++) {
6546 WeakArrayPtr array = objects_[i];
6547 s->AssignRef(array);
6548 AutoTraceObject(array);
6549 const intptr_t length = Smi::Value(array->untag()->length());
6550 s->WriteUnsigned(length);
6551 target_memory_size_ += compiler::target::WeakArray::InstanceSize(length);
6552 }
6553 }
6554
6555 void WriteFill(Serializer* s) {
6556 const intptr_t count = objects_.length();
6557 for (intptr_t i = 0; i < count; i++) {
6558 WeakArrayPtr array = objects_[i];
6559 AutoTraceObject(array);
6560 const intptr_t length = Smi::Value(array->untag()->length());
6561 s->WriteUnsigned(length);
6562 for (intptr_t j = 0; j < length; j++) {
6563 if (s->HasRef(array->untag()->element(j))) {
6564 s->WriteElementRef(array->untag()->element(j), j);
6565 } else {
6566 s->WriteElementRef(Object::null(), j);
6567 }
6568 }
6569 }
6570 }
6571
6572 private:
6573 GrowableArray<WeakArrayPtr> objects_;
6574};
6575#endif // !DART_PRECOMPILED_RUNTIME
6576
6577 class WeakArrayDeserializationCluster : public DeserializationCluster {
6578 public:
6579 WeakArrayDeserializationCluster() : DeserializationCluster("WeakArray") {}
6580 ~WeakArrayDeserializationCluster() {}
6581
6582 void ReadAlloc(Deserializer* d) override {
6583 start_index_ = d->next_index();
6584 const intptr_t count = d->ReadUnsigned();
6585 for (intptr_t i = 0; i < count; i++) {
6586 const intptr_t length = d->ReadUnsigned();
6587 d->AssignRef(d->Allocate(WeakArray::InstanceSize(length)));
6588 }
6589 stop_index_ = d->next_index();
6590 }
6591
6592 void ReadFill(Deserializer* d_) override {
6593 Deserializer::Local d(d_);
6594
6595 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6596 WeakArrayPtr array = static_cast<WeakArrayPtr>(d.Ref(id));
6597 const intptr_t length = d.ReadUnsigned();
6598 Deserializer::InitializeHeader(array, kWeakArrayCid,
6599 WeakArray::InstanceSize(length));
6600 array->untag()->next_seen_by_gc_ = WeakArray::null();
6601 array->untag()->length_ = Smi::New(length);
6602 for (intptr_t j = 0; j < length; j++) {
6603 array->untag()->data()[j] = d.ReadRef();
6604 }
6605 }
6606 }
6607};
6608
6609#if !defined(DART_PRECOMPILED_RUNTIME)
6610 class StringSerializationCluster
6611 : public CanonicalSetSerializationCluster<CanonicalStringSet,
6612 String,
6613 StringPtr> {
6614 public:
6615 // To distinguish one and two byte strings, we put a bit in the length to
6616 // indicate which it is. The length is an unsigned SMI, so we actually have
6617 // two spare bits available. Keep in sync with DecodeLengthAndCid.
6618 static intptr_t EncodeLengthAndCid(intptr_t length, intptr_t cid) {
6619 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
6620 ASSERT(length <= compiler::target::kSmiMax);
6621 return (length << 1) | (cid == kTwoByteStringCid ? 0x1 : 0x0);
6622 }
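// Worked example (editorial sketch, not part of the original source): a
// one-byte string of length 5 encodes as (5 << 1) | 0 = 10, while a two-byte
// string of the same length encodes as (5 << 1) | 1 = 11. The matching
// DecodeLengthAndCid in the deserialization cluster reverses this:
//
//   intptr_t cid = 0;
//   const intptr_t length = DecodeLengthAndCid(11, &cid);
//   // length == 5, cid == kTwoByteStringCid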
6623
6624 StringSerializationCluster(bool is_canonical,
6625 bool represents_canonical_set)
6626 : CanonicalSetSerializationCluster(kStringCid,
6627 is_canonical,
6628 represents_canonical_set,
6629 "String",
6630 kSizeVaries) {}
6632
6633 void Trace(Serializer* s, ObjectPtr object) {
6634 StringPtr str = static_cast<StringPtr>(object);
6635 objects_.Add(str);
6636 }
6637
6638 void WriteAlloc(Serializer* s) {
6639 const intptr_t count = objects_.length();
6640 s->WriteUnsigned(count);
6642 for (intptr_t i = 0; i < count; i++) {
6643 StringPtr str = objects_[i];
6644 s->AssignRef(str);
6645 AutoTraceObject(str);
6646 const intptr_t cid = str->GetClassId();
6647 const intptr_t length = Smi::Value(str->untag()->length());
6648 const intptr_t encoded = EncodeLengthAndCid(length, cid);
6649 s->WriteUnsigned(encoded);
6651 cid == kOneByteStringCid
6652 ? compiler::target::OneByteString::InstanceSize(length)
6653 : compiler::target::TwoByteString::InstanceSize(length);
6654 }
6656 }
6657
6658 void WriteFill(Serializer* s) {
6659 const intptr_t count = objects_.length();
6660 for (intptr_t i = 0; i < count; i++) {
6661 StringPtr str = objects_[i];
6662 AutoTraceObject(str);
6663 const intptr_t cid = str->GetClassId();
6664 const intptr_t length = Smi::Value(str->untag()->length());
6665 const intptr_t encoded = EncodeLengthAndCid(length, cid);
6666 s->WriteUnsigned(encoded);
6667 if (cid == kOneByteStringCid) {
6668 s->WriteBytes(static_cast<OneByteStringPtr>(str)->untag()->data(),
6669 length);
6670 } else {
6671 s->WriteBytes(reinterpret_cast<uint8_t*>(
6672 static_cast<TwoByteStringPtr>(str)->untag()->data()),
6673 length * 2);
6674 }
6675 }
6676 }
6677};
6678#endif // !DART_PRECOMPILED_RUNTIME
6679
6680 class StringDeserializationCluster
6681 : public CanonicalSetDeserializationCluster<CanonicalStringSet> {
6682 public:
6683 static intptr_t DecodeLengthAndCid(intptr_t encoded, intptr_t* out_cid) {
6684 *out_cid = (encoded & 0x1) != 0 ? kTwoByteStringCid : kOneByteStringCid;
6685 return encoded >> 1;
6686 }
6687
6688 static intptr_t InstanceSize(intptr_t length, intptr_t cid) {
6689 return cid == kOneByteStringCid ? OneByteString::InstanceSize(length)
6690 : TwoByteString::InstanceSize(length);
6691 }
6692
6693 explicit StringDeserializationCluster(bool is_canonical, bool is_root_unit)
6694 : CanonicalSetDeserializationCluster(is_canonical,
6695 is_root_unit,
6696 "String") {}
6697 ~StringDeserializationCluster() {}
6698
6699 void ReadAlloc(Deserializer* d) override {
6700 start_index_ = d->next_index();
6701 const intptr_t count = d->ReadUnsigned();
6702 for (intptr_t i = 0; i < count; i++) {
6703 const intptr_t encoded = d->ReadUnsigned();
6704 intptr_t cid = 0;
6705 const intptr_t length = DecodeLengthAndCid(encoded, &cid);
6706 d->AssignRef(d->Allocate(InstanceSize(length, cid)));
6707 }
6708 stop_index_ = d->next_index();
6709 BuildCanonicalSetFromLayout(d);
6710 }
6711
6712 void ReadFill(Deserializer* d_) override {
6713 Deserializer::Local d(d_);
6714
6715 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6716 StringPtr str = static_cast<StringPtr>(d.Ref(id));
6717 const intptr_t encoded = d.ReadUnsigned();
6718 intptr_t cid = 0;
6719 const intptr_t length = DecodeLengthAndCid(encoded, &cid);
6720 const intptr_t instance_size = InstanceSize(length, cid);
6721 // Clean up last two words of the string object to simplify future
6722 // string comparisons.
6723 // Objects are rounded up to two-word size boundary.
6724 *reinterpret_cast<word*>(reinterpret_cast<uint8_t*>(str->untag()) +
6725 instance_size - 1 * kWordSize) = 0;
6726 *reinterpret_cast<word*>(reinterpret_cast<uint8_t*>(str->untag()) +
6727 instance_size - 2 * kWordSize) = 0;
6728 Deserializer::InitializeHeader(str, cid, instance_size, is_canonical());
6729#if DART_COMPRESSED_POINTERS
6730 // Gap caused by less-than-a-word length_ smi sitting before data_.
6731 const intptr_t length_offset =
6732 reinterpret_cast<intptr_t>(&str->untag()->length_);
6733 const intptr_t data_offset =
6734 cid == kOneByteStringCid
6735 ? reinterpret_cast<intptr_t>(
6736 static_cast<OneByteStringPtr>(str)->untag()->data())
6737 : reinterpret_cast<intptr_t>(
6738 static_cast<TwoByteStringPtr>(str)->untag()->data());
6739 const intptr_t length_with_gap = data_offset - length_offset;
6740 ASSERT(length_with_gap > kCompressedWordSize);
6741 ASSERT(length_with_gap == kWordSize);
6742 memset(reinterpret_cast<void*>(length_offset), 0, length_with_gap);
6743#endif
6744 str->untag()->length_ = Smi::New(length);
6745
6746 StringHasher hasher;
6747 if (cid == kOneByteStringCid) {
6748 for (intptr_t j = 0; j < length; j++) {
6749 uint8_t code_unit = d.Read<uint8_t>();
6750 static_cast<OneByteStringPtr>(str)->untag()->data()[j] = code_unit;
6751 hasher.Add(code_unit);
6752 }
6753
6754 } else {
6755 for (intptr_t j = 0; j < length; j++) {
6756 uint16_t code_unit = d.Read<uint8_t>();
6757 code_unit = code_unit | (d.Read<uint8_t>() << 8);
6758 static_cast<TwoByteStringPtr>(str)->untag()->data()[j] = code_unit;
6759 hasher.Add(code_unit);
6760 }
6761 }
6762 String::SetCachedHash(str, hasher.Finalize());
6763 }
6764 }
6765
6766 void PostLoad(Deserializer* d, const Array& refs) override {
6767 if (!table_.IsNull()) {
6768 auto object_store = d->isolate_group()->object_store();
6769 VerifyCanonicalSet(d, refs,
6770 WeakArray::Handle(object_store->symbol_table()));
6771 object_store->set_symbol_table(table_);
6772 if (d->isolate_group() == Dart::vm_isolate_group()) {
6773 Symbols::InitFromSnapshot(d->isolate_group());
6774 }
6775#if defined(DEBUG)
6776 Symbols::New(Thread::Current(), ":some:new:symbol:");
6777 ASSERT(object_store->symbol_table() == table_.ptr()); // Did not rehash.
6778#endif
6779 }
6780 }
6781};
6782
6783#if !defined(DART_PRECOMPILED_RUNTIME)
6801#endif // !DART_PRECOMPILED_RUNTIME
6802
6803#if !defined(DART_PRECOMPILED_RUNTIME)
6804 class VMSerializationRoots : public SerializationRoots {
6805 public:
6806 explicit VMSerializationRoots(const WeakArray& symbols,
6807 bool should_write_symbols)
6808 : symbols_(symbols),
6809 should_write_symbols_(should_write_symbols),
6810 zone_(Thread::Current()->zone()) {}
6811
6812 void AddBaseObjects(Serializer* s) {
6813 // These objects are always allocated by Object::InitOnce, so they are not
6814 // written into the snapshot.
6815
6816 s->AddBaseObject(Object::null(), "Null", "null");
6817 s->AddBaseObject(Object::sentinel().ptr(), "Null", "sentinel");
6818 s->AddBaseObject(Object::transition_sentinel().ptr(), "Null",
6819 "transition_sentinel");
6820 s->AddBaseObject(Object::optimized_out().ptr(), "Null", "<optimized out>");
6821 s->AddBaseObject(Object::empty_array().ptr(), "Array", "<empty_array>");
6822 s->AddBaseObject(Object::empty_instantiations_cache_array().ptr(), "Array",
6823 "<empty_instantiations_cache_array>");
6824 s->AddBaseObject(Object::empty_subtype_test_cache_array().ptr(), "Array",
6825 "<empty_subtype_test_cache_array>");
6826 s->AddBaseObject(Object::dynamic_type().ptr(), "Type", "<dynamic type>");
6827 s->AddBaseObject(Object::void_type().ptr(), "Type", "<void type>");
6828 s->AddBaseObject(Object::empty_type_arguments().ptr(), "TypeArguments",
6829 "[]");
6830 s->AddBaseObject(Bool::True().ptr(), "bool", "true");
6831 s->AddBaseObject(Bool::False().ptr(), "bool", "false");
6832 ASSERT(Object::synthetic_getter_parameter_types().ptr() != Object::null());
6833 s->AddBaseObject(Object::synthetic_getter_parameter_types().ptr(), "Array",
6834 "<synthetic getter parameter types>");
6835 ASSERT(Object::synthetic_getter_parameter_names().ptr() != Object::null());
6836 s->AddBaseObject(Object::synthetic_getter_parameter_names().ptr(), "Array",
6837 "<synthetic getter parameter names>");
6838 s->AddBaseObject(Object::empty_context_scope().ptr(), "ContextScope",
6839 "<empty>");
6840 s->AddBaseObject(Object::empty_object_pool().ptr(), "ObjectPool",
6841 "<empty>");
6842 s->AddBaseObject(Object::empty_compressed_stackmaps().ptr(),
6843 "CompressedStackMaps", "<empty>");
6844 s->AddBaseObject(Object::empty_descriptors().ptr(), "PcDescriptors",
6845 "<empty>");
6846 s->AddBaseObject(Object::empty_var_descriptors().ptr(),
6847 "LocalVarDescriptors", "<empty>");
6848 s->AddBaseObject(Object::empty_exception_handlers().ptr(),
6849 "ExceptionHandlers", "<empty>");
6850 s->AddBaseObject(Object::empty_async_exception_handlers().ptr(),
6851 "ExceptionHandlers", "<empty async>");
6852
6853 for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) {
6854 s->AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i],
6855 "ArgumentsDescriptor", "<cached arguments descriptor>");
6856 }
6857 for (intptr_t i = 0; i < ICData::kCachedICDataArrayCount; i++) {
6858 s->AddBaseObject(ICData::cached_icdata_arrays_[i], "Array",
6859 "<empty icdata entries>");
6860 }
6861
6862 ClassTable* table = s->isolate_group()->class_table();
6863 for (intptr_t cid = kFirstInternalOnlyCid; cid <= kLastInternalOnlyCid;
6864 cid++) {
6865 // Error, CallSiteData has no class object.
6866 if (cid != kErrorCid && cid != kCallSiteDataCid) {
6867 ASSERT(table->HasValidClassAt(cid));
6868 s->AddBaseObject(
6869 table->At(cid), "Class",
6870 Class::Handle(table->At(cid))
6872 }
6873 }
6874 s->AddBaseObject(table->At(kDynamicCid), "Class", "dynamic");
6875 s->AddBaseObject(table->At(kVoidCid), "Class", "void");
6876
6877 if (!Snapshot::IncludesCode(s->kind())) {
6878 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6879 s->AddBaseObject(StubCode::EntryAt(i).ptr());
6880 }
6881 }
6882 }
6883
6884 void PushRoots(Serializer* s) {
6885 if (should_write_symbols_) {
6886 s->Push(symbols_.ptr());
6887 } else {
6888 for (intptr_t i = 0; i < symbols_.Length(); i++) {
6889 s->Push(symbols_.At(i));
6890 }
6891 }
6892 if (Snapshot::IncludesCode(s->kind())) {
6893 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6894 s->Push(StubCode::EntryAt(i).ptr());
6895 }
6896 }
6897 }
6898
6899 void WriteRoots(Serializer* s) {
6900 s->WriteRootRef(should_write_symbols_ ? symbols_.ptr() : Object::null(),
6901 "symbol-table");
6902 if (Snapshot::IncludesCode(s->kind())) {
6903 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6904 s->WriteRootRef(StubCode::EntryAt(i).ptr(),
6905 zone_->PrintToString("Stub:%s", StubCode::NameAt(i)));
6906 }
6907 }
6908
6909 if (!should_write_symbols_ && s->profile_writer() != nullptr) {
6910 // If writing V8 snapshot profile create an artificial node representing
6911 // VM isolate symbol table.
6912 ASSERT(!s->IsReachable(symbols_.ptr()));
6913 s->AssignArtificialRef(symbols_.ptr());
6914 const auto& symbols_snapshot_id = s->GetProfileId(symbols_.ptr());
6915 s->profile_writer()->SetObjectTypeAndName(symbols_snapshot_id, "Symbols",
6916 "vm_symbols");
6917 s->profile_writer()->AddRoot(symbols_snapshot_id);
6918 for (intptr_t i = 0; i < symbols_.Length(); i++) {
6919 s->profile_writer()->AttributeReferenceTo(
6920 symbols_snapshot_id, V8SnapshotProfileWriter::Reference::Element(i),
6921 s->GetProfileId(symbols_.At(i)));
6922 }
6923 }
6924 }
6925
6926 private:
6927 const WeakArray& symbols_;
6928 const bool should_write_symbols_;
6929 Zone* zone_;
6930};
6931#endif // !DART_PRECOMPILED_RUNTIME
6932
6933 class VMDeserializationRoots : public DeserializationRoots {
6934 public:
6935 VMDeserializationRoots() : symbol_table_(WeakArray::Handle()) {}
6936
6937 void AddBaseObjects(Deserializer* d) override {
6938 // These objects are always allocated by Object::InitOnce, so they are not
6939 // written into the snapshot.
6940
6941 d->AddBaseObject(Object::null());
6942 d->AddBaseObject(Object::sentinel().ptr());
6943 d->AddBaseObject(Object::transition_sentinel().ptr());
6944 d->AddBaseObject(Object::optimized_out().ptr());
6945 d->AddBaseObject(Object::empty_array().ptr());
6946 d->AddBaseObject(Object::empty_instantiations_cache_array().ptr());
6947 d->AddBaseObject(Object::empty_subtype_test_cache_array().ptr());
6948 d->AddBaseObject(Object::dynamic_type().ptr());
6949 d->AddBaseObject(Object::void_type().ptr());
6950 d->AddBaseObject(Object::empty_type_arguments().ptr());
6951 d->AddBaseObject(Bool::True().ptr());
6952 d->AddBaseObject(Bool::False().ptr());
6953 ASSERT(Object::synthetic_getter_parameter_types().ptr() != Object::null());
6954 d->AddBaseObject(Object::synthetic_getter_parameter_types().ptr());
6955 ASSERT(Object::synthetic_getter_parameter_names().ptr() != Object::null());
6956 d->AddBaseObject(Object::synthetic_getter_parameter_names().ptr());
6957 d->AddBaseObject(Object::empty_context_scope().ptr());
6958 d->AddBaseObject(Object::empty_object_pool().ptr());
6959 d->AddBaseObject(Object::empty_compressed_stackmaps().ptr());
6960 d->AddBaseObject(Object::empty_descriptors().ptr());
6961 d->AddBaseObject(Object::empty_var_descriptors().ptr());
6962 d->AddBaseObject(Object::empty_exception_handlers().ptr());
6963 d->AddBaseObject(Object::empty_async_exception_handlers().ptr());
6964
6965 for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) {
6966 d->AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i]);
6967 }
6968 for (intptr_t i = 0; i < ICData::kCachedICDataArrayCount; i++) {
6969 d->AddBaseObject(ICData::cached_icdata_arrays_[i]);
6970 }
6971
6972 ClassTable* table = d->isolate_group()->class_table();
6973 for (intptr_t cid = kFirstInternalOnlyCid; cid <= kLastInternalOnlyCid;
6974 cid++) {
6975 // Error, CallSiteData has no class object.
6976 if (cid != kErrorCid && cid != kCallSiteDataCid) {
6977 ASSERT(table->HasValidClassAt(cid));
6978 d->AddBaseObject(table->At(cid));
6979 }
6980 }
6981 d->AddBaseObject(table->At(kDynamicCid));
6982 d->AddBaseObject(table->At(kVoidCid));
6983
6984 if (!Snapshot::IncludesCode(d->kind())) {
6985 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6986 d->AddBaseObject(StubCode::EntryAt(i).ptr());
6987 }
6988 }
6989 }
6990
6991 void ReadRoots(Deserializer* d) override {
6992 symbol_table_ ^= d->ReadRef();
6993 if (!symbol_table_.IsNull()) {
6994 d->isolate_group()->object_store()->set_symbol_table(symbol_table_);
6995 }
6996 if (Snapshot::IncludesCode(d->kind())) {
6997 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6998 Code* code = Code::ReadOnlyHandle();
6999 *code ^= d->ReadRef();
7000 StubCode::EntryAtPut(i, code);
7001 }
7003 }
7004 }
7005
7006 void PostLoad(Deserializer* d, const Array& refs) override {
7007 // Move remaining bump allocation space to the freelist so it is used by C++
7008 // allocations (e.g., FinalizeVMIsolate) before allocating new pages.
7009 d->heap()->old_space()->ReleaseBumpAllocation();
7010
7011 if (!symbol_table_.IsNull()) {
7012 Symbols::InitFromSnapshot(d->isolate_group());
7013 }
7014
7016 }
7017
7018 private:
7019 WeakArray& symbol_table_;
7020};
7021
7022#if !defined(DART_PRECOMPILED_RUNTIME)
7023static const char* const kObjectStoreFieldNames[] = {
7024#define DECLARE_OBJECT_STORE_FIELD(Type, Name) #Name,
7034#undef DECLARE_OBJECT_STORE_FIELD
7035};
7036
7037 class ProgramSerializationRoots : public SerializationRoots {
7038 public:
7039#define RESET_ROOT_LIST(V) \
7040 V(symbol_table, WeakArray, HashTables::New<CanonicalStringSet>(4)) \
7041 V(canonical_types, Array, HashTables::New<CanonicalTypeSet>(4)) \
7042 V(canonical_function_types, Array, \
7043 HashTables::New<CanonicalFunctionTypeSet>(4)) \
7044 V(canonical_record_types, Array, HashTables::New<CanonicalRecordTypeSet>(4)) \
7045 V(canonical_type_arguments, Array, \
7046 HashTables::New<CanonicalTypeArgumentsSet>(4)) \
7047 V(canonical_type_parameters, Array, \
7048 HashTables::New<CanonicalTypeParameterSet>(4)) \
7049 ONLY_IN_PRODUCT(ONLY_IN_AOT( \
7050 V(closure_functions, GrowableObjectArray, GrowableObjectArray::null()))) \
7051 ONLY_IN_AOT(V(closure_functions_table, Array, Array::null())) \
7052 ONLY_IN_AOT(V(canonicalized_stack_map_entries, CompressedStackMaps, \
7053 CompressedStackMaps::null()))
7054
7055 ProgramSerializationRoots(ZoneGrowableArray<Object*>* base_objects,
7056 ObjectStore* object_store,
7057 Snapshot::Kind snapshot_kind)
7058 : base_objects_(base_objects),
7059 object_store_(object_store),
7060 snapshot_kind_(snapshot_kind) {
7061#define ONLY_IN_AOT(code) \
7062 if (snapshot_kind_ == Snapshot::kFullAOT) { \
7063 code \
7064 }
7065#define SAVE_AND_RESET_ROOT(name, Type, init) \
7066 do { \
7067 saved_##name##_ = object_store->name(); \
7068 object_store->set_##name(Type::Handle(init)); \
7069 } while (0);
7070
7071 RESET_ROOT_LIST(SAVE_AND_RESET_ROOT)
7072#undef SAVE_AND_RESET_ROOT
7073#undef ONLY_IN_AOT
7074 }
7075 ~ProgramSerializationRoots() {
7076#define ONLY_IN_AOT(code) \
7077 if (snapshot_kind_ == Snapshot::kFullAOT) { \
7078 code \
7079 }
7080#define RESTORE_ROOT(name, Type, init) \
7081 object_store_->set_##name(saved_##name##_);
7082 RESET_ROOT_LIST(RESTORE_ROOT)
7083#undef RESTORE_ROOT
7084#undef ONLY_IN_AOT
7085 }
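// Illustrative expansion (editorial sketch, not from the original source):
// for the symbol_table root, SAVE_AND_RESET_ROOT in the constructor expands
// roughly to
//
//   saved_symbol_table_ = object_store->symbol_table();
//   object_store->set_symbol_table(
//       WeakArray::Handle(HashTables::New<CanonicalStringSet>(4)));
//
// and RESTORE_ROOT in the destructor expands to
//
//   object_store_->set_symbol_table(saved_symbol_table_);
//
// so the canonical tables are emptied for the duration of the write and put
// back afterwards.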
7086
7087 void AddBaseObjects(Serializer* s) {
7088 if (base_objects_ == nullptr) {
7089 // Not writing a new vm isolate: use the one this VM was loaded from.
7090 const Array& base_objects = Object::vm_isolate_snapshot_object_table();
7091 for (intptr_t i = kFirstReference; i < base_objects.Length(); i++) {
7092 s->AddBaseObject(base_objects.At(i));
7093 }
7094 } else {
7095 // Base objects carried over from WriteVMSnapshot.
7096 for (intptr_t i = 0; i < base_objects_->length(); i++) {
7097 s->AddBaseObject((*base_objects_)[i]->ptr());
7098 }
7099 }
7100 }
7101
7102 void PushRoots(Serializer* s) {
7103 ObjectPtr* from = object_store_->from();
7104 ObjectPtr* to = object_store_->to_snapshot(s->kind());
7105 for (ObjectPtr* p = from; p <= to; p++) {
7106 s->Push(*p);
7107 }
7108
7109 FieldTable* initial_field_table =
7110 s->thread()->isolate_group()->initial_field_table();
7111 for (intptr_t i = 0, n = initial_field_table->NumFieldIds(); i < n; i++) {
7112 s->Push(initial_field_table->At(i));
7113 }
7114
7115 dispatch_table_entries_ = object_store_->dispatch_table_code_entries();
7116 // We should only have a dispatch table in precompiled mode.
7117 ASSERT(dispatch_table_entries_.IsNull() || s->kind() == Snapshot::kFullAOT);
7118
7119#if defined(DART_PRECOMPILER)
7120 // We treat the dispatch table as a root object and trace the Code objects
7121 // it references. Otherwise, a non-empty entry could be invalid on
7122 // deserialization if the corresponding Code object was not reachable from
7123 // the existing snapshot roots.
7124 if (!dispatch_table_entries_.IsNull()) {
7125 for (intptr_t i = 0; i < dispatch_table_entries_.Length(); i++) {
7126 s->Push(dispatch_table_entries_.At(i));
7127 }
7128 }
7129#endif
7130 }
7131
7132 void WriteRoots(Serializer* s) {
7133 ObjectPtr* from = object_store_->from();
7134 ObjectPtr* to = object_store_->to_snapshot(s->kind());
7135 for (ObjectPtr* p = from; p <= to; p++) {
7136 s->WriteRootRef(*p, kObjectStoreFieldNames[p - from]);
7137 }
7138
7139 FieldTable* initial_field_table =
7140 s->thread()->isolate_group()->initial_field_table();
7141 intptr_t n = initial_field_table->NumFieldIds();
7142 s->WriteUnsigned(n);
7143 for (intptr_t i = 0; i < n; i++) {
7144 s->WriteRootRef(initial_field_table->At(i), "some-static-field");
7145 }
7146
7147 // The dispatch table is serialized only for precompiled snapshots.
7148 s->WriteDispatchTable(dispatch_table_entries_);
7149 }
7150
7152 return saved_canonicalized_stack_map_entries_;
7153 }
7154
7155 private:
7156 ZoneGrowableArray<Object*>* const base_objects_;
7157 ObjectStore* const object_store_;
7158 const Snapshot::Kind snapshot_kind_;
7159 Array& dispatch_table_entries_ = Array::Handle();
7160
7161#define ONLY_IN_AOT(code) code
7162#define DECLARE_FIELD(name, Type, init) Type& saved_##name##_ = Type::Handle();
7163 RESET_ROOT_LIST(DECLARE_FIELD)
7164#undef DECLARE_FIELD
7165#undef ONLY_IN_AOT
7166};
7167#endif // !DART_PRECOMPILED_RUNTIME
7168
7169 class ProgramDeserializationRoots : public DeserializationRoots {
7170 public:
7171 explicit ProgramDeserializationRoots(ObjectStore* object_store)
7172 : object_store_(object_store) {}
7173
7174 void AddBaseObjects(Deserializer* d) override {
7175 // N.B.: Skipping index 0 because ref 0 is illegal.
7176 const Array& base_objects = Object::vm_isolate_snapshot_object_table();
7177 for (intptr_t i = kFirstReference; i < base_objects.Length(); i++) {
7178 d->AddBaseObject(base_objects.At(i));
7179 }
7180 }
7181
7182 void ReadRoots(Deserializer* d) override {
7183 // Read roots.
7184 ObjectPtr* from = object_store_->from();
7185 ObjectPtr* to = object_store_->to_snapshot(d->kind());
7186 for (ObjectPtr* p = from; p <= to; p++) {
7187 *p = d->ReadRef();
7188 }
7189
7190 FieldTable* initial_field_table =
7191 d->thread()->isolate_group()->initial_field_table();
7192 intptr_t n = d->ReadUnsigned();
7193 initial_field_table->AllocateIndex(n - 1);
7194 for (intptr_t i = 0; i < n; i++) {
7195 initial_field_table->SetAt(i, d->ReadRef());
7196 }
7197
7198 // Deserialize dispatch table (when applicable)
7199 d->ReadDispatchTable();
7200 }
7201
7202 void PostLoad(Deserializer* d, const Array& refs) override {
7203 auto isolate_group = d->isolate_group();
7204 { isolate_group->class_table()->CopySizesFromClassObjects(); }
7205 d->heap()->old_space()->EvaluateAfterLoading();
7206
7207 auto object_store = isolate_group->object_store();
7208 const Array& units = Array::Handle(object_store->loading_units());
7209 if (!units.IsNull()) {
7210 LoadingUnit& unit = LoadingUnit::Handle();
7211 unit ^= units.At(LoadingUnit::kRootId);
7212 unit.set_base_objects(refs);
7213 }
7214
7215 // Setup native resolver for bootstrap impl.
7216 Bootstrap::SetupNativeResolver();
7217 }
7218
7219 private:
7220 ObjectStore* object_store_;
7221};
7222
7223#if !defined(DART_PRECOMPILED_RUNTIME)
7224 class UnitSerializationRoots : public SerializationRoots {
7225 public:
7226 explicit UnitSerializationRoots(LoadingUnitSerializationData* unit)
7227 : unit_(unit) {}
7228
7229 void AddBaseObjects(Serializer* s) {
7230 ZoneGrowableArray<Object*>* objects = unit_->parent()->objects();
7231 for (intptr_t i = 0; i < objects->length(); i++) {
7232 s->AddBaseObject(objects->At(i)->ptr());
7233 }
7234 }
7235
7236 void PushRoots(Serializer* s) {
7237 for (auto deferred_object : *unit_->deferred_objects()) {
7238 ASSERT(deferred_object->IsCode());
7239 CodePtr code = static_cast<CodePtr>(deferred_object->ptr());
7240 ObjectPoolPtr pool = code->untag()->object_pool_;
7241 if (pool != ObjectPool::null()) {
7242 const intptr_t length = pool->untag()->length_;
7243 uint8_t* entry_bits = pool->untag()->entry_bits();
7244 for (intptr_t i = 0; i < length; i++) {
7245 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
7246 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
7247 s->Push(pool->untag()->data()[i].raw_obj_);
7248 }
7249 }
7250 }
7251 s->Push(code->untag()->code_source_map_);
7252 }
7253 }
7254
7255 void WriteRoots(Serializer* s) {
7256#if defined(DART_PRECOMPILER)
7257 intptr_t start_index = 0;
7258 intptr_t num_deferred_objects = unit_->deferred_objects()->length();
7259 if (num_deferred_objects != 0) {
7260 start_index = s->RefId(unit_->deferred_objects()->At(0)->ptr());
7261 ASSERT(start_index > 0);
7262 }
7263 s->WriteUnsigned(start_index);
7264 s->WriteUnsigned(num_deferred_objects);
7265 for (intptr_t i = 0; i < num_deferred_objects; i++) {
7266 const Object* deferred_object = (*unit_->deferred_objects())[i];
7267 ASSERT(deferred_object->IsCode());
7268 CodePtr code = static_cast<CodePtr>(deferred_object->ptr());
7269 ASSERT(s->RefId(code) == (start_index + i));
7270 ASSERT(!Code::IsDiscarded(code));
7271 s->WriteInstructions(code->untag()->instructions_,
7272 code->untag()->unchecked_offset_, code, false);
7273 s->WriteRootRef(code->untag()->code_source_map_, "deferred-code");
7274 }
7275
7276 ObjectPoolPtr pool =
7277 s->isolate_group()->object_store()->global_object_pool();
7278 const intptr_t length = pool->untag()->length_;
7279 uint8_t* entry_bits = pool->untag()->entry_bits();
7280 intptr_t last_write = 0;
7281 for (intptr_t i = 0; i < length; i++) {
7282 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
7283 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
7284 if (s->IsWritten(pool->untag()->data()[i].raw_obj_)) {
7285 intptr_t skip = i - last_write;
7286 s->WriteUnsigned(skip);
7287 s->WriteRootRef(pool->untag()->data()[i].raw_obj_,
7288 "deferred-literal");
7289 last_write = i;
7290 }
7291 }
7292 }
7293 s->WriteUnsigned(length - last_write);
7294#endif
7295 }
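// Worked example (editorial sketch, not part of the original source): for a
// 10-entry global object pool in which only the tagged entries at indices 2
// and 5 were written, the loop above emits 2, <ref>, 3, <ref>, 5: each
// unsigned value is the distance from the previously written index, and the
// final value covers the remaining gap. The read loop in
// UnitDeserializationRoots advances by the same deltas:
//
//   for (intptr_t i = d->ReadUnsigned(); i < length; i += d->ReadUnsigned()) {
//     pool->untag()->data()[i].raw_obj_ = d->ReadRef();
//   }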
7296
7297 private:
7299};
7300#endif // !DART_PRECOMPILED_RUNTIME
7301
7302 class UnitDeserializationRoots : public DeserializationRoots {
7303 public:
7304 explicit UnitDeserializationRoots(const LoadingUnit& unit) : unit_(unit) {}
7305
7306 void AddBaseObjects(Deserializer* d) override {
7307 const Array& base_objects =
7308 Array::Handle(LoadingUnit::Handle(unit_.parent()).base_objects());
7309 for (intptr_t i = kFirstReference; i < base_objects.Length(); i++) {
7310 d->AddBaseObject(base_objects.At(i));
7311 }
7312 }
7313
7314 void ReadRoots(Deserializer* d) override {
7315 deferred_start_index_ = d->ReadUnsigned();
7316 deferred_stop_index_ = deferred_start_index_ + d->ReadUnsigned();
7317 for (intptr_t id = deferred_start_index_; id < deferred_stop_index_; id++) {
7318 CodePtr code = static_cast<CodePtr>(d->Ref(id));
7320 d->ReadInstructions(code, /*deferred=*/false);
7321 if (code->untag()->owner_->IsHeapObject() &&
7322 code->untag()->owner_->IsFunction()) {
7323 FunctionPtr func = static_cast<FunctionPtr>(code->untag()->owner_);
7324 uword entry_point = code->untag()->entry_point_;
7325 ASSERT(entry_point != 0);
7326 func->untag()->entry_point_ = entry_point;
7327 uword unchecked_entry_point = code->untag()->unchecked_entry_point_;
7328 ASSERT(unchecked_entry_point != 0);
7329 func->untag()->unchecked_entry_point_ = unchecked_entry_point;
7330#if defined(DART_PRECOMPILED_RUNTIME)
7331 if (func->untag()->data()->IsHeapObject() &&
7332 func->untag()->data()->IsClosureData()) {
7333 // For closure functions in bare instructions mode, also update the
7334 // cache inside the static implicit closure object, if any.
7335 auto data = static_cast<ClosureDataPtr>(func->untag()->data());
7336 if (data->untag()->closure() != Closure::null()) {
7337 // Closure functions only have one entry point.
7338 ASSERT_EQUAL(entry_point, unchecked_entry_point);
7339 data->untag()->closure()->untag()->entry_point_ = entry_point;
7340 }
7341 }
7342#endif
7343 }
7344 code->untag()->code_source_map_ =
7345 static_cast<CodeSourceMapPtr>(d->ReadRef());
7346 }
7347
7348 ObjectPoolPtr pool =
7349 d->isolate_group()->object_store()->global_object_pool();
7350 const intptr_t length = pool->untag()->length_;
7351 uint8_t* entry_bits = pool->untag()->entry_bits();
7352 for (intptr_t i = d->ReadUnsigned(); i < length; i += d->ReadUnsigned()) {
7353 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
7354 ASSERT(entry_type == ObjectPool::EntryType::kTaggedObject);
7355 // The existing entry will usually be null, but it might also be an
7356 // equivalent object that was duplicated in another loading unit.
7357 pool->untag()->data()[i].raw_obj_ = d->ReadRef();
7358 }
7359
7360 // Reinitialize the dispatch table by rereading the table's serialization
7361 // in the root snapshot.
7362 auto isolate_group = d->isolate_group();
7363 if (isolate_group->dispatch_table_snapshot() != nullptr) {
7364 ReadStream stream(isolate_group->dispatch_table_snapshot(),
7365 isolate_group->dispatch_table_snapshot_size());
7367 isolate_group->object_store()->instructions_tables());
7369 root_table ^= tables.At(0);
7370 d->ReadDispatchTable(&stream, /*deferred=*/true, root_table,
7371 deferred_start_index_, deferred_stop_index_);
7372 }
7373 }
7374
7375 void PostLoad(Deserializer* d, const Array& refs) override {
7376 d->EndInstructions();
7377 unit_.set_base_objects(refs);
7378 }
7379
7380 private:
7381 const LoadingUnit& unit_;
7382 intptr_t deferred_start_index_;
7383 intptr_t deferred_stop_index_;
7384};
7385
7386#if defined(DEBUG)
7387static constexpr int32_t kSectionMarker = 0xABAB;
7388#endif
7389
7390 Serializer::Serializer(Thread* thread,
7391 Snapshot::Kind kind,
7392 NonStreamingWriteStream* stream,
7393 ImageWriter* image_writer,
7394 bool vm,
7395 V8SnapshotProfileWriter* profile_writer)
7396 : ThreadStackResource(thread),
7397 heap_(thread->isolate_group()->heap()),
7398 zone_(thread->zone()),
7399 kind_(kind),
7400 stream_(stream),
7401 image_writer_(image_writer),
7402 canonical_clusters_by_cid_(nullptr),
7403 clusters_by_cid_(nullptr),
7404 stack_(),
7405 num_cids_(0),
7406 num_tlc_cids_(0),
7407 num_base_objects_(0),
7408 num_written_objects_(0),
7409 next_ref_index_(kFirstReference),
7410 vm_(vm),
7411 profile_writer_(profile_writer)
7412#if defined(SNAPSHOT_BACKTRACE)
7413 ,
7414 current_parent_(Object::null()),
7415 parent_pairs_()
7416#endif
7417#if defined(DART_PRECOMPILER)
7418 ,
7419 deduped_instructions_sources_(zone_)
7420#endif
7421{
7422 num_cids_ = thread->isolate_group()->class_table()->NumCids();
7423 num_tlc_cids_ = thread->isolate_group()->class_table()->NumTopLevelCids();
7424 canonical_clusters_by_cid_ = new SerializationCluster*[num_cids_];
7425 for (intptr_t i = 0; i < num_cids_; i++) {
7426 canonical_clusters_by_cid_[i] = nullptr;
7427 }
7428 clusters_by_cid_ = new SerializationCluster*[num_cids_];
7429 for (intptr_t i = 0; i < num_cids_; i++) {
7430 clusters_by_cid_[i] = nullptr;
7431 }
7432 if (profile_writer_ != nullptr) {
7433 offsets_table_ = new (zone_) OffsetsTable(zone_);
7434 }
7435}
7436
7437 Serializer::~Serializer() {
7438 delete[] canonical_clusters_by_cid_;
7439 delete[] clusters_by_cid_;
7440}
7441
7442 void Serializer::AddBaseObject(ObjectPtr base_object,
7443 const char* type,
7444 const char* name) {
7445 // Don't assign references to the discarded code.
7446 const bool is_discarded_code = base_object->IsHeapObject() &&
7447 base_object->IsCode() &&
7448 Code::IsDiscarded(Code::RawCast(base_object));
7449 if (!is_discarded_code) {
7450 AssignRef(base_object);
7451 }
7452 num_base_objects_++;
7453
7454 if ((profile_writer_ != nullptr) && (type != nullptr)) {
7455 const auto& profile_id = GetProfileId(base_object);
7456 profile_writer_->SetObjectTypeAndName(profile_id, type, name);
7457 profile_writer_->AddRoot(profile_id);
7458 }
7459}
7460
7461 intptr_t Serializer::AssignRef(ObjectPtr object) {
7462 ASSERT(IsAllocatedReference(next_ref_index_));
7463
7464 // The object id weak table holds image offsets for Instructions instead
7465 // of ref indices.
7466 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7467 heap_->SetObjectId(object, next_ref_index_);
7468 ASSERT(heap_->GetObjectId(object) == next_ref_index_);
7469
7470 objects_->Add(&Object::ZoneHandle(object));
7471
7472 return next_ref_index_++;
7473}
7474
7475 intptr_t Serializer::AssignArtificialRef(ObjectPtr object) {
7476 const intptr_t ref = -(next_ref_index_++);
7478 if (object != nullptr) {
7479 ASSERT(!object.IsHeapObject() || !object.IsInstructions());
7480 ASSERT(heap_->GetObjectId(object) == kUnreachableReference);
7481 heap_->SetObjectId(object, ref);
7482 ASSERT(heap_->GetObjectId(object) == ref);
7483 }
7484 return ref;
7485}
7486
7487void Serializer::FlushProfile() {
7488 if (profile_writer_ == nullptr) return;
7489 const intptr_t bytes =
7490 stream_->Position() - object_currently_writing_.last_stream_position_;
7491 profile_writer_->AttributeBytesTo(object_currently_writing_.id_, bytes);
7492 object_currently_writing_.last_stream_position_ = stream_->Position();
7493}
7494
7495 V8SnapshotProfileWriter::ObjectId Serializer::GetProfileId(
7496 ObjectPtr object) const {
7497 // Instructions are handled separately.
7498 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7499 return GetProfileId(UnsafeRefId(object));
7500}
7501
7502 V8SnapshotProfileWriter::ObjectId Serializer::GetProfileId(
7503 intptr_t heap_id) const {
7504 if (IsArtificialReference(heap_id)) {
7505 return {IdSpace::kArtificial, -heap_id};
7506 }
7507 ASSERT(IsAllocatedReference(heap_id));
7508 return {IdSpace::kSnapshot, heap_id};
7509}
7510
7511 void Serializer::AttributeReference(
7512 ObjectPtr object,
7513 const V8SnapshotProfileWriter::Reference& reference) {
7514 if (profile_writer_ == nullptr) return;
7515 const auto& object_id = GetProfileId(object);
7516#if defined(DART_PRECOMPILER)
7517 if (object->IsHeapObject() && object->IsWeakSerializationReference()) {
7518 auto const wsr = WeakSerializationReference::RawCast(object);
7519 auto const target = wsr->untag()->target();
7520 const auto& target_id = GetProfileId(target);
7521 if (object_id != target_id) {
7522 const auto& replacement_id = GetProfileId(wsr->untag()->replacement());
7523 ASSERT(object_id == replacement_id);
7524 // The target of the WSR will be replaced in the snapshot, so write
7525 // attributions for both the dropped target and for the replacement.
7526 profile_writer_->AttributeDroppedReferenceTo(
7527 object_currently_writing_.id_, reference, target_id, replacement_id);
7528 return;
7529 }
7530 // The replacement isn't used for this WSR in the snapshot, as either the
7531 // target is strongly referenced or the WSR itself is unreachable, so fall
7532 // through to attributing a reference to the WSR (which shares the profile
7533 // ID of the target).
7534 }
7535#endif
7536 profile_writer_->AttributeReferenceTo(object_currently_writing_.id_,
7537 reference, object_id);
7538}
7539
7540 Serializer::WritingObjectScope::WritingObjectScope(
7541 Serializer* serializer,
7542 const V8SnapshotProfileWriter::ObjectId& id,
7543 ObjectPtr object)
7544 : serializer_(serializer),
7545 old_object_(serializer->object_currently_writing_.object_),
7546 old_id_(serializer->object_currently_writing_.id_),
7547 old_cid_(serializer->object_currently_writing_.cid_) {
7548 if (serializer_->profile_writer_ == nullptr) return;
7549 // The ID should correspond to one already added appropriately to the
7550 // profile writer.
7551 ASSERT(serializer_->profile_writer_->HasId(id));
7552 serializer_->FlushProfile();
7553 serializer_->object_currently_writing_.object_ = object;
7554 serializer_->object_currently_writing_.id_ = id;
7555 serializer_->object_currently_writing_.cid_ =
7556 object == nullptr ? -1 : object->GetClassIdMayBeSmi();
7557}
7558
7559 Serializer::WritingObjectScope::~WritingObjectScope() {
7560 if (serializer_->profile_writer_ == nullptr) return;
7561 serializer_->FlushProfile();
7562 serializer_->object_currently_writing_.object_ = old_object_;
7563 serializer_->object_currently_writing_.id_ = old_id_;
7564 serializer_->object_currently_writing_.cid_ = old_cid_;
7565}
7566
7567V8SnapshotProfileWriter::ObjectId Serializer::WritingObjectScope::ReserveId(
7568 Serializer* s,
7569 const char* type,
7570 ObjectPtr obj,
7571 const char* name) {
7572 if (s->profile_writer_ == nullptr) {
7574 }
7575 if (name == nullptr) {
7576 // Handle some cases where there are obvious names to assign.
7577 switch (obj->GetClassIdMayBeSmi()) {
7578 case kSmiCid: {
7579 name = OS::SCreate(s->zone(), "%" Pd "", Smi::Value(Smi::RawCast(obj)));
7580 break;
7581 }
7582 case kMintCid: {
7583 name = OS::SCreate(s->zone(), "%" Pd64 "",
7584 Mint::RawCast(obj)->untag()->value_);
7585 break;
7586 }
7587 case kOneByteStringCid:
7588 case kTwoByteStringCid: {
7589 name = String::ToCString(s->thread(), String::RawCast(obj));
7590 break;
7591 }
7592 }
7593 }
7594 const auto& obj_id = s->GetProfileId(obj);
7595 s->profile_writer_->SetObjectTypeAndName(obj_id, type, name);
7596 return obj_id;
7597}
7598
7599#if !defined(DART_PRECOMPILED_RUNTIME)
7600 bool Serializer::CreateArtificialNodeIfNeeded(ObjectPtr obj) {
7601 ASSERT(profile_writer() != nullptr);
7602
7603 // UnsafeRefId will do lazy reference allocation for WSRs.
7604 intptr_t id = UnsafeRefId(obj);
7606 if (id != kUnreachableReference) {
7607 return IsArtificialReference(id);
7608 }
7609 if (obj->IsHeapObject() && obj->IsWeakSerializationReference()) {
7610 auto const target =
7613 // Since the WSR is unreachable, we can replace its id with whatever the
7614 // ID of the target is, whether real or artificial.
7615 id = heap_->GetObjectId(target);
7616 heap_->SetObjectId(obj, id);
7617 return IsArtificialReference(id);
7618 }
7619
7620 const char* type = nullptr;
7621 const char* name = nullptr;
7623 const classid_t cid = obj->GetClassIdMayBeSmi();
7624 switch (cid) {
7625 // For profiling static call target tables in AOT mode.
7626 case kSmiCid: {
7627 type = "Smi";
7628 break;
7629 }
7630 // For profiling per-code object pools in bare instructions mode.
7631 case kObjectPoolCid: {
7632 type = "ObjectPool";
7633 auto const pool = ObjectPool::RawCast(obj);
7634 for (intptr_t i = 0; i < pool->untag()->length_; i++) {
7635 uint8_t bits = pool->untag()->entry_bits()[i];
7636 if (ObjectPool::TypeBits::decode(bits) ==
7637 ObjectPool::EntryType::kTaggedObject) {
7638 auto const elem = pool->untag()->data()[i].raw_obj_;
7639 // Elements should be reachable from the global object pool.
7640 ASSERT(HasRef(elem));
7642 }
7643 }
7644 break;
7645 }
7646 // For profiling static call target tables and the dispatch table in AOT.
7647 case kImmutableArrayCid:
7648 case kArrayCid: {
7649 type = "Array";
7650 auto const array = Array::RawCast(obj);
7651 for (intptr_t i = 0, n = Smi::Value(array->untag()->length()); i < n;
7652 i++) {
7653 ObjectPtr elem = array->untag()->element(i);
7655 }
7656 break;
7657 }
7658 // For profiling the dispatch table.
7659 case kCodeCid: {
7660 type = "Code";
7661 auto const code = Code::RawCast(obj);
7663 links.Add({code->untag()->owner(),
7665 break;
7666 }
7667 case kFunctionCid: {
7668 FunctionPtr func = static_cast<FunctionPtr>(obj);
7669 type = "Function";
7671 func);
7672 links.Add({func->untag()->owner(),
7674 ObjectPtr data = func->untag()->data();
7675 if (data->GetClassId() == kClosureDataCid) {
7676 links.Add(
7678 }
7679 break;
7680 }
7681 case kClosureDataCid: {
7682 auto data = static_cast<ClosureDataPtr>(obj);
7683 type = "ClosureData";
7684 links.Add(
7685 {data->untag()->parent_function(),
7687 break;
7688 }
7689 case kClassCid: {
7690 ClassPtr cls = static_cast<ClassPtr>(obj);
7691 type = "Class";
7692 name = String::ToCString(thread(), cls->untag()->name());
7693 links.Add({cls->untag()->library(),
7695 break;
7696 }
7697 case kPatchClassCid: {
7698 PatchClassPtr patch_cls = static_cast<PatchClassPtr>(obj);
7699 type = "PatchClass";
7700 links.Add(
7701 {patch_cls->untag()->wrapped_class(),
7703 break;
7704 }
7705 case kLibraryCid: {
7706 LibraryPtr lib = static_cast<LibraryPtr>(obj);
7707 type = "Library";
7708 name = String::ToCString(thread(), lib->untag()->url());
7709 break;
7710 }
7711 case kFunctionTypeCid: {
7712 type = "FunctionType";
7713 break;
7714 };
7715 case kRecordTypeCid: {
7716 type = "RecordType";
7717 break;
7718 };
7719 default:
7720 FATAL("Request to create artificial node for object with cid %d", cid);
7721 }
7722
7723 id = AssignArtificialRef(obj);
7724 Serializer::WritingObjectScope scope(this, type, obj, name);
7725 for (const auto& link : links) {
7726 CreateArtificialNodeIfNeeded(link.first);
7727 AttributeReference(link.first, link.second);
7728 }
7729 return true;
7730}
7731#endif // !defined(DART_PRECOMPILED_RUNTIME)
7732
7733intptr_t Serializer::RefId(ObjectPtr object) const {
7734 auto const id = UnsafeRefId(object);
7735 if (IsAllocatedReference(id)) {
7736 return id;
7737 }
7740 auto& handle = thread()->ObjectHandle();
7741 handle = object;
7742 FATAL("Reference to unreachable object %s", handle.ToCString());
7743}
7744
7745intptr_t Serializer::UnsafeRefId(ObjectPtr object) const {
7746 // The object id weak table holds image offsets for Instructions instead
7747 // of ref indices.
7748 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7749 if (!Snapshot::IncludesCode(kind_) &&
7750 object->GetClassIdMayBeSmi() == kCodeCid) {
7751 return RefId(Object::null());
7752 }
7753 auto id = heap_->GetObjectId(object);
7754 if (id != kUnallocatedReference) {
7755 return id;
7756 }
7757 // This is the only case where we may still see unallocated references after
7758 // WriteAlloc is finished.
7759 if (object->IsWeakSerializationReference()) {
7760 // Lazily set the object ID of the WSR to the object which will replace
7761 // it in the snapshot.
7762 auto const wsr = static_cast<WeakSerializationReferencePtr>(object);
7763 // Either the target or the replacement must be allocated, since the
7764 // WSR is reachable.
7765 id = HasRef(wsr->untag()->target()) ? RefId(wsr->untag()->target())
7766 : RefId(wsr->untag()->replacement());
7767 heap_->SetObjectId(wsr, id);
7768 return id;
7769 }
7771 auto& handle = thread()->ObjectHandle();
7772 handle = object;
7773 FATAL("Reference for object %s is unallocated", handle.ToCString());
7774}
7775
7776const char* Serializer::ReadOnlyObjectType(intptr_t cid) {
7777 switch (cid) {
7778 case kPcDescriptorsCid:
7779 return "PcDescriptors";
7780 case kCodeSourceMapCid:
7781 return "CodeSourceMap";
7782 case kCompressedStackMapsCid:
7783 return "CompressedStackMaps";
7784 case kStringCid:
7785 return current_loading_unit_id_ <= LoadingUnit::kRootId
7786 ? "CanonicalString"
7787 : nullptr;
7788 case kOneByteStringCid:
7789 return current_loading_unit_id_ <= LoadingUnit::kRootId
7790 ? "OneByteStringCid"
7791 : nullptr;
7792 case kTwoByteStringCid:
7793 return current_loading_unit_id_ <= LoadingUnit::kRootId
7794 ? "TwoByteStringCid"
7795 : nullptr;
7796 default:
7797 return nullptr;
7798 }
7799}
7800
7801 SerializationCluster* Serializer::NewClusterForClass(intptr_t cid,
7802 bool is_canonical) {
7803#if defined(DART_PRECOMPILED_RUNTIME)
7804 UNREACHABLE();
7805 return nullptr;
7806#else
7807 Zone* Z = zone_;
7808 if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
7809 Push(isolate_group()->class_table()->At(cid));
7810 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7811 }
7814 }
7817 }
7818 if (IsTypedDataClassId(cid)) {
7819 return new (Z) TypedDataSerializationCluster(cid);
7820 }
7821
7822#if !defined(DART_COMPRESSED_POINTERS)
7823 // Sometimes we write memory images for read-only objects that contain no
7824 // pointers. These can be mmapped directly, needing no relocation, and added
7825 // to the list of heap pages. This gives us lazy/demand paging from the OS.
7826 // We do not do this for snapshots without code to keep snapshots portable
7827 // between machines with different word sizes. We do not do this when we use
7828 // compressed pointers because we cannot always control the load address of
7829 // the memory image, and it might be outside the 4GB region addressable by
7830 // compressed pointers.
7831 if (Snapshot::IncludesCode(kind_)) {
7832 if (auto const type = ReadOnlyObjectType(cid)) {
7833 return new (Z) RODataSerializationCluster(Z, type, cid, is_canonical);
7834 }
7835 }
7836#endif
7837
7838 const bool cluster_represents_canonical_set =
7839 current_loading_unit_id_ <= LoadingUnit::kRootId && is_canonical;
7840
7841 switch (cid) {
7842 case kClassCid:
7843 return new (Z) ClassSerializationCluster(num_cids_ + num_tlc_cids_);
7844 case kTypeParametersCid:
7846 case kTypeArgumentsCid:
7848 is_canonical, cluster_represents_canonical_set);
7849 case kPatchClassCid:
7850 return new (Z) PatchClassSerializationCluster();
7851 case kFunctionCid:
7852 return new (Z) FunctionSerializationCluster();
7853 case kClosureDataCid:
7854 return new (Z) ClosureDataSerializationCluster();
7855 case kFfiTrampolineDataCid:
7857 case kFieldCid:
7858 return new (Z) FieldSerializationCluster();
7859 case kScriptCid:
7860 return new (Z) ScriptSerializationCluster();
7861 case kLibraryCid:
7862 return new (Z) LibrarySerializationCluster();
7863 case kNamespaceCid:
7864 return new (Z) NamespaceSerializationCluster();
7865 case kKernelProgramInfoCid:
7867 case kCodeCid:
7868 return new (Z) CodeSerializationCluster(heap_);
7869 case kObjectPoolCid:
7870 return new (Z) ObjectPoolSerializationCluster();
7871 case kPcDescriptorsCid:
7872 return new (Z) PcDescriptorsSerializationCluster();
7873 case kCodeSourceMapCid:
7874 return new (Z) CodeSourceMapSerializationCluster();
7875 case kCompressedStackMapsCid:
7877 case kExceptionHandlersCid:
7879 case kContextCid:
7880 return new (Z) ContextSerializationCluster();
7881 case kContextScopeCid:
7882 return new (Z) ContextScopeSerializationCluster();
7883 case kUnlinkedCallCid:
7884 return new (Z) UnlinkedCallSerializationCluster();
7885 case kICDataCid:
7886 return new (Z) ICDataSerializationCluster();
7887 case kMegamorphicCacheCid:
7889 case kSubtypeTestCacheCid:
7891 case kLoadingUnitCid:
7892 return new (Z) LoadingUnitSerializationCluster();
7893 case kLanguageErrorCid:
7894 return new (Z) LanguageErrorSerializationCluster();
7895 case kUnhandledExceptionCid:
7897 case kLibraryPrefixCid:
7898 return new (Z) LibraryPrefixSerializationCluster();
7899 case kTypeCid:
7900 return new (Z) TypeSerializationCluster(is_canonical,
7901 cluster_represents_canonical_set);
7902 case kFunctionTypeCid:
7904 is_canonical, cluster_represents_canonical_set);
7905 case kRecordTypeCid:
7906 return new (Z) RecordTypeSerializationCluster(
7907 is_canonical, cluster_represents_canonical_set);
7908 case kTypeParameterCid:
7910 is_canonical, cluster_represents_canonical_set);
7911 case kClosureCid:
7912 return new (Z) ClosureSerializationCluster(is_canonical);
7913 case kMintCid:
7914 return new (Z) MintSerializationCluster(is_canonical);
7915 case kDoubleCid:
7916 return new (Z) DoubleSerializationCluster(is_canonical);
7917 case kInt32x4Cid:
7918 case kFloat32x4Cid:
7919 case kFloat64x2Cid:
7920 return new (Z) Simd128SerializationCluster(cid, is_canonical);
7921 case kGrowableObjectArrayCid:
7922 return new (Z) GrowableObjectArraySerializationCluster();
7923 case kRecordCid:
7924 return new (Z) RecordSerializationCluster(is_canonical);
7925 case kStackTraceCid:
7926 return new (Z) StackTraceSerializationCluster();
7927 case kRegExpCid:
7928 return new (Z) RegExpSerializationCluster();
7929 case kWeakPropertyCid:
7930 return new (Z) WeakPropertySerializationCluster();
7931 case kMapCid:
7932 // We do not have mutable hash maps in snapshots.
7933 UNREACHABLE();
7934 case kConstMapCid:
7935 return new (Z) MapSerializationCluster(is_canonical, kConstMapCid);
7936 case kSetCid:
7937 // We do not have mutable hash sets in snapshots.
7938 UNREACHABLE();
7939 case kConstSetCid:
7940 return new (Z) SetSerializationCluster(is_canonical, kConstSetCid);
7941 case kArrayCid:
7942 return new (Z) ArraySerializationCluster(is_canonical, kArrayCid);
7943 case kImmutableArrayCid:
7944 return new (Z)
7945 ArraySerializationCluster(is_canonical, kImmutableArrayCid);
7946 case kWeakArrayCid:
7947 return new (Z) WeakArraySerializationCluster();
7948 case kStringCid:
7949 return new (Z) StringSerializationCluster(
7950 is_canonical, cluster_represents_canonical_set && !vm_);
7951#define CASE_FFI_CID(name) case kFfi##name##Cid:
7952 CLASS_LIST_FFI(CASE_FFI_CID)
7953#undef CASE_FFI_CID
7954 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7955 case kDeltaEncodedTypedDataCid:
7956 return new (Z) DeltaEncodedTypedDataSerializationCluster();
7957 case kWeakSerializationReferenceCid:
7958#if defined(DART_PRECOMPILER)
7959 ASSERT(kind_ == Snapshot::kFullAOT);
7960 return new (Z) WeakSerializationReferenceSerializationCluster();
7961#endif
7962 default:
7963 break;
7964 }
7965
7966 // The caller will check for nullptr and provide an error with more context
7967 // than is available here.
7968 return nullptr;
7969#endif // !DART_PRECOMPILED_RUNTIME
7970}
7971
7972 bool Serializer::InCurrentLoadingUnitOrRoot(ObjectPtr obj) {
7973 if (loading_units_ == nullptr) return true;
7974
7975 intptr_t unit_id = heap_->GetLoadingUnit(obj);
7976 if (unit_id == WeakTable::kNoValue) {
7977 FATAL("Missing loading unit assignment: %s\n",
7978 Object::Handle(obj).ToCString());
7979 }
7980 return unit_id == LoadingUnit::kRootId || unit_id == current_loading_unit_id_;
7981}
7982
7983 void Serializer::RecordDeferredCode(CodePtr code) {
7984 const intptr_t unit_id = heap_->GetLoadingUnit(code);
7985 ASSERT(unit_id != WeakTable::kNoValue && unit_id != LoadingUnit::kRootId);
7986 (*loading_units_)[unit_id]->AddDeferredObject(code);
7987}
7988
7989#if !defined(DART_PRECOMPILED_RUNTIME)
7990#if defined(DART_PRECOMPILER)
7991// We use the following encoding schemes when encoding references to Code
7992// objects.
7993//
7994// In AOT mode:
7995//
7996// 0 -- LazyCompile stub
7997// 1 -+
7998// | for non-root-unit/non-VM snapshots
7999// ... > reference into parent snapshot objects
8000// | (base is num_base_objects_ in this case, 0 otherwise).
8001// base -+
8002// base + 1 -+
8003// | for non-deferred Code objects (those with instructions)
8004// > index into the instructions table (code_index_).
8005// | (L is code_index_.Length()).
8006// base + L -+
8007// ... -+
8008// | for deferred Code objects (those without instructions)
8009// > index of this Code object in the deferred part of the
8010// | Code cluster.
8011//
8012// Note that this encoding has the following property: non-discarded
8013// non-deferred Code objects form the tail of the instructions table,
8014// which makes the indices assigned to non-discarded non-deferred Code
8015// objects and deferred Code objects contiguous. This means that, when
8016// decoding, code_index - (base + 1) - first_entry_with_code yields the
8017// index of the Code object in the Code cluster for both non-deferred and
8018// deferred Code objects.
8019//
8020// For JIT snapshots we do:
8021//
8022// 0 -- LazyCompile stub
8023// 1 -+
8024// |
8025// ... > index of the Code object in the Code cluster.
8026// |
8027//
8028intptr_t Serializer::GetCodeIndex(CodePtr code) {
8029 // In precompiled mode a Code object is uniquely identified by its
8030 // instructions (because ProgramVisitor::DedupInstructions will dedup Code
8031 // objects with the same instructions).
8032 if (code == StubCode::LazyCompile().ptr() && !vm_) {
8033 return 0;
8034 } else if (FLAG_precompiled_mode) {
8035 const intptr_t ref = heap_->GetObjectId(code);
8037
8038 const intptr_t base =
8039 (vm_ || current_loading_unit_id_ == LoadingUnit::kRootId)
8040 ? 0
8041 : num_base_objects_;
8042
8043 // Check if we are referring to a Code object which originates from the
8044 // parent loading unit. In this case we write out the reference to this
8045 // object.
8046 if (!Code::IsDiscarded(code) && ref < base) {
8048 return 1 + ref;
8049 }
8050
8051 // Otherwise the code object must either be discarded or originate from
8052 // the Code cluster.
8053 ASSERT(Code::IsDiscarded(code) || (code_cluster_->first_ref() <= ref &&
8054 ref <= code_cluster_->last_ref()));
8055
8056 // If Code object is non-deferred then simply write out the index of the
8057 // entry point, otherwise write out the index of the deferred code object.
8058 if (ref < code_cluster_->first_deferred_ref()) {
8059 const intptr_t key = static_cast<intptr_t>(code->untag()->instructions_);
8060 ASSERT(code_index_.HasKey(key));
8061 const intptr_t result = code_index_.Lookup(key);
8062 ASSERT(0 < result && result <= code_index_.Length());
8063 // Note: result already has + 1.
8064 return base + result;
8065 } else {
8066 // Note: only root snapshot can have deferred Code objects in the
8067 // cluster.
8068 const intptr_t cluster_index = ref - code_cluster_->first_deferred_ref();
8069 return 1 + base + code_index_.Length() + cluster_index;
8070 }
8071 } else {
8072 const intptr_t ref = heap_->GetObjectId(code);
8074 ASSERT(code_cluster_->first_ref() <= ref &&
8075 ref <= code_cluster_->last_ref());
8076 return 1 + (ref - code_cluster_->first_ref());
8077 }
8078}
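// --- Editorial sketch (not part of the original source) ---------------------
// A minimal illustration of inverting the AOT encoding documented above,
// assuming the same `base` and `first_entry_with_code` values that the writer
// used. The helper name and parameters are hypothetical.
static intptr_t DecodeCodeClusterIndex(intptr_t encoded,
                                       intptr_t base,
                                       intptr_t first_entry_with_code) {
  // 0 is the LazyCompile stub and [1, base] are references into the parent
  // snapshot, so only values above `base` map into the Code cluster.
  ASSERT(encoded > base);
  // Per the note above, table indices and deferred indices are contiguous,
  // so one subtraction covers both non-deferred and deferred Code objects.
  return encoded - (base + 1) - first_entry_with_code;
}
// ----------------------------------------------------------------------------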
8079#endif // defined(DART_PRECOMPILER)
8080
8081 void Serializer::PrepareInstructions(
8082 const CompressedStackMaps& canonical_stack_map_entries) {
8083 if (!Snapshot::IncludesCode(kind())) return;
8084
8085 // Code objects that have identical/duplicate instructions must be adjacent in
8086 // the order that Code objects are written because the encoding of the
8087 // reference from the Code to the Instructions assumes monotonically
8088 // increasing offsets as part of a delta encoding. Also the code order table
8089 // that allows for mapping return addresses back to Code objects depends on
8090 // this sorting.
8091 if (code_cluster_ != nullptr) {
8092 CodeSerializationCluster::Sort(this, code_cluster_->objects());
8093 }
8094 if ((loading_units_ != nullptr) &&
8095 (current_loading_unit_id_ == LoadingUnit::kRootId)) {
8096 for (intptr_t i = LoadingUnit::kRootId + 1; i < loading_units_->length();
8097 i++) {
8098 auto unit_objects = loading_units_->At(i)->deferred_objects();
8099 CodeSerializationCluster::Sort(this, unit_objects);
8100 ASSERT(unit_objects->length() == 0 || code_cluster_ != nullptr);
8101 for (intptr_t j = 0; j < unit_objects->length(); j++) {
8102 code_cluster_->deferred_objects()->Add(unit_objects->At(j)->ptr());
8103 }
8104 }
8105 }
8106
8107#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8108 if (kind() == Snapshot::kFullAOT) {
8109 // Group the code objects whose instructions are not being deferred in this
8110 // snapshot unit in the order they will be written: first the code objects
8111 // encountered for the first time in this unit, written by the
8112 // CodeSerializationCluster, then code objects previously deferred whose
8113 // instructions are now written by UnitSerializationRoots. This order needs
8114 // to be known to finalize bare-instructions-mode's PC-relative calls.
8115 GrowableArray<CodePtr> code_objects;
8116 if (code_cluster_ != nullptr) {
8117 auto in = code_cluster_->objects();
8118 for (intptr_t i = 0; i < in->length(); i++) {
8119 code_objects.Add(in->At(i));
8120 }
8121 }
8122 if (loading_units_ != nullptr) {
8123 auto in =
8124 loading_units_->At(current_loading_unit_id_)->deferred_objects();
8125 for (intptr_t i = 0; i < in->length(); i++) {
8126 code_objects.Add(in->At(i)->ptr());
8127 }
8128 }
8129
8130 GrowableArray<ImageWriterCommand> writer_commands;
8131 RelocateCodeObjects(vm_, &code_objects, &writer_commands);
8132 image_writer_->PrepareForSerialization(&writer_commands);
8133
8134 if (code_objects.length() == 0) {
8135 return;
8136 }
8137
8138 // Build UntaggedInstructionsTable::Data object to be added to the
8139 // read-only data section of the snapshot. It contains:
8140 //
8141 // - a binary search table mapping an Instructions entry point to its
8142 // stack maps (by offset from the beginning of the Data object);
8143 // - followed by stack maps bytes;
8144 // - followed by canonical stack map entries.
8145 //
8146 struct StackMapInfo : public ZoneAllocated {
8147 CompressedStackMapsPtr map;
8148 intptr_t use_count;
8149 uint32_t offset;
8150 };
8151
8152 GrowableArray<StackMapInfo*> stack_maps;
8153 IntMap<StackMapInfo*> stack_maps_info;
8154
8155 // Build code_index_ (which maps Instructions object to the order in
8156 // which they appear in the code section in the end) and collect all
8157 // stack maps.
8158 // We also find the first Instructions object which is going to have a
8159 // Code object associated with it. This allows us to reduce the binary
8160 // search space when searching specifically for the Code object at runtime.
8161 uint32_t total = 0;
8162 intptr_t not_discarded_count = 0;
8163 uint32_t first_entry_with_code = 0;
8164 for (auto& cmd : writer_commands) {
8165 if (cmd.op == ImageWriterCommand::InsertInstructionOfCode) {
8166 RELEASE_ASSERT(code_objects[total] ==
8167 cmd.insert_instruction_of_code.code);
8168 ASSERT(!Code::IsDiscarded(cmd.insert_instruction_of_code.code) ||
8169 (not_discarded_count == 0));
8170 if (!Code::IsDiscarded(cmd.insert_instruction_of_code.code)) {
8171 if (not_discarded_count == 0) {
8172 first_entry_with_code = total;
8173 }
8174 not_discarded_count++;
8175 }
8176 total++;
8177
8178 // Update code_index_.
8179 {
8180 const intptr_t instr = static_cast<intptr_t>(
8181 cmd.insert_instruction_of_code.code->untag()->instructions_);
8182 ASSERT(!code_index_.HasKey(instr));
8183 code_index_.Insert(instr, total);
8184 }
8185
8186 // Collect stack maps.
8187 CompressedStackMapsPtr stack_map =
8188 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8189 const intptr_t key = static_cast<intptr_t>(stack_map);
8190
8191 if (stack_maps_info.HasKey(key)) {
8192 stack_maps_info.Lookup(key)->use_count++;
8193 } else {
8194 auto info = new StackMapInfo();
8195 info->map = stack_map;
8196 info->use_count = 1;
8197 stack_maps.Add(info);
8198 stack_maps_info.Insert(key, info);
8199 }
8200 }
8201 }
8202 ASSERT(static_cast<intptr_t>(total) == code_index_.Length());
8203 instructions_table_len_ = not_discarded_count;
8204
8205 // Sort stack maps by usage so that most commonly used stack maps are
8206 // together at the start of the Data object.
8207 stack_maps.Sort([](StackMapInfo* const* a, StackMapInfo* const* b) {
8208 if ((*a)->use_count < (*b)->use_count) return 1;
8209 if ((*a)->use_count > (*b)->use_count) return -1;
8210 return 0;
8211 });
8212
8213 // Build Data object.
8214 MallocWriteStream pc_mapping(4 * KB);
8215
8216 // Write the header out.
8217 {
8218 UntaggedInstructionsTable::Data header;
8219 memset(&header, 0, sizeof(header));
8220 header.length = total;
8221 header.first_entry_with_code = first_entry_with_code;
8222 pc_mapping.WriteFixed<UntaggedInstructionsTable::Data>(header);
8223 }
8224
8225 // Reserve space for the binary search table.
8226 for (auto& cmd : writer_commands) {
8227 if (cmd.op == ImageWriterCommand::InsertInstructionOfCode) {
8228 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>({0, 0});
8229 }
8230 }
8231
8232 // Now write collected stack maps after the binary search table.
8233 auto write_stack_map = [&](CompressedStackMapsPtr smap) {
8234 const auto flags_and_size = smap->untag()->payload()->flags_and_size();
8235 const auto payload_size =
8237 pc_mapping.WriteFixed<uint32_t>(flags_and_size);
8238 pc_mapping.WriteBytes(smap->untag()->payload()->data(), payload_size);
8239 };
8240
8241 for (auto sm : stack_maps) {
8242 sm->offset = pc_mapping.bytes_written();
8243 write_stack_map(sm->map);
8244 }
8245
8246 // Write canonical entries (if any).
8247 if (!canonical_stack_map_entries.IsNull()) {
8248 auto header = reinterpret_cast<UntaggedInstructionsTable::Data*>(
8249 pc_mapping.buffer());
8250 header->canonical_stack_map_entries_offset = pc_mapping.bytes_written();
8251 write_stack_map(canonical_stack_map_entries.ptr());
8252 }
8253 const auto total_bytes = pc_mapping.bytes_written();
8254
8255 // Now that we have offsets to all stack maps we can write binary
8256 // search table.
8257 pc_mapping.SetPosition(
8258 sizeof(UntaggedInstructionsTable::Data)); // Skip the header.
8259 for (auto& cmd : writer_commands) {
8260 if (cmd.op == ImageWriterCommand::InsertInstructionOfCode) {
8261 CompressedStackMapsPtr smap =
8262 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8263 const auto offset =
8264 stack_maps_info.Lookup(static_cast<intptr_t>(smap))->offset;
8265 const auto entry = image_writer_->GetTextOffsetFor(
8266 Code::InstructionsOf(cmd.insert_instruction_of_code.code),
8267 cmd.insert_instruction_of_code.code);
8268
8269 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>(
8270 {static_cast<uint32_t>(entry), offset});
8271 }
8272 }
8273 // Restore position so that Steal does not truncate the buffer.
8274 pc_mapping.SetPosition(total_bytes);
8275
8276 intptr_t length = 0;
8277 uint8_t* bytes = pc_mapping.Steal(&length);
8278
8279 instructions_table_rodata_offset_ =
8280 image_writer_->AddBytesToData(bytes, length);
8281 // Attribute all bytes in this object to the root for simplicity.
8282 if (profile_writer_ != nullptr) {
8283 const auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8284 profile_writer_->AttributeReferenceTo(
8287 "<instructions-table-rodata>"),
8288 {offset_space, instructions_table_rodata_offset_});
8289 }
8290 }
8291#endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8292}
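// Editorial note (not from the original source): the read-only blob assembled
// by PrepareInstructions above is laid out as
//   [UntaggedInstructionsTable::Data header]
//   [length x UntaggedInstructionsTable::DataEntry]  // binary search table
//   [stack map payloads, most frequently used first]
//   [canonical stack map entries, when present]
// where each DataEntry pairs an instructions text offset with the offset of
// its stack map inside this same blob.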
8293
8294void Serializer::WriteInstructions(InstructionsPtr instr,
8295 uint32_t unchecked_offset,
8296 CodePtr code,
8297 bool deferred) {
8298 ASSERT(code != Code::null());
8299
8300 ASSERT(InCurrentLoadingUnitOrRoot(code) != deferred);
8301 if (deferred) {
8302 return;
8303 }
8304
8305 const intptr_t offset = image_writer_->GetTextOffsetFor(instr, code);
8306#if defined(DART_PRECOMPILER)
8307 if (profile_writer_ != nullptr) {
8308 ASSERT(object_currently_writing_.id_ !=
8310 const auto offset_space = vm_ ? IdSpace::kVmText : IdSpace::kIsolateText;
8311 profile_writer_->AttributeReferenceTo(
8312 object_currently_writing_.id_,
8314 {offset_space, offset});
8315 }
8316
8317 if (Code::IsDiscarded(code)) {
8318 // Discarded Code objects are not supported in the vm isolate snapshot.
8319 ASSERT(!vm_);
8320 return;
8321 }
8322
8323 if (FLAG_precompiled_mode) {
8324 const uint32_t payload_info =
8325 (unchecked_offset << 1) | (Code::HasMonomorphicEntry(code) ? 0x1 : 0x0);
8326 WriteUnsigned(payload_info);
8327 return;
8328 }
8329#endif
8330 Write<uint32_t>(offset);
8331 WriteUnsigned(unchecked_offset);
8332}
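// --- Editorial sketch (not part of the original source) ---------------------
// In the AOT branch above, the unchecked-entry offset and the "has monomorphic
// entry" bit are packed into a single unsigned value. A hypothetical helper
// for the reader side would unpack it as follows; Deserializer::ReadInstructions
// performs the equivalent shifts inline.
static void UnpackInstructionsPayloadInfo(uint32_t payload_info,
                                          uint32_t* unchecked_offset,
                                          bool* has_monomorphic_entry) {
  *unchecked_offset = payload_info >> 1;
  *has_monomorphic_entry = (payload_info & 0x1) != 0;
}
// ----------------------------------------------------------------------------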
8333
8334 void Serializer::TraceDataOffset(uint32_t offset) {
8335 if (profile_writer_ == nullptr) return;
8336 // ROData cannot be roots.
8337 ASSERT(object_currently_writing_.id_ !=
8339 auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8340 // TODO(sjindel): Give this edge a more appropriate type than element
8341 // (internal, maybe?).
8342 profile_writer_->AttributeReferenceTo(
8343 object_currently_writing_.id_,
8344 V8SnapshotProfileWriter::Reference::Element(0), {offset_space, offset});
8345}
8346
8347uint32_t Serializer::GetDataOffset(ObjectPtr object) const {
8348#if defined(SNAPSHOT_BACKTRACE)
8349 return image_writer_->GetDataOffsetFor(object, ParentOf(object));
8350#else
8351 return image_writer_->GetDataOffsetFor(object);
8352#endif
8353}
8354
8355intptr_t Serializer::GetDataSize() const {
8356 if (image_writer_ == nullptr) {
8357 return 0;
8358 }
8359 return image_writer_->data_size();
8360}
8361#endif // !defined(DART_PRECOMPILED_RUNTIME)
8362
8363void Serializer::Push(ObjectPtr object, intptr_t cid_override) {
8364 const bool is_code = object->IsHeapObject() && object->IsCode();
8365 if (is_code && !Snapshot::IncludesCode(kind_)) {
8366 return; // Do not trace, will write null.
8367 }
8368
8369 intptr_t id = heap_->GetObjectId(object);
8370 if (id == kUnreachableReference) {
8371 // When discovering the transitive closure of objects reachable from the
8372 // roots we do not trace references, e.g. inside [RawCode], to
8373 // [RawInstructions], since [RawInstructions] doesn't contain any references
8374 // and the serialization code uses an [ImageWriter] for those.
8375 if (object->IsHeapObject() && object->IsInstructions()) {
8376 UnexpectedObject(object,
8377 "Instructions should only be reachable from Code");
8378 }
8379
8380 heap_->SetObjectId(object, kUnallocatedReference);
8381 ASSERT(IsReachableReference(heap_->GetObjectId(object)));
8382 stack_.Add({object, cid_override});
8383 if (!(is_code && Code::IsDiscarded(Code::RawCast(object)))) {
8384 num_written_objects_++;
8385 }
8386#if defined(SNAPSHOT_BACKTRACE)
8387 parent_pairs_.Add(&Object::Handle(zone_, object));
8388 parent_pairs_.Add(&Object::Handle(zone_, current_parent_));
8389#endif
8390 }
8391}
8392
8393 void Serializer::PushWeak(ObjectPtr object) {
8394 // The GC considers immediate objects to always be alive. This doesn't happen
8395 // automatically in the serializer because the serializer does not have
8396 // immediate objects: it handles Smis as ref indices like all other objects.
8397 // This visit causes the serializer to reproduce the GC's semantics for
8398 // weakness, which in particular allows the templates in hash_table.h to work
8399 // with weak arrays because the metadata Smis always survive.
8400 if (!object->IsHeapObject() || vm_) {
8401 Push(object);
8402 }
8403}
8404
8405void Serializer::Trace(ObjectPtr object, intptr_t cid_override) {
8406 intptr_t cid;
8407 bool is_canonical;
8408 if (!object->IsHeapObject()) {
8409 // Smis are merged into the Mint cluster because Smis for the writer might
8410 // become Mints for the reader and vice versa.
8411 cid = kMintCid;
8412 is_canonical = true;
8413 } else {
8414 cid = object->GetClassId();
8415 is_canonical = object->untag()->IsCanonical();
8416 }
8417 if (cid_override != kIllegalCid) {
8418 cid = cid_override;
8419 } else if (IsStringClassId(cid)) {
8420 cid = kStringCid;
8421 }
8422
8423 SerializationCluster** cluster_ref =
8424 is_canonical ? &canonical_clusters_by_cid_[cid] : &clusters_by_cid_[cid];
8425 if (*cluster_ref == nullptr) {
8426 *cluster_ref = NewClusterForClass(cid, is_canonical);
8427 if (*cluster_ref == nullptr) {
8428 UnexpectedObject(object, "No serialization cluster defined");
8429 }
8430 }
8431 SerializationCluster* cluster = *cluster_ref;
8432 ASSERT(cluster != nullptr);
8433 if (cluster->is_canonical() != is_canonical) {
8434 FATAL("cluster for %s (cid %" Pd ") %s as canonical, but %s",
8435 cluster->name(), cid,
8436 cluster->is_canonical() ? "marked" : "not marked",
8437 is_canonical ? "should be" : "should not be");
8438 }
8439
8440#if defined(SNAPSHOT_BACKTRACE)
8441 current_parent_ = object;
8442#endif
8443
8444 cluster->Trace(this, object);
8445
8446#if defined(SNAPSHOT_BACKTRACE)
8447 current_parent_ = Object::null();
8448#endif
8449}
8450
8451void Serializer::UnexpectedObject(ObjectPtr raw_object, const char* message) {
8452 // Exit the no safepoint scope so we can allocate while printing.
8453 while (thread()->no_safepoint_scope_depth() > 0) {
8454 thread()->DecrementNoSafepointScopeDepth();
8455 }
8456 Object& object = Object::Handle(raw_object);
8457 OS::PrintErr("Unexpected object (%s, %s): 0x%" Px " %s\n", message,
8458 Snapshot::KindToCString(kind_), static_cast<uword>(object.ptr()),
8459 object.ToCString());
8460#if defined(SNAPSHOT_BACKTRACE)
8461 while (!object.IsNull()) {
8462 object = ParentOf(object);
8463 OS::PrintErr("referenced by 0x%" Px " %s\n",
8464 static_cast<uword>(object.ptr()), object.ToCString());
8465 }
8466#endif
8467 OS::Abort();
8468}
8469
8470#if defined(SNAPSHOT_BACKTRACE)
8471ObjectPtr Serializer::ParentOf(ObjectPtr object) const {
8472 for (intptr_t i = 0; i < parent_pairs_.length(); i += 2) {
8473 if (parent_pairs_[i]->ptr() == object) {
8474 return parent_pairs_[i + 1]->ptr();
8475 }
8476 }
8477 return Object::null();
8478}
8479
8480ObjectPtr Serializer::ParentOf(const Object& object) const {
8481 for (intptr_t i = 0; i < parent_pairs_.length(); i += 2) {
8482 if (parent_pairs_[i]->ptr() == object.ptr()) {
8483 return parent_pairs_[i + 1]->ptr();
8484 }
8485 }
8486 return Object::null();
8487}
8488#endif // SNAPSHOT_BACKTRACE
8489
8490void Serializer::WriteVersionAndFeatures(bool is_vm_snapshot) {
8491 const char* expected_version = Version::SnapshotString();
8492 ASSERT(expected_version != nullptr);
8493 const intptr_t version_len = strlen(expected_version);
8494 WriteBytes(reinterpret_cast<const uint8_t*>(expected_version), version_len);
8495
8496 char* expected_features =
8497 Dart::FeaturesString(IsolateGroup::Current(), is_vm_snapshot, kind_);
8498 ASSERT(expected_features != nullptr);
8499 const intptr_t features_len = strlen(expected_features);
8500 WriteBytes(reinterpret_cast<const uint8_t*>(expected_features),
8501 features_len + 1);
8502 free(expected_features);
8503}
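// Editorial note (not from the original source): the header written above is
// the raw version string (exactly strlen(Version::SnapshotString()) bytes,
// with no terminator) immediately followed by the features string including
// its trailing '\0'. SnapshotHeaderReader::VerifyVersion() and
// VerifyFeatures() below consume those two fields in the same order.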
8504
8505#if !defined(DART_PRECOMPILED_RUNTIME)
8506 static int CompareClusters(SerializationCluster* const* a,
8507 SerializationCluster* const* b) {
8508 if ((*a)->size() > (*b)->size()) {
8509 return -1;
8510 } else if ((*a)->size() < (*b)->size()) {
8511 return 1;
8512 } else {
8513 return 0;
8514 }
8515}
8516
8517#define CID_CLUSTER(Type) \
8518 reinterpret_cast<Type##SerializationCluster*>(clusters_by_cid_[k##Type##Cid])
8519
8524 ZoneGrowableArray<Object*>* Serializer::Serialize(SerializationRoots* roots) {
8526 // While object_currently_writing_ is initialized to the artificial root, we
8527 // set up a scope to ensure proper flushing to the profile.
8527 // set up a scope to ensure proper flushing to the profile.
8530 roots->AddBaseObjects(this);
8531
8532 NoSafepointScope no_safepoint;
8533
8534 roots->PushRoots(this);
8535
8536 // Resolving WeakSerializationReferences and WeakProperties may cause new
8537 // objects to be pushed on the stack, and handling the changes to the stack
8538 // may cause the targets of WeakSerializationReferences and keys of
8539 // WeakProperties to become reachable, so we do this as a fixed point
8540 // computation. Note that reachability is computed monotonically (an object
8541 // can change from not reachable to reachable, but never the reverse), which
8542 // is technically a conservative approximation for WSRs, but doing a strict
8543 // analysis that allows non-monotonic reachability may not halt.
8544 //
8545 // To see this, take a WSR whose replacement causes the target of another WSR
8546 // to become reachable, which then causes the target of the first WSR to
8547 // become reachable, but the only way to reach the target is through the
8548 // target of the second WSR, which was only reachable via the replacement
8549 // the first.
8550 //
8551 // In practice, this case doesn't come up as replacements tend to be either
8552 // null, smis, or singleton objects that do not contain WSRs currently.
8553 while (stack_.length() > 0) {
8554 // Strong references.
8555 while (stack_.length() > 0) {
8556 StackEntry entry = stack_.RemoveLast();
8557 Trace(entry.obj, entry.cid_override);
8558 }
8559
8560 // Ephemeron references.
8561#if defined(DART_PRECOMPILER)
8562 if (auto const cluster = CID_CLUSTER(WeakSerializationReference)) {
8563 cluster->RetraceEphemerons(this);
8564 }
8565#endif
8566 if (auto const cluster = CID_CLUSTER(WeakProperty)) {
8567 cluster->RetraceEphemerons(this);
8568 }
8569 }
8570
8571#if defined(DART_PRECOMPILER)
8572 auto const wsr_cluster = CID_CLUSTER(WeakSerializationReference);
8573 if (wsr_cluster != nullptr) {
8574 // Now that we have computed the reachability fixpoint, we remove the
8575 // count of now-reachable WSRs as they are not actually serialized.
8576 num_written_objects_ -= wsr_cluster->Count(this);
8577 // We don't need to write this cluster, so remove it from consideration.
8578 clusters_by_cid_[kWeakSerializationReferenceCid] = nullptr;
8579 }
8580 ASSERT(clusters_by_cid_[kWeakSerializationReferenceCid] == nullptr);
8581#endif
8582
8583 code_cluster_ = CID_CLUSTER(Code);
8584
8585 GrowableArray<SerializationCluster*> clusters;
8586 // The order in which PostLoad runs matters for some classes because
8587 // canonicalization, read filling, or post-load filling of some classes makes
8588 // assumptions about what has already been read and/or canonicalized.
8589 // Explicitly add these clusters first, then add the rest ordered by class id.
8590#define ADD_CANONICAL_NEXT(cid) \
8591 if (auto const cluster = canonical_clusters_by_cid_[cid]) { \
8592 clusters.Add(cluster); \
8593 canonical_clusters_by_cid_[cid] = nullptr; \
8594 }
8595#define ADD_NON_CANONICAL_NEXT(cid) \
8596 if (auto const cluster = clusters_by_cid_[cid]) { \
8597 clusters.Add(cluster); \
8598 clusters_by_cid_[cid] = nullptr; \
8599 }
8600 ADD_CANONICAL_NEXT(kOneByteStringCid)
8601 ADD_CANONICAL_NEXT(kTwoByteStringCid)
8602 ADD_CANONICAL_NEXT(kStringCid)
8603 ADD_CANONICAL_NEXT(kMintCid)
8604 ADD_CANONICAL_NEXT(kDoubleCid)
8605 ADD_CANONICAL_NEXT(kTypeParameterCid)
8606 ADD_CANONICAL_NEXT(kTypeCid)
8607 ADD_CANONICAL_NEXT(kTypeArgumentsCid)
8608 // Code cluster should be deserialized before Function as
8609 // FunctionDeserializationCluster::ReadFill uses the instructions table,
8610 // which is filled in CodeDeserializationCluster::ReadFill.
8611 // Code cluster should also precede ObjectPool as its ReadFill uses
8612 // entry points of stubs.
8613 ADD_NON_CANONICAL_NEXT(kCodeCid)
8614 // The function cluster should be deserialized before any closures, as
8615 // PostLoad for closures caches the entry point found in the function.
8616 ADD_NON_CANONICAL_NEXT(kFunctionCid)
8617 ADD_CANONICAL_NEXT(kClosureCid)
8618#undef ADD_CANONICAL_NEXT
8619#undef ADD_NON_CANONICAL_NEXT
8620 const intptr_t out_of_order_clusters = clusters.length();
8621 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8622 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8623 clusters.Add(cluster);
8624 }
8625 }
8626 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8627 if (auto const cluster = clusters_by_cid_[cid]) {
8628 clusters.Add(clusters_by_cid_[cid]);
8629 }
8630 }
8631 // Put back any clusters taken out temporarily to avoid re-adding them in the loops above.
8632 for (intptr_t i = 0; i < out_of_order_clusters; i++) {
8633 const auto& cluster = clusters.At(i);
8634 const intptr_t cid = cluster->cid();
8635 auto const cid_clusters =
8636 cluster->is_canonical() ? canonical_clusters_by_cid_ : clusters_by_cid_;
8637 ASSERT(cid_clusters[cid] == nullptr);
8638 cid_clusters[cid] = cluster;
8639 }
8640
8641 PrepareInstructions(roots->canonicalized_stack_map_entries());
8642
8643 intptr_t num_objects = num_base_objects_ + num_written_objects_;
8644#if defined(ARCH_IS_64_BIT)
8645 if (!Utils::IsInt(32, num_objects)) {
8646 FATAL("Ref overflow");
8647 }
8648#endif
8649
8650 WriteUnsigned(num_base_objects_);
8651 WriteUnsigned(num_objects);
8652 WriteUnsigned(clusters.length());
8653 ASSERT((instructions_table_len_ == 0) || FLAG_precompiled_mode);
8654 WriteUnsigned(instructions_table_len_);
8655 WriteUnsigned(instructions_table_rodata_offset_);
8656
8657 for (SerializationCluster* cluster : clusters) {
8658 cluster->WriteAndMeasureAlloc(this);
8659 bytes_heap_allocated_ += cluster->target_memory_size();
8660#if defined(DEBUG)
8661 Write<int32_t>(next_ref_index_);
8662#endif
8663 }
8664
8665 // We should have assigned a ref to every object we pushed.
8666 ASSERT((next_ref_index_ - 1) == num_objects);
8667 // And recorded them all in [objects_].
8668 ASSERT(objects_->length() == num_objects);
8669
8670#if defined(DART_PRECOMPILER)
8671 if (profile_writer_ != nullptr && wsr_cluster != nullptr) {
8672 // Post-WriteAlloc, we eagerly create artificial nodes for any unreachable
8673 // targets in reachable WSRs if writing a v8 snapshot profile, since they
8674 // will be used in AttributeReference().
8675 //
8676 // Unreachable WSRs may also need artificial nodes, as they may be members
8677 // of other unreachable objects that have artificial nodes in the profile,
8678 // but they are instead lazily handled in CreateArtificialNodeIfNeeded().
8679 wsr_cluster->CreateArtificialTargetNodesIfNeeded(this);
8680 }
8681#endif
8682
8683 for (SerializationCluster* cluster : clusters) {
8684 cluster->WriteAndMeasureFill(this);
8685#if defined(DEBUG)
8686 Write<int32_t>(kSectionMarker);
8687#endif
8688 }
8689
8690 roots->WriteRoots(this);
8691
8692#if defined(DEBUG)
8693 Write<int32_t>(kSectionMarker);
8694#endif
8695
8697
8699
8700 return objects_;
8701}
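// Editorial note (not from the original source): the cluster portion of the
// snapshot written by Serialize() is, in order: num_base_objects, num_objects,
// cluster count, instructions table length, instructions-table rodata offset,
// one alloc section per cluster, one fill section per cluster, and finally the
// roots. Deserializer::Deserialize() below reads the same sequence back.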
8702#endif // !defined(DART_PRECOMPILED_RUNTIME)
8703
8704#if defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
8705// The serialized format of the dispatch table is a sequence of variable-length
8706// integers (the built-in variable-length integer encoding/decoding of
8707// the stream). Each encoded integer e is interpreted thus:
8708// -kRecentCount .. -1 Pick value from the recent values buffer at index -1-e.
8709// 0 Empty (unused) entry.
8710// 1 .. kMaxRepeat Repeat previous entry e times.
8711// kIndexBase or higher Pick entry point from the object at index e-kIndexBase
8712// in the snapshot code cluster. Also put it in the recent
8713// values buffer at the next round-robin index.
8714
8715// Constants for serialization format. Chosen such that repeats and recent
8716// values are encoded as single bytes in SLEB128 encoding.
8717static constexpr intptr_t kDispatchTableSpecialEncodingBits = 6;
8718static constexpr intptr_t kDispatchTableRecentCount =
8719 1 << kDispatchTableSpecialEncodingBits;
8720static constexpr intptr_t kDispatchTableRecentMask =
8721 (1 << kDispatchTableSpecialEncodingBits) - 1;
8722static constexpr intptr_t kDispatchTableMaxRepeat =
8723 (1 << kDispatchTableSpecialEncodingBits) - 1;
8724static constexpr intptr_t kDispatchTableIndexBase = kDispatchTableMaxRepeat + 1;
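// Editorial illustration (not from the original source): under the scheme
// above, a table whose entries are [A, A, A, <empty>, B, A] could be written
// as the values
//   idx(A) + kDispatchTableIndexBase   // A; also enters the recent buffer
//   2                                  // repeat previous entry twice more
//   0                                  // empty entry
//   idx(B) + kDispatchTableIndexBase   // B; also enters the recent buffer
//   -1                                 // recent buffer slot 0, i.e. A again
// assuming A still occupies slot 0 of the recent-values buffer at that point.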
8725#endif // defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
8726
8727 void Serializer::WriteDispatchTable(const Array& entries) {
8728#if defined(DART_PRECOMPILER)
8729 if (kind() != Snapshot::kFullAOT) return;
8730
8731 // Create an artificial node to which the bytes should be attributed. We
8732 // don't attribute them to entries.ptr(), as we don't want to attribute the
8733 // bytes for printing out a length of 0 to Object::null() when the dispatch
8734 // table is empty.
8735 const intptr_t profile_ref = AssignArtificialRef();
8736 const auto& dispatch_table_profile_id = GetProfileId(profile_ref);
8737 if (profile_writer_ != nullptr) {
8738 profile_writer_->SetObjectTypeAndName(dispatch_table_profile_id,
8739 "DispatchTable", "dispatch_table");
8740 profile_writer_->AddRoot(dispatch_table_profile_id);
8741 }
8742 WritingObjectScope scope(this, dispatch_table_profile_id);
8743 if (profile_writer_ != nullptr) {
8744 // We'll write the Array object as a property of the artificial dispatch
8745 // table node, so Code objects otherwise unreferenced will have it as an
8746 // ancestor.
8748 AttributePropertyRef(entries.ptr(), "<code entries>");
8749 }
8750
8751 const intptr_t bytes_before = bytes_written();
8752 const intptr_t table_length = entries.IsNull() ? 0 : entries.Length();
8753
8754 ASSERT(table_length <= compiler::target::kWordMax);
8755 WriteUnsigned(table_length);
8756 if (table_length == 0) {
8757 dispatch_table_size_ = bytes_written() - bytes_before;
8758 return;
8759 }
8760
8761 ASSERT(code_cluster_ != nullptr);
8762 // If instructions can be deduped, the code order table in the deserializer
8763 // may not contain all Code objects in the snapshot. Thus, we write the ID
8764 // for the first code object here so we can retrieve it during deserialization
8765 // and calculate the snapshot ID for Code objects from the cluster index.
8766 //
8767 // We could just use the snapshot reference ID of the Code object itself
8768 // instead of the cluster index and avoid this. However, since entries are
8769 // SLEB128 encoded, the size delta for serializing the first ID once is less
8770 // than the size delta of serializing the ID plus kIndexBase for each entry,
8771 // even when Code objects are allocated before all other non-base objects.
8772 //
8773 // We could also map Code objects to the first Code object in the cluster with
8774 // the same entry point and serialize that ID instead, but that loses
8775 // information about which Code object was originally referenced.
8776 WriteUnsigned(code_cluster_->first_ref());
8777
8778 CodePtr previous_code = nullptr;
8779 CodePtr recent[kDispatchTableRecentCount] = {nullptr};
8780 intptr_t recent_index = 0;
8781 intptr_t repeat_count = 0;
8782 for (intptr_t i = 0; i < table_length; i++) {
8783 auto const code = Code::RawCast(entries.At(i));
8784 // First, see if we're repeating the previous entry (invalid, recent, or
8785 // encoded).
8786 if (code == previous_code) {
8787 if (++repeat_count == kDispatchTableMaxRepeat) {
8788 Write(kDispatchTableMaxRepeat);
8789 repeat_count = 0;
8790 }
8791 continue;
8792 }
8793 // Emit any outstanding repeat count before handling the new code value.
8794 if (repeat_count > 0) {
8795 Write(repeat_count);
8796 repeat_count = 0;
8797 }
8798 previous_code = code;
8799 // The invalid entry can be repeated, but is never part of the recent list
8800 // since it already encodes to a single byte.
8801 if (code == Code::null()) {
8802 Write(0);
8803 continue;
8804 }
8805 // Check against the recent entries, and write an encoded reference to
8806 // the recent entry if found.
8807 intptr_t found_index = 0;
8808 for (; found_index < kDispatchTableRecentCount; found_index++) {
8809 if (recent[found_index] == code) break;
8810 }
8811 if (found_index < kDispatchTableRecentCount) {
8812 Write(~found_index);
8813 continue;
8814 }
8815 // We have a non-repeated, non-recent entry, so encode the reference ID of
8816 // the code object and emit that.
8817 auto const code_index = GetCodeIndex(code);
8818 // Use the index in the code cluster, not in the snapshot.
8819 auto const encoded = kDispatchTableIndexBase + code_index;
8820 ASSERT(encoded <= compiler::target::kWordMax);
8821 Write(encoded);
8822 recent[recent_index] = code;
8823 recent_index = (recent_index + 1) & kDispatchTableRecentMask;
8824 }
8825 if (repeat_count > 0) {
8826 Write(repeat_count);
8827 }
8828 dispatch_table_size_ = bytes_written() - bytes_before;
8829#endif // defined(DART_PRECOMPILER)
8830}
8831
8832 void Serializer::PrintSnapshotSizes() {
8833#if !defined(DART_PRECOMPILED_RUNTIME)
8834 if (FLAG_print_snapshot_sizes_verbose) {
8835 TextBuffer buffer(1024);
8836 // Header, using format sizes matching those below to ensure alignment.
8837 buffer.Printf("%25s", "Cluster");
8838 buffer.Printf(" %6s", "Objs");
8839 buffer.Printf(" %8s", "Size");
8840 buffer.Printf(" %8s", "Fraction");
8841 buffer.Printf(" %10s", "Cumulative");
8842 buffer.Printf(" %8s", "HeapSize");
8843 buffer.Printf(" %5s", "Cid");
8844 buffer.Printf(" %9s", "Canonical");
8845 buffer.AddString("\n");
8846 GrowableArray<SerializationCluster*> clusters_by_size;
8847 for (intptr_t cid = 1; cid < num_cids_; cid++) {
8848 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8849 clusters_by_size.Add(cluster);
8850 }
8851 if (auto const cluster = clusters_by_cid_[cid]) {
8852 clusters_by_size.Add(cluster);
8853 }
8854 }
8855 intptr_t text_size = 0;
8856 if (image_writer_ != nullptr) {
8857 auto const text_object_count = image_writer_->GetTextObjectCount();
8858 text_size = image_writer_->text_size();
8859 intptr_t trampoline_count, trampoline_size;
8860 image_writer_->GetTrampolineInfo(&trampoline_count, &trampoline_size);
8861 auto const instructions_count = text_object_count - trampoline_count;
8862 auto const instructions_size = text_size - trampoline_size;
8863 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8864 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Instructions"),
8865 instructions_count, instructions_size));
8866 if (trampoline_size > 0) {
8867 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8868 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Trampoline"),
8869 trampoline_count, trampoline_size));
8870 }
8871 }
8872 // The dispatch_table_size_ will be 0 if the snapshot did not include a
8873 // dispatch table (i.e., the VM snapshot). For a precompiled isolate
8874 // snapshot, we always serialize at least _one_ byte for the DispatchTable.
8875 if (dispatch_table_size_ > 0) {
8876 const auto& dispatch_table_entries = Array::Handle(
8877 zone_,
8878 isolate_group()->object_store()->dispatch_table_code_entries());
8879 auto const entry_count =
8880 dispatch_table_entries.IsNull() ? 0 : dispatch_table_entries.Length();
8881 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8882 "DispatchTable", entry_count, dispatch_table_size_));
8883 }
8884 if (instructions_table_len_ > 0) {
8885 const intptr_t memory_size =
8886 compiler::target::InstructionsTable::InstanceSize() +
8887 compiler::target::Array::InstanceSize(instructions_table_len_);
8888 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8889 "InstructionsTable", instructions_table_len_, 0, memory_size));
8890 }
8891 clusters_by_size.Sort(CompareClusters);
8892 double total_size =
8893 static_cast<double>(bytes_written() + GetDataSize() + text_size);
8894 double cumulative_fraction = 0.0;
8895 for (intptr_t i = 0; i < clusters_by_size.length(); i++) {
8896 SerializationCluster* cluster = clusters_by_size[i];
8897 double fraction = static_cast<double>(cluster->size()) / total_size;
8898 cumulative_fraction += fraction;
8899 buffer.Printf("%25s", cluster->name());
8900 buffer.Printf(" %6" Pd "", cluster->num_objects());
8901 buffer.Printf(" %8" Pd "", cluster->size());
8902 buffer.Printf(" %1.6lf", fraction);
8903 buffer.Printf(" %1.8lf", cumulative_fraction);
8904 buffer.Printf(" %8" Pd "", cluster->target_memory_size());
8905 if (cluster->cid() != -1) {
8906 buffer.Printf(" %5" Pd "", cluster->cid());
8907 } else {
8908 buffer.Printf(" %5s", "");
8909 }
8910 if (cluster->is_canonical()) {
8911 buffer.Printf(" %9s", "canonical");
8912 } else {
8913 buffer.Printf(" %9s", "");
8914 }
8915 buffer.AddString("\n");
8916 }
8917 OS::PrintErr("%s", buffer.buffer());
8918 }
8919#endif // !defined(DART_PRECOMPILED_RUNTIME)
8920}
8921
8922 Deserializer::Deserializer(Thread* thread,
8923 Snapshot::Kind kind,
8924 const uint8_t* buffer,
8925 intptr_t size,
8926 const uint8_t* data_buffer,
8927 const uint8_t* instructions_buffer,
8928 bool is_non_root_unit,
8929 intptr_t offset)
8930 : ThreadStackResource(thread),
8931 heap_(thread->isolate_group()->heap()),
8932 old_space_(heap_->old_space()),
8933 freelist_(old_space_->DataFreeList()),
8934 zone_(thread->zone()),
8935 kind_(kind),
8936 stream_(buffer, size),
8937 image_reader_(nullptr),
8938 refs_(nullptr),
8939 next_ref_index_(kFirstReference),
8940 clusters_(nullptr),
8941 is_non_root_unit_(is_non_root_unit),
8942 instructions_table_(InstructionsTable::Handle(thread->zone())) {
8943 if (Snapshot::IncludesCode(kind)) {
8944 ASSERT(instructions_buffer != nullptr);
8945 ASSERT(data_buffer != nullptr);
8946 image_reader_ = new (zone_) ImageReader(data_buffer, instructions_buffer);
8947 }
8948 stream_.SetPosition(offset);
8949}
8950
8951 Deserializer::~Deserializer() {
8952 delete[] clusters_;
8953}
8954
8955 DeserializationCluster* Deserializer::ReadCluster() {
8956 const uint32_t tags = Read<uint32_t>();
8957 const intptr_t cid = UntaggedObject::ClassIdTag::decode(tags);
8958 const bool is_canonical = UntaggedObject::CanonicalBit::decode(tags);
8959 const bool is_immutable = UntaggedObject::ImmutableBit::decode(tags);
8960 Zone* Z = zone_;
8961 if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
8962 return new (Z) InstanceDeserializationCluster(
8963 cid, is_canonical, is_immutable, !is_non_root_unit_);
8964 }
8965 if (IsTypedDataViewClassId(cid) || cid == kByteDataViewCid) {
8966 ASSERT(!is_canonical);
8967 return new (Z) TypedDataViewDeserializationCluster(cid);
8968 }
8969 if (IsExternalTypedDataClassId(cid)) {
8970 ASSERT(!is_canonical);
8971 return new (Z) ExternalTypedDataDeserializationCluster(cid);
8972 }
8973 if (IsTypedDataClassId(cid)) {
8974 ASSERT(!is_canonical);
8975 return new (Z) TypedDataDeserializationCluster(cid);
8976 }
8977
8978#if !defined(DART_COMPRESSED_POINTERS)
8979 if (Snapshot::IncludesCode(kind_)) {
8980 switch (cid) {
8981 case kPcDescriptorsCid:
8982 case kCodeSourceMapCid:
8983 case kCompressedStackMapsCid:
8984 return new (Z)
8985 RODataDeserializationCluster(cid, is_canonical, !is_non_root_unit_);
8986 case kOneByteStringCid:
8987 case kTwoByteStringCid:
8988 case kStringCid:
8989 if (!is_non_root_unit_) {
8990 return new (Z) RODataDeserializationCluster(cid, is_canonical,
8991 !is_non_root_unit_);
8992 }
8993 break;
8994 }
8995 }
8996#endif
8997
8998 switch (cid) {
8999 case kClassCid:
9000 ASSERT(!is_canonical);
9001 return new (Z) ClassDeserializationCluster();
9002 case kTypeParametersCid:
9003 return new (Z) TypeParametersDeserializationCluster();
9004 case kTypeArgumentsCid:
9005 return new (Z)
9006 TypeArgumentsDeserializationCluster(is_canonical, !is_non_root_unit_);
9007 case kPatchClassCid:
9008 ASSERT(!is_canonical);
9009 return new (Z) PatchClassDeserializationCluster();
9010 case kFunctionCid:
9011 ASSERT(!is_canonical);
9012 return new (Z) FunctionDeserializationCluster();
9013 case kClosureDataCid:
9014 ASSERT(!is_canonical);
9015 return new (Z) ClosureDataDeserializationCluster();
9016 case kFfiTrampolineDataCid:
9017 ASSERT(!is_canonical);
9018 return new (Z) FfiTrampolineDataDeserializationCluster();
9019 case kFieldCid:
9020 ASSERT(!is_canonical);
9021 return new (Z) FieldDeserializationCluster();
9022 case kScriptCid:
9023 ASSERT(!is_canonical);
9024 return new (Z) ScriptDeserializationCluster();
9025 case kLibraryCid:
9026 ASSERT(!is_canonical);
9027 return new (Z) LibraryDeserializationCluster();
9028 case kNamespaceCid:
9029 ASSERT(!is_canonical);
9030 return new (Z) NamespaceDeserializationCluster();
9031#if !defined(DART_PRECOMPILED_RUNTIME)
9032 case kKernelProgramInfoCid:
9033 ASSERT(!is_canonical);
9034 return new (Z) KernelProgramInfoDeserializationCluster();
9035#endif // !DART_PRECOMPILED_RUNTIME
9036 case kCodeCid:
9037 ASSERT(!is_canonical);
9038 return new (Z) CodeDeserializationCluster();
9039 case kObjectPoolCid:
9040 ASSERT(!is_canonical);
9041 return new (Z) ObjectPoolDeserializationCluster();
9042 case kPcDescriptorsCid:
9043 ASSERT(!is_canonical);
9044 return new (Z) PcDescriptorsDeserializationCluster();
9045 case kCodeSourceMapCid:
9046 ASSERT(!is_canonical);
9047 return new (Z) CodeSourceMapDeserializationCluster();
9048 case kCompressedStackMapsCid:
9049 ASSERT(!is_canonical);
9050 return new (Z) CompressedStackMapsDeserializationCluster();
9051 case kExceptionHandlersCid:
9052 ASSERT(!is_canonical);
9053 return new (Z) ExceptionHandlersDeserializationCluster();
9054 case kContextCid:
9055 ASSERT(!is_canonical);
9056 return new (Z) ContextDeserializationCluster();
9057 case kContextScopeCid:
9058 ASSERT(!is_canonical);
9059 return new (Z) ContextScopeDeserializationCluster();
9060 case kUnlinkedCallCid:
9061 ASSERT(!is_canonical);
9062 return new (Z) UnlinkedCallDeserializationCluster();
9063 case kICDataCid:
9064 ASSERT(!is_canonical);
9065 return new (Z) ICDataDeserializationCluster();
9066 case kMegamorphicCacheCid:
9067 ASSERT(!is_canonical);
9068 return new (Z) MegamorphicCacheDeserializationCluster();
9069 case kSubtypeTestCacheCid:
9070 ASSERT(!is_canonical);
9071 return new (Z) SubtypeTestCacheDeserializationCluster();
9072 case kLoadingUnitCid:
9073 ASSERT(!is_canonical);
9074 return new (Z) LoadingUnitDeserializationCluster();
9075 case kLanguageErrorCid:
9076 ASSERT(!is_canonical);
9077 return new (Z) LanguageErrorDeserializationCluster();
9078 case kUnhandledExceptionCid:
9079 ASSERT(!is_canonical);
9080 return new (Z) UnhandledExceptionDeserializationCluster();
9081 case kLibraryPrefixCid:
9082 ASSERT(!is_canonical);
9083 return new (Z) LibraryPrefixDeserializationCluster();
9084 case kTypeCid:
9085 return new (Z)
9086 TypeDeserializationCluster(is_canonical, !is_non_root_unit_);
9087 case kFunctionTypeCid:
9088 return new (Z)
9089 FunctionTypeDeserializationCluster(is_canonical, !is_non_root_unit_);
9090 case kRecordTypeCid:
9091 return new (Z)
9092 RecordTypeDeserializationCluster(is_canonical, !is_non_root_unit_);
9093 case kTypeParameterCid:
9094 return new (Z)
9095 TypeParameterDeserializationCluster(is_canonical, !is_non_root_unit_);
9096 case kClosureCid:
9097 return new (Z)
9098 ClosureDeserializationCluster(is_canonical, !is_non_root_unit_);
9099 case kMintCid:
9100 return new (Z)
9101 MintDeserializationCluster(is_canonical, !is_non_root_unit_);
9102 case kDoubleCid:
9103 return new (Z)
9104 DoubleDeserializationCluster(is_canonical, !is_non_root_unit_);
9105 case kInt32x4Cid:
9106 case kFloat32x4Cid:
9107 case kFloat64x2Cid:
9108 return new (Z)
9109 Simd128DeserializationCluster(cid, is_canonical, !is_non_root_unit_);
9110 case kGrowableObjectArrayCid:
9111 ASSERT(!is_canonical);
9112 return new (Z) GrowableObjectArrayDeserializationCluster();
9113 case kRecordCid:
9114 return new (Z)
9115 RecordDeserializationCluster(is_canonical, !is_non_root_unit_);
9116 case kStackTraceCid:
9117 ASSERT(!is_canonical);
9118 return new (Z) StackTraceDeserializationCluster();
9119 case kRegExpCid:
9120 ASSERT(!is_canonical);
9121 return new (Z) RegExpDeserializationCluster();
9122 case kWeakPropertyCid:
9123 ASSERT(!is_canonical);
9124 return new (Z) WeakPropertyDeserializationCluster();
9125 case kMapCid:
9126 // We do not have mutable hash maps in snapshots.
9127 UNREACHABLE();
9128 case kConstMapCid:
9129 return new (Z) MapDeserializationCluster(kConstMapCid, is_canonical,
9130 !is_non_root_unit_);
9131 case kSetCid:
9132 // We do not have mutable hash sets in snapshots.
9133 UNREACHABLE();
9134 case kConstSetCid:
9135 return new (Z) SetDeserializationCluster(kConstSetCid, is_canonical,
9136 !is_non_root_unit_);
9137 case kArrayCid:
9138 return new (Z) ArrayDeserializationCluster(kArrayCid, is_canonical,
9139 !is_non_root_unit_);
9140 case kImmutableArrayCid:
9141 return new (Z) ArrayDeserializationCluster(
9142 kImmutableArrayCid, is_canonical, !is_non_root_unit_);
9143 case kWeakArrayCid:
9144 return new (Z) WeakArrayDeserializationCluster();
9145 case kStringCid:
9146 return new (Z) StringDeserializationCluster(
9147 is_canonical,
9148 !is_non_root_unit_ && isolate_group() != Dart::vm_isolate_group());
9149#define CASE_FFI_CID(name) case kFfi##name##Cid:
9150 CLASS_LIST_FFI(CASE_FFI_CID)
9151#undef CASE_FFI_CID
9152 return new (Z) InstanceDeserializationCluster(
9153 cid, is_canonical, is_immutable, !is_non_root_unit_);
9154 case kDeltaEncodedTypedDataCid:
9155 return new (Z) DeltaEncodedTypedDataDeserializationCluster();
9156 default:
9157 break;
9158 }
9159 FATAL("No cluster defined for cid %" Pd, cid);
9160 return nullptr;
9161}
9162
9163 void Deserializer::ReadDispatchTable(
9164 ReadStream* stream,
9165 bool deferred,
9166 const InstructionsTable& root_instruction_table,
9167 intptr_t deferred_code_start_index,
9168 intptr_t deferred_code_end_index) {
9169#if defined(DART_PRECOMPILED_RUNTIME)
9170 const uint8_t* table_snapshot_start = stream->AddressOfCurrentPosition();
9171 const intptr_t length = stream->ReadUnsigned();
9172 if (length == 0) return;
9173
9174 const intptr_t first_code_id = stream->ReadUnsigned();
9175 deferred_code_start_index -= first_code_id;
9176 deferred_code_end_index -= first_code_id;
9177
9178 auto const IG = isolate_group();
9179 auto code = IG->object_store()->dispatch_table_null_error_stub();
9180 ASSERT(code != Code::null());
9181 uword null_entry = Code::EntryPointOf(code);
9182
9183 DispatchTable* table;
9184 if (deferred) {
9185 table = IG->dispatch_table();
9186 ASSERT(table != nullptr && table->length() == length);
9187 } else {
9188 ASSERT(IG->dispatch_table() == nullptr);
9189 table = new DispatchTable(length);
9190 }
9191 auto const array = table->array();
9192 uword value = 0;
9193 uword recent[kDispatchTableRecentCount] = {0};
9194 intptr_t recent_index = 0;
9195 intptr_t repeat_count = 0;
9196 for (intptr_t i = 0; i < length; i++) {
9197 if (repeat_count > 0) {
9198 array[i] = value;
9199 repeat_count--;
9200 continue;
9201 }
9202 auto const encoded = stream->Read<intptr_t>();
9203 if (encoded == 0) {
9204 value = null_entry;
9205 } else if (encoded < 0) {
9206 intptr_t r = ~encoded;
9207 ASSERT(r < kDispatchTableRecentCount);
9208 value = recent[r];
9209 } else if (encoded <= kDispatchTableMaxRepeat) {
9210 repeat_count = encoded - 1;
9211 } else {
9212 const intptr_t code_index = encoded - kDispatchTableIndexBase;
9213 if (deferred) {
9214 const intptr_t code_id =
9215 CodeIndexToClusterIndex(root_instruction_table, code_index);
9216 if ((deferred_code_start_index <= code_id) &&
9217 (code_id < deferred_code_end_index)) {
9218 auto code = static_cast<CodePtr>(Ref(first_code_id + code_id));
9219 value = Code::EntryPointOf(code);
9220 } else {
9221 // Reuse old value from the dispatch table.
9222 value = array[i];
9223 }
9224 } else {
9225 value = GetEntryPointByCodeIndex(code_index);
9226 }
9227 recent[recent_index] = value;
9228 recent_index = (recent_index + 1) & kDispatchTableRecentMask;
9229 }
9230 array[i] = value;
9231 }
9232 ASSERT(repeat_count == 0);
9233
9234 if (!deferred) {
9235 IG->set_dispatch_table(table);
9236 intptr_t table_snapshot_size =
9237 stream->AddressOfCurrentPosition() - table_snapshot_start;
9238 IG->set_dispatch_table_snapshot(table_snapshot_start);
9239 IG->set_dispatch_table_snapshot_size(table_snapshot_size);
9240 }
9241#endif
9242}
9243
9244 ApiErrorPtr Deserializer::VerifyImageAlignment() {
9245 if (image_reader_ != nullptr) {
9246 return image_reader_->VerifyAlignment();
9247 }
9248 return ApiError::null();
9249}
9250
9251 char* SnapshotHeaderReader::VerifyVersionAndFeatures(
9252 IsolateGroup* isolate_group,
9253 intptr_t* offset) {
9254 char* error = VerifyVersion();
9255 if (error == nullptr) {
9256 error = VerifyFeatures(isolate_group);
9257 }
9258 if (error == nullptr) {
9259 *offset = stream_.Position();
9260 }
9261 return error;
9262}
9263
9264char* SnapshotHeaderReader::VerifyVersion() {
9265 // If the version string doesn't match, return an error.
9266 // Note: New things are allocated only if we're going to return an error.
9267
9268 const char* expected_version = Version::SnapshotString();
9269 ASSERT(expected_version != nullptr);
9270 const intptr_t version_len = strlen(expected_version);
9271 if (stream_.PendingBytes() < version_len) {
9272 const intptr_t kMessageBufferSize = 128;
9273 char message_buffer[kMessageBufferSize];
9274 Utils::SNPrint(message_buffer, kMessageBufferSize,
9275 "No full snapshot version found, expected '%s'",
9276 expected_version);
9277 return BuildError(message_buffer);
9278 }
9279
9280 const char* version =
9281 reinterpret_cast<const char*>(stream_.AddressOfCurrentPosition());
9282 ASSERT(version != nullptr);
9283 if (strncmp(version, expected_version, version_len) != 0) {
9284 const intptr_t kMessageBufferSize = 256;
9285 char message_buffer[kMessageBufferSize];
9286 char* actual_version = Utils::StrNDup(version, version_len);
9287 Utils::SNPrint(message_buffer, kMessageBufferSize,
9288 "Wrong %s snapshot version, expected '%s' found '%s'",
9289 (Snapshot::IsFull(kind_)) ? "full" : "script",
9290 expected_version, actual_version);
9291 free(actual_version);
9292 return BuildError(message_buffer);
9293 }
9294 stream_.Advance(version_len);
9295
9296 return nullptr;
9297}
9298
9299char* SnapshotHeaderReader::VerifyFeatures(IsolateGroup* isolate_group) {
9300 const char* expected_features =
9301 Dart::FeaturesString(isolate_group, (isolate_group == nullptr), kind_);
9302 ASSERT(expected_features != nullptr);
9303 const intptr_t expected_len = strlen(expected_features);
9304
9305 const char* features = nullptr;
9306 intptr_t features_length = 0;
9307
9308 auto error = ReadFeatures(&features, &features_length);
9309 if (error != nullptr) {
9310 return error;
9311 }
9312
9313 if (features_length != expected_len ||
9314 (strncmp(features, expected_features, expected_len) != 0)) {
9315 const intptr_t kMessageBufferSize = 1024;
9316 char message_buffer[kMessageBufferSize];
9317 char* actual_features = Utils::StrNDup(
9318 features, features_length < 1024 ? features_length : 1024);
9319 Utils::SNPrint(message_buffer, kMessageBufferSize,
9320 "Snapshot not compatible with the current VM configuration: "
9321 "the snapshot requires '%s' but the VM has '%s'",
9322 actual_features, expected_features);
9323 free(const_cast<char*>(expected_features));
9324 free(actual_features);
9325 return BuildError(message_buffer);
9326 }
9327 free(const_cast<char*>(expected_features));
9328 return nullptr;
9329}
9330
9331char* SnapshotHeaderReader::ReadFeatures(const char** features,
9332 intptr_t* features_length) {
9333 const char* cursor =
9334 reinterpret_cast<const char*>(stream_.AddressOfCurrentPosition());
9335 const intptr_t length = Utils::StrNLen(cursor, stream_.PendingBytes());
9336 if (length == stream_.PendingBytes()) {
9337 return BuildError(
9338 "The features string in the snapshot was not '\\0'-terminated.");
9339 }
9340 *features = cursor;
9341 *features_length = length;
9342 stream_.Advance(length + 1);
9343 return nullptr;
9344}
9345
9346char* SnapshotHeaderReader::BuildError(const char* message) {
9347 return Utils::StrDup(message);
9348}
9349
9350ApiErrorPtr FullSnapshotReader::ConvertToApiError(char* message) {
9351 // This can also fail while bringing up the VM isolate, so make sure to
9352 // allocate the error message in old space.
9353 const String& msg = String::Handle(String::New(message, Heap::kOld));
9354
9355 // The [message] was constructed with [BuildError] and needs to be freed.
9356 free(message);
9357
9358 return ApiError::New(msg, Heap::kOld);
9359}
9360
9361void Deserializer::ReadInstructions(CodePtr code, bool deferred) {
9362#if defined(DART_PRECOMPILED_RUNTIME)
9363 if (deferred) {
9364 uword entry_point = StubCode::NotLoaded().EntryPoint();
9365 code->untag()->entry_point_ = entry_point;
9366 code->untag()->unchecked_entry_point_ = entry_point;
9367 code->untag()->monomorphic_entry_point_ = entry_point;
9368 code->untag()->monomorphic_unchecked_entry_point_ = entry_point;
9369 code->untag()->instructions_length_ = 0;
9370 return;
9371 }
9372
9373 const uword payload_start = instructions_table_.EntryPointAt(
9374 instructions_table_.rodata()->first_entry_with_code +
9375 instructions_index_);
9376 const uint32_t payload_info = ReadUnsigned();
9377 const uint32_t unchecked_offset = payload_info >> 1;
9378 const bool has_monomorphic_entrypoint = (payload_info & 0x1) == 0x1;
9379
9380 const uword entry_offset =
9381 has_monomorphic_entrypoint ? Instructions::kPolymorphicEntryOffsetAOT : 0;
9382 const uword monomorphic_entry_offset =
9383 has_monomorphic_entrypoint ? Instructions::kMonomorphicEntryOffsetAOT : 0;
9384
9385 const uword entry_point = payload_start + entry_offset;
9386 const uword monomorphic_entry_point =
9387 payload_start + monomorphic_entry_offset;
9388
9389 instructions_table_.SetCodeAt(instructions_index_++, code);
9390
9391 // There are no serialized RawInstructions objects in this mode.
9392 code->untag()->instructions_ = Instructions::null();
9393 code->untag()->entry_point_ = entry_point;
9394 code->untag()->unchecked_entry_point_ = entry_point + unchecked_offset;
9395 code->untag()->monomorphic_entry_point_ = monomorphic_entry_point;
9396 code->untag()->monomorphic_unchecked_entry_point_ =
9397 monomorphic_entry_point + unchecked_offset;
9398#else
9399 ASSERT(!deferred);
9400 InstructionsPtr instr = image_reader_->GetInstructionsAt(Read<uint32_t>());
9401 uint32_t unchecked_offset = ReadUnsigned();
9402 code->untag()->instructions_ = instr;
9403 code->untag()->unchecked_offset_ = unchecked_offset;
9405 const uint32_t active_offset = Read<uint32_t>();
9406 instr = image_reader_->GetInstructionsAt(active_offset);
9407 unchecked_offset = ReadUnsigned();
9408 code->untag()->active_instructions_ = instr;
9409 Code::InitializeCachedEntryPointsFrom(code, instr, unchecked_offset);
9410#endif // defined(DART_PRECOMPILED_RUNTIME)
9411}
9412
9413 void Deserializer::EndInstructions() {
9414#if defined(DART_PRECOMPILED_RUNTIME)
9415 if (instructions_table_.IsNull()) {
9416 ASSERT(instructions_index_ == 0);
9417 return;
9418 }
9419
9420 const auto& code_objects =
9421 Array::Handle(instructions_table_.ptr()->untag()->code_objects());
9422 ASSERT(code_objects.Length() == instructions_index_);
9423
9424 uword previous_end = image_reader_->GetBareInstructionsEnd();
9425 for (intptr_t i = instructions_index_ - 1; i >= 0; --i) {
9426 CodePtr code = Code::RawCast(code_objects.At(i));
9427 const uword start = Code::PayloadStartOf(code);
9428 ASSERT(start <= previous_end);
9429 code->untag()->instructions_length_ = previous_end - start;
9430 previous_end = start;
9431 }
9432
9433 ObjectStore* object_store = IsolateGroup::Current()->object_store();
9434 GrowableObjectArray& tables =
9435 GrowableObjectArray::Handle(zone_, object_store->instructions_tables());
9436 if (tables.IsNull()) {
9437 tables = GrowableObjectArray::New(Heap::kOld);
9438 object_store->set_instructions_tables(tables);
9439 }
9440 if ((tables.Length() == 0) ||
9441 (tables.At(tables.Length() - 1) != instructions_table_.ptr())) {
9442 ASSERT((!is_non_root_unit_ && tables.Length() == 0) ||
9443 (is_non_root_unit_ && tables.Length() > 0));
9444 tables.Add(instructions_table_, Heap::kOld);
9445 }
9446#endif
9447}
9448
9449 ObjectPtr Deserializer::GetObjectAt(uint32_t offset) const {
9450 return image_reader_->GetObjectAt(offset);
9451}
9452
9453 class HeapLocker : public StackResource {
9454 public:
9455 HeapLocker(Thread* thread, PageSpace* page_space)
9456 : StackResource(thread),
9457 page_space_(page_space),
9458 freelist_(page_space->DataFreeList()) {
9459 page_space_->AcquireLock(freelist_);
9460 }
9461 ~HeapLocker() { page_space_->ReleaseLock(freelist_); }
9462
9463 private:
9464 PageSpace* page_space_;
9465 FreeList* freelist_;
9466};
9467
9468 void Deserializer::Deserialize(DeserializationRoots* roots) {
9469 const void* clustered_start = AddressOfCurrentPosition();
9470
9471 Array& refs = Array::Handle(zone_);
9472 num_base_objects_ = ReadUnsigned();
9473 num_objects_ = ReadUnsigned();
9474 num_clusters_ = ReadUnsigned();
9475 const intptr_t instructions_table_len = ReadUnsigned();
9476 const uint32_t instruction_table_data_offset = ReadUnsigned();
9477 USE(instruction_table_data_offset);
9478
9479 clusters_ = new DeserializationCluster*[num_clusters_];
9480 refs = Array::New(num_objects_ + kFirstReference, Heap::kOld);
9481
9482#if defined(DART_PRECOMPILED_RUNTIME)
9483 if (instructions_table_len > 0) {
9484 ASSERT(FLAG_precompiled_mode);
9485 const uword start_pc = image_reader_->GetBareInstructionsAt(0);
9486 const uword end_pc = image_reader_->GetBareInstructionsEnd();
9487 uword instruction_table_data = 0;
9488 if (instruction_table_data_offset != 0) {
9489 // NoSafepointScope to satisfy assertion in DataStart. InstructionsTable
9490 // data resides in RO memory and is immovable and immortal, making it
9491 // safe to use the DataStart result outside of a NoSafepointScope.
9492 NoSafepointScope no_safepoint;
9493 instruction_table_data = reinterpret_cast<uword>(
9494 OneByteString::DataStart(String::Handle(static_cast<StringPtr>(
9495 image_reader_->GetObjectAt(instruction_table_data_offset)))));
9496 }
9497 instructions_table_ = InstructionsTable::New(
9498 instructions_table_len, start_pc, end_pc, instruction_table_data);
9499 }
9500#else
9501 ASSERT(instructions_table_len == 0);
9502#endif // defined(DART_PRECOMPILED_RUNTIME)
9503
9504 {
9505 // The deserializer initializes objects without using the write barrier,
9506 // partly for speed since we know all the deserialized objects will be
9507 // long-lived and partly because the target objects may not yet be
9508 // initialized at the time of the write. To make this safe, we must ensure
9509 // there are no other threads mutating this heap, and that incremental
9510 // marking is not in progress. This is normally the case anyway for the
9511 // main snapshot being deserialized at isolate load, but needs checking when
9512 // secondary snapshots are loaded as part of deferred loading.
9513 HeapIterationScope iter(thread());
9514 // For bump-pointer allocation in old-space.
9515 HeapLocker hl(thread(), heap_->old_space());
9516 // Must not perform any other type of allocation, which might trigger GC
9517 // while there are still uninitialized objects.
9518 NoSafepointScope no_safepoint;
9519 refs_ = refs.ptr();
9520
9521 roots->AddBaseObjects(this);
9522
9523 if (num_base_objects_ != (next_ref_index_ - kFirstReference)) {
9524 FATAL("Snapshot expects %" Pd
9525 " base objects, but deserializer provided %" Pd,
9526 num_base_objects_, next_ref_index_ - kFirstReference);
9527 }
9528
9529 {
9530 TIMELINE_DURATION(thread(), Isolate, "ReadAlloc");
9531 for (intptr_t i = 0; i < num_clusters_; i++) {
9532 clusters_[i] = ReadCluster();
9533 clusters_[i]->ReadAlloc(this);
9534#if defined(DEBUG)
9535 intptr_t serializers_next_ref_index_ = Read<int32_t>();
9536 ASSERT_EQUAL(serializers_next_ref_index_, next_ref_index_);
9537#endif
9538 }
9539 }
9540
9541 // We should have completely filled the ref array.
9542 ASSERT_EQUAL(next_ref_index_ - kFirstReference, num_objects_);
9543
9544 {
9545 TIMELINE_DURATION(thread(), Isolate, "ReadFill");
9546 for (intptr_t i = 0; i < num_clusters_; i++) {
9547 clusters_[i]->ReadFill(this);
9548#if defined(DEBUG)
9549 int32_t section_marker = Read<int32_t>();
9550 ASSERT(section_marker == kSectionMarker);
9551#endif
9552 }
9553 }
9554
9555 roots->ReadRoots(this);
9556
9557#if defined(DEBUG)
9558 int32_t section_marker = Read<int32_t>();
9559 ASSERT(section_marker == kSectionMarker);
9560#endif
9561
9562 refs_ = nullptr;
9563 }
9564
9565 roots->PostLoad(this, refs);
9566
9567 auto isolate_group = thread()->isolate_group();
9568#if defined(DEBUG)
9569 isolate_group->ValidateClassTable();
9570 if (isolate_group != Dart::vm_isolate()->group()) {
9571 isolate_group->heap()->Verify("Deserializer::Deserialize");
9572 }
9573#endif
9574
9575 {
9576 TIMELINE_DURATION(thread(), Isolate, "PostLoad");
9577 for (intptr_t i = 0; i < num_clusters_; i++) {
9578 clusters_[i]->PostLoad(this, refs);
9579 }
9580 }
9581
9582 if (isolate_group->snapshot_is_dontneed_safe()) {
9583 size_t clustered_length =
9584 reinterpret_cast<uword>(AddressOfCurrentPosition()) -
9585 reinterpret_cast<uword>(clustered_start);
9586 VirtualMemory::DontNeed(const_cast<void*>(clustered_start),
9587 clustered_length);
9588 }
9589}
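// Reading happens in two passes over the clusters: ReadAlloc allocates every
// object and assigns it a ref index, then ReadFill populates object fields
// (which may reference any assigned ref). Roots are read last inside the
// no-safepoint region; PostLoad hooks run afterwards, outside it, and the
// clustered byte range is handed back to the OS via VirtualMemory::DontNeed
// when the snapshot is marked DONTNEED-safe.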
9590
9591#if !defined(DART_PRECOMPILED_RUNTIME)
9592FullSnapshotWriter::FullSnapshotWriter(
9593 Snapshot::Kind kind,
9594 NonStreamingWriteStream* vm_snapshot_data,
9595 NonStreamingWriteStream* isolate_snapshot_data,
9596 ImageWriter* vm_image_writer,
9597 ImageWriter* isolate_image_writer)
9598 : thread_(Thread::Current()),
9599 kind_(kind),
9600 vm_snapshot_data_(vm_snapshot_data),
9601 isolate_snapshot_data_(isolate_snapshot_data),
9602 vm_isolate_snapshot_size_(0),
9603 isolate_snapshot_size_(0),
9604 vm_image_writer_(vm_image_writer),
9605 isolate_image_writer_(isolate_image_writer) {
9606 ASSERT(isolate_group() != nullptr);
9607 ASSERT(heap() != nullptr);
9608 ObjectStore* object_store = isolate_group()->object_store();
9609 ASSERT(object_store != nullptr);
9610
9611#if defined(DEBUG)
9612 isolate_group()->ValidateClassTable();
9613#endif // DEBUG
9614
9615#if defined(DART_PRECOMPILER)
9616 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
9617 profile_writer_ = new (zone()) V8SnapshotProfileWriter(zone());
9618 }
9619#endif
9620}
9621
9623
9624ZoneGrowableArray<Object*>* FullSnapshotWriter::WriteVMSnapshot() {
9625 TIMELINE_DURATION(thread(), Isolate, "WriteVMSnapshot");
9626
9627 ASSERT(vm_snapshot_data_ != nullptr);
9628 Serializer serializer(thread(), kind_, vm_snapshot_data_, vm_image_writer_,
9629 /*vm=*/true, profile_writer_);
9630
9631 serializer.ReserveHeader();
9632 serializer.WriteVersionAndFeatures(true);
9633 VMSerializationRoots roots(
9634 WeakArray::Handle(zone(),
9635 Dart::vm_isolate_group()->object_store()->symbol_table()),
9636 /*should_write_symbols=*/!Snapshot::IncludesStringsInROData(kind_));
9637 ZoneGrowableArray<Object*>* objects = serializer.Serialize(&roots);
9638 serializer.FillHeader(serializer.kind());
9639 clustered_vm_size_ = serializer.bytes_written();
9640 heap_vm_size_ = serializer.bytes_heap_allocated();
9641
9642 if (Snapshot::IncludesCode(kind_)) {
9643 vm_image_writer_->SetProfileWriter(profile_writer_);
9644 vm_image_writer_->Write(serializer.stream(), true);
9645 mapped_data_size_ += vm_image_writer_->data_size();
9646 mapped_text_size_ += vm_image_writer_->text_size();
9647 vm_image_writer_->ResetOffsets();
9648 vm_image_writer_->ClearProfileWriter();
9649 }
9650
9651 // The clustered part + the direct mapped data part.
9652 vm_isolate_snapshot_size_ = serializer.bytes_written();
9653 return objects;
9654}
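// The returned object list (everything serialized into the VM isolate
// snapshot) is passed to WriteProgramSnapshot below as its base objects, so
// the program snapshot can refer to VM-snapshot objects by reference index
// instead of serializing them again.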
9655
9656void FullSnapshotWriter::WriteProgramSnapshot(
9657 ZoneGrowableArray<Object*>* objects,
9658 GrowableArray<LoadingUnitSerializationData*>* units) {
9659 TIMELINE_DURATION(thread(), Isolate, "WriteProgramSnapshot");
9660
9661 ASSERT(isolate_snapshot_data_ != nullptr);
9662 Serializer serializer(thread(), kind_, isolate_snapshot_data_,
9663 isolate_image_writer_, /*vm=*/false, profile_writer_);
9664 serializer.set_loading_units(units);
9665 serializer.set_current_loading_unit_id(LoadingUnit::kRootId);
9666 ObjectStore* object_store = isolate_group()->object_store();
9667 ASSERT(object_store != nullptr);
9668
9669 // These type arguments must always be retained.
9670 ASSERT(object_store->type_argument_int()->untag()->IsCanonical());
9671 ASSERT(object_store->type_argument_double()->untag()->IsCanonical());
9672 ASSERT(object_store->type_argument_string()->untag()->IsCanonical());
9673 ASSERT(object_store->type_argument_string_dynamic()->untag()->IsCanonical());
9674 ASSERT(object_store->type_argument_string_string()->untag()->IsCanonical());
9675
9676 serializer.ReserveHeader();
9677 serializer.WriteVersionAndFeatures(false);
9678 ProgramSerializationRoots roots(objects, object_store, kind_);
9679 objects = serializer.Serialize(&roots);
9680 if (units != nullptr) {
9681 (*units)[LoadingUnit::kRootId]->set_objects(objects);
9682 }
9683 serializer.FillHeader(serializer.kind());
9684 clustered_isolate_size_ = serializer.bytes_written();
9685 heap_isolate_size_ = serializer.bytes_heap_allocated();
9686
9687 if (Snapshot::IncludesCode(kind_)) {
9688 isolate_image_writer_->SetProfileWriter(profile_writer_);
9689 isolate_image_writer_->Write(serializer.stream(), false);
9690#if defined(DART_PRECOMPILER)
9691 isolate_image_writer_->DumpStatistics();
9692#endif
9693
9694 mapped_data_size_ += isolate_image_writer_->data_size();
9695 mapped_text_size_ += isolate_image_writer_->text_size();
9696 isolate_image_writer_->ResetOffsets();
9697 isolate_image_writer_->ClearProfileWriter();
9698 }
9699
9700 // The clustered part + the direct mapped data part.
9701 isolate_snapshot_size_ = serializer.bytes_written();
9702}
9703
9704void FullSnapshotWriter::WriteUnitSnapshot(
9705 GrowableArray<LoadingUnitSerializationData*>* units,
9706 LoadingUnitSerializationData* unit,
9707 uint32_t program_hash) {
9708 TIMELINE_DURATION(thread(), Isolate, "WriteUnitSnapshot");
9709
9710 Serializer serializer(thread(), kind_, isolate_snapshot_data_,
9711 isolate_image_writer_, /*vm=*/false, profile_writer_);
9712 serializer.set_loading_units(units);
9713 serializer.set_current_loading_unit_id(unit->id());
9714
9715 serializer.ReserveHeader();
9716 serializer.WriteVersionAndFeatures(false);
9717 serializer.Write(program_hash);
9718
9719 UnitSerializationRoots roots(unit);
9720 unit->set_objects(serializer.Serialize(&roots));
9721
9722 serializer.FillHeader(serializer.kind());
9723 clustered_isolate_size_ = serializer.bytes_written();
9724
9725 if (Snapshot::IncludesCode(kind_)) {
9726 isolate_image_writer_->SetProfileWriter(profile_writer_);
9727 isolate_image_writer_->Write(serializer.stream(), false);
9728#if defined(DART_PRECOMPILER)
9729 isolate_image_writer_->DumpStatistics();
9730#endif
9731
9732 mapped_data_size_ += isolate_image_writer_->data_size();
9733 mapped_text_size_ += isolate_image_writer_->text_size();
9734 isolate_image_writer_->ResetOffsets();
9735 isolate_image_writer_->ClearProfileWriter();
9736 }
9737
9738 // The clustered part + the direct mapped data part.
9739 isolate_snapshot_size_ = serializer.bytes_written();
9740}
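// A deferred-unit snapshot carries the program hash written above; on the
// read side, FullSnapshotReader::ReadUnitSnapshot compares it with the hash
// recorded in the isolate's loading_units array and rejects units that were
// built from a different program than the main snapshot.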
9741
9742void FullSnapshotWriter::WriteFullSnapshot(
9743 GrowableArray<LoadingUnitSerializationData*>* data) {
9744 ZoneGrowableArray<Object*>* objects;
9745 if (vm_snapshot_data_ != nullptr) {
9746 objects = WriteVMSnapshot();
9747 } else {
9748 objects = nullptr;
9749 }
9750
9751 if (isolate_snapshot_data_ != nullptr) {
9752 WriteProgramSnapshot(objects, data);
9753 }
9754
9755 if (FLAG_print_snapshot_sizes) {
9756 OS::Print("VMIsolate(CodeSize): %" Pd "\n", clustered_vm_size_);
9757 OS::Print("Isolate(CodeSize): %" Pd "\n", clustered_isolate_size_);
9758 OS::Print("ReadOnlyData(CodeSize): %" Pd "\n", mapped_data_size_);
9759 OS::Print("Instructions(CodeSize): %" Pd "\n", mapped_text_size_);
9760 OS::Print("Total(CodeSize): %" Pd "\n",
9761 clustered_vm_size_ + clustered_isolate_size_ + mapped_data_size_ +
9762 mapped_text_size_);
9763 OS::Print("VMIsolate(HeapSize): %" Pd "\n", heap_vm_size_);
9764 OS::Print("Isolate(HeapSize): %" Pd "\n", heap_isolate_size_);
9765 OS::Print("Total(HeapSize): %" Pd "\n", heap_vm_size_ + heap_isolate_size_);
9766 }
9767
9768#if defined(DART_PRECOMPILER)
9769 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
9770 profile_writer_->Write(FLAG_write_v8_snapshot_profile_to);
9771 }
9772#endif
9773}
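// Rough usage sketch (illustrative only; the stream and image-writer setup is
// not shown in this file, and vm_stream, isolate_stream, vm_image_writer and
// isolate_image_writer are placeholder names):
//
//   FullSnapshotWriter writer(Snapshot::kFullAOT, &vm_stream, &isolate_stream,
//                             vm_image_writer, isolate_image_writer);
//   writer.WriteFullSnapshot();  // VM snapshot first, then the program
//                                // snapshot on top of its base objects.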
9774#endif // defined(DART_PRECOMPILED_RUNTIME)
9775
9776FullSnapshotReader::FullSnapshotReader(const Snapshot* snapshot,
9777 const uint8_t* instructions_buffer,
9778 Thread* thread)
9779 : kind_(snapshot->kind()),
9780 thread_(thread),
9781 buffer_(snapshot->Addr()),
9782 size_(snapshot->length()),
9783 data_image_(snapshot->DataImage()),
9784 instructions_image_(instructions_buffer) {}
9785
9786char* SnapshotHeaderReader::InitializeGlobalVMFlagsFromSnapshot(
9787 const Snapshot* snapshot) {
9788 SnapshotHeaderReader header_reader(snapshot);
9789
9790 char* error = header_reader.VerifyVersion();
9791 if (error != nullptr) {
9792 return error;
9793 }
9794
9795 const char* features = nullptr;
9796 intptr_t features_length = 0;
9797 error = header_reader.ReadFeatures(&features, &features_length);
9798 if (error != nullptr) {
9799 return error;
9800 }
9801
9802 ASSERT(features[features_length] == '\0');
9803 const char* cursor = features;
9804 while (*cursor != '\0') {
9805 while (*cursor == ' ') {
9806 cursor++;
9807 }
9808
9809 const char* end = strstr(cursor, " ");
9810 if (end == nullptr) {
9811 end = features + features_length;
9812 }
9813
9814#define SET_FLAG(name) \
9815 if (strncmp(cursor, #name, end - cursor) == 0) { \
9816 FLAG_##name = true; \
9817 cursor = end; \
9818 continue; \
9819 } \
9820 if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \
9821 FLAG_##name = false; \
9822 cursor = end; \
9823 continue; \
9824 }
9825
9826#define CHECK_FLAG(name, mode) \
9827 if (strncmp(cursor, #name, end - cursor) == 0) { \
9828 if (!FLAG_##name) { \
9829 return header_reader.BuildError("Flag " #name \
9830 " is true in snapshot, " \
9831 "but " #name \
9832 " is always false in " mode); \
9833 } \
9834 cursor = end; \
9835 continue; \
9836 } \
9837 if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \
9838 if (FLAG_##name) { \
9839 return header_reader.BuildError("Flag " #name \
9840 " is false in snapshot, " \
9841 "but " #name \
9842 " is always true in " mode); \
9843 } \
9844 cursor = end; \
9845 continue; \
9846 }
9847
9848#define SET_P(name, T, DV, C) SET_FLAG(name)
9849
9850#if defined(PRODUCT)
9851#define SET_OR_CHECK_R(name, PV, T, DV, C) CHECK_FLAG(name, "product mode")
9852#else
9853#define SET_OR_CHECK_R(name, PV, T, DV, C) SET_FLAG(name)
9854#endif
9855
9856#if defined(PRODUCT)
9857#define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) CHECK_FLAG(name, "product mode")
9858#elif defined(DART_PRECOMPILED_RUNTIME)
9859#define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) \
9860 CHECK_FLAG(name, "the precompiled runtime")
9861#else
9862#define SET_OR_CHECK_C(name, PV, T, DV, C) SET_FLAG(name)
9863#endif
9864
9865#if !defined(DEBUG)
9866#define SET_OR_CHECK_D(name, T, DV, C) CHECK_FLAG(name, "non-debug mode")
9867#else
9868#define SET_OR_CHECK_D(name, T, DV, C) SET_FLAG(name)
9869#endif
9870
9871 VM_GLOBAL_FLAG_LIST(SET_P, SET_OR_CHECK_R, SET_OR_CHECK_C, SET_OR_CHECK_D)
9872
9873#undef SET_OR_CHECK_D
9874#undef SET_OR_CHECK_C
9875#undef SET_OR_CHECK_R
9876#undef SET_P
9877#undef CHECK_FLAG
9878#undef SET_FLAG
9879
9880 cursor = end;
9881 }
9882
9883 return nullptr;
9884}
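// The features string parsed above is a space-separated token list with one
// entry per VM global flag: a bare flag name means the flag was true when the
// snapshot was written, and a "no-" prefix means it was false. Flags that are
// compile-time constants in this build (product mode, the precompiled runtime,
// non-debug mode) are only checked and yield an error on mismatch; the
// remaining flags are overwritten to match the snapshot.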
9885
9886ApiErrorPtr FullSnapshotReader::ReadVMSnapshot() {
9887 SnapshotHeaderReader header_reader(kind_, buffer_, size_);
9888
9889 intptr_t offset = 0;
9890 char* error = header_reader.VerifyVersionAndFeatures(
9891 /*isolate_group=*/nullptr, &offset);
9892 if (error != nullptr) {
9893 return ConvertToApiError(error);
9894 }
9895
9896 // Even though there are no concurrent threads we have to guard against,
9897 // some of the logic we run during deserialization triggers common code
9898 // that asserts the program lock is held.
9899 SafepointWriteRwLocker ml(thread_, isolate_group()->program_lock());
9900
9901 Deserializer deserializer(thread_, kind_, buffer_, size_, data_image_,
9902 instructions_image_, /*is_non_root_unit=*/false,
9903 offset);
9904 ApiErrorPtr api_error = deserializer.VerifyImageAlignment();
9905 if (api_error != ApiError::null()) {
9906 return api_error;
9907 }
9908
9909 if (Snapshot::IncludesCode(kind_)) {
9910 ASSERT(data_image_ != nullptr);
9911 thread_->isolate_group()->SetupImagePage(data_image_,
9912 /* is_executable */ false);
9913 ASSERT(instructions_image_ != nullptr);
9914 thread_->isolate_group()->SetupImagePage(instructions_image_,
9915 /* is_executable */ true);
9916 }
9917
9918 VMDeserializationRoots roots;
9919 deserializer.Deserialize(&roots);
9920
9921#if defined(DART_PRECOMPILED_RUNTIME)
9922 // Initialize entries in the VM portion of the BSS segment.
9924 Image image(instructions_image_);
9925 if (auto const bss = image.bss()) {
9926 BSS::Initialize(thread_, bss, /*vm=*/true);
9927 }
9928#endif // defined(DART_PRECOMPILED_RUNTIME)
9929
9930 return ApiError::null();
9931}
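// ReadVMSnapshot populates the shared VM isolate heap; the objects it creates
// become the base objects that ReadProgramSnapshot (below) expects to already
// exist when it deserializes an application isolate group on top of them.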
9932
9933ApiErrorPtr FullSnapshotReader::ReadProgramSnapshot() {
9934 SnapshotHeaderReader header_reader(kind_, buffer_, size_);
9935 intptr_t offset = 0;
9936 char* error =
9937 header_reader.VerifyVersionAndFeatures(thread_->isolate_group(), &offset);
9938 if (error != nullptr) {
9939 return ConvertToApiError(error);
9940 }
9941
9942 // Even though there are no concurrent threads we have to guard against,
9943 // some of the logic we run during deserialization triggers common code
9944 // that asserts the program lock is held.
9945 SafepointWriteRwLocker ml(thread_, isolate_group()->program_lock());
9946
9947 Deserializer deserializer(thread_, kind_, buffer_, size_, data_image_,
9948 instructions_image_, /*is_non_root_unit=*/false,
9949 offset);
9950 ApiErrorPtr api_error = deserializer.VerifyImageAlignment();
9951 if (api_error != ApiError::null()) {
9952 return api_error;
9953 }
9954
9955 if (Snapshot::IncludesCode(kind_)) {
9956 ASSERT(data_image_ != nullptr);
9957 thread_->isolate_group()->SetupImagePage(data_image_,
9958 /* is_executable */ false);
9959 ASSERT(instructions_image_ != nullptr);
9960 thread_->isolate_group()->SetupImagePage(instructions_image_,
9961 /* is_executable */ true);
9962 }
9963
9964 ProgramDeserializationRoots roots(thread_->isolate_group()->object_store());
9965 deserializer.Deserialize(&roots);
9966
9967 if (Snapshot::IncludesCode(kind_)) {
9968 const auto& units = Array::Handle(
9969 thread_->isolate_group()->object_store()->loading_units());
9970 if (!units.IsNull()) {
9971 const auto& unit = LoadingUnit::Handle(
9972 LoadingUnit::RawCast(units.At(LoadingUnit::kRootId)));
9973 // Unlike other units, we don't explicitly load the root loading unit,
9974 // so we mark it as loaded here, setting the instructions image as well.
9975 unit.set_load_outstanding();
9976 unit.set_instructions_image(instructions_image_);
9977 unit.set_loaded(true);
9978 }
9979 }
9980
9981 InitializeBSS();
9982
9983 return ApiError::null();
9984}
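// Rough usage sketch (illustrative only; `isolate_snapshot` and `instructions`
// are placeholders for buffers provided by the embedder):
//
//   FullSnapshotReader reader(isolate_snapshot, instructions, Thread::Current());
//   ApiErrorPtr error = reader.ReadProgramSnapshot();
//   if (error != ApiError::null()) {
//     // Surface the error to the embedder instead of continuing.
//   }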
9985
9986ApiErrorPtr FullSnapshotReader::ReadUnitSnapshot(const LoadingUnit& unit) {
9987 SnapshotHeaderReader header_reader(kind_, buffer_, size_);
9988 intptr_t offset = 0;
9989 char* error =
9990 header_reader.VerifyVersionAndFeatures(thread_->isolate_group(), &offset);
9991 if (error != nullptr) {
9992 return ConvertToApiError(error);
9993 }
9994
9995 Deserializer deserializer(
9996 thread_, kind_, buffer_, size_, data_image_, instructions_image_,
9997 /*is_non_root_unit=*/unit.id() != LoadingUnit::kRootId, offset);
9998 ApiErrorPtr api_error = deserializer.VerifyImageAlignment();
9999 if (api_error != ApiError::null()) {
10000 return api_error;
10001 }
10002 {
10003 Array& units =
10004 Array::Handle(isolate_group()->object_store()->loading_units());
10005 uint32_t main_program_hash = Smi::Value(Smi::RawCast(units.At(0)));
10006 uint32_t unit_program_hash = deserializer.Read<uint32_t>();
10007 if (main_program_hash != unit_program_hash) {
10008 return ApiError::New(String::Handle(
10009 String::New("Deferred loading unit is from a different "
10010 "program than the main loading unit")));
10011 }
10012 }
10013
10014 if (Snapshot::IncludesCode(kind_)) {
10015 ASSERT(data_image_ != nullptr);
10016 thread_->isolate_group()->SetupImagePage(data_image_,
10017 /* is_executable */ false);
10018 ASSERT(instructions_image_ != nullptr);
10019 thread_->isolate_group()->SetupImagePage(instructions_image_,
10020 /* is_executable */ true);
10021 unit.set_instructions_image(instructions_image_);
10022 }
10023
10024 UnitDeserializationRoots roots(unit);
10025 deserializer.Deserialize(&roots);
10026
10027 InitializeBSS();
10028
10029 return ApiError::null();
10030}
10031
10032void FullSnapshotReader::InitializeBSS() {
10033#if defined(DART_PRECOMPILED_RUNTIME)
10034 // Initialize entries in the isolate portion of the BSS segment.
10036 Image image(instructions_image_);
10037 if (auto const bss = image.bss()) {
10038 BSS::Initialize(thread_, bss, /*vm=*/false);
10039 }
10040#endif // defined(DART_PRECOMPILED_RUNTIME)
10041}
10042
10043} // namespace dart
AutoreleasePool pool
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
Definition DM.cpp:213
int count
SkPoint pos
static float prev(float f)
static size_t total_size(SkSBlockAllocator< N > &pool)
static bool skip(SkStream *stream, size_t amount)
static uint32_t hash(const SkShaderBase::GradientInfo &v)
SI F table(const skcms_Curve *curve, F v)
static size_t element_size(Layout layout, SkSLType type)
#define IG
#define SET_OR_CHECK_R(name, PV, T, DV, C)
#define SAVE_AND_RESET_ROOT(name, Type, init)
#define AutoTraceObject(obj)
#define PushFromTo(obj,...)
#define RESET_ROOT_LIST(V)
#define DECLARE_OBJECT_STORE_FIELD(Type, Name)
#define CID_CLUSTER(Type)
#define SET_P(name, T, DV, C)
#define ADD_CANONICAL_NEXT(cid)
#define CASE_FFI_CID(name)
#define WriteFromTo(obj,...)
#define SET_OR_CHECK_C(name, PV, T, DV, C)
#define AutoTraceObjectName(obj, str)
#define RESTORE_ROOT(name, Type, init)
#define SET_OR_CHECK_D(name, T, DV, C)
#define WriteCompressedField(obj, name)
#define ADD_NON_CANONICAL_NEXT(cid)
#define DECLARE_FIELD(name, Type, init)
#define WriteFieldValue(field, value)
#define WriteField(obj, field)
#define UNREACHABLE()
Definition assert.h:248
#define ASSERT_EQUAL(expected, actual)
Definition assert.h:309
#define RELEASE_ASSERT(cond)
Definition assert.h:327
#define COMPILE_ASSERT(expr)
Definition assert.h:339
#define Z
#define CLASS_LIST_FFI_TYPE_MARKER(V)
Definition class_id.h:165
AbstractInstanceDeserializationCluster(const char *name, bool is_canonical, bool is_root_unit)
void UpdateTypeTestingStubEntryPoint() const
Definition object.h:9302
void InitializeTypeTestingStubNonAtomic(const Code &stub) const
Definition object.cc:21848
void ReadFill(Deserializer *d_) override
ArrayDeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
ArraySerializationCluster(bool is_canonical, intptr_t cid)
static intptr_t InstanceSize()
Definition object.h:10910
static ArrayPtr New(intptr_t len, Heap::Space space=Heap::kNew)
Definition object.h:10933
static constexpr bool UseCardMarkingForAllocation(const intptr_t array_length)
Definition object.h:10797
ObjectPtr At(intptr_t index) const
Definition object.h:10854
intptr_t Length() const
Definition object.h:10808
void SetAt(intptr_t index, const Object &value) const
Definition object.h:10858
static void Initialize(Thread *current, uword *bss, bool vm)
Definition bss_relocs.cc:30
intptr_t Length() const
Definition hash_map.h:27
bool HasKey(typename KeyValueTrait::Key key) const
Definition hash_map.h:52
void Add(const T &value)
const T & At(intptr_t index) const
void Sort(int compare(const T *, const T *))
intptr_t length() const
char * buffer() const
Definition text_buffer.h:35
void WriteBytes(const void *addr, intptr_t len)
Definition datastream.h:424
void WriteFixed(T value)
Definition datastream.h:473
void WriteWordWith32BitWrites(uword value)
Definition datastream.h:389
intptr_t Align(intptr_t alignment, intptr_t offset=0)
Definition datastream.h:341
void WriteUnsigned(T value)
Definition datastream.h:400
DART_FORCE_INLINE intptr_t bytes_written() const
Definition datastream.h:338
virtual intptr_t Position() const
Definition datastream.h:339
void WriteRefId(intptr_t value)
Definition datastream.h:409
static constexpr bool decode(uint16_t value)
Definition bitfield.h:173
static constexpr uword update(ClassIdTagType value, uword original)
Definition bitfield.h:190
static const Bool & False()
Definition object.h:10778
static const Bool & True()
Definition object.h:10776
static void SetupNativeResolver()
void BuildCanonicalSetFromLayout(Deserializer *d)
CanonicalSetDeserializationCluster(bool is_canonical, bool is_root_unit, const char *name)
void VerifyCanonicalSet(Deserializer *d, const Array &refs, const typename SetType::ArrayHandle &current_table)
CanonicalSetSerializationCluster(intptr_t cid, bool is_canonical, bool represents_canonical_set, const char *name, intptr_t target_instance_size=0)
GrowableArray< PointerType > objects_
virtual bool IsInCanonicalSet(Serializer *s, PointerType ptr)
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
ClassSerializationCluster(intptr_t num_cids)
ClassPtr At(intptr_t cid) const
intptr_t NumTopLevelCids() const
intptr_t NumCids() const
static bool IsTopLevelCid(intptr_t cid)
static int32_t target_next_field_offset_in_words(const ClassPtr cls)
Definition object.h:1961
static intptr_t InstanceSize()
Definition object.h:1687
static int32_t target_type_arguments_field_offset_in_words(const ClassPtr cls)
Definition object.h:1973
static int32_t target_instance_size_in_words(const ClassPtr cls)
Definition object.h:1949
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:4294
void ReadFill(Deserializer *d_) override
ClosureDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
ClosureSerializationCluster(bool is_canonical)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:12357
void ReadFill(Deserializer *d, intptr_t start_index, intptr_t stop_index, bool deferred)
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAllocOneCode(Deserializer *d)
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d) override
static bool AreActive()
static void Sort(Serializer *s, GrowableArray< CodePtr > *codes)
static void Sort(Serializer *s, GrowableArray< Code * > *codes)
void WriteFill(Serializer *s, Snapshot::Kind kind, CodePtr code, bool deferred)
void WriteAlloc(Serializer *s, CodePtr code)
static const char * MakeDisambiguatedCodeName(Serializer *s, CodePtr c)
static void Insert(Serializer *s, GrowableArray< CodeOrderInfo > *order_list, IntMap< intptr_t > *order_map, CodePtr code)
void Trace(Serializer *s, ObjectPtr object)
GrowableArray< CodePtr > * objects()
void TracePool(Serializer *s, ObjectPoolPtr pool, bool only_call_targets)
static int CompareCodeOrderInfo(CodeOrderInfo const *a, CodeOrderInfo const *b)
GrowableArray< CodePtr > * deferred_objects()
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:6202
static uword EntryPointOf(const CodePtr code)
Definition object.h:6838
static intptr_t InstanceSize()
Definition object.h:7134
@ kPcRelativeCall
Definition object.h:6942
@ kPcRelativeTTSCall
Definition object.h:6943
@ kCallViaCode
Definition object.h:6945
@ kPcRelativeTailCall
Definition object.h:6944
static InstructionsPtr InstructionsOf(const CodePtr code)
Definition object.h:6748
bool IsDisabled() const
Definition object.h:7228
static uword PayloadStartOf(const CodePtr code)
Definition object.h:6824
bool HasMonomorphicEntry() const
Definition object.h:6812
static bool IsDiscarded(const CodePtr code)
Definition object.h:6807
static void NotifyCodeObservers(const Code &code, bool optimized)
Definition object.cc:18191
@ kSCallTableCodeOrTypeTarget
Definition object.h:6955
@ kSCallTableKindAndOffset
Definition object.h:6954
bool IsUnknownDartCode() const
Definition object.h:7216
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:6272
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:7506
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:7419
static IsolateGroup * vm_isolate_group()
Definition dart.h:69
static Isolate * vm_isolate()
Definition dart.h:68
static char * FeaturesString(IsolateGroup *isolate_group, bool is_vm_snapshot, Snapshot::Kind kind)
Definition dart.cc:998
void Trace(Serializer *s, ObjectPtr object)
virtual void ReadFill(Deserializer *deserializer)=0
DeserializationCluster(const char *name, bool is_canonical=false, bool is_immutable=false)
virtual void PostLoad(Deserializer *deserializer, const Array &refs)
void ReadAllocFixedSize(Deserializer *deserializer, intptr_t instance_size)
virtual void ReadAlloc(Deserializer *deserializer)=0
const char * name() const
virtual void ReadRoots(Deserializer *deserializer)=0
virtual void AddBaseObjects(Deserializer *deserializer)=0
virtual void PostLoad(Deserializer *deserializer, const Array &refs)=0
void ReadFromTo(T obj, P &&... params)
ObjectPtr Ref(intptr_t index) const
TokenPosition ReadTokenPosition()
uint64_t ReadUnsigned64()
ObjectPtr Allocate(intptr_t size)
void ReadInstructions(CodePtr code, bool deferred)
intptr_t ReadUnsigned()
void set_code_start_index(intptr_t value)
void AssignRef(ObjectPtr object)
ApiErrorPtr VerifyImageAlignment()
bool is_non_root_unit() const
const InstructionsTable & instructions_table() const
intptr_t next_index() const
const uint8_t * AddressOfCurrentPosition() const
Zone * zone() const
static void InitializeHeader(ObjectPtr raw, intptr_t cid, intptr_t size, bool is_canonical=false)
void AddBaseObject(ObjectPtr base_object)
intptr_t num_base_objects() const
ObjectPtr Ref(intptr_t index) const
void Advance(intptr_t value)
CodePtr GetCodeByIndex(intptr_t code_index, uword *entry_point) const
uword GetEntryPointByCodeIndex(intptr_t code_index) const
intptr_t position() const
TokenPosition ReadTokenPosition()
ObjectPtr GetObjectAt(uint32_t offset) const
intptr_t code_start_index() const
Heap * heap() const
static intptr_t CodeIndexToClusterIndex(const InstructionsTable &table, intptr_t code_index)
Snapshot::Kind kind() const
DeserializationCluster * ReadCluster()
uword ReadWordWith32BitReads()
void Align(intptr_t alignment, intptr_t offset=0)
void ReadBytes(uint8_t *addr, intptr_t len)
Deserializer(Thread *thread, Snapshot::Kind kind, const uint8_t *buffer, intptr_t size, const uint8_t *data_buffer, const uint8_t *instructions_buffer, bool is_non_root_unit, intptr_t offset=0)
intptr_t code_stop_index() const
void set_position(intptr_t p)
void Deserialize(DeserializationRoots *roots)
void set_code_stop_index(intptr_t value)
static void DisassembleStub(const char *name, const Code &code)
static void DisassembleCode(const Function &function, const Code &code, bool optimized)
void ReadFill(Deserializer *d_) override
DoubleDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
DoubleSerializationCluster(bool is_canonical)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:10114
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:6579
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:11714
static constexpr int kDataSerializationAlignment
Definition object.h:11708
FakeSerializationCluster(const char *name, intptr_t num_objects, intptr_t size, intptr_t target_memory_size=0)
void Trace(Serializer *s, ObjectPtr object)
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:4354
void ReadFill(Deserializer *d_) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
void SetAt(intptr_t index, ObjectPtr raw_instance, bool concurrent_use=false)
Definition field_table.h:75
ObjectPtr At(intptr_t index, bool concurrent_use=false) const
Definition field_table.h:61
void AllocateIndex(intptr_t index)
intptr_t NumFieldIds() const
Definition field_table.h:39
void set_is_nullable_unsafe(bool val) const
Definition object.h:4735
@ kUnknownLengthOffset
Definition object.h:4700
@ kNoFixedLength
Definition object.h:4702
void InitializeGuardedListLengthInObjectOffset(bool unsafe=false) const
Definition object.cc:12588
static intptr_t InstanceSize()
Definition object.h:4531
void set_guarded_list_length_in_object_offset_unsafe(intptr_t offset) const
Definition object.cc:12165
void set_guarded_cid_unsafe(intptr_t cid) const
Definition object.h:4638
static intptr_t TargetOffsetOf(FieldPtr field)
Definition object.h:13229
void set_guarded_list_length_unsafe(intptr_t list_length) const
Definition object.cc:12156
void set_static_type_exactness_state_unsafe(StaticTypeExactnessState state) const
Definition object.h:4618
static intptr_t value_offset()
Definition object.h:11171
static intptr_t InstanceSize()
Definition object.h:11167
static intptr_t InstanceSize()
Definition object.h:11236
static intptr_t value_offset()
Definition object.h:11240
ApiErrorPtr ReadUnitSnapshot(const LoadingUnit &unit)
ApiErrorPtr ReadProgramSnapshot()
FullSnapshotReader(const Snapshot *snapshot, const uint8_t *instructions_buffer, Thread *thread)
FullSnapshotWriter(Snapshot::Kind kind, NonStreamingWriteStream *vm_snapshot_data, NonStreamingWriteStream *isolate_snapshot_data, ImageWriter *vm_image_writer, ImageWriter *iso_image_writer)
void WriteFullSnapshot(GrowableArray< LoadingUnitSerializationData * > *data=nullptr)
void WriteUnitSnapshot(GrowableArray< LoadingUnitSerializationData * > *units, LoadingUnitSerializationData *unit, uint32_t program_hash)
void ReadFill(Deserializer *d_) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static const char * MakeDisambiguatedFunctionName(Serializer *s, FunctionPtr f)
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
FunctionTypeDeserializationCluster(bool is_canonical, bool is_root_unit)
void PostLoad(Deserializer *d, const Array &refs) override
void Trace(Serializer *s, ObjectPtr object)
FunctionTypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
static intptr_t InstanceSize()
Definition object.h:9756
CodePtr CurrentCode() const
Definition object.h:3157
static intptr_t InstanceSize()
Definition object.h:3965
bool HasCode() const
Definition object.cc:7994
void ClearCodeSafe() const
Definition object.cc:8016
void PrintName(const NameFormattingParams &params, BaseTextBuffer *printer) const
Definition object.cc:11167
void SetInstructionsSafe(const Code &value) const
Definition object.cc:7978
void Trace(Serializer *s, ObjectPtr object)
void Add(const Object &value, Heap::Space space=Heap::kNew) const
Definition object.cc:25070
static GrowableObjectArrayPtr New(Heap::Space space=Heap::kNew)
Definition object.h:11118
static intptr_t InstanceSize()
Definition object.h:11114
intptr_t Length() const
Definition object.h:11046
ObjectPtr At(intptr_t index) const
Definition object.h:11059
static constexpr double kMaxLoadFactor
Definition hash_table.h:617
HeapLocker(Thread *thread, PageSpace *page_space)
@ kOld
Definition heap.h:39
intptr_t GetLoadingUnit(ObjectPtr raw_obj) const
Definition heap.h:207
PageSpace * old_space()
Definition heap.h:63
void ResetObjectIdTable()
Definition heap.cc:888
bool Verify(const char *msg, MarkExpectation mark_expectation=kForbidMarked)
Definition heap.cc:760
intptr_t GetObjectId(ObjectPtr raw_obj) const
Definition heap.h:197
void SetObjectId(ObjectPtr raw_obj, intptr_t object_id)
Definition heap.h:193
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
@ kCachedICDataArrayCount
Definition object.h:2756
static intptr_t InstanceSize()
Definition object.h:2556
ObjectPtr GetObjectAt(uint32_t offset) const
InstructionsPtr GetInstructionsAt(uint32_t offset) const
ApiErrorPtr VerifyAlignment() const
intptr_t GetTextObjectCount() const
void Write(NonStreamingWriteStream *clustered_stream, bool vm)
void SetProfileWriter(V8SnapshotProfileWriter *profile_writer)
static const char * TagObjectTypeAsReadOnly(Zone *zone, const char *type)
intptr_t text_size() const
void GetTrampolineInfo(intptr_t *count, intptr_t *size) const
int32_t GetTextOffsetFor(InstructionsPtr instructions, CodePtr code)
uint32_t GetDataOffsetFor(ObjectPtr raw_object)
intptr_t data_size() const
void PrepareForSerialization(GrowableArray< ImageWriterCommand > *commands)
uint32_t AddBytesToData(uint8_t *bytes, intptr_t length)
void ReadAlloc(Deserializer *d) override
InstanceDeserializationCluster(intptr_t cid, bool is_canonical, bool is_immutable, bool is_root_unit)
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
InstanceSerializationCluster(bool is_canonical, intptr_t cid)
static intptr_t NextFieldOffset()
Definition object.h:8326
static InstructionsTablePtr New(intptr_t length, uword start_pc, uword end_pc, uword rodata)
Definition object.cc:15565
void SetCodeAt(intptr_t index, CodePtr code) const
Definition object.cc:15586
const UntaggedInstructionsTable::Data * rodata() const
Definition object.h:5962
uword EntryPointAt(intptr_t index) const
Definition object.cc:15688
static intptr_t InstanceSize()
Definition object.h:11205
static intptr_t value_offset()
Definition object.h:11209
V Lookup(const Key &key) const
Definition hash_map.h:548
void Insert(const Key &key, const Value &value)
Definition hash_map.h:543
Heap * heap() const
Definition isolate.h:295
ObjectStore * object_store() const
Definition isolate.h:505
static IsolateGroup * Current()
Definition isolate.h:534
ClassTable * class_table() const
Definition isolate.h:491
void SetupImagePage(const uint8_t *snapshot_buffer, bool is_executable)
Definition isolate.cc:1917
IsolateGroup * group() const
Definition isolate.h:990
void ReadAlloc(Deserializer *d) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:5458
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:8058
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:8434
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:5090
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
void set_objects(ZoneGrowableArray< Object * > *objects)
ZoneGrowableArray< Object * > * objects()
LoadingUnitSerializationData * parent() const
GrowableArray< Code * > * deferred_objects()
intptr_t id() const
Definition object.h:7956
LoadingUnitPtr parent() const
Definition object.h:7951
static intptr_t InstanceSize()
Definition object.h:7944
static constexpr intptr_t kRootId
Definition object.h:7940
void set_base_objects(const Array &value) const
Definition object.cc:19750
void set_instructions_image(const uint8_t *value) const
Definition object.h:8003
uint8_t * Steal(intptr_t *length)
Definition datastream.h:633
void ReadAlloc(Deserializer *d) override
MapDeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void ReadFill(Deserializer *d_) override
MapSerializationCluster(bool is_canonical, intptr_t cid)
void Trace(Serializer *s, ObjectPtr object)
void WriteAlloc(Serializer *s)
void WriteFill(Serializer *s)
static intptr_t InstanceSize()
Definition object.h:12085
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:7605
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
MintDeserializationCluster(bool is_canonical, bool is_root_unit)
MintSerializationCluster(bool is_canonical)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:10069
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:5424
static uword LinkNativeCallEntry()
DART_FORCE_INLINE void SetPosition(intptr_t value)
Definition datastream.h:618
static void static void PrintErr(const char *format,...) PRINTF_ATTRIBUTE(1
static void Print(const char *format,...) PRINTF_ATTRIBUTE(1
static DART_NORETURN void Abort()
static char * SCreate(Zone *zone, const char *format,...) PRINTF_ATTRIBUTE(2
void ReadAlloc(Deserializer *d) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static uint8_t EncodeBits(EntryType type, Patchability patchable, SnapshotBehavior snapshot_behavior)
Definition object.h:5582
static intptr_t InstanceSize()
Definition object.h:5620
ObjectPtr Decompress(uword heap_base) const
UntaggedObject * untag() const
uword heap_base() const
intptr_t GetClassIdMayBeSmi() const
@ kInternalName
Definition object.h:622
static ObjectPtr null()
Definition object.h:433
ObjectPtr ptr() const
Definition object.h:332
static Object * ReadOnlyHandle()
Definition object.h:431
static void set_vm_isolate_snapshot_object_table(const Array &table)
Definition object.cc:1601
static void FinalizeReadOnlyObject(ObjectPtr object)
Definition object.cc:1556
virtual const char * ToCString() const
Definition object.h:366
static constexpr intptr_t RoundedAllocationSize(intptr_t size)
Definition object.h:758
bool IsNull() const
Definition object.h:363
static Object & Handle()
Definition object.h:407
static ObjectPtr RawCast(ObjectPtr obj)
Definition object.h:325
static Object & ZoneHandle()
Definition object.h:419
const char * FieldNameForOffset(intptr_t cid, intptr_t offset)
static intptr_t InstanceSize()
Definition object.h:10543
void AcquireLock(FreeList *freelist)
Definition pages.cc:426
void ReleaseLock(FreeList *freelist)
Definition pages.cc:430
DART_FORCE_INLINE uword AllocateSnapshotLocked(FreeList *freelist, intptr_t size)
Definition pages.h:161
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:2271
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:6060
ProgramDeserializationRoots(ObjectStore *object_store)
void PostLoad(Deserializer *d, const Array &refs) override
void ReadRoots(Deserializer *d) override
void AddBaseObjects(Deserializer *d) override
void AddBaseObjects(Serializer *s)
ProgramSerializationRoots(ZoneGrowableArray< Object * > *base_objects, ObjectStore *object_store, Snapshot::Kind snapshot_kind)
virtual const CompressedStackMaps & canonicalized_stack_map_entries() const
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
RODataDeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void PostLoad(Deserializer *d, const Array &refs) override
RODataSerializationCluster(Zone *zone, const char *type, intptr_t cid, bool is_canonical)
void Trace(Serializer *s, ObjectPtr object)
uword ReadWordWith32BitReads()
Definition datastream.h:157
void Align(intptr_t alignment, intptr_t offset=0)
Definition datastream.h:133
intptr_t ReadRefId()
Definition datastream.h:103
intptr_t Position() const
Definition datastream.h:127
intptr_t PendingBytes() const
Definition datastream.h:147
const uint8_t * AddressOfCurrentPosition() const
Definition datastream.h:140
void Advance(intptr_t value)
Definition datastream.h:142
void SetPosition(intptr_t value)
Definition datastream.h:128
void ReadBytes(void *addr, intptr_t len)
Definition datastream.h:90
void ReadAlloc(Deserializer *d) override
RecordDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadFill(Deserializer *d_) override
RecordSerializationCluster(bool is_canonical)
void Trace(Serializer *s, ObjectPtr object)
intptr_t AsInt() const
Definition object.h:11296
intptr_t num_fields() const
Definition object.h:11288
void ReadFill(Deserializer *d_) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAlloc(Deserializer *d) override
RecordTypeDeserializationCluster(bool is_canonical, bool is_root_unit)
void Trace(Serializer *s, ObjectPtr object)
RecordTypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
static intptr_t InstanceSize()
Definition object.h:11376
static intptr_t InstanceSize()
Definition object.h:11434
static intptr_t NumFields(RecordPtr ptr)
Definition object.h:11400
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:12875
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t line_starts_offset()
Definition object.h:4935
static intptr_t InstanceSize()
Definition object.h:4974
const char * name() const
void WriteAndMeasureAlloc(Serializer *serializer)
void WriteAndMeasureFill(Serializer *serializer)
static constexpr intptr_t kSizeVaries
intptr_t target_memory_size() const
const intptr_t target_instance_size_
intptr_t num_objects() const
virtual void Trace(Serializer *serializer, ObjectPtr object)=0
SerializationCluster(const char *name, intptr_t cid, intptr_t target_instance_size=kSizeVaries, bool is_canonical=false)
virtual void WriteAlloc(Serializer *serializer)=0
virtual void WriteFill(Serializer *serializer)=0
virtual void AddBaseObjects(Serializer *serializer)=0
virtual const CompressedStackMaps & canonicalized_stack_map_entries() const
virtual void WriteRoots(Serializer *serializer)=0
virtual void PushRoots(Serializer *serializer)=0
WritingObjectScope(Serializer *serializer, const char *type, ObjectPtr object, const char *name)
WritingObjectScope(Serializer *serializer, const char *type, ObjectPtr object, StringPtr name)
WritingObjectScope(Serializer *serializer, ObjectPtr object)
intptr_t current_loading_unit_id() const
void WriteCid(intptr_t cid)
void WritePropertyRef(ObjectPtr object, const char *property)
void WriteWordWith32BitWrites(uword value)
NonStreamingWriteStream * stream()
void DumpCombinedCodeStatistics()
DART_NOINLINE void WriteRange(ObjectPtr obj, T from, T to)
void TraceDataOffset(uint32_t offset)
void AddBaseObject(ObjectPtr base_object, const char *type=nullptr, const char *name=nullptr)
void WriteVersionAndFeatures(bool is_vm_snapshot)
bool InCurrentLoadingUnitOrRoot(ObjectPtr obj)
Zone * zone() const
void Write(T value)
void WriteRootRef(ObjectPtr object, const char *name=nullptr)
Serializer(Thread *thread, Snapshot::Kind kind, NonStreamingWriteStream *stream, ImageWriter *image_writer_, bool vm_, V8SnapshotProfileWriter *profile_writer=nullptr)
GrowableArray< LoadingUnitSerializationData * > * loading_units() const
bool HasArtificialRef(ObjectPtr object) const
void set_loading_units(GrowableArray< LoadingUnitSerializationData * > *units)
void set_current_loading_unit_id(intptr_t id)
bool HasProfileNode(ObjectPtr object) const
Heap * heap() const
void WriteFromTo(T obj, P &&... args)
void WriteElementRef(ObjectPtr object, intptr_t index)
void FillHeader(Snapshot::Kind kind)
uint32_t GetDataOffset(ObjectPtr object) const
void AttributeReference(ObjectPtr object, const V8SnapshotProfileWriter::Reference &reference)
bool HasRef(ObjectPtr object) const
bool IsWritten(ObjectPtr object) const
intptr_t AssignArtificialRef(ObjectPtr object=nullptr)
DART_NOINLINE void PushRange(ObjectPtr obj, T from, T to)
void PushWeak(ObjectPtr object)
Snapshot::Kind kind() const
intptr_t RefId(ObjectPtr object) const
intptr_t GetCodeIndex(CodePtr code)
void PushFromTo(T obj, P &&... args)
SerializationCluster * NewClusterForClass(intptr_t cid, bool is_canonical)
bool IsReachable(ObjectPtr object) const
void Trace(ObjectPtr object, intptr_t cid_override)
ZoneGrowableArray< Object * > * Serialize(SerializationRoots *roots)
intptr_t bytes_written()
void RecordDeferredCode(CodePtr ptr)
void WriteBytes(const void *addr, intptr_t len)
void WriteOffsetRef(ObjectPtr object, intptr_t offset)
intptr_t UnsafeRefId(ObjectPtr object) const
void AttributePropertyRef(ObjectPtr object, const char *property)
void WriteUnsigned(intptr_t value)
intptr_t AssignRef(ObjectPtr object)
void WriteTokenPosition(TokenPosition pos)
void Align(intptr_t alignment, intptr_t offset=0)
void PrepareInstructions(const CompressedStackMaps &canonical_smap)
V8SnapshotProfileWriter::ObjectId GetProfileId(ObjectPtr object) const
void WriteDispatchTable(const Array &entries)
V8SnapshotProfileWriter * profile_writer() const
intptr_t GetDataSize() const
void UnexpectedObject(ObjectPtr object, const char *message)
bool CreateArtificialNodeIfNeeded(ObjectPtr obj)
intptr_t bytes_heap_allocated()
void Push(ObjectPtr object, intptr_t cid_override=kIllegalCid)
void WriteRefId(intptr_t value)
void WriteInstructions(InstructionsPtr instr, uint32_t unchecked_offset, CodePtr code, bool deferred)
intptr_t next_ref_index() const
void AttributeElementRef(ObjectPtr object, intptr_t index)
void WriteUnsigned64(uint64_t value)
void ReadAlloc(Deserializer *d) override
SetDeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void ReadFill(Deserializer *d_) override
SetSerializationCluster(bool is_canonical, intptr_t cid)
void Trace(Serializer *s, ObjectPtr object)
void WriteAlloc(Serializer *s)
void WriteFill(Serializer *s)
static intptr_t InstanceSize()
Definition object.h:12188
void ReadFill(Deserializer *d_) override
Simd128DeserializationCluster(intptr_t cid, bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
Simd128SerializationCluster(intptr_t cid, bool is_canonical)
static SmiPtr New(intptr_t value)
Definition object.h:9985
intptr_t Value() const
Definition object.h:9969
static bool IsValid(int64_t value)
Definition object.h:10005
char * VerifyVersionAndFeatures(IsolateGroup *isolate_group, intptr_t *offset)
static char * InitializeGlobalVMFlagsFromSnapshot(const Snapshot *snapshot)
static bool IsFull(Kind kind)
Definition snapshot.h:63
void set_magic()
Definition snapshot.h:49
static const char * KindToCString(Kind kind)
Definition snapshot.cc:12
static bool IncludesStringsInROData(Kind kind)
Definition snapshot.h:71
static bool IncludesCode(Kind kind)
Definition snapshot.h:67
static constexpr intptr_t kHeaderSize
Definition snapshot.h:43
ThreadState * thread() const
Definition allocation.h:33
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:12565
static StaticTypeExactnessState NotTracking()
static intptr_t DecodeLengthAndCid(intptr_t encoded, intptr_t *out_cid)
StringDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
static intptr_t InstanceSize(intptr_t length, intptr_t cid)
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
void Add(uint16_t code_unit)
Definition object.h:10480
intptr_t Finalize()
Definition object.h:10496
StringSerializationCluster(bool is_canonical, bool represents_canonical_set)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t EncodeLengthAndCid(intptr_t length, intptr_t cid)
static StringPtr New(const char *cstr, Heap::Space space=Heap::kNew)
Definition object.cc:23777
static uint32_t SetCachedHash(StringPtr obj, uint32_t hash)
Definition object.h:10433
static const Code & EntryAt(intptr_t index)
Definition stub_code.h:101
static const char * NameAt(intptr_t index)
Definition stub_code.h:99
static void InitializationDone()
Definition stub_code.h:44
static intptr_t NumEntries()
Definition stub_code.h:107
static void EntryAtPut(intptr_t index, Code *entry)
Definition stub_code.h:102
void ReadAlloc(Deserializer *d) override
void ReadFill(Deserializer *d_) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:7778
static void InitFromSnapshot(IsolateGroup *isolate_group)
Definition symbols.cc:127
static StringPtr New(Thread *thread, const char *cstr)
Definition symbols.h:722
IsolateGroup * isolate_group() const
void DecrementNoSafepointScopeDepth()
Definition thread.h:720
static Thread * Current()
Definition thread.h:361
IsolateGroup * isolate_group() const
Definition thread.h:540
static TokenPosition Deserialize(int32_t value)
static intptr_t InstanceSize()
Definition object.h:10683
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
TypeArgumentsDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadAlloc(Deserializer *d) override
TypeArgumentsSerializationCluster(bool is_canonical, bool represents_canonical_set)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:8962
TypeArgumentsPtr Canonicalize(Thread *thread) const
Definition object.cc:7761
void ReadAlloc(Deserializer *d) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadFill(Deserializer *d_) override
TypeDeserializationCluster(bool is_canonical, bool is_root_unit)
void ReadFill(Deserializer *d_) override
void PostLoad(Deserializer *d, const Array &refs) override
void ReadAlloc(Deserializer *d) override
TypeParameterDeserializationCluster(bool is_canonical, bool is_root_unit)
TypeParameterSerializationCluster(bool is_canonical, bool cluster_represents_canonical_set)
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:9856
virtual AbstractTypePtr Canonicalize(Thread *thread) const
Definition object.cc:22932
void ReadFill(Deserializer *d_) override
void ReadAlloc(Deserializer *d) override
void Trace(Serializer *s, ObjectPtr object)
static intptr_t InstanceSize()
Definition object.h:8511
virtual bool IsInCanonicalSet(Serializer *s, TypePtr type)
TypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
void Trace(Serializer *s, ObjectPtr object)
static CodePtr DefaultCodeForType(const AbstractType &type, bool lazy_specialize=true)