Flutter Engine
The Flutter Engine
app_snapshot.cc File Reference
#include <memory>
#include <utility>
#include "vm/app_snapshot.h"
#include "platform/assert.h"
#include "vm/bootstrap.h"
#include "vm/bss_relocs.h"
#include "vm/canonical_tables.h"
#include "vm/class_id.h"
#include "vm/code_observers.h"
#include "vm/compiler/api/print_filter.h"
#include "vm/compiler/assembler/disassembler.h"
#include "vm/dart.h"
#include "vm/dart_entry.h"
#include "vm/dispatch_table.h"
#include "vm/flag_list.h"
#include "vm/growable_array.h"
#include "vm/heap/heap.h"
#include "vm/image_snapshot.h"
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/program_visitor.h"
#include "vm/raw_object_fields.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/timeline.h"
#include "vm/v8_snapshot_writer.h"
#include "vm/version.h"
#include "vm/zone_text_buffer.h"
#include "vm/compiler/backend/code_statistics.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/compiler/relocation.h"


Classes

class  dart::SerializationCluster
 
class  dart::DeserializationCluster
 
class  dart::SerializationRoots
 
class  dart::DeserializationRoots
 
class  dart::Serializer
 
class  dart::Serializer::WritingObjectScope
 
class  dart::Deserializer
 
class  dart::Deserializer::Local
 
class  dart::ClassSerializationCluster
 
class  dart::ClassDeserializationCluster
 
class  dart::CanonicalSetSerializationCluster< SetType, HandleType, PointerType, kAllCanonicalObjectsAreIncludedIntoSet >
 
class  dart::CanonicalSetDeserializationCluster< SetType, kAllCanonicalObjectsAreIncludedIntoSet >
 
class  dart::TypeParametersSerializationCluster
 
class  dart::TypeParametersDeserializationCluster
 
class  dart::TypeArgumentsSerializationCluster
 
class  dart::TypeArgumentsDeserializationCluster
 
class  dart::PatchClassSerializationCluster
 
class  dart::PatchClassDeserializationCluster
 
class  dart::FunctionSerializationCluster
 
class  dart::FunctionDeserializationCluster
 
class  dart::ClosureDataSerializationCluster
 
class  dart::ClosureDataDeserializationCluster
 
class  dart::FfiTrampolineDataSerializationCluster
 
class  dart::FfiTrampolineDataDeserializationCluster
 
class  dart::FieldSerializationCluster
 
class  dart::FieldDeserializationCluster
 
class  dart::ScriptSerializationCluster
 
class  dart::ScriptDeserializationCluster
 
class  dart::LibrarySerializationCluster
 
class  dart::LibraryDeserializationCluster
 
class  dart::NamespaceSerializationCluster
 
class  dart::NamespaceDeserializationCluster
 
class  dart::KernelProgramInfoSerializationCluster
 
class  dart::KernelProgramInfoDeserializationCluster
 
class  dart::CodeSerializationCluster
 
struct  dart::CodeSerializationCluster::CodeOrderInfo
 
class  dart::CodeDeserializationCluster
 
class  dart::ObjectPoolSerializationCluster
 
class  dart::ObjectPoolDeserializationCluster
 
class  dart::PcDescriptorsSerializationCluster
 
class  dart::PcDescriptorsDeserializationCluster
 
class  dart::CodeSourceMapSerializationCluster
 
class  dart::CodeSourceMapDeserializationCluster
 
class  dart::CompressedStackMapsSerializationCluster
 
class  dart::CompressedStackMapsDeserializationCluster
 
class  dart::RODataSerializationCluster
 
class  dart::RODataDeserializationCluster
 
class  dart::ExceptionHandlersSerializationCluster
 
class  dart::ExceptionHandlersDeserializationCluster
 
class  dart::ContextSerializationCluster
 
class  dart::ContextDeserializationCluster
 
class  dart::ContextScopeSerializationCluster
 
class  dart::ContextScopeDeserializationCluster
 
class  dart::UnlinkedCallSerializationCluster
 
class  dart::UnlinkedCallDeserializationCluster
 
class  dart::ICDataSerializationCluster
 
class  dart::ICDataDeserializationCluster
 
class  dart::MegamorphicCacheSerializationCluster
 
class  dart::MegamorphicCacheDeserializationCluster
 
class  dart::SubtypeTestCacheSerializationCluster
 
class  dart::SubtypeTestCacheDeserializationCluster
 
class  dart::LoadingUnitSerializationCluster
 
class  dart::LoadingUnitDeserializationCluster
 
class  dart::LanguageErrorSerializationCluster
 
class  dart::LanguageErrorDeserializationCluster
 
class  dart::UnhandledExceptionSerializationCluster
 
class  dart::UnhandledExceptionDeserializationCluster
 
class  dart::InstanceSerializationCluster
 
class  dart::AbstractInstanceDeserializationCluster
 
class  dart::InstanceDeserializationCluster
 
class  dart::LibraryPrefixSerializationCluster
 
class  dart::LibraryPrefixDeserializationCluster
 
class  dart::TypeSerializationCluster
 
class  dart::TypeDeserializationCluster
 
class  dart::FunctionTypeSerializationCluster
 
class  dart::FunctionTypeDeserializationCluster
 
class  dart::RecordTypeSerializationCluster
 
class  dart::RecordTypeDeserializationCluster
 
class  dart::TypeParameterSerializationCluster
 
class  dart::TypeParameterDeserializationCluster
 
class  dart::ClosureSerializationCluster
 
class  dart::ClosureDeserializationCluster
 
class  dart::MintSerializationCluster
 
class  dart::MintDeserializationCluster
 
class  dart::DoubleSerializationCluster
 
class  dart::DoubleDeserializationCluster
 
class  dart::Simd128SerializationCluster
 
class  dart::Simd128DeserializationCluster
 
class  dart::GrowableObjectArraySerializationCluster
 
class  dart::GrowableObjectArrayDeserializationCluster
 
class  dart::RecordSerializationCluster
 
class  dart::RecordDeserializationCluster
 
class  dart::TypedDataSerializationCluster
 
class  dart::TypedDataDeserializationCluster
 
class  dart::TypedDataViewSerializationCluster
 
class  dart::TypedDataViewDeserializationCluster
 
class  dart::ExternalTypedDataSerializationCluster
 
class  dart::ExternalTypedDataDeserializationCluster
 
class  dart::DeltaEncodedTypedDataSerializationCluster
 
class  dart::DeltaEncodedTypedDataDeserializationCluster
 
class  dart::StackTraceSerializationCluster
 
class  dart::StackTraceDeserializationCluster
 
class  dart::RegExpSerializationCluster
 
class  dart::RegExpDeserializationCluster
 
class  dart::WeakPropertySerializationCluster
 
class  dart::WeakPropertyDeserializationCluster
 
class  dart::MapSerializationCluster
 
class  dart::MapDeserializationCluster
 
class  dart::SetSerializationCluster
 
class  dart::SetDeserializationCluster
 
class  dart::ArraySerializationCluster
 
class  dart::ArrayDeserializationCluster
 
class  dart::WeakArraySerializationCluster
 
class  dart::WeakArrayDeserializationCluster
 
class  dart::StringSerializationCluster
 
class  dart::StringDeserializationCluster
 
class  dart::FakeSerializationCluster
 
class  dart::VMSerializationRoots
 
class  dart::VMDeserializationRoots
 
class  dart::ProgramSerializationRoots
 
class  dart::ProgramDeserializationRoots
 
class  dart::UnitSerializationRoots
 
class  dart::UnitDeserializationRoots
 
class  dart::HeapLocker
 

Namespaces

namespace  dart
 

Macros

#define AutoTraceObject(obj)    Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, nullptr)
 
#define AutoTraceObjectName(obj, str)    Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, str)
 
#define WriteFieldValue(field, value)   s->WritePropertyRef(value, #field);
 
#define WriteFromTo(obj, ...)   s->WriteFromTo(obj, ##__VA_ARGS__);
 
#define PushFromTo(obj, ...)   s->PushFromTo(obj, ##__VA_ARGS__);
 
#define WriteField(obj, field)   s->WritePropertyRef(obj->untag()->field, #field)
 
#define WriteCompressedField(obj, name)    s->WritePropertyRef(obj->untag()->name(), #name "_")
 
#define DECLARE_OBJECT_STORE_FIELD(Type, Name)   #Name,
 
#define RESET_ROOT_LIST(V)
 
#define ONLY_IN_AOT(code)
 
#define SAVE_AND_RESET_ROOT(name, Type, init)
 
#define ONLY_IN_AOT(code)
 
#define RESTORE_ROOT(name, Type, init)    object_store_->set_##name(saved_##name##_);
 
#define ONLY_IN_AOT(code)   code
 
#define DECLARE_FIELD(name, Type, init)   Type& saved_##name##_ = Type::Handle();
 
#define CASE_FFI_CID(name)   case kFfi##name##Cid:
 
#define CID_CLUSTER(Type)    reinterpret_cast<Type##SerializationCluster*>(clusters_by_cid_[k##Type##Cid])
 
#define ADD_CANONICAL_NEXT(cid)
 
#define ADD_NON_CANONICAL_NEXT(cid)
 
#define CASE_FFI_CID(name)   case kFfi##name##Cid:
 
#define SET_FLAG(name)
 
#define CHECK_FLAG(name, mode)
 
#define SET_P(name, T, DV, C)   SET_FLAG(name)
 
#define SET_OR_CHECK_R(name, PV, T, DV, C)   SET_FLAG(name)
 
#define SET_OR_CHECK_C(name, PV, T, DV, C)   SET_FLAG(name)
 
#define SET_OR_CHECK_D(name, T, DV, C)   CHECK_FLAG(name, "non-debug mode")
 

Functions

 dart::DEFINE_FLAG (bool, print_cluster_information, false, "Print information about clusters written to snapshot")
 
 dart::COMPILE_ASSERT (kUnreachableReference==WeakTable::kNoValue)
 
static constexpr bool dart::IsAllocatedReference (intptr_t ref)
 
static constexpr bool dart::IsArtificialReference (intptr_t ref)
 
static constexpr bool dart::IsReachableReference (intptr_t ref)
 
static UnboxedFieldBitmap dart::CalculateTargetUnboxedFieldsBitmap (Serializer *s, intptr_t class_id)
 
template<bool need_entry_point_for_non_discarded>
static DART_FORCE_INLINE CodePtr dart::GetCodeAndEntryPointByIndex (const Deserializer *d, intptr_t code_index, uword *entry_point)
 
static int dart::CompareClusters (SerializationCluster *const *a, SerializationCluster *const *b)
 

Variables

static constexpr intptr_t dart::kUnreachableReference = 0
 
static constexpr intptr_t dart::kFirstReference = 1
 
static constexpr intptr_t dart::kUnallocatedReference = -1
 
static const char *const dart::kObjectStoreFieldNames []
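The reference-id constants above are used together with the IsAllocatedReference, IsArtificialReference, and IsReachableReference predicates listed under Functions. Only the signatures and constant values appear on this page, so the bodies below are an assumption; this is a minimal sketch of how the predicates plausibly partition reference ids:

// Sketch only; the bodies are assumed, not taken from this page.
static constexpr intptr_t kUnreachableReference = 0;
static constexpr intptr_t kFirstReference = 1;
static constexpr intptr_t kUnallocatedReference = -1;

// Assumed: an allocated reference has been assigned an id of kFirstReference or above.
static constexpr bool IsAllocatedReference(intptr_t ref) {
  return ref >= kFirstReference;
}
// Assumed: artificial references use ids below kUnallocatedReference.
static constexpr bool IsArtificialReference(intptr_t ref) {
  return ref < kUnallocatedReference;
}
// Assumed: a reference is reachable once it has been marked unallocated
// (reached but not yet assigned an id) or allocated an id.
static constexpr bool IsReachableReference(intptr_t ref) {
  return ref == kUnallocatedReference || IsAllocatedReference(ref);
}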
 

Macro Definition Documentation

◆ ADD_CANONICAL_NEXT

#define ADD_CANONICAL_NEXT (   cid)
Value:
if (auto const cluster = canonical_clusters_by_cid_[cid]) { \
clusters.Add(cluster); \
canonical_clusters_by_cid_[cid] = nullptr; \
}

◆ ADD_NON_CANONICAL_NEXT

#define ADD_NON_CANONICAL_NEXT (   cid)
Value:
if (auto const cluster = clusters_by_cid_[cid]) { \
clusters.Add(cluster); \
clusters_by_cid_[cid] = nullptr; \
}
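Both macros follow the same pattern: if a serialization cluster for the given class id is still pending in the corresponding table (canonical_clusters_by_cid_ for the canonical variant, clusters_by_cid_ otherwise), it is appended to the local clusters list and the slot is cleared so the cluster is not emitted twice. A hypothetical call site (the class id is illustrative, not taken from this page) would emit the canonical cluster just ahead of its non-canonical counterpart:

// Hypothetical ordering snippet; assumes GrowableArray<SerializationCluster*>
// clusters and the two by-cid tables are in scope, as the macro bodies expect.
ADD_CANONICAL_NEXT(kMintCid)      // illustrative class id
ADD_NON_CANONICAL_NEXT(kMintCid)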

◆ AutoTraceObject

#define AutoTraceObject (   obj)     Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, nullptr)

Definition at line 649 of file app_snapshot.cc.
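For orientation, here is a minimal sketch of how AutoTraceObject and its companions (AutoTraceObjectName, WriteFromTo, WriteField) are typically combined in a cluster's WriteFill method, modeled on the usages visible in the source listing below; the pointer type and field accessor are illustrative, not taken from this page:

// Sketch only: the macros assume a Serializer* named `s` and the cluster's
// name() in scope, per the expansions shown above.
void WriteFill(Serializer* s) {
  const intptr_t count = objects_.length();
  for (intptr_t i = 0; i < count; i++) {
    ExamplePtr obj = objects_[i];      // illustrative pointer type
    AutoTraceObject(obj);              // opens a Serializer::WritingObjectScope for obj
    WriteFromTo(obj);                  // writes obj's pointer fields
    WriteField(obj, instantiations()); // illustrative field accessor
  }
}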

663 class Deserializer
664 : public ThreadStackResource {
665 public:
666 Deserializer(Thread* thread,
667 Snapshot::Kind kind,
668 const uint8_t* buffer,
669 intptr_t size,
670 const uint8_t* data_buffer,
671 const uint8_t* instructions_buffer,
672 bool is_non_root_unit,
673 intptr_t offset = 0);
674 ~Deserializer();
675
676 // Verifies the image alignment.
677 //
678 // Returns ApiError::null() on success and an ApiError with an appropriate
679 // message otherwise.
680 ApiErrorPtr VerifyImageAlignment();
681
682 ObjectPtr Allocate(intptr_t size);
683 static void InitializeHeader(ObjectPtr raw,
684 intptr_t cid,
685 intptr_t size,
686 bool is_canonical = false) {
687 InitializeHeader(raw, cid, size, is_canonical,
689 }
690 static void InitializeHeader(ObjectPtr raw,
691 intptr_t cid,
692 intptr_t size,
693 bool is_canonical,
694 bool is_immutable);
695
696 // Reads raw data (for basic types).
697 // sizeof(T) must be in {1,2,4,8}.
698 template <typename T>
699 T Read() {
700 return ReadStream::Raw<sizeof(T), T>::Read(&stream_);
701 }
702 intptr_t ReadRefId() { return stream_.ReadRefId(); }
703 intptr_t ReadUnsigned() { return stream_.ReadUnsigned(); }
704 uint64_t ReadUnsigned64() { return stream_.ReadUnsigned<uint64_t>(); }
705 void ReadBytes(uint8_t* addr, intptr_t len) { stream_.ReadBytes(addr, len); }
706
707 uword ReadWordWith32BitReads() { return stream_.ReadWordWith32BitReads(); }
708
709 intptr_t position() const { return stream_.Position(); }
710 void set_position(intptr_t p) { stream_.SetPosition(p); }
711 const uint8_t* AddressOfCurrentPosition() const {
712 return stream_.AddressOfCurrentPosition();
713 }
714
715 void Advance(intptr_t value) { stream_.Advance(value); }
716 void Align(intptr_t alignment, intptr_t offset = 0) {
717 stream_.Align(alignment, offset);
718 }
719
720 void AddBaseObject(ObjectPtr base_object) { AssignRef(base_object); }
721
722 void AssignRef(ObjectPtr object) {
723 ASSERT(next_ref_index_ <= num_objects_);
724 refs_->untag()->data()[next_ref_index_] = object;
725 next_ref_index_++;
726 }
727
728 ObjectPtr Ref(intptr_t index) const {
729 ASSERT(index > 0);
730 ASSERT(index <= num_objects_);
731 return refs_->untag()->element(index);
732 }
733
734 CodePtr GetCodeByIndex(intptr_t code_index, uword* entry_point) const;
735 uword GetEntryPointByCodeIndex(intptr_t code_index) const;
736
737 // If |code_index| corresponds to a non-discarded Code object returns
738 // index within the code cluster that corresponds to this Code object.
739 // Otherwise, if |code_index| corresponds to the discarded Code then
740 // returns -1.
741 static intptr_t CodeIndexToClusterIndex(const InstructionsTable& table,
742 intptr_t code_index);
743
744 ObjectPtr ReadRef() { return Ref(ReadRefId()); }
745
746 TokenPosition ReadTokenPosition() {
747 return TokenPosition::Deserialize(Read<int32_t>());
748 }
749
750 intptr_t ReadCid() {
751 COMPILE_ASSERT(UntaggedObject::kClassIdTagSize <= 32);
752 return Read<int32_t>();
753 }
754
755 void ReadInstructions(CodePtr code, bool deferred);
756 void EndInstructions();
757 ObjectPtr GetObjectAt(uint32_t offset) const;
758
759 void Deserialize(DeserializationRoots* roots);
760
761 DeserializationCluster* ReadCluster();
762
763 void ReadDispatchTable() {
764 ReadDispatchTable(&stream_, /*deferred=*/false, InstructionsTable::Handle(),
765 -1, -1);
766 }
767 void ReadDispatchTable(ReadStream* stream,
768 bool deferred,
769 const InstructionsTable& root_instruction_table,
770 intptr_t deferred_code_start_index,
771 intptr_t deferred_code_end_index);
772
773 intptr_t next_index() const { return next_ref_index_; }
774 Heap* heap() const { return heap_; }
775 Zone* zone() const { return zone_; }
776 Snapshot::Kind kind() const {
777#if defined(DART_PRECOMPILED_RUNTIME)
778 return Snapshot::kFullAOT;
779#else
780 return kind_;
781#endif
782 }
783 bool is_non_root_unit() const { return is_non_root_unit_; }
784 void set_code_start_index(intptr_t value) { code_start_index_ = value; }
785 intptr_t code_start_index() const { return code_start_index_; }
786 void set_code_stop_index(intptr_t value) { code_stop_index_ = value; }
787 intptr_t code_stop_index() const { return code_stop_index_; }
788 const InstructionsTable& instructions_table() const {
789 return instructions_table_;
790 }
791 intptr_t num_base_objects() const { return num_base_objects_; }
792
793 // This serves to make the snapshot cursor, ref table and null be locals
794 // during ReadFill, which allows the C compiler to see they are not aliased
795 // and can be kept in registers.
796 class Local : public ReadStream {
797 public:
798 explicit Local(Deserializer* d)
799 : ReadStream(d->stream_.buffer_, d->stream_.current_, d->stream_.end_),
800 d_(d),
801 refs_(d->refs_),
802 null_(Object::null()) {
803#if defined(DEBUG)
804 // Can't mix use of Deserializer::Read*.
805 d->stream_.current_ = nullptr;
806#endif
807 }
808 ~Local() { d_->stream_.current_ = current_; }
809
810 ObjectPtr Ref(intptr_t index) const {
811 ASSERT(index > 0);
812 ASSERT(index <= d_->num_objects_);
813 return refs_->untag()->element(index);
814 }
815
816 template <typename T>
817 T Read() {
818 return ReadStream::Raw<sizeof(T), T>::Read(this);
819 }
820 uint64_t ReadUnsigned64() { return ReadUnsigned<uint64_t>(); }
821
822 ObjectPtr ReadRef() { return Ref(ReadRefId()); }
823 TokenPosition ReadTokenPosition() {
824 return TokenPosition::Deserialize(Read<int32_t>());
825 }
826
827 intptr_t ReadCid() {
828 COMPILE_ASSERT(UntaggedObject::kClassIdTagSize <= 32);
829 return Read<int32_t>();
830 }
831
832 template <typename T, typename... P>
833 void ReadFromTo(T obj, P&&... params) {
834 auto* from = obj->untag()->from();
835 auto* to_snapshot = obj->untag()->to_snapshot(d_->kind(), params...);
836 auto* to = obj->untag()->to(params...);
837 for (auto* p = from; p <= to_snapshot; p++) {
838 *p = ReadRef();
839 }
840 // This is necessary because, unlike Object::Allocate, the clustered
841 // deserializer allocates objects without null-initializing them. Instead,
842 // each deserialization cluster is responsible for initializing every
843 // field, ensuring that every field is written to exactly once.
844 for (auto* p = to_snapshot + 1; p <= to; p++) {
845 *p = null_;
846 }
847 }
848
849 private:
850 Deserializer* const d_;
851 const ArrayPtr refs_;
852 const ObjectPtr null_;
853 };
854
855 private:
856 Heap* heap_;
857 PageSpace* old_space_;
858 FreeList* freelist_;
859 Zone* zone_;
860 Snapshot::Kind kind_;
861 ReadStream stream_;
862 ImageReader* image_reader_;
863 intptr_t num_base_objects_;
864 intptr_t num_objects_;
865 intptr_t num_clusters_;
866 ArrayPtr refs_;
867 intptr_t next_ref_index_;
868 intptr_t code_start_index_ = 0;
869 intptr_t code_stop_index_ = 0;
870 intptr_t instructions_index_ = 0;
871 DeserializationCluster** clusters_;
872 const bool is_non_root_unit_;
873 InstructionsTable& instructions_table_;
874};
875
876DART_FORCE_INLINE
877ObjectPtr Deserializer::Allocate(intptr_t size) {
878 return UntaggedObject::FromAddr(
879 old_space_->AllocateSnapshotLocked(freelist_, size));
880}
881
882void Deserializer::InitializeHeader(ObjectPtr raw,
883 intptr_t class_id,
884 intptr_t size,
885 bool is_canonical,
886 bool is_immutable) {
887 ASSERT(Utils::IsAligned(size, kObjectAlignment));
888 uword tags = 0;
889 tags = UntaggedObject::ClassIdTag::update(class_id, tags);
890 tags = UntaggedObject::SizeTag::update(size, tags);
891 tags = UntaggedObject::CanonicalBit::update(is_canonical, tags);
892 tags = UntaggedObject::AlwaysSetBit::update(true, tags);
893 tags = UntaggedObject::NotMarkedBit::update(true, tags);
894 tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags);
895 tags = UntaggedObject::NewBit::update(false, tags);
896 tags = UntaggedObject::ImmutableBit::update(is_immutable, tags);
897 raw->untag()->tags_ = tags;
898}
899
900#if !defined(DART_PRECOMPILED_RUNTIME)
901void SerializationCluster::WriteAndMeasureAlloc(Serializer* serializer) {
902 intptr_t start_size = serializer->bytes_written();
903 intptr_t start_data = serializer->GetDataSize();
904 intptr_t start_objects = serializer->next_ref_index();
905 uint32_t tags = UntaggedObject::ClassIdTag::encode(cid_) |
906 UntaggedObject::CanonicalBit::encode(is_canonical()) |
907 UntaggedObject::ImmutableBit::encode(is_immutable());
908 serializer->Write<uint32_t>(tags);
909 WriteAlloc(serializer);
910 intptr_t stop_size = serializer->bytes_written();
911 intptr_t stop_data = serializer->GetDataSize();
912 intptr_t stop_objects = serializer->next_ref_index();
913 if (FLAG_print_cluster_information) {
914 OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "), ", start_size,
915 stop_size - start_size);
916 OS::PrintErr("Data 0x%" Pp " (%" Pd "): ", start_data,
917 stop_data - start_data);
918 OS::PrintErr("Alloc %s (%" Pd ")\n", name(), stop_objects - start_objects);
919 }
920 size_ += (stop_size - start_size) + (stop_data - start_data);
921 num_objects_ += (stop_objects - start_objects);
922 if (target_instance_size_ != kSizeVaries) {
923 target_memory_size_ += num_objects_ * target_instance_size_;
924 }
925}
926
927void SerializationCluster::WriteAndMeasureFill(Serializer* serializer) {
928 intptr_t start = serializer->bytes_written();
929 WriteFill(serializer);
930 intptr_t stop = serializer->bytes_written();
931 if (FLAG_print_cluster_information) {
932 OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "): Fill %s\n", start, stop - start,
933 name());
934 }
935 size_ += (stop - start);
936}
937#endif // !DART_PRECOMPILED_RUNTIME
938
939DART_NOINLINE
940void DeserializationCluster::ReadAllocFixedSize(Deserializer* d,
941 intptr_t instance_size) {
942 start_index_ = d->next_index();
943 intptr_t count = d->ReadUnsigned();
944 for (intptr_t i = 0; i < count; i++) {
945 d->AssignRef(d->Allocate(instance_size));
946 }
947 stop_index_ = d->next_index();
948}
949
950#if !defined(DART_PRECOMPILED_RUNTIME)
951static UnboxedFieldBitmap CalculateTargetUnboxedFieldsBitmap(
952 Serializer* s,
953 intptr_t class_id) {
954 const auto unboxed_fields_bitmap_host =
955 s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(class_id);
956
957 UnboxedFieldBitmap unboxed_fields_bitmap;
958 if (unboxed_fields_bitmap_host.IsEmpty() ||
959 kWordSize == compiler::target::kWordSize) {
960 unboxed_fields_bitmap = unboxed_fields_bitmap_host;
961 } else {
962 ASSERT(kWordSize == 8 && compiler::target::kWordSize == 4);
963 // A new bitmap is built if the word sizes in the target and
964 // host are different
965 unboxed_fields_bitmap.Reset();
966 intptr_t target_i = 0, host_i = 0;
967
968 while (host_i < UnboxedFieldBitmap::Length()) {
969 // Each unboxed field has constant length, therefore the number of
970 // words used by it should double when compiling from 64-bit to 32-bit.
971 if (unboxed_fields_bitmap_host.Get(host_i++)) {
972 unboxed_fields_bitmap.Set(target_i++);
973 unboxed_fields_bitmap.Set(target_i++);
974 } else {
975 // For object pointers, the field is always one word long.
976 target_i++;
977 }
978 }
979 }
980
981 return unboxed_fields_bitmap;
982}
983
984class ClassSerializationCluster : public SerializationCluster {
985 public:
986 explicit ClassSerializationCluster(intptr_t num_cids)
987 : SerializationCluster("Class",
988 kClassCid,
989 compiler::target::Class::InstanceSize()),
990 predefined_(kNumPredefinedCids),
991 objects_(num_cids) {}
992 ~ClassSerializationCluster() {}
993
994 void Trace(Serializer* s, ObjectPtr object) {
995 ClassPtr cls = Class::RawCast(object);
996 intptr_t class_id = cls->untag()->id_;
997
998 if (class_id == kIllegalCid) {
999 // Classes expected to be dropped by the precompiler should not be traced.
1000 s->UnexpectedObject(cls, "Class with illegal cid");
1001 }
1002 if (class_id < kNumPredefinedCids) {
1003 // These classes are allocated by Object::Init or Object::InitOnce, so the
1004 // deserializer must find them in the class table instead of allocating
1005 // them.
1006 predefined_.Add(cls);
1007 } else {
1008 objects_.Add(cls);
1009 }
1010
1011 PushFromTo(cls);
1012 }
1013
1014 void WriteAlloc(Serializer* s) {
1015 intptr_t count = predefined_.length();
1016 s->WriteUnsigned(count);
1017 for (intptr_t i = 0; i < count; i++) {
1018 ClassPtr cls = predefined_[i];
1019 s->AssignRef(cls);
1020 AutoTraceObject(cls);
1021 intptr_t class_id = cls->untag()->id_;
1022 s->WriteCid(class_id);
1023 }
1024 count = objects_.length();
1025 s->WriteUnsigned(count);
1026 for (intptr_t i = 0; i < count; i++) {
1027 ClassPtr cls = objects_[i];
1028 s->AssignRef(cls);
1029 }
1030 }
1031
1032 void WriteFill(Serializer* s) {
1033 intptr_t count = predefined_.length();
1034 for (intptr_t i = 0; i < count; i++) {
1035 WriteClass(s, predefined_[i]);
1036 }
1037 count = objects_.length();
1038 for (intptr_t i = 0; i < count; i++) {
1039 WriteClass(s, objects_[i]);
1040 }
1041 }
1042
1043 private:
1044 void WriteClass(Serializer* s, ClassPtr cls) {
1045 AutoTraceObjectName(cls, cls->untag()->name());
1046 WriteFromTo(cls);
1047 intptr_t class_id = cls->untag()->id_;
1048 if (class_id == kIllegalCid) {
1049 s->UnexpectedObject(cls, "Class with illegal cid");
1050 }
1051 s->WriteCid(class_id);
1052 if (s->kind() != Snapshot::kFullAOT) {
1053 s->Write<uint32_t>(cls->untag()->kernel_offset_);
1054 }
1055 s->Write<int32_t>(Class::target_instance_size_in_words(cls));
1056 s->Write<int32_t>(Class::target_next_field_offset_in_words(cls));
1057 s->Write<int32_t>(Class::target_type_arguments_field_offset_in_words(cls));
1058 s->Write<int16_t>(cls->untag()->num_type_arguments_);
1059 s->Write<uint16_t>(cls->untag()->num_native_fields_);
1060 if (s->kind() != Snapshot::kFullAOT) {
1061 s->WriteTokenPosition(cls->untag()->token_pos_);
1062 s->WriteTokenPosition(cls->untag()->end_token_pos_);
1063 s->WriteCid(cls->untag()->implementor_cid_);
1064 }
1065 s->Write<uint32_t>(cls->untag()->state_bits_);
1066
1067 if (!ClassTable::IsTopLevelCid(class_id)) {
1068 const auto unboxed_fields_map =
1069 CalculateTargetUnboxedFieldsBitmap(s, class_id);
1070 s->WriteUnsigned64(unboxed_fields_map.Value());
1071 }
1072 }
1073
1074 GrowableArray<ClassPtr> predefined_;
1075 GrowableArray<ClassPtr> objects_;
1076};
1077#endif // !DART_PRECOMPILED_RUNTIME
1078
1079class ClassDeserializationCluster : public DeserializationCluster {
1080 public:
1081 ClassDeserializationCluster() : DeserializationCluster("Class") {}
1082 ~ClassDeserializationCluster() {}
1083
1084 void ReadAlloc(Deserializer* d) override {
1085 predefined_start_index_ = d->next_index();
1086 intptr_t count = d->ReadUnsigned();
1087 ClassTable* table = d->isolate_group()->class_table();
1088 for (intptr_t i = 0; i < count; i++) {
1089 intptr_t class_id = d->ReadCid();
1090 ASSERT(table->HasValidClassAt(class_id));
1091 ClassPtr cls = table->At(class_id);
1092 ASSERT(cls != nullptr);
1093 d->AssignRef(cls);
1094 }
1095 predefined_stop_index_ = d->next_index();
1096
1097 start_index_ = d->next_index();
1098 count = d->ReadUnsigned();
1099 for (intptr_t i = 0; i < count; i++) {
1100 d->AssignRef(d->Allocate(Class::InstanceSize()));
1101 }
1102 stop_index_ = d->next_index();
1103 }
1104
1105 void ReadFill(Deserializer* d_) override {
1106 Deserializer::Local d(d_);
1107
1108 for (intptr_t id = predefined_start_index_; id < predefined_stop_index_;
1109 id++) {
1110 ClassPtr cls = static_cast<ClassPtr>(d.Ref(id));
1111 d.ReadFromTo(cls);
1112 intptr_t class_id = d.ReadCid();
1113 cls->untag()->id_ = class_id;
1114#if !defined(DART_PRECOMPILED_RUNTIME)
1115 ASSERT(d_->kind() != Snapshot::kFullAOT);
1116 cls->untag()->kernel_offset_ = d.Read<uint32_t>();
1117#endif
1118 if (!IsInternalVMdefinedClassId(class_id)) {
1119 cls->untag()->host_instance_size_in_words_ = d.Read<int32_t>();
1120 cls->untag()->host_next_field_offset_in_words_ = d.Read<int32_t>();
1121#if defined(DART_PRECOMPILER)
1122 // Only one pair is serialized. The target field only exists when
1123 // DART_PRECOMPILER is defined
1124 cls->untag()->target_instance_size_in_words_ =
1125 cls->untag()->host_instance_size_in_words_;
1126 cls->untag()->target_next_field_offset_in_words_ =
1127 cls->untag()->host_next_field_offset_in_words_;
1128#endif // defined(DART_PRECOMPILER)
1129 } else {
1130 d.Read<int32_t>(); // Skip.
1131 d.Read<int32_t>(); // Skip.
1132 }
1133 cls->untag()->host_type_arguments_field_offset_in_words_ =
1134 d.Read<int32_t>();
1135#if defined(DART_PRECOMPILER)
1136 cls->untag()->target_type_arguments_field_offset_in_words_ =
1137 cls->untag()->host_type_arguments_field_offset_in_words_;
1138#endif // defined(DART_PRECOMPILER)
1139 cls->untag()->num_type_arguments_ = d.Read<int16_t>();
1140 cls->untag()->num_native_fields_ = d.Read<uint16_t>();
1141#if !defined(DART_PRECOMPILED_RUNTIME)
1142 ASSERT(d_->kind() != Snapshot::kFullAOT);
1143 cls->untag()->token_pos_ = d.ReadTokenPosition();
1144 cls->untag()->end_token_pos_ = d.ReadTokenPosition();
1145 cls->untag()->implementor_cid_ = d.ReadCid();
1146#endif // !defined(DART_PRECOMPILED_RUNTIME)
1147 cls->untag()->state_bits_ = d.Read<uint32_t>();
1148 d.ReadUnsigned64(); // Skip unboxed fields bitmap.
1149 }
1150
1151 ClassTable* table = d_->isolate_group()->class_table();
1152 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1153 ClassPtr cls = static_cast<ClassPtr>(d.Ref(id));
1154 Deserializer::InitializeHeader(cls, kClassCid, Class::InstanceSize());
1155 d.ReadFromTo(cls);
1156
1157 intptr_t class_id = d.ReadCid();
1158 ASSERT(class_id >= kNumPredefinedCids);
1159 cls->untag()->id_ = class_id;
1160
1161#if !defined(DART_PRECOMPILED_RUNTIME)
1162 ASSERT(d_->kind() != Snapshot::kFullAOT);
1163 cls->untag()->kernel_offset_ = d.Read<uint32_t>();
1164#endif
1165 cls->untag()->host_instance_size_in_words_ = d.Read<int32_t>();
1166 cls->untag()->host_next_field_offset_in_words_ = d.Read<int32_t>();
1167 cls->untag()->host_type_arguments_field_offset_in_words_ =
1168 d.Read<int32_t>();
1169#if defined(DART_PRECOMPILER)
1170 cls->untag()->target_instance_size_in_words_ =
1171 cls->untag()->host_instance_size_in_words_;
1172 cls->untag()->target_next_field_offset_in_words_ =
1173 cls->untag()->host_next_field_offset_in_words_;
1174 cls->untag()->target_type_arguments_field_offset_in_words_ =
1175 cls->untag()->host_type_arguments_field_offset_in_words_;
1176#endif // defined(DART_PRECOMPILER)
1177 cls->untag()->num_type_arguments_ = d.Read<int16_t>();
1178 cls->untag()->num_native_fields_ = d.Read<uint16_t>();
1179#if !defined(DART_PRECOMPILED_RUNTIME)
1180 ASSERT(d_->kind() != Snapshot::kFullAOT);
1181 cls->untag()->token_pos_ = d.ReadTokenPosition();
1182 cls->untag()->end_token_pos_ = d.ReadTokenPosition();
1183 cls->untag()->implementor_cid_ = d.ReadCid();
1184#endif // !defined(DART_PRECOMPILED_RUNTIME)
1185 cls->untag()->state_bits_ = d.Read<uint32_t>();
1186
1187 table->AllocateIndex(class_id);
1188 table->SetAt(class_id, cls);
1189
1190 if (!ClassTable::IsTopLevelCid(class_id)) {
1191 const UnboxedFieldBitmap unboxed_fields_map(d.ReadUnsigned64());
1192 table->SetUnboxedFieldsMapAt(class_id, unboxed_fields_map);
1193 }
1194 }
1195 }
1196
1197 private:
1198 intptr_t predefined_start_index_;
1199 intptr_t predefined_stop_index_;
1200};
1201
1202// Super classes for writing out clusters which contain objects grouped into
1203// a canonical set (e.g. String, Type, TypeArguments, etc).
1204// To save space in the snapshot we avoid writing such canonical sets
1205// explicitly as Array objects into the snapshot and instead utilize a different
1206// encoding: objects in a cluster representing a canonical set are sorted
1207// to appear in the same order they appear in the Array representing the set,
1208 // and we additionally write out an array of values describing gaps between
1209// objects.
1210//
1211 // In some situations not all canonical objects of a given type need to
1212// be added to the resulting canonical set because they are cached in some
1213// special way (see Type::Canonicalize as an example, which caches declaration
1214 // types in a special way). In this case a subclass can set
1215// kAllCanonicalObjectsAreIncludedIntoSet to |false| and override
1216// IsInCanonicalSet filter.
1217#if !defined(DART_PRECOMPILED_RUNTIME)
1218template <typename SetType,
1219 typename HandleType,
1220 typename PointerType,
1221 bool kAllCanonicalObjectsAreIncludedIntoSet = true>
1222class CanonicalSetSerializationCluster : public SerializationCluster {
1223 protected:
1224 CanonicalSetSerializationCluster(intptr_t cid,
1225 bool is_canonical,
1226 bool represents_canonical_set,
1227 const char* name,
1228 intptr_t target_instance_size = 0)
1229 : SerializationCluster(name, cid, target_instance_size, is_canonical),
1230 represents_canonical_set_(represents_canonical_set) {}
1231
1232 virtual bool IsInCanonicalSet(Serializer* s, PointerType ptr) {
1233 // Must override this function if kAllCanonicalObjectsAreIncludedIntoSet
1234 // is set to |false|.
1235 ASSERT(kAllCanonicalObjectsAreIncludedIntoSet);
1236 return true;
1237 }
1238
1239 void ReorderObjects(Serializer* s) {
1240 if (!represents_canonical_set_) {
1241 return;
1242 }
1243
1244 // Sort objects before writing them out so that they appear in the same
1245 // order as they would appear in a CanonicalStringSet.
1246 using ZoneCanonicalSet =
1247 HashTable<typename SetType::Traits, 0, 0, GrowableArrayStorageTraits>;
1248
1249 // Compute required capacity for the hashtable (to avoid overallocating).
1250 intptr_t required_capacity = 0;
1251 for (auto ptr : objects_) {
1252 if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) {
1253 required_capacity++;
1254 }
1255 }
1256 // Over-allocate capacity so a few inserts can happen at startup without
1257 // causing a rehash.
1258 const intptr_t kSpareCapacity = 32;
1259 required_capacity = static_cast<intptr_t>(
1260 static_cast<double>(required_capacity + kSpareCapacity) /
1261 HashTables::kMaxLoadFactor);
1262
1263 intptr_t num_occupied = 0;
1264
1265 // Build canonical set out of objects that should belong to it.
1266 // Objects that don't belong to it are copied to the prefix of objects_.
1267 ZoneCanonicalSet table(
1268 s->zone(), HashTables::New<ZoneCanonicalSet>(required_capacity));
1269 HandleType& element = HandleType::Handle(s->zone());
1270 for (auto ptr : objects_) {
1271 if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) {
1272 element ^= ptr;
1273 intptr_t entry = -1;
1274 const bool present = table.FindKeyOrDeletedOrUnused(element, &entry);
1275 ASSERT(!present);
1276 table.InsertKey(entry, element);
1277 } else {
1278 objects_[num_occupied++] = ptr;
1279 }
1280 }
1281
1282 const auto prefix_length = num_occupied;
1283
1284 // Compute objects_ order and gaps based on canonical set layout.
1285 auto& arr = table.Release();
1286 intptr_t last_occupied = ZoneCanonicalSet::kFirstKeyIndex - 1;
1287 for (intptr_t i = ZoneCanonicalSet::kFirstKeyIndex, length = arr.Length();
1288 i < length; i++) {
1289 ObjectPtr v = arr.At(i);
1290 ASSERT(v != ZoneCanonicalSet::DeletedMarker().ptr());
1291 if (v != ZoneCanonicalSet::UnusedMarker().ptr()) {
1292 const intptr_t unused_run_length = (i - 1) - last_occupied;
1293 gaps_.Add(unused_run_length);
1294 objects_[num_occupied++] = static_cast<PointerType>(v);
1295 last_occupied = i;
1296 }
1297 }
1298 ASSERT(num_occupied == objects_.length());
1299 ASSERT(prefix_length == (objects_.length() - gaps_.length()));
1300 table_length_ = arr.Length();
1301 }
1302
1303 void WriteCanonicalSetLayout(Serializer* s) {
1304 if (represents_canonical_set_) {
1305 s->WriteUnsigned(table_length_);
1306 s->WriteUnsigned(objects_.length() - gaps_.length());
1307 for (auto gap : gaps_) {
1308 s->WriteUnsigned(gap);
1309 }
1310 target_memory_size_ +=
1311 compiler::target::Array::InstanceSize(table_length_);
1312 }
1313 }
1314
1315 GrowableArray<PointerType> objects_;
1316
1317 private:
1318 const bool represents_canonical_set_;
1319 GrowableArray<intptr_t> gaps_;
1320 intptr_t table_length_ = 0;
1321};
1322#endif
1323
1324template <typename SetType, bool kAllCanonicalObjectsAreIncludedIntoSet = true>
1325class CanonicalSetDeserializationCluster : public DeserializationCluster {
1326 public:
1327 CanonicalSetDeserializationCluster(bool is_canonical,
1328 bool is_root_unit,
1329 const char* name)
1330 : DeserializationCluster(name, is_canonical),
1331 is_root_unit_(is_root_unit),
1332 table_(SetType::ArrayHandle::Handle()) {}
1333
1334 void BuildCanonicalSetFromLayout(Deserializer* d) {
1335 if (!is_root_unit_ || !is_canonical()) {
1336 return;
1337 }
1338
1339 const auto table_length = d->ReadUnsigned();
1340 first_element_ = d->ReadUnsigned();
1341 const intptr_t count = stop_index_ - (start_index_ + first_element_);
1342 auto table = StartDeserialization(d, table_length, count);
1343 for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) {
1344 table.FillGap(d->ReadUnsigned());
1345 table.WriteElement(d, d->Ref(i));
1346 }
1347 table_ = table.Finish();
1348 }
1349
1350 protected:
1351 const bool is_root_unit_;
1352 intptr_t first_element_;
1353 typename SetType::ArrayHandle& table_;
1354
1355 void VerifyCanonicalSet(Deserializer* d,
1356 const Array& refs,
1357 const typename SetType::ArrayHandle& current_table) {
1358#if defined(DEBUG)
1359 // First check that we are not overwriting a table and losing information.
1360 if (!current_table.IsNull()) {
1361 SetType current_set(d->zone(), current_table.ptr());
1362 ASSERT(current_set.NumOccupied() == 0);
1363 current_set.Release();
1364 }
1365
1366 // Now check that the manually created table behaves correctly as a canonical
1367 // set.
1368 SetType canonical_set(d->zone(), table_.ptr());
1369 Object& key = Object::Handle();
1370 for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) {
1371 key = refs.At(i);
1372 ASSERT(canonical_set.GetOrNull(key) != Object::null());
1373 }
1374 canonical_set.Release();
1375#endif // defined(DEBUG)
1376 }
1377
1378 private:
1379 struct DeserializationFinger {
1380 typename SetType::ArrayPtr table;
1381 intptr_t current_index;
1382 ObjectPtr gap_element;
1383
1384 void FillGap(int length) {
1385 for (intptr_t j = 0; j < length; j++) {
1386 table->untag()->data()[current_index + j] = gap_element;
1387 }
1388 current_index += length;
1389 }
1390
1391 void WriteElement(Deserializer* d, ObjectPtr object) {
1392 table->untag()->data()[current_index++] = object;
1393 }
1394
1395 typename SetType::ArrayPtr Finish() {
1396 if (table != SetType::ArrayHandle::null()) {
1397 FillGap(Smi::Value(table->untag()->length()) - current_index);
1398 }
1399 auto result = table;
1400 table = SetType::ArrayHandle::null();
1401 return result;
1402 }
1403 };
1404
1405 static DeserializationFinger StartDeserialization(Deserializer* d,
1406 intptr_t length,
1407 intptr_t count) {
1408 const intptr_t instance_size = SetType::ArrayHandle::InstanceSize(length);
1409 typename SetType::ArrayPtr table =
1410 static_cast<typename SetType::ArrayPtr>(d->Allocate(instance_size));
1411 Deserializer::InitializeHeader(table, SetType::Storage::ArrayCid,
1412 instance_size);
1413 if ((SetType::Storage::ArrayCid == kArrayCid) &&
1414 Array::UseCardMarkingForAllocation(length)) {
1415 table->untag()->SetCardRememberedBitUnsynchronized();
1416 }
1417 InitTypeArgsOrNext(table);
1418 table->untag()->length_ = Smi::New(length);
1419 for (intptr_t i = 0; i < SetType::kFirstKeyIndex; i++) {
1420 table->untag()->data()[i] = Smi::New(0);
1421 }
1422 table->untag()->data()[SetType::kOccupiedEntriesIndex] = Smi::New(count);
1423 return {table, SetType::kFirstKeyIndex, SetType::UnusedMarker().ptr()};
1424 }
1425
1426 static void InitTypeArgsOrNext(ArrayPtr table) {
1427 table->untag()->type_arguments_ = TypeArguments::null();
1428 }
1429 static void InitTypeArgsOrNext(WeakArrayPtr table) {
1430 table->untag()->next_seen_by_gc_ = WeakArray::null();
1431 }
1432};
1433
1434#if !defined(DART_PRECOMPILED_RUNTIME)
1435class TypeParametersSerializationCluster : public SerializationCluster {
1436 public:
1437 TypeParametersSerializationCluster()
1438 : SerializationCluster("TypeParameters",
1439 kTypeParametersCid,
1440 compiler::target::TypeParameters::InstanceSize()) {
1441 }
1442 ~TypeParametersSerializationCluster() {}
1443
1444 void Trace(Serializer* s, ObjectPtr object) {
1445 TypeParametersPtr type_params = TypeParameters::RawCast(object);
1446 objects_.Add(type_params);
1447 PushFromTo(type_params);
1448 }
1449
1450 void WriteAlloc(Serializer* s) {
1451 const intptr_t count = objects_.length();
1452 s->WriteUnsigned(count);
1453 for (intptr_t i = 0; i < count; i++) {
1454 TypeParametersPtr type_params = objects_[i];
1455 s->AssignRef(type_params);
1456 }
1457 }
1458
1459 void WriteFill(Serializer* s) {
1460 const intptr_t count = objects_.length();
1461 for (intptr_t i = 0; i < count; i++) {
1462 TypeParametersPtr type_params = objects_[i];
1463 AutoTraceObject(type_params);
1464 WriteFromTo(type_params);
1465 }
1466 }
1467
1468 private:
1469 GrowableArray<TypeParametersPtr> objects_;
1470};
1471#endif // !DART_PRECOMPILED_RUNTIME
1472
1473class TypeParametersDeserializationCluster : public DeserializationCluster {
1474 public:
1475 TypeParametersDeserializationCluster()
1476 : DeserializationCluster("TypeParameters") {}
1477 ~TypeParametersDeserializationCluster() {}
1478
1479 void ReadAlloc(Deserializer* d) override {
1480 ReadAllocFixedSize(d, TypeParameters::InstanceSize());
1481 }
1482
1483 void ReadFill(Deserializer* d_) override {
1484 Deserializer::Local d(d_);
1485
1486 ASSERT(!is_canonical()); // Never canonical.
1487 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1488 TypeParametersPtr type_params = static_cast<TypeParametersPtr>(d.Ref(id));
1489 Deserializer::InitializeHeader(type_params, kTypeParametersCid,
1490 TypeParameters::InstanceSize());
1491 d.ReadFromTo(type_params);
1492 }
1493 }
1494};
1495
1496#if !defined(DART_PRECOMPILED_RUNTIME)
1497class TypeArgumentsSerializationCluster
1498 : public CanonicalSetSerializationCluster<CanonicalTypeArgumentsSet,
1499 TypeArguments,
1500 TypeArgumentsPtr> {
1501 public:
1502 TypeArgumentsSerializationCluster(bool is_canonical,
1503 bool represents_canonical_set)
1504 : CanonicalSetSerializationCluster(kTypeArgumentsCid,
1505 is_canonical,
1506 represents_canonical_set,
1507 "TypeArguments") {}
1508 ~TypeArgumentsSerializationCluster() {}
1509
1510 void Trace(Serializer* s, ObjectPtr object) {
1511 TypeArgumentsPtr type_args = TypeArguments::RawCast(object);
1512 objects_.Add(type_args);
1513
1514 s->Push(type_args->untag()->instantiations());
1515 const intptr_t length = Smi::Value(type_args->untag()->length());
1516 for (intptr_t i = 0; i < length; i++) {
1517 s->Push(type_args->untag()->element(i));
1518 }
1519 }
1520
1521 void WriteAlloc(Serializer* s) {
1522 const intptr_t count = objects_.length();
1523 s->WriteUnsigned(count);
1524 ReorderObjects(s);
1525 for (intptr_t i = 0; i < count; i++) {
1526 TypeArgumentsPtr type_args = objects_[i];
1527 s->AssignRef(type_args);
1528 AutoTraceObject(type_args);
1529 const intptr_t length = Smi::Value(type_args->untag()->length());
1530 s->WriteUnsigned(length);
1531 target_memory_size_ +=
1532 compiler::target::TypeArguments::InstanceSize(length);
1533 }
1534 WriteCanonicalSetLayout(s);
1535 }
1536
1537 void WriteFill(Serializer* s) {
1538 const intptr_t count = objects_.length();
1539 for (intptr_t i = 0; i < count; i++) {
1540 TypeArgumentsPtr type_args = objects_[i];
1541 AutoTraceObject(type_args);
1542 const intptr_t length = Smi::Value(type_args->untag()->length());
1543 s->WriteUnsigned(length);
1544 intptr_t hash = Smi::Value(type_args->untag()->hash());
1545 s->Write<int32_t>(hash);
1546 const intptr_t nullability =
1547 Smi::Value(type_args->untag()->nullability());
1548 s->WriteUnsigned(nullability);
1549 WriteField(type_args, instantiations());
1550 for (intptr_t j = 0; j < length; j++) {
1551 s->WriteElementRef(type_args->untag()->element(j), j);
1552 }
1553 }
1554 }
1555};
1556#endif // !DART_PRECOMPILED_RUNTIME
1557
1558class TypeArgumentsDeserializationCluster
1559 : public CanonicalSetDeserializationCluster<CanonicalTypeArgumentsSet> {
1560 public:
1561 explicit TypeArgumentsDeserializationCluster(bool is_canonical,
1562 bool is_root_unit)
1563 : CanonicalSetDeserializationCluster(is_canonical,
1564 is_root_unit,
1565 "TypeArguments") {}
1566 ~TypeArgumentsDeserializationCluster() {}
1567
1568 void ReadAlloc(Deserializer* d) override {
1569 start_index_ = d->next_index();
1570 const intptr_t count = d->ReadUnsigned();
1571 for (intptr_t i = 0; i < count; i++) {
1572 const intptr_t length = d->ReadUnsigned();
1573 d->AssignRef(d->Allocate(TypeArguments::InstanceSize(length)));
1574 }
1575 stop_index_ = d->next_index();
1576 BuildCanonicalSetFromLayout(d);
1577 }
1578
1579 void ReadFill(Deserializer* d_) override {
1580 Deserializer::Local d(d_);
1581
1582 const bool mark_canonical = is_root_unit_ && is_canonical();
1583 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1584 TypeArgumentsPtr type_args = static_cast<TypeArgumentsPtr>(d.Ref(id));
1585 const intptr_t length = d.ReadUnsigned();
1586 Deserializer::InitializeHeader(type_args, kTypeArgumentsCid,
1587 TypeArguments::InstanceSize(length),
1588 mark_canonical);
1589 type_args->untag()->length_ = Smi::New(length);
1590 type_args->untag()->hash_ = Smi::New(d.Read<int32_t>());
1591 type_args->untag()->nullability_ = Smi::New(d.ReadUnsigned());
1592 type_args->untag()->instantiations_ = static_cast<ArrayPtr>(d.ReadRef());
1593 for (intptr_t j = 0; j < length; j++) {
1594 type_args->untag()->types()[j] =
1595 static_cast<AbstractTypePtr>(d.ReadRef());
1596 }
1597 }
1598 }
1599
1600 void PostLoad(Deserializer* d, const Array& refs) override {
1601 if (!table_.IsNull()) {
1602 auto object_store = d->isolate_group()->object_store();
1603 VerifyCanonicalSet(
1604 d, refs, Array::Handle(object_store->canonical_type_arguments()));
1605 object_store->set_canonical_type_arguments(table_);
1606 } else if (!is_root_unit_ && is_canonical()) {
1607 TypeArguments& type_arg = TypeArguments::Handle(d->zone());
1608 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1609 type_arg ^= refs.At(i);
1610 type_arg = type_arg.Canonicalize(d->thread());
1611 refs.SetAt(i, type_arg);
1612 }
1613 }
1614 }
1615};
1616
1617#if !defined(DART_PRECOMPILED_RUNTIME)
1618class PatchClassSerializationCluster : public SerializationCluster {
1619 public:
1620 PatchClassSerializationCluster()
1621 : SerializationCluster("PatchClass",
1622 kPatchClassCid,
1623 compiler::target::PatchClass::InstanceSize()) {}
1624 ~PatchClassSerializationCluster() {}
1625
1626 void Trace(Serializer* s, ObjectPtr object) {
1627 PatchClassPtr cls = PatchClass::RawCast(object);
1628 objects_.Add(cls);
1629 PushFromTo(cls);
1630 }
1631
1632 void WriteAlloc(Serializer* s) {
1633 const intptr_t count = objects_.length();
1634 s->WriteUnsigned(count);
1635 for (intptr_t i = 0; i < count; i++) {
1636 PatchClassPtr cls = objects_[i];
1637 s->AssignRef(cls);
1638 }
1639 }
1640
1641 void WriteFill(Serializer* s) {
1642 const intptr_t count = objects_.length();
1643 for (intptr_t i = 0; i < count; i++) {
1644 PatchClassPtr cls = objects_[i];
1645 AutoTraceObject(cls);
1646 WriteFromTo(cls);
1647 if (s->kind() != Snapshot::kFullAOT) {
1648 s->Write<int32_t>(cls->untag()->kernel_library_index_);
1649 }
1650 }
1651 }
1652
1653 private:
1654 GrowableArray<PatchClassPtr> objects_;
1655};
1656#endif // !DART_PRECOMPILED_RUNTIME
1657
1658class PatchClassDeserializationCluster : public DeserializationCluster {
1659 public:
1660 PatchClassDeserializationCluster() : DeserializationCluster("PatchClass") {}
1661 ~PatchClassDeserializationCluster() {}
1662
1663 void ReadAlloc(Deserializer* d) override {
1664 ReadAllocFixedSize(d, PatchClass::InstanceSize());
1665 }
1666
1667 void ReadFill(Deserializer* d_) override {
1668 Deserializer::Local d(d_);
1669
1670 ASSERT(!is_canonical()); // Never canonical.
1671 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1672 PatchClassPtr cls = static_cast<PatchClassPtr>(d.Ref(id));
1673 Deserializer::InitializeHeader(cls, kPatchClassCid,
1674 PatchClass::InstanceSize());
1675 d.ReadFromTo(cls);
1676#if !defined(DART_PRECOMPILED_RUNTIME)
1677 ASSERT(d_->kind() != Snapshot::kFullAOT);
1678 cls->untag()->kernel_library_index_ = d.Read<int32_t>();
1679#endif
1680 }
1681 }
1682};
1683
1684#if !defined(DART_PRECOMPILED_RUNTIME)
1685class FunctionSerializationCluster : public SerializationCluster {
1686 public:
1687 FunctionSerializationCluster()
1688 : SerializationCluster("Function",
1689 kFunctionCid,
1690 compiler::target::Function::InstanceSize()) {}
1691 ~FunctionSerializationCluster() {}
1692
1693 void Trace(Serializer* s, ObjectPtr object) {
1694 Snapshot::Kind kind = s->kind();
1695 FunctionPtr func = Function::RawCast(object);
1696 objects_.Add(func);
1697
1698 PushFromTo(func);
1699 if (kind == Snapshot::kFullAOT) {
1700 s->Push(func->untag()->code());
1701 } else if (kind == Snapshot::kFullJIT) {
1702 NOT_IN_PRECOMPILED(s->Push(func->untag()->unoptimized_code()));
1703 s->Push(func->untag()->code());
1704 s->Push(func->untag()->ic_data_array());
1705 }
1706 if (kind != Snapshot::kFullAOT) {
1707 NOT_IN_PRECOMPILED(s->Push(func->untag()->positional_parameter_names()));
1708 }
1709 }
1710
1711 void WriteAlloc(Serializer* s) {
1712 const intptr_t count = objects_.length();
1713 s->WriteUnsigned(count);
1714 for (intptr_t i = 0; i < count; i++) {
1715 FunctionPtr func = objects_[i];
1716 s->AssignRef(func);
1717 }
1718 }
1719
1720 void WriteFill(Serializer* s) {
1721 Snapshot::Kind kind = s->kind();
1722 const intptr_t count = objects_.length();
1723 for (intptr_t i = 0; i < count; i++) {
1724 FunctionPtr func = objects_[i];
1725 AutoTraceObjectName(func, MakeDisambiguatedFunctionName(s, func));
1726 WriteFromTo(func);
1727 if (kind == Snapshot::kFullAOT) {
1728#if defined(DART_PRECOMPILER)
1729 CodePtr code = func->untag()->code();
1730 const auto code_index = s->GetCodeIndex(code);
1731 s->WriteUnsigned(code_index);
1732 s->AttributePropertyRef(code, "code_");
1733#else
1734 UNREACHABLE();
1735#endif
1736 } else if (s->kind() == Snapshot::kFullJIT) {
1737 NOT_IN_PRECOMPILED(WriteCompressedField(func, unoptimized_code));
1738 WriteCompressedField(func, code);
1739 WriteCompressedField(func, ic_data_array);
1740 }
1741
1742 if (kind != Snapshot::kFullAOT) {
1743 NOT_IN_PRECOMPILED(
1744 WriteCompressedField(func, positional_parameter_names));
1745 }
1746
1747#if defined(DART_PRECOMPILER) && !defined(PRODUCT)
1748 TokenPosition token_pos = func->untag()->token_pos_;
1749 if (kind == Snapshot::kFullAOT) {
1750 // We use the token_pos property to store the line number
1751 // in AOT snapshots.
1752 intptr_t line = -1;
1753 const Function& function = Function::Handle(func);
1754 const Script& script = Script::Handle(function.script());
1755 if (!script.IsNull()) {
1756 script.GetTokenLocation(token_pos, &line, nullptr);
1757 }
1758 token_pos = line == -1 ? TokenPosition::kNoSource
1759 : TokenPosition::Deserialize(line);
1760 }
1761 s->WriteTokenPosition(token_pos);
1762#else
1763 if (kind != Snapshot::kFullAOT) {
1764 s->WriteTokenPosition(func->untag()->token_pos_);
1765 }
1766#endif
1767 if (kind != Snapshot::kFullAOT) {
1768 s->WriteTokenPosition(func->untag()->end_token_pos_);
1769 s->Write<uint32_t>(func->untag()->kernel_offset_);
1770 s->Write<uint32_t>(func->untag()->packed_fields_);
1771 }
1772 s->Write<uint32_t>(func->untag()->kind_tag_);
1773 }
1774 }
1775
1776 static const char* MakeDisambiguatedFunctionName(Serializer* s,
1777 FunctionPtr f) {
1778 if (s->profile_writer() == nullptr) {
1779 return nullptr;
1780 }
1781
1782 REUSABLE_FUNCTION_HANDLESCOPE(s->thread());
1783 Function& fun = reused_function_handle.Handle();
1784 fun = f;
1785 ZoneTextBuffer printer(s->thread()->zone());
1786 fun.PrintName(NameFormattingParams::DisambiguatedUnqualified(
1787 Object::NameVisibility::kInternalName),
1788 &printer);
1789 return printer.buffer();
1790 }
1791
1792 private:
1793 GrowableArray<FunctionPtr> objects_;
1794};
1795#endif // !DART_PRECOMPILED_RUNTIME
1796
1797template <bool need_entry_point_for_non_discarded>
1798DART_FORCE_INLINE static CodePtr GetCodeAndEntryPointByIndex(
1799 const Deserializer* d,
1800 intptr_t code_index,
1801 uword* entry_point) {
1802 code_index -= 1; // 0 is reserved for LazyCompile stub.
1803
1804 // In root unit and VM isolate snapshots, code_indices are self-contained:
1805 // they point into the instruction table and/or into the code cluster.
1806 // In non-root units we might also refer to code objects from the
1807 // parent unit, which means code_index is biased by num_base_objects_.
1808 const intptr_t base = d->is_non_root_unit() ? d->num_base_objects() : 0;
1809 if (code_index < base) {
1810 CodePtr code = static_cast<CodePtr>(d->Ref(code_index));
1811 if (need_entry_point_for_non_discarded) {
1812 *entry_point = Code::EntryPointOf(code);
1813 }
1814 return code;
1815 }
1816 code_index -= base;
1817
1818 // At this point code_index is referring to a code object which is either
1819 // discarded or exists in the Code cluster. Non-discarded Code objects
1820 // are associated with the tail of the instruction table and have the
1821 // same order there and in the Code cluster. This means that
1822 // subtracting first_entry_with_code yields index into the Code cluster.
1823 // This also works for deferred code objects in root unit's snapshot
1824 // due to the choice of encoding (see Serializer::GetCodeIndex).
1825 const intptr_t first_entry_with_code =
1826 d->instructions_table().rodata()->first_entry_with_code;
1827 if (code_index < first_entry_with_code) {
1828 *entry_point = d->instructions_table().EntryPointAt(code_index);
1829 return StubCode::UnknownDartCode().ptr();
1830 } else {
1831 const intptr_t cluster_index = code_index - first_entry_with_code;
1832 CodePtr code =
1833 static_cast<CodePtr>(d->Ref(d->code_start_index() + cluster_index));
1834 if (need_entry_point_for_non_discarded) {
1835 *entry_point = Code::EntryPointOf(code);
1836 }
1837 return code;
1838 }
1839}
1840
1841CodePtr Deserializer::GetCodeByIndex(intptr_t code_index,
1842 uword* entry_point) const {
1843 // See Serializer::GetCodeIndex for how code_index is encoded.
1844 if (code_index == 0) {
1845 return StubCode::LazyCompile().ptr();
1846 } else if (FLAG_precompiled_mode) {
1847 return GetCodeAndEntryPointByIndex<
1848 /*need_entry_point_for_non_discarded=*/false>(this, code_index,
1849 entry_point);
1850 } else {
1851 // -1 below because 0 is reserved for LazyCompile stub.
1852 const intptr_t ref = code_start_index_ + code_index - 1;
1853 ASSERT(code_start_index_ <= ref && ref < code_stop_index_);
1854 return static_cast<CodePtr>(Ref(ref));
1855 }
1856}
1857
1858intptr_t Deserializer::CodeIndexToClusterIndex(const InstructionsTable& table,
1859 intptr_t code_index) {
1860 // Note: code indices we are interpreting here originate from the root
1861 // loading unit which means base is equal to 0.
1862 // See comments which clarify the connection between code_index and
1863 // index into the Code cluster.
1864 ASSERT(FLAG_precompiled_mode);
1865 const intptr_t first_entry_with_code = table.rodata()->first_entry_with_code;
1866 return code_index - 1 - first_entry_with_code;
1867}
1868
1869uword Deserializer::GetEntryPointByCodeIndex(intptr_t code_index) const {
1870 // See Deserializer::GetCodeByIndex which this code repeats.
1871 ASSERT(FLAG_precompiled_mode);
1872 uword entry_point = 0;
1873 GetCodeAndEntryPointByIndex</*need_entry_point_for_non_discarded=*/true>(
1874 this, code_index, &entry_point);
1875 return entry_point;
1876}
1877
1878class FunctionDeserializationCluster : public DeserializationCluster {
1879 public:
1880 FunctionDeserializationCluster() : DeserializationCluster("Function") {}
1881 ~FunctionDeserializationCluster() {}
1882
1883 void ReadAlloc(Deserializer* d) override {
1884 ReadAllocFixedSize(d, Function::InstanceSize());
1885 }
1886
1887 void ReadFill(Deserializer* d_) override {
1888 Deserializer::Local d(d_);
1889
1890 ASSERT(!is_canonical()); // Never canonical.
1891 Snapshot::Kind kind = d_->kind();
1892
1893 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
1894 FunctionPtr func = static_cast<FunctionPtr>(d.Ref(id));
1895 Deserializer::InitializeHeader(func, kFunctionCid,
1896 Function::InstanceSize());
1897 d.ReadFromTo(func);
1898
1899#if defined(DEBUG)
1900 func->untag()->entry_point_ = 0;
1901 func->untag()->unchecked_entry_point_ = 0;
1902#endif
1903
1904#if defined(DART_PRECOMPILED_RUNTIME)
1905 ASSERT(kind == Snapshot::kFullAOT);
1906 const intptr_t code_index = d.ReadUnsigned();
1907 uword entry_point = 0;
1908 CodePtr code = d_->GetCodeByIndex(code_index, &entry_point);
1909 func->untag()->code_ = code;
1910 if (entry_point != 0) {
1911 func->untag()->entry_point_ = entry_point;
1912 func->untag()->unchecked_entry_point_ = entry_point;
1913 }
1914#else
1915 ASSERT(kind != Snapshot::kFullAOT);
1916 if (kind == Snapshot::kFullJIT) {
1917 func->untag()->unoptimized_code_ = static_cast<CodePtr>(d.ReadRef());
1918 func->untag()->code_ = static_cast<CodePtr>(d.ReadRef());
1919 func->untag()->ic_data_array_ = static_cast<ArrayPtr>(d.ReadRef());
1920 }
1921#endif
1922
1923#if !defined(DART_PRECOMPILED_RUNTIME)
1924 ASSERT(kind != Snapshot::kFullAOT);
1925 func->untag()->positional_parameter_names_ =
1926 static_cast<ArrayPtr>(d.ReadRef());
1927#endif
1928#if !defined(DART_PRECOMPILED_RUNTIME) || \
1929 (defined(DART_PRECOMPILED_RUNTIME) && !defined(PRODUCT))
1930 func->untag()->token_pos_ = d.ReadTokenPosition();
1931#endif
1932#if !defined(DART_PRECOMPILED_RUNTIME)
1933 func->untag()->end_token_pos_ = d.ReadTokenPosition();
1934 func->untag()->kernel_offset_ = d.Read<uint32_t>();
1935 func->untag()->unboxed_parameters_info_.Reset();
1936 func->untag()->packed_fields_ = d.Read<uint32_t>();
1937#endif
1938
1939 func->untag()->kind_tag_ = d.Read<uint32_t>();
1940#if !defined(DART_PRECOMPILED_RUNTIME)
1941 func->untag()->usage_counter_ = 0;
1942 func->untag()->optimized_instruction_count_ = 0;
1943 func->untag()->optimized_call_site_count_ = 0;
1944 func->untag()->deoptimization_counter_ = 0;
1945 func->untag()->state_bits_ = 0;
1946 func->untag()->inlining_depth_ = 0;
1947#endif
1948 }
1949 }
1950
1951 void PostLoad(Deserializer* d, const Array& refs) override {
1952 if (d->kind() == Snapshot::kFullAOT) {
1953 Function& func = Function::Handle(d->zone());
1954 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1955 func ^= refs.At(i);
1956 auto const code = func.ptr()->untag()->code();
1957 ASSERT(code->IsCode());
1958 if (!Code::IsUnknownDartCode(code)) {
1959 uword entry_point = code->untag()->entry_point_;
1960 ASSERT(entry_point != 0);
1961 func.ptr()->untag()->entry_point_ = entry_point;
1962 uword unchecked_entry_point = code->untag()->unchecked_entry_point_;
1963 ASSERT(unchecked_entry_point != 0);
1964 func.ptr()->untag()->unchecked_entry_point_ = unchecked_entry_point;
1965 }
1966 }
1967 } else if (d->kind() == Snapshot::kFullJIT) {
1968 Function& func = Function::Handle(d->zone());
1969 Code& code = Code::Handle(d->zone());
1970 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1971 func ^= refs.At(i);
1972 code = func.CurrentCode();
1973 if (func.HasCode() && !code.IsDisabled()) {
1974 func.SetInstructionsSafe(code); // Set entrypoint.
1975 func.SetWasCompiled(true);
1976 } else {
1977 func.ClearCodeSafe(); // Set code and entrypoint to lazy compile stub
1978 }
1979 }
1980 } else {
1981 Function& func = Function::Handle(d->zone());
1982 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
1983 func ^= refs.At(i);
1984 func.ClearCodeSafe(); // Set code and entrypoint to lazy compile stub.
1985 }
1986 }
1987 }
1988};
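// [Editor's note: descriptive summary, not part of app_snapshot.cc.]
// How a Function's executable state is restored by the cluster above, per
// snapshot kind:
//  - kFullAOT:  ReadFill reads a code_index and resolves it to a Code object
//    (and, for discarded code, directly to an entry point); PostLoad then
//    copies entry_point_ / unchecked_entry_point_ from the Code object.
//  - kFullJIT:  ReadFill reads unoptimized_code_, code_ and ic_data_array_;
//    PostLoad either installs the deserialized code (SetInstructionsSafe)
//    or resets the function to the lazy-compile stub (ClearCodeSafe).
//  - other kinds: PostLoad resets every function to the lazy-compile stub.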
1989
1990#if !defined(DART_PRECOMPILED_RUNTIME)
1991class ClosureDataSerializationCluster : public SerializationCluster {
1992 public:
1993 ClosureDataSerializationCluster()
1994 : SerializationCluster("ClosureData",
1995 kClosureDataCid,
1996 compiler::target::ClosureData::InstanceSize()) {}
1997 ~ClosureDataSerializationCluster() {}
1998
1999 void Trace(Serializer* s, ObjectPtr object) {
2000 ClosureDataPtr data = ClosureData::RawCast(object);
2001 objects_.Add(data);
2002
2003 if (s->kind() != Snapshot::kFullAOT) {
2004 s->Push(data->untag()->context_scope());
2005 }
2006 s->Push(data->untag()->parent_function());
2007 s->Push(data->untag()->closure());
2008 }
2009
2010 void WriteAlloc(Serializer* s) {
2011 const intptr_t count = objects_.length();
2012 s->WriteUnsigned(count);
2013 for (intptr_t i = 0; i < count; i++) {
2014 ClosureDataPtr data = objects_[i];
2015 s->AssignRef(data);
2016 }
2017 }
2018
2019 void WriteFill(Serializer* s) {
2020 const intptr_t count = objects_.length();
2021 for (intptr_t i = 0; i < count; i++) {
2022 ClosureDataPtr data = objects_[i];
2023 AutoTraceObject(data);
2024 if (s->kind() != Snapshot::kFullAOT) {
2025 WriteCompressedField(data, context_scope);
2026 }
2027 WriteCompressedField(data, parent_function);
2028 WriteCompressedField(data, closure);
2029 s->WriteUnsigned(static_cast<uint32_t>(data->untag()->packed_fields_));
2030 }
2031 }
2032
2033 private:
2034 GrowableArray<ClosureDataPtr> objects_;
2035};
2036#endif // !DART_PRECOMPILED_RUNTIME
2037
2038class ClosureDataDeserializationCluster : public DeserializationCluster {
2039 public:
2040 ClosureDataDeserializationCluster() : DeserializationCluster("ClosureData") {}
2041 ~ClosureDataDeserializationCluster() {}
2042
2043 void ReadAlloc(Deserializer* d) override {
2044 ReadAllocFixedSize(d, ClosureData::InstanceSize());
2045 }
2046
2047 void ReadFill(Deserializer* d_) override {
2048 Deserializer::Local d(d_);
2049
2050 ASSERT(!is_canonical()); // Never canonical.
2051 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2052 ClosureDataPtr data = static_cast<ClosureDataPtr>(d.Ref(id));
2053 Deserializer::InitializeHeader(data, kClosureDataCid,
2054 ClosureData::InstanceSize());
2055 if (d_->kind() == Snapshot::kFullAOT) {
2056 data->untag()->context_scope_ = ContextScope::null();
2057 } else {
2058 data->untag()->context_scope_ =
2059 static_cast<ContextScopePtr>(d.ReadRef());
2060 }
2061 data->untag()->parent_function_ = static_cast<FunctionPtr>(d.ReadRef());
2062 data->untag()->closure_ = static_cast<ClosurePtr>(d.ReadRef());
2063 data->untag()->packed_fields_ = d.ReadUnsigned<uint32_t>();
2064 }
2065 }
2066};
2067
2068#if !defined(DART_PRECOMPILED_RUNTIME)
2069class FfiTrampolineDataSerializationCluster : public SerializationCluster {
2070 public:
2071 FfiTrampolineDataSerializationCluster()
2072 : SerializationCluster(
2073 "FfiTrampolineData",
2074 kFfiTrampolineDataCid,
2075 compiler::target::FfiTrampolineData::InstanceSize()) {}
2076 ~FfiTrampolineDataSerializationCluster() {}
2077
2078 void Trace(Serializer* s, ObjectPtr object) {
2079 FfiTrampolineDataPtr data = FfiTrampolineData::RawCast(object);
2080 objects_.Add(data);
2081 PushFromTo(data);
2082 }
2083
2084 void WriteAlloc(Serializer* s) {
2085 const intptr_t count = objects_.length();
2086 s->WriteUnsigned(count);
2087 for (intptr_t i = 0; i < count; i++) {
2088 s->AssignRef(objects_[i]);
2089 }
2090 }
2091
2092 void WriteFill(Serializer* s) {
2093 const intptr_t count = objects_.length();
2094 for (intptr_t i = 0; i < count; i++) {
2095 FfiTrampolineDataPtr const data = objects_[i];
2096 AutoTraceObject(data);
2097 WriteFromTo(data);
2098 s->Write<int32_t>(data->untag()->callback_id_);
2099 s->Write<uint8_t>(data->untag()->ffi_function_kind_);
2100 }
2101 }
2102
2103 private:
2104 GrowableArray<FfiTrampolineDataPtr> objects_;
2105};
2106#endif // !DART_PRECOMPILED_RUNTIME
2107
2108class FfiTrampolineDataDeserializationCluster : public DeserializationCluster {
2109 public:
2110 FfiTrampolineDataDeserializationCluster()
2111 : DeserializationCluster("FfiTrampolineData") {}
2112 ~FfiTrampolineDataDeserializationCluster() {}
2113
2114 void ReadAlloc(Deserializer* d) override {
2115 ReadAllocFixedSize(d, FfiTrampolineData::InstanceSize());
2116 }
2117
2118 void ReadFill(Deserializer* d_) override {
2119 Deserializer::Local d(d_);
2120
2121 ASSERT(!is_canonical()); // Never canonical.
2122 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2123 FfiTrampolineDataPtr data = static_cast<FfiTrampolineDataPtr>(d.Ref(id));
2124 Deserializer::InitializeHeader(data, kFfiTrampolineDataCid,
2125 FfiTrampolineData::InstanceSize());
2126 d.ReadFromTo(data);
2127 data->untag()->callback_id_ = d.Read<int32_t>();
2128 data->untag()->ffi_function_kind_ = d.Read<uint8_t>();
2129 }
2130 }
2131};
2132
2133#if !defined(DART_PRECOMPILED_RUNTIME)
2134class FieldSerializationCluster : public SerializationCluster {
2135 public:
2136 FieldSerializationCluster()
2137 : SerializationCluster("Field",
2138 kFieldCid,
2139 compiler::target::Field::InstanceSize()) {}
2140 ~FieldSerializationCluster() {}
2141
2142 void Trace(Serializer* s, ObjectPtr object) {
2143 FieldPtr field = Field::RawCast(object);
2144 objects_.Add(field);
2145
2146 Snapshot::Kind kind = s->kind();
2147
2148 s->Push(field->untag()->name());
2149 s->Push(field->untag()->owner());
2150 s->Push(field->untag()->type());
2151 // Write out the initializer function
2152 s->Push(field->untag()->initializer_function());
2153
2154 if (kind != Snapshot::kFullAOT) {
2155 s->Push(field->untag()->guarded_list_length());
2156 }
2157 if (kind == Snapshot::kFullJIT) {
2158 s->Push(field->untag()->dependent_code());
2159 }
2160 // Write out either the initial static value or field offset.
2161 if (Field::StaticBit::decode(field->untag()->kind_bits_)) {
2162 s->Push(field->untag()->host_offset_or_field_id());
2163 } else {
2164 s->Push(Smi::New(Field::TargetOffsetOf(field)));
2165 }
2166 }
2167
2168 void WriteAlloc(Serializer* s) {
2169 const intptr_t count = objects_.length();
2170 s->WriteUnsigned(count);
2171 for (intptr_t i = 0; i < count; i++) {
2172 FieldPtr field = objects_[i];
2173 s->AssignRef(field);
2174 }
2175 }
2176
2177 void WriteFill(Serializer* s) {
2178 Snapshot::Kind kind = s->kind();
2179 const intptr_t count = objects_.length();
2180 for (intptr_t i = 0; i < count; i++) {
2181 FieldPtr field = objects_[i];
2182 AutoTraceObjectName(field, field->untag()->name());
2183
2184 WriteCompressedField(field, name);
2185 WriteCompressedField(field, owner);
2186 WriteCompressedField(field, type);
2187 // Write out the initializer function and initial value if not in AOT.
2188 WriteCompressedField(field, initializer_function);
2189 if (kind != Snapshot::kFullAOT) {
2190 WriteCompressedField(field, guarded_list_length);
2191 }
2192 if (kind == Snapshot::kFullJIT) {
2193 WriteCompressedField(field, dependent_code);
2194 }
2195
2196 if (kind != Snapshot::kFullAOT) {
2197 s->WriteTokenPosition(field->untag()->token_pos_);
2198 s->WriteTokenPosition(field->untag()->end_token_pos_);
2199 s->WriteCid(field->untag()->guarded_cid_);
2200 s->WriteCid(field->untag()->is_nullable_);
2201 s->Write<int8_t>(field->untag()->static_type_exactness_state_);
2202 s->Write<uint32_t>(field->untag()->kernel_offset_);
2203 }
2204 s->Write<uint16_t>(field->untag()->kind_bits_);
2205
2206 // Write out either the initial static value or field offset.
2207 if (Field::StaticBit::decode(field->untag()->kind_bits_)) {
2208 WriteFieldValue("id", field->untag()->host_offset_or_field_id());
2209 } else {
2210 WriteFieldValue("offset", Smi::New(Field::TargetOffsetOf(field)));
2211 }
2212 }
2213 }
2214
2215 private:
2216 GrowableArray<FieldPtr> objects_;
2217};
2218#endif // !DART_PRECOMPILED_RUNTIME
2219
2220class FieldDeserializationCluster : public DeserializationCluster {
2221 public:
2222 FieldDeserializationCluster() : DeserializationCluster("Field") {}
2223 ~FieldDeserializationCluster() {}
2224
2225 void ReadAlloc(Deserializer* d) override {
2226 ReadAllocFixedSize(d, Field::InstanceSize());
2227 }
2228
2229 void ReadFill(Deserializer* d_) override {
2230 Deserializer::Local d(d_);
2231
2232 ASSERT(!is_canonical()); // Never canonical.
2233#if !defined(DART_PRECOMPILED_RUNTIME)
2234 Snapshot::Kind kind = d_->kind();
2235#endif
2236 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2237 FieldPtr field = static_cast<FieldPtr>(d.Ref(id));
2238 Deserializer::InitializeHeader(field, kFieldCid, Field::InstanceSize());
2239 d.ReadFromTo(field);
2240#if !defined(DART_PRECOMPILED_RUNTIME)
2241 ASSERT(d_->kind() != Snapshot::kFullAOT);
2242 field->untag()->guarded_list_length_ = static_cast<SmiPtr>(d.ReadRef());
2243 if (kind == Snapshot::kFullJIT) {
2244 field->untag()->dependent_code_ =
2245 static_cast<WeakArrayPtr>(d.ReadRef());
2246 }
2247 field->untag()->token_pos_ = d.ReadTokenPosition();
2248 field->untag()->end_token_pos_ = d.ReadTokenPosition();
2249 field->untag()->guarded_cid_ = d.ReadCid();
2250 field->untag()->is_nullable_ = d.ReadCid();
2251 const int8_t static_type_exactness_state = d.Read<int8_t>();
2252#if defined(TARGET_ARCH_X64)
2253 field->untag()->static_type_exactness_state_ =
2254 static_type_exactness_state;
2255#else
2256 // We might produce core snapshots using an X64 VM and then consume
2257 // them in an IA32 or ARM VM, in which case we simply ignore the static
2258 // type exactness state written into the snapshot, because non-X64
2259 // builds don't have this feature enabled.
2260 // TODO(dartbug.com/34170) Support other architectures.
2261 USE(static_type_exactness_state);
2262 field->untag()->static_type_exactness_state_ =
2263 StaticTypeExactnessState::NotTracking().Encode();
2264#endif // defined(TARGET_ARCH_X64)
2265 field->untag()->kernel_offset_ = d.Read<uint32_t>();
2266#endif
2267 field->untag()->kind_bits_ = d.Read<uint16_t>();
2268
2269 field->untag()->host_offset_or_field_id_ =
2270 static_cast<SmiPtr>(d.ReadRef());
2271#if !defined(DART_PRECOMPILED_RUNTIME)
2272 field->untag()->target_offset_ =
2273 Smi::Value(field->untag()->host_offset_or_field_id());
2274#endif // !defined(DART_PRECOMPILED_RUNTIME)
2275 }
2276 }
2277
2278 void PostLoad(Deserializer* d, const Array& refs) override {
2279 Field& field = Field::Handle(d->zone());
2280 if (!IsolateGroup::Current()->use_field_guards()) {
2281 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
2282 field ^= refs.At(i);
2283 field.set_guarded_cid_unsafe(kDynamicCid);
2284 field.set_is_nullable_unsafe(true);
2285 field.set_guarded_list_length_unsafe(Field::kNoFixedLength);
2286 field.set_guarded_list_length_in_object_offset_unsafe(
2287 Field::kUnknownLengthOffset);
2288 field.set_static_type_exactness_state_unsafe(
2289 StaticTypeExactnessState::NotTracking());
2290 }
2291 } else {
2292 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
2293 field ^= refs.At(i);
2294 field.InitializeGuardedListLengthInObjectOffset(/*unsafe=*/true);
2295 }
2296 }
2297 }
2298};
2299
2300#if !defined(DART_PRECOMPILED_RUNTIME)
2301class ScriptSerializationCluster : public SerializationCluster {
2302 public:
2303 ScriptSerializationCluster()
2304 : SerializationCluster("Script",
2305 kScriptCid,
2306 compiler::target::Script::InstanceSize()) {}
2307 ~ScriptSerializationCluster() {}
2308
2309 void Trace(Serializer* s, ObjectPtr object) {
2310 ScriptPtr script = Script::RawCast(object);
2311 objects_.Add(script);
2312 auto* from = script->untag()->from();
2313 auto* to = script->untag()->to_snapshot(s->kind());
2314 for (auto* p = from; p <= to; p++) {
2315 const intptr_t offset =
2316 reinterpret_cast<uword>(p) - reinterpret_cast<uword>(script->untag());
2317 const ObjectPtr obj = p->Decompress(script->heap_base());
2318 if (offset == Script::line_starts_offset()) {
2319 // Line starts are delta encoded.
2320 s->Push(obj, kDeltaEncodedTypedDataCid);
2321 } else {
2322 s->Push(obj);
2323 }
2324 }
2325 }
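// [Editor's note: illustrative sketch, not part of app_snapshot.cc.]
// "Delta encoded" above refers to storing the difference between consecutive
// line-start offsets rather than the absolute offsets; the exact wire format
// belongs to the DeltaEncodedTypedData cluster defined elsewhere in this
// file. As a generic example, line starts [0, 14, 37, 90] would be stored as
// the deltas [0, 14, 23, 53], which are small integers that encode compactly.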
2326
2327 void WriteAlloc(Serializer* s) {
2328 const intptr_t count = objects_.length();
2329 s->WriteUnsigned(count);
2330 for (intptr_t i = 0; i < count; i++) {
2331 ScriptPtr script = objects_[i];
2332 s->AssignRef(script);
2333 }
2334 }
2335
2336 void WriteFill(Serializer* s) {
2337 const intptr_t count = objects_.length();
2338 for (intptr_t i = 0; i < count; i++) {
2339 ScriptPtr script = objects_[i];
2340 AutoTraceObjectName(script, script->untag()->url());
2341 WriteFromTo(script);
2342 if (s->kind() != Snapshot::kFullAOT) {
2343 // Clear out the max position cache in snapshots to ensure no
2344 // differences in the snapshot due to triggering caching vs. not.
2345 int32_t written_flags =
2346 UntaggedScript::CachedMaxPositionBitField::update(
2347 0, script->untag()->flags_and_max_position_);
2348 written_flags = UntaggedScript::HasCachedMaxPositionBit::update(
2349 false, written_flags);
2350 s->Write<int32_t>(written_flags);
2351 }
2352 s->Write<int32_t>(script->untag()->kernel_script_index_);
2353 }
2354 }
2355
2356 private:
2357 GrowableArray<ScriptPtr> objects_;
2358};
2359#endif // !DART_PRECOMPILED_RUNTIME
2360
2361class ScriptDeserializationCluster : public DeserializationCluster {
2362 public:
2363 ScriptDeserializationCluster() : DeserializationCluster("Script") {}
2364 ~ScriptDeserializationCluster() {}
2365
2366 void ReadAlloc(Deserializer* d) override {
2367 ReadAllocFixedSize(d, Script::InstanceSize());
2368 }
2369
2370 void ReadFill(Deserializer* d_) override {
2371 Deserializer::Local d(d_);
2372
2373 ASSERT(!is_canonical()); // Never canonical.
2374 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2375 ScriptPtr script = static_cast<ScriptPtr>(d.Ref(id));
2376 Deserializer::InitializeHeader(script, kScriptCid,
2377 Script::InstanceSize());
2378 d.ReadFromTo(script);
2379#if !defined(DART_PRECOMPILED_RUNTIME)
2380 script->untag()->flags_and_max_position_ = d.Read<int32_t>();
2381#endif
2382 script->untag()->kernel_script_index_ = d.Read<int32_t>();
2383 script->untag()->load_timestamp_ = 0;
2384 }
2385 }
2386};
2387
2388#if !defined(DART_PRECOMPILED_RUNTIME)
2389class LibrarySerializationCluster : public SerializationCluster {
2390 public:
2391 LibrarySerializationCluster()
2392 : SerializationCluster("Library",
2393 kLibraryCid,
2394 compiler::target::Library::InstanceSize()) {}
2395 ~LibrarySerializationCluster() {}
2396
2397 void Trace(Serializer* s, ObjectPtr object) {
2398 LibraryPtr lib = Library::RawCast(object);
2399 objects_.Add(lib);
2400 PushFromTo(lib);
2401 }
2402
2403 void WriteAlloc(Serializer* s) {
2404 const intptr_t count = objects_.length();
2405 s->WriteUnsigned(count);
2406 for (intptr_t i = 0; i < count; i++) {
2407 LibraryPtr lib = objects_[i];
2408 s->AssignRef(lib);
2409 }
2410 }
2411
2412 void WriteFill(Serializer* s) {
2413 const intptr_t count = objects_.length();
2414 for (intptr_t i = 0; i < count; i++) {
2415 LibraryPtr lib = objects_[i];
2416 AutoTraceObjectName(lib, lib->untag()->url());
2417 WriteFromTo(lib);
2418 s->Write<int32_t>(lib->untag()->index_);
2419 s->Write<uint16_t>(lib->untag()->num_imports_);
2420 s->Write<int8_t>(lib->untag()->load_state_);
2421 s->Write<uint8_t>(lib->untag()->flags_);
2422 if (s->kind() != Snapshot::kFullAOT) {
2423 s->Write<uint32_t>(lib->untag()->kernel_library_index_);
2424 }
2425 }
2426 }
2427
2428 private:
2429 GrowableArray<LibraryPtr> objects_;
2430};
2431#endif // !DART_PRECOMPILED_RUNTIME
2432
2433class LibraryDeserializationCluster : public DeserializationCluster {
2434 public:
2435 LibraryDeserializationCluster() : DeserializationCluster("Library") {}
2436 ~LibraryDeserializationCluster() {}
2437
2438 void ReadAlloc(Deserializer* d) override {
2439 ReadAllocFixedSize(d, Library::InstanceSize());
2440 }
2441
2442 void ReadFill(Deserializer* d_) override {
2443 Deserializer::Local d(d_);
2444
2445 ASSERT(!is_canonical()); // Never canonical.
2446 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2447 LibraryPtr lib = static_cast<LibraryPtr>(d.Ref(id));
2448 Deserializer::InitializeHeader(lib, kLibraryCid, Library::InstanceSize());
2449 d.ReadFromTo(lib);
2450 lib->untag()->native_entry_resolver_ = nullptr;
2451 lib->untag()->native_entry_symbol_resolver_ = nullptr;
2452 lib->untag()->ffi_native_resolver_ = nullptr;
2453 lib->untag()->index_ = d.Read<int32_t>();
2454 lib->untag()->num_imports_ = d.Read<uint16_t>();
2455 lib->untag()->load_state_ = d.Read<int8_t>();
2456 lib->untag()->flags_ =
2457 UntaggedLibrary::InFullSnapshotBit::update(true, d.Read<uint8_t>());
2458#if !defined(DART_PRECOMPILED_RUNTIME)
2459 ASSERT(d_->kind() != Snapshot::kFullAOT);
2460 lib->untag()->kernel_library_index_ = d.Read<uint32_t>();
2461#endif
2462 }
2463 }
2464};
2465
2466#if !defined(DART_PRECOMPILED_RUNTIME)
2467class NamespaceSerializationCluster : public SerializationCluster {
2468 public:
2469 NamespaceSerializationCluster()
2470 : SerializationCluster("Namespace",
2471 kNamespaceCid,
2472 compiler::target::Namespace::InstanceSize()) {}
2473 ~NamespaceSerializationCluster() {}
2474
2475 void Trace(Serializer* s, ObjectPtr object) {
2476 NamespacePtr ns = Namespace::RawCast(object);
2477 objects_.Add(ns);
2478 PushFromTo(ns);
2479 }
2480
2481 void WriteAlloc(Serializer* s) {
2482 const intptr_t count = objects_.length();
2483 s->WriteUnsigned(count);
2484 for (intptr_t i = 0; i < count; i++) {
2485 NamespacePtr ns = objects_[i];
2486 s->AssignRef(ns);
2487 }
2488 }
2489
2490 void WriteFill(Serializer* s) {
2491 const intptr_t count = objects_.length();
2492 for (intptr_t i = 0; i < count; i++) {
2493 NamespacePtr ns = objects_[i];
2494 AutoTraceObject(ns);
2495 WriteFromTo(ns);
2496 }
2497 }
2498
2499 private:
2500 GrowableArray<NamespacePtr> objects_;
2501};
2502#endif // !DART_PRECOMPILED_RUNTIME
2503
2504class NamespaceDeserializationCluster : public DeserializationCluster {
2505 public:
2506 NamespaceDeserializationCluster() : DeserializationCluster("Namespace") {}
2507 ~NamespaceDeserializationCluster() {}
2508
2509 void ReadAlloc(Deserializer* d) override {
2510 ReadAllocFixedSize(d, Namespace::InstanceSize());
2511 }
2512
2513 void ReadFill(Deserializer* d_) override {
2514 Deserializer::Local d(d_);
2515
2516 ASSERT(!is_canonical()); // Never canonical.
2517 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2518 NamespacePtr ns = static_cast<NamespacePtr>(d.Ref(id));
2519 Deserializer::InitializeHeader(ns, kNamespaceCid,
2520 Namespace::InstanceSize());
2521 d.ReadFromTo(ns);
2522 }
2523 }
2524};
2525
2526#if !defined(DART_PRECOMPILED_RUNTIME)
2527// KernelProgramInfo objects are not written into a full AOT snapshot.
2528class KernelProgramInfoSerializationCluster : public SerializationCluster {
2529 public:
2530 KernelProgramInfoSerializationCluster()
2531 : SerializationCluster(
2532 "KernelProgramInfo",
2533 kKernelProgramInfoCid,
2534 compiler::target::KernelProgramInfo::InstanceSize()) {}
2535 ~KernelProgramInfoSerializationCluster() {}
2536
2537 void Trace(Serializer* s, ObjectPtr object) {
2538 KernelProgramInfoPtr info = KernelProgramInfo::RawCast(object);
2539 objects_.Add(info);
2540 PushFromTo(info);
2541 }
2542
2543 void WriteAlloc(Serializer* s) {
2544 const intptr_t count = objects_.length();
2545 s->WriteUnsigned(count);
2546 for (intptr_t i = 0; i < count; i++) {
2547 KernelProgramInfoPtr info = objects_[i];
2548 s->AssignRef(info);
2549 }
2550 }
2551
2552 void WriteFill(Serializer* s) {
2553 const intptr_t count = objects_.length();
2554 for (intptr_t i = 0; i < count; i++) {
2555 KernelProgramInfoPtr info = objects_[i];
2556 AutoTraceObject(info);
2557 WriteFromTo(info);
2558 }
2559 }
2560
2561 private:
2562 GrowableArray<KernelProgramInfoPtr> objects_;
2563};
2564
2565// Since KernelProgramInfo objects are not written into full AOT snapshots,
2566// one will never need to read them from a full AOT snapshot.
2567class KernelProgramInfoDeserializationCluster : public DeserializationCluster {
2568 public:
2569 KernelProgramInfoDeserializationCluster()
2570 : DeserializationCluster("KernelProgramInfo") {}
2571 ~KernelProgramInfoDeserializationCluster() {}
2572
2573 void ReadAlloc(Deserializer* d) override {
2574 ReadAllocFixedSize(d, KernelProgramInfo::InstanceSize());
2575 }
2576
2577 void ReadFill(Deserializer* d_) override {
2578 Deserializer::Local d(d_);
2579
2580 ASSERT(!is_canonical()); // Never canonical.
2581 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2582 KernelProgramInfoPtr info = static_cast<KernelProgramInfoPtr>(d.Ref(id));
2583 Deserializer::InitializeHeader(info, kKernelProgramInfoCid,
2584 KernelProgramInfo::InstanceSize());
2585 d.ReadFromTo(info);
2586 }
2587 }
2588
2589 void PostLoad(Deserializer* d, const Array& refs) override {
2590 Array& array = Array::Handle(d->zone());
2591 KernelProgramInfo& info = KernelProgramInfo::Handle(d->zone());
2592 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
2593 info ^= refs.At(id);
2594 array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld);
2595 info.set_libraries_cache(array);
2596 array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld);
2597 info.set_classes_cache(array);
2598 }
2599 }
2600};
2601
2602class CodeSerializationCluster : public SerializationCluster {
2603 public:
2604 explicit CodeSerializationCluster(Heap* heap)
2605 : SerializationCluster("Code", kCodeCid), array_(Array::Handle()) {}
2606 ~CodeSerializationCluster() {}
2607
2608 void Trace(Serializer* s, ObjectPtr object) {
2609 CodePtr code = Code::RawCast(object);
2610
2611 const bool is_deferred = !s->InCurrentLoadingUnitOrRoot(code);
2612 if (is_deferred) {
2613 s->RecordDeferredCode(code);
2614 } else {
2615 objects_.Add(code);
2616 }
2617
2618 // Even if this code object is itself deferred, we still need to scan
2619 // the pool for references to other code objects (which might reside
2620 // in the current loading unit).
2621 ObjectPoolPtr pool = code->untag()->object_pool_;
2622 if (s->kind() == Snapshot::kFullAOT) {
2623 TracePool(s, pool, /*only_call_targets=*/is_deferred);
2624 } else {
2625 if (s->InCurrentLoadingUnitOrRoot(pool)) {
2626 s->Push(pool);
2627 } else {
2628 TracePool(s, pool, /*only_call_targets=*/true);
2629 }
2630 }
2631
2632 if (s->kind() == Snapshot::kFullJIT) {
2633 s->Push(code->untag()->deopt_info_array_);
2634 s->Push(code->untag()->static_calls_target_table_);
2635 s->Push(code->untag()->compressed_stackmaps_);
2636 } else if (s->kind() == Snapshot::kFullAOT) {
2637 // Note: we don't trace compressed_stackmaps_ because we are going to emit
2638 // a separate mapping table into RO data which is not going to be a real
2639 // heap object.
2640#if defined(DART_PRECOMPILER)
2641 auto const calls_array = code->untag()->static_calls_target_table_;
2642 if (calls_array != Array::null()) {
2643 // Some Code entries in the static calls target table may only be
2644 // reachable from here, so push those Code objects.
2645 array_ = calls_array;
2646 for (auto entry : StaticCallsTable(array_)) {
2647 auto kind = Code::KindField::decode(
2648 Smi::Value(entry.Get<Code::kSCallTableKindAndOffset>()));
2649 switch (kind) {
2650 case Code::kCallViaCode:
2651 // Code object in the pool.
2652 continue;
2653 case Code::kPcRelativeTTSCall:
2654 // The TTS will be reachable through the type object, which itself
2655 // is in the pool.
2656 continue;
2657 case Code::kPcRelativeCall:
2658 case Code::kPcRelativeTailCall:
2659 auto destination = entry.Get<Code::kSCallTableCodeOrTypeTarget>();
2660 ASSERT(destination->IsHeapObject() && destination->IsCode());
2661 s->Push(destination);
2662 }
2663 }
2664 }
2665#else
2666 UNREACHABLE();
2667#endif
2668 }
2669
2670 if (Code::IsDiscarded(code)) {
2671 ASSERT(s->kind() == Snapshot::kFullAOT && FLAG_dwarf_stack_traces_mode &&
2672 !FLAG_retain_code_objects);
2673 // Only object pool and static call table entries and the compressed
2674 // stack maps should be pushed.
2675 return;
2676 }
2677
2678 s->Push(code->untag()->owner_);
2679 s->Push(code->untag()->exception_handlers_);
2680 s->Push(code->untag()->pc_descriptors_);
2681 s->Push(code->untag()->catch_entry_);
2682 if (!FLAG_precompiled_mode || !FLAG_dwarf_stack_traces_mode) {
2683 s->Push(code->untag()->inlined_id_to_function_);
2684 if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) {
2685 s->Push(code->untag()->code_source_map_);
2686 }
2687 }
2688#if !defined(PRODUCT)
2689 s->Push(code->untag()->return_address_metadata_);
2690 if (FLAG_code_comments) {
2691 s->Push(code->untag()->comments_);
2692 }
2693#endif
2694 }
2695
2696 void TracePool(Serializer* s, ObjectPoolPtr pool, bool only_call_targets) {
2697 if (pool == ObjectPool::null()) {
2698 return;
2699 }
2700
2701 const intptr_t length = pool->untag()->length_;
2702 uint8_t* entry_bits = pool->untag()->entry_bits();
2703 for (intptr_t i = 0; i < length; i++) {
2704 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
2705 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
2706 const ObjectPtr target = pool->untag()->data()[i].raw_obj_;
2707 // A field is a call target because its initializer may be called
2708 // indirectly by passing the field to the runtime. A const closure
2709 // is a call target because its function may be called indirectly
2710 // via a closure call.
2711 intptr_t cid = target->GetClassIdMayBeSmi();
2712 if (!only_call_targets || (cid == kCodeCid) || (cid == kFunctionCid) ||
2713 (cid == kFieldCid) || (cid == kClosureCid)) {
2714 s->Push(target);
2715 } else if (cid >= kNumPredefinedCids) {
2716 s->Push(s->isolate_group()->class_table()->At(cid));
2717 }
2718 }
2719 }
2720 }
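// [Editor's note: illustrative sketch, not part of app_snapshot.cc.]
// Effect of only_call_targets in TracePool above: with it set, a plain
// constant in the pool (say, a String) is not pushed, while Code, Function,
// Field and Closure entries are, since each of those can be invoked
// indirectly. Entries whose class id is outside the predefined range push
// the entry's class instead of the instance itself.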
2721
2722 struct CodeOrderInfo {
2723 CodePtr code;
2724 intptr_t not_discarded; // 1 if this code was not discarded and
2725 // 0 otherwise.
2726 intptr_t instructions_id;
2727 };
2728
2729 // We sort code objects so that code objects with the same instructions
2730 // are grouped together, and so that all instructions without associated
2731 // Code objects are grouped together at the beginning of the code section.
2732 // The InstructionsTable encoding assumes that all instructions with
2733 // non-discarded Code objects are grouped at the end.
2734 //
2735 // Note that in AOT mode we expect that all Code objects pointing to
2736 // the same instructions are deduplicated, as in bare instructions mode
2737 // there is no way to identify which specific Code object (out of those
2738 // which point to the specific instructions range) actually corresponds
2739 // to a particular frame.
2740 static int CompareCodeOrderInfo(CodeOrderInfo const* a,
2741 CodeOrderInfo const* b) {
2742 if (a->not_discarded < b->not_discarded) return -1;
2743 if (a->not_discarded > b->not_discarded) return 1;
2744 if (a->instructions_id < b->instructions_id) return -1;
2745 if (a->instructions_id > b->instructions_id) return 1;
2746 return 0;
2747 }
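// [Editor's note: illustrative sketch, not part of app_snapshot.cc.]
// Example of the resulting order: given entries
//   {A: discarded, instructions_id 5}, {B: kept, instructions_id 2},
//   {C: kept, instructions_id 2},      {D: discarded, instructions_id 1},
// the comparator sorts discarded code first (not_discarded == 0) and then
// groups by instructions_id, giving D, A, B, C, so Code objects that share
// instructions end up adjacent and non-discarded code sits at the end.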
2748
2749 static void Insert(Serializer* s,
2750 GrowableArray<CodeOrderInfo>* order_list,
2751 IntMap<intptr_t>* order_map,
2752 CodePtr code) {
2753 InstructionsPtr instr = code->untag()->instructions_;
2754 intptr_t key = static_cast<intptr_t>(instr);
2755 intptr_t instructions_id = 0;
2756
2757 if (order_map->HasKey(key)) {
2758 // We are expected to merge code objects which point to the same
2759 // instructions in the precompiled mode.
2760 RELEASE_ASSERT(!FLAG_precompiled_mode);
2761 instructions_id = order_map->Lookup(key);
2762 } else {
2763 instructions_id = order_map->Length() + 1;
2764 order_map->Insert(key, instructions_id);
2765 }
2766 CodeOrderInfo info;
2767 info.code = code;
2768 info.instructions_id = instructions_id;
2769 info.not_discarded = Code::IsDiscarded(code) ? 0 : 1;
2770 order_list->Add(info);
2771 }
2772
2773 static void Sort(Serializer* s, GrowableArray<CodePtr>* codes) {
2774 GrowableArray<CodeOrderInfo> order_list;
2775 IntMap<intptr_t> order_map;
2776 for (intptr_t i = 0; i < codes->length(); i++) {
2777 Insert(s, &order_list, &order_map, (*codes)[i]);
2778 }
2779 order_list.Sort(CompareCodeOrderInfo);
2780 ASSERT(order_list.length() == codes->length());
2781 for (intptr_t i = 0; i < order_list.length(); i++) {
2782 (*codes)[i] = order_list[i].code;
2783 }
2784 }
2785
2786 static void Sort(Serializer* s, GrowableArray<Code*>* codes) {
2787 GrowableArray<CodeOrderInfo> order_list;
2788 IntMap<intptr_t> order_map;
2789 for (intptr_t i = 0; i < codes->length(); i++) {
2790 Insert(s, &order_list, &order_map, (*codes)[i]->ptr());
2791 }
2792 order_list.Sort(CompareCodeOrderInfo);
2793 ASSERT(order_list.length() == codes->length());
2794 for (intptr_t i = 0; i < order_list.length(); i++) {
2795 *(*codes)[i] = order_list[i].code;
2796 }
2797 }
2798
2799 intptr_t NonDiscardedCodeCount() {
2800 intptr_t count = 0;
2801 for (auto code : objects_) {
2802 if (!Code::IsDiscarded(code)) {
2803 count++;
2804 }
2805 }
2806 return count;
2807 }
2808
2809 void WriteAlloc(Serializer* s) {
2810 const intptr_t non_discarded_count = NonDiscardedCodeCount();
2811 const intptr_t count = objects_.length();
2812 ASSERT(count == non_discarded_count || (s->kind() == Snapshot::kFullAOT));
2813
2814 first_ref_ = s->next_ref_index();
2815 s->WriteUnsigned(non_discarded_count);
2816 for (auto code : objects_) {
2817 if (!Code::IsDiscarded(code)) {
2818 WriteAlloc(s, code);
2819 } else {
2820 // Mark discarded code unreachable, so that we can later
2821 // assign artificial references to it.
2822 s->heap()->SetObjectId(code, kUnreachableReference);
2823 }
2824 }
2825
2826 s->WriteUnsigned(deferred_objects_.length());
2827 first_deferred_ref_ = s->next_ref_index();
2828 for (auto code : deferred_objects_) {
2829 ASSERT(!Code::IsDiscarded(code));
2830 WriteAlloc(s, code);
2831 }
2832 last_ref_ = s->next_ref_index() - 1;
2833 }
2834
2835 void WriteAlloc(Serializer* s, CodePtr code) {
2836 ASSERT(!Code::IsDiscarded(code));
2837 s->AssignRef(code);
2838 AutoTraceObjectName(code, MakeDisambiguatedCodeName(s, code));
2839 const int32_t state_bits = code->untag()->state_bits_;
2840 s->Write<int32_t>(state_bits);
2841 target_memory_size_ += compiler::target::Code::InstanceSize(0);
2842 }
2843
2844 void WriteFill(Serializer* s) {
2845 Snapshot::Kind kind = s->kind();
2846 const intptr_t count = objects_.length();
2847 for (intptr_t i = 0; i < count; i++) {
2848 CodePtr code = objects_[i];
2849#if defined(DART_PRECOMPILER)
2850 if (FLAG_write_v8_snapshot_profile_to != nullptr &&
2851 Code::IsDiscarded(code)) {
2852 s->CreateArtificialNodeIfNeeded(code);
2853 }
2854#endif
2855 // Note: for discarded code this function will not write anything out;
2856 // it is only called to produce information for the snapshot profile.
2857 WriteFill(s, kind, code, /*deferred=*/false);
2858 }
2859 const intptr_t deferred_count = deferred_objects_.length();
2860 for (intptr_t i = 0; i < deferred_count; i++) {
2861 CodePtr code = deferred_objects_[i];
2862 WriteFill(s, kind, code, /*deferred=*/true);
2863 }
2864 }
2865
2866 void WriteFill(Serializer* s,
2867 Snapshot::Kind kind,
2868 CodePtr code,
2869 bool deferred) {
2870 const intptr_t bytes_written = s->bytes_written();
2871 AutoTraceObjectName(code, MakeDisambiguatedCodeName(s, code));
2872
2873 intptr_t pointer_offsets_length =
2874 Code::PtrOffBits::decode(code->untag()->state_bits_);
2875 if (pointer_offsets_length != 0) {
2876 FATAL("Cannot serialize code with embedded pointers");
2877 }
2878 if (kind == Snapshot::kFullAOT && Code::IsDisabled(code)) {
2879 // Disabled code is fatal in AOT since we cannot recompile.
2880 s->UnexpectedObject(code, "Disabled code");
2881 }
2882
2883 s->WriteInstructions(code->untag()->instructions_,
2884 code->untag()->unchecked_offset_, code, deferred);
2885 if (kind == Snapshot::kFullJIT) {
2886 // TODO(rmacnak): Fix references to disabled code before serializing.
2887 // For now, we may write the FixCallersTarget or equivalent stub. This
2888 // will cause a fixup if this code is called.
2889 const uint32_t active_unchecked_offset =
2890 code->untag()->unchecked_entry_point_ - code->untag()->entry_point_;
2891 s->WriteInstructions(code->untag()->active_instructions_,
2892 active_unchecked_offset, code, deferred);
2893 }
2894
2895#if defined(DART_PRECOMPILER)
2896 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
2897 // If we are writing a V8 snapshot profile, attribute references going
2898 // through the object pool and static calls to the code object itself.
2899 if (kind == Snapshot::kFullAOT &&
2900 code->untag()->object_pool_ != ObjectPool::null()) {
2901 ObjectPoolPtr pool = code->untag()->object_pool_;
2902 // Non-empty per-code object pools should not be reachable in this mode.
2903 ASSERT(!s->HasRef(pool) || pool == Object::empty_object_pool().ptr());
2904 s->CreateArtificialNodeIfNeeded(pool);
2905 s->AttributePropertyRef(pool, "object_pool_");
2906 }
2907 if (kind != Snapshot::kFullJIT &&
2908 code->untag()->static_calls_target_table_ != Array::null()) {
2909 auto const table = code->untag()->static_calls_target_table_;
2910 // Non-empty static call target tables shouldn't be reachable in this
2911 // mode.
2912 ASSERT(!s->HasRef(table) || table == Object::empty_array().ptr());
2913 s->CreateArtificialNodeIfNeeded(table);
2914 s->AttributePropertyRef(table, "static_calls_target_table_");
2915 }
2916 }
2917#endif // defined(DART_PRECOMPILER)
2918
2919 if (Code::IsDiscarded(code)) {
2920 // No bytes should be written to represent this code.
2921 ASSERT(s->bytes_written() == bytes_written);
2922 // Only write instructions, compressed stackmaps and state bits
2923 // for the discarded Code objects.
2924 ASSERT(kind == Snapshot::kFullAOT && FLAG_dwarf_stack_traces_mode &&
2925 !FLAG_retain_code_objects);
2926#if defined(DART_PRECOMPILER)
2927 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
2928 // Keep the owner as a (possibly artificial) node for snapshot analysis.
2929 const auto& owner = code->untag()->owner_;
2930 s->CreateArtificialNodeIfNeeded(owner);
2931 s->AttributePropertyRef(owner, "owner_");
2932 }
2933#endif
2934 return;
2935 }
2936
2937 // No need to write object pool out if we are producing full AOT
2938 // snapshot with bare instructions.
2939 if (kind != Snapshot::kFullAOT) {
2940 if (s->InCurrentLoadingUnitOrRoot(code->untag()->object_pool_)) {
2941 WriteField(code, object_pool_);
2942 } else {
2943 WriteFieldValue(object_pool_, ObjectPool::null());
2944 }
2945 }
2946 WriteField(code, owner_);
2947 WriteField(code, exception_handlers_);
2948 WriteField(code, pc_descriptors_);
2949 WriteField(code, catch_entry_);
2950 if (s->kind() == Snapshot::kFullJIT) {
2951 WriteField(code, compressed_stackmaps_);
2952 }
2953 if (FLAG_precompiled_mode && FLAG_dwarf_stack_traces_mode) {
2954 WriteFieldValue(inlined_id_to_function_, Array::null());
2955 WriteFieldValue(code_source_map_, CodeSourceMap::null());
2956 } else {
2957 WriteField(code, inlined_id_to_function_);
2958 if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) {
2959 WriteField(code, code_source_map_);
2960 } else {
2961 WriteFieldValue(code_source_map_, CodeSourceMap::null());
2962 }
2963 }
2964 if (kind == Snapshot::kFullJIT) {
2965 WriteField(code, deopt_info_array_);
2966 WriteField(code, static_calls_target_table_);
2967 }
2968
2969#if !defined(PRODUCT)
2970 WriteField(code, return_address_metadata_);
2971 if (FLAG_code_comments) {
2972 WriteField(code, comments_);
2973 }
2974#endif
2975 }
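// [Editor's note: descriptive summary, not part of app_snapshot.cc.]
// Roughly what the WriteFill above emits per Code object:
//  - always: the instructions (with the unchecked offset) via
//    WriteInstructions.
//  - non-AOT kinds: a reference to the per-code object pool (or null when
//    the pool lives in another loading unit).
//  - kFullJIT only: the active instructions, compressed_stackmaps_,
//    deopt_info_array_ and the static calls target table.
//  - non-discarded code: owner, exception handlers, pc descriptors, catch
//    entry and, unless DWARF stack traces are used, the inlining metadata.
//  - discarded code (kFullAOT with FLAG_dwarf_stack_traces_mode set and
//    FLAG_retain_code_objects unset): nothing beyond what WriteAlloc
//    already wrote.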
2976
2977 GrowableArray<CodePtr>* objects() { return &objects_; }
2978 GrowableArray<CodePtr>* deferred_objects() { return &deferred_objects_; }
2979
2980 static const char* MakeDisambiguatedCodeName(Serializer* s, CodePtr c) {
2981 if (s->profile_writer() == nullptr) {
2982 return nullptr;
2983 }
2984
2985 REUSABLE_CODE_HANDLESCOPE(s->thread());
2986 Code& code = reused_code_handle.Handle();
2987 code = c;
2988 return code.QualifiedName(
2989 NameFormattingParams::DisambiguatedWithoutClassName(
2990 Object::NameVisibility::kInternalName));
2991 }
2992
2993 intptr_t first_ref() const { return first_ref_; }
2994 intptr_t first_deferred_ref() const { return first_deferred_ref_; }
2995 intptr_t last_ref() const { return last_ref_; }
2996
2997 private:
2998 intptr_t first_ref_;
2999 intptr_t first_deferred_ref_;
3000 intptr_t last_ref_;
3001 GrowableArray<CodePtr> objects_;
3002 GrowableArray<CodePtr> deferred_objects_;
3003 Array& array_;
3004};
3005#endif // !DART_PRECOMPILED_RUNTIME
3006
3007class CodeDeserializationCluster : public DeserializationCluster {
3008 public:
3009 CodeDeserializationCluster() : DeserializationCluster("Code") {}
3010 ~CodeDeserializationCluster() {}
3011
3012 void ReadAlloc(Deserializer* d) override {
3013 start_index_ = d->next_index();
3014 d->set_code_start_index(start_index_);
3015 const intptr_t count = d->ReadUnsigned();
3016 for (intptr_t i = 0; i < count; i++) {
3017 ReadAllocOneCode(d);
3018 }
3019 stop_index_ = d->next_index();
3020 d->set_code_stop_index(stop_index_);
3021 deferred_start_index_ = d->next_index();
3022 const intptr_t deferred_count = d->ReadUnsigned();
3023 for (intptr_t i = 0; i < deferred_count; i++) {
3024 ReadAllocOneCode(d);
3025 }
3026 deferred_stop_index_ = d->next_index();
3027 }
3028
3029 void ReadAllocOneCode(Deserializer* d) {
3030 const int32_t state_bits = d->Read<int32_t>();
3031 ASSERT(!Code::DiscardedBit::decode(state_bits));
3032 auto code = static_cast<CodePtr>(d->Allocate(Code::InstanceSize(0)));
3033 d->AssignRef(code);
3034 code->untag()->state_bits_ = state_bits;
3035 }
3036
3037 void ReadFill(Deserializer* d) override {
3038 ASSERT(!is_canonical()); // Never canonical.
3039 ReadFill(d, start_index_, stop_index_, false);
3040#if defined(DART_PRECOMPILED_RUNTIME)
3041 ReadFill(d, deferred_start_index_, deferred_stop_index_, true);
3042#else
3043 ASSERT(deferred_start_index_ == deferred_stop_index_);
3044#endif
3045 }
3046
3047 void ReadFill(Deserializer* d,
3048 intptr_t start_index,
3049 intptr_t stop_index,
3050 bool deferred) {
3051 for (intptr_t id = start_index, n = stop_index; id < n; id++) {
3052 auto const code = static_cast<CodePtr>(d->Ref(id));
3053
3054 ASSERT(!Code::IsUnknownDartCode(code));
3055
3056 Deserializer::InitializeHeader(code, kCodeCid, Code::InstanceSize(0));
3057 ASSERT(!Code::IsDiscarded(code));
3058
3059 d->ReadInstructions(code, deferred);
3060
3061#if !defined(DART_PRECOMPILED_RUNTIME)
3062 ASSERT(d->kind() == Snapshot::kFullJIT);
3063 code->untag()->object_pool_ = static_cast<ObjectPoolPtr>(d->ReadRef());
3064#else
3065 ASSERT(d->kind() == Snapshot::kFullAOT);
3066 // There is a single global pool.
3067 code->untag()->object_pool_ = ObjectPool::null();
3068#endif
3069 code->untag()->owner_ = d->ReadRef();
3070 code->untag()->exception_handlers_ =
3071 static_cast<ExceptionHandlersPtr>(d->ReadRef());
3072 code->untag()->pc_descriptors_ =
3073 static_cast<PcDescriptorsPtr>(d->ReadRef());
3074 code->untag()->catch_entry_ = d->ReadRef();
3075#if !defined(DART_PRECOMPILED_RUNTIME)
3076 ASSERT(d->kind() == Snapshot::kFullJIT);
3077 code->untag()->compressed_stackmaps_ =
3078 static_cast<CompressedStackMapsPtr>(d->ReadRef());
3079#else
3080 ASSERT(d->kind() == Snapshot::kFullAOT);
3081 code->untag()->compressed_stackmaps_ = CompressedStackMaps::null();
3082#endif
3083 code->untag()->inlined_id_to_function_ =
3084 static_cast<ArrayPtr>(d->ReadRef());
3085 code->untag()->code_source_map_ =
3086 static_cast<CodeSourceMapPtr>(d->ReadRef());
3087
3088#if !defined(DART_PRECOMPILED_RUNTIME)
3089 ASSERT(d->kind() == Snapshot::kFullJIT);
3090 code->untag()->deopt_info_array_ = static_cast<ArrayPtr>(d->ReadRef());
3091 code->untag()->static_calls_target_table_ =
3092 static_cast<ArrayPtr>(d->ReadRef());
3093#endif // !DART_PRECOMPILED_RUNTIME
3094
3095#if !defined(PRODUCT)
3096 code->untag()->return_address_metadata_ = d->ReadRef();
3097 code->untag()->var_descriptors_ = LocalVarDescriptors::null();
3098 code->untag()->comments_ = FLAG_code_comments
3099 ? static_cast<ArrayPtr>(d->ReadRef())
3100 : Array::null();
3101 code->untag()->compile_timestamp_ = 0;
3102#endif
3103 }
3104 }
3105
3106 void PostLoad(Deserializer* d, const Array& refs) override {
3107 d->EndInstructions();
3108
3109#if !defined(PRODUCT)
3110 if (!CodeObservers::AreActive() && !FLAG_support_disassembler) return;
3111#endif
3112 Code& code = Code::Handle(d->zone());
3113#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
3114 Object& owner = Object::Handle(d->zone());
3115#endif
3116 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3117 code ^= refs.At(id);
3118#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(PRODUCT)
3119 if (CodeObservers::AreActive()) {
3120 Code::NotifyCodeObservers(code, code.is_optimized());
3121 }
3122#endif
3123#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
3124 owner = code.owner();
3125 if (owner.IsFunction()) {
3126 if ((FLAG_disassemble ||
3127 (code.is_optimized() && FLAG_disassemble_optimized)) &&
3128 compiler::PrintFilter::ShouldPrint(Function::Cast(owner))) {
3129 Disassembler::DisassembleCode(Function::Cast(owner), code,
3130 code.is_optimized());
3131 }
3132 } else if (FLAG_disassemble_stubs) {
3133 Disassembler::DisassembleStub(code.Name(), code);
3134 }
3135#endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
3136 }
3137 }
3138
3139 private:
3140 intptr_t deferred_start_index_;
3141 intptr_t deferred_stop_index_;
3142};
3143
3144#if !defined(DART_PRECOMPILED_RUNTIME)
3145class ObjectPoolSerializationCluster : public SerializationCluster {
3146 public:
3147 ObjectPoolSerializationCluster()
3148 : SerializationCluster("ObjectPool", kObjectPoolCid) {}
3149 ~ObjectPoolSerializationCluster() {}
3150
3151 void Trace(Serializer* s, ObjectPtr object) {
3152 ObjectPoolPtr pool = ObjectPool::RawCast(object);
3153 objects_.Add(pool);
3154
3155 if (s->kind() != Snapshot::kFullAOT) {
3156 const intptr_t length = pool->untag()->length_;
3157 uint8_t* entry_bits = pool->untag()->entry_bits();
3158 for (intptr_t i = 0; i < length; i++) {
3159 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
3160 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
3161 s->Push(pool->untag()->data()[i].raw_obj_);
3162 }
3163 }
3164 }
3165 }
3166
3167 void WriteAlloc(Serializer* s) {
3168 const intptr_t count = objects_.length();
3169 s->WriteUnsigned(count);
3170 for (intptr_t i = 0; i < count; i++) {
3171 ObjectPoolPtr pool = objects_[i];
3172 s->AssignRef(pool);
3173 AutoTraceObject(pool);
3174 const intptr_t length = pool->untag()->length_;
3175 s->WriteUnsigned(length);
3176 target_memory_size_ += compiler::target::ObjectPool::InstanceSize(length);
3177 }
3178 }
3179
3180 void WriteFill(Serializer* s) {
3181 bool weak = s->kind() == Snapshot::kFullAOT;
3182
3183 const intptr_t count = objects_.length();
3184 for (intptr_t i = 0; i < count; i++) {
3185 ObjectPoolPtr pool = objects_[i];
3186 AutoTraceObject(pool);
3187 const intptr_t length = pool->untag()->length_;
3188 s->WriteUnsigned(length);
3189 uint8_t* entry_bits = pool->untag()->entry_bits();
3190 for (intptr_t j = 0; j < length; j++) {
3191 UntaggedObjectPool::Entry& entry = pool->untag()->data()[j];
3192 uint8_t bits = entry_bits[j];
3193 ObjectPool::EntryType type = ObjectPool::TypeBits::decode(bits);
3194 auto snapshot_behavior = ObjectPool::SnapshotBehaviorBits::decode(bits);
3195 ASSERT(snapshot_behavior !=
3196 ObjectPool::SnapshotBehavior::kNotSnapshotable);
3197 s->Write<uint8_t>(bits);
3198 if (snapshot_behavior != ObjectPool::SnapshotBehavior::kSnapshotable) {
3199 // The deserializer will reset this to a specific value, no need to
3200 // write anything.
3201 continue;
3202 }
3203 switch (type) {
3204 case ObjectPool::EntryType::kTaggedObject: {
3205 if (weak && !s->HasRef(entry.raw_obj_)) {
3206 // Any value will do, but null has the shortest id.
3207 s->WriteElementRef(Object::null(), j);
3208 } else {
3209 s->WriteElementRef(entry.raw_obj_, j);
3210 }
3211 break;
3212 }
3213 case ObjectPool::EntryType::kImmediate: {
3214 s->Write<intptr_t>(entry.raw_value_);
3215 break;
3216 }
3217 case ObjectPool::EntryType::kNativeFunction: {
3218 // Write nothing. Will initialize with the lazy link entry.
3219 break;
3220 }
3221 default:
3222 UNREACHABLE();
3223 }
3224 }
3225 }
3226 }
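// [Editor's note: descriptive summary, not part of app_snapshot.cc.]
// Wire format produced by the WriteFill above, per pool entry: one bits byte
// (type, patchability and snapshot behavior), then, only for entries marked
// kSnapshotable, either a ref (kTaggedObject), a raw intptr_t (kImmediate),
// or nothing at all (kNativeFunction, re-linked lazily at load time).
// Entries with any other snapshot behavior are reconstituted by the
// deserializer without consuming payload bytes. In AOT ("weak") mode,
// tagged entries whose target was not serialized are written as null.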
3227
3228 private:
3229 GrowableArray<ObjectPoolPtr> objects_;
3230};
3231#endif // !DART_PRECOMPILED_RUNTIME
3232
3233class ObjectPoolDeserializationCluster : public DeserializationCluster {
3234 public:
3235 ObjectPoolDeserializationCluster() : DeserializationCluster("ObjectPool") {}
3236 ~ObjectPoolDeserializationCluster() {}
3237
3238 void ReadAlloc(Deserializer* d) override {
3239 start_index_ = d->next_index();
3240 const intptr_t count = d->ReadUnsigned();
3241 for (intptr_t i = 0; i < count; i++) {
3242 const intptr_t length = d->ReadUnsigned();
3243 d->AssignRef(d->Allocate(ObjectPool::InstanceSize(length)));
3244 }
3245 stop_index_ = d->next_index();
3246 }
3247
3248 void ReadFill(Deserializer* d_) override {
3249 Deserializer::Local d(d_);
3250
3251 ASSERT(!is_canonical()); // Never canonical.
3252 fill_position_ = d.Position();
3253#if defined(DART_PRECOMPILED_RUNTIME)
3254 const uint8_t immediate_bits = ObjectPool::EncodeBits(
3255 ObjectPool::EntryType::kImmediate, ObjectPool::Patchability::kPatchable,
3256 ObjectPool::SnapshotBehavior::kSnapshotable);
3257 uword switchable_call_miss_entry_point =
3258 StubCode::SwitchableCallMiss().MonomorphicEntryPoint();
3259#endif // defined(DART_PRECOMPILED_RUNTIME)
3260
3261 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3262 const intptr_t length = d.ReadUnsigned();
3263 ObjectPoolPtr pool = static_cast<ObjectPoolPtr>(d.Ref(id));
3264 Deserializer::InitializeHeader(pool, kObjectPoolCid,
3265 ObjectPool::InstanceSize(length));
3266 pool->untag()->length_ = length;
3267 for (intptr_t j = 0; j < length; j++) {
3268 const uint8_t entry_bits = d.Read<uint8_t>();
3269 pool->untag()->entry_bits()[j] = entry_bits;
3270 UntaggedObjectPool::Entry& entry = pool->untag()->data()[j];
3271 const auto snapshot_behavior =
3272 ObjectPool::SnapshotBehaviorBits::decode(entry_bits);
3273 ASSERT(snapshot_behavior !=
3274 ObjectPool::SnapshotBehavior::kNotSnapshotable);
3275 switch (snapshot_behavior) {
3276 case ObjectPool::SnapshotBehavior::kSnapshotable:
3277 // Handled below.
3278 break;
3279 case ObjectPool::SnapshotBehavior::kResetToBootstrapNative:
3280 entry.raw_obj_ = StubCode::CallBootstrapNative().ptr();
3281 continue;
3282#if defined(DART_PRECOMPILED_RUNTIME)
3283 case ObjectPool::SnapshotBehavior::
3284 kResetToSwitchableCallMissEntryPoint:
3285 pool->untag()->entry_bits()[j] = immediate_bits;
3286 entry.raw_value_ =
3287 static_cast<intptr_t>(switchable_call_miss_entry_point);
3288 continue;
3289#endif // defined(DART_PRECOMPILED_RUNTIME)
3290 case ObjectPool::SnapshotBehavior::kSetToZero:
3291 entry.raw_value_ = 0;
3292 continue;
3293 default:
3294 FATAL("Unexpected snapshot behavior: %d\n", snapshot_behavior);
3295 }
3296 switch (ObjectPool::TypeBits::decode(entry_bits)) {
3297 case ObjectPool::EntryType::kTaggedObject:
3298 entry.raw_obj_ = d.ReadRef();
3299 break;
3300 case ObjectPool::EntryType::kImmediate:
3301 entry.raw_value_ = d.Read<intptr_t>();
3302 break;
3303 case ObjectPool::EntryType::kNativeFunction: {
3304 // Read nothing. Initialize with the lazy link entry.
3305 uword new_entry = NativeEntry::LinkNativeCallEntry();
3306 entry.raw_value_ = static_cast<intptr_t>(new_entry);
3307 break;
3308 }
3309 default:
3310 UNREACHABLE();
3311 }
3312 }
3313 }
3314 }
3315
3316 void PostLoad(Deserializer* d, const Array& refs) override {
3317#if defined(DART_PRECOMPILED_RUNTIME) && \
3318 (!defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER))
3319 if (FLAG_disassemble) {
3320 ObjectPool& pool = ObjectPool::Handle(
3321 d->isolate_group()->object_store()->global_object_pool());
3322 THR_Print("Global object pool:\n");
3323 pool.DebugPrint();
3324 }
3325#endif
3326 }
3327
3328 private:
3329 intptr_t fill_position_ = 0;
3330};
3331
3332#if defined(DART_PRECOMPILER)
3333class WeakSerializationReferenceSerializationCluster
3334 : public SerializationCluster {
3335 public:
3336 WeakSerializationReferenceSerializationCluster()
3337 : SerializationCluster(
3338 "WeakSerializationReference",
3339 compiler::target::WeakSerializationReference::InstanceSize()) {}
3340 ~WeakSerializationReferenceSerializationCluster() {}
3341
3342 void Trace(Serializer* s, ObjectPtr object) {
3343 ASSERT(s->kind() == Snapshot::kFullAOT);
3344 objects_.Add(WeakSerializationReference::RawCast(object));
3345 }
3346
3347 void RetraceEphemerons(Serializer* s) {
3348 for (intptr_t i = 0; i < objects_.length(); i++) {
3349 WeakSerializationReferencePtr weak = objects_[i];
3350 if (!s->IsReachable(weak->untag()->target())) {
3351 s->Push(weak->untag()->replacement());
3352 }
3353 }
3354 }
3355
3356 intptr_t Count(Serializer* s) { return objects_.length(); }
3357
3358 void CreateArtificialTargetNodesIfNeeded(Serializer* s) {
3359 for (intptr_t i = 0; i < objects_.length(); i++) {
3360 WeakSerializationReferencePtr weak = objects_[i];
3361 s->CreateArtificialNodeIfNeeded(weak->untag()->target());
3362 }
3363 }
3364
3365 void WriteAlloc(Serializer* s) {
3366 UNREACHABLE(); // No WSRs are serialized, and so this cluster is not added.
3367 }
3368
3369 void WriteFill(Serializer* s) {
3370 UNREACHABLE(); // No WSRs are serialized, and so this cluster is not added.
3371 }
3372
3373 private:
3374 GrowableArray<WeakSerializationReferencePtr> objects_;
3375};
3376#endif
3377
3378#if !defined(DART_PRECOMPILED_RUNTIME)
3379class PcDescriptorsSerializationCluster : public SerializationCluster {
3380 public:
3381 PcDescriptorsSerializationCluster()
3382 : SerializationCluster("PcDescriptors", kPcDescriptorsCid) {}
3383 ~PcDescriptorsSerializationCluster() {}
3384
3385 void Trace(Serializer* s, ObjectPtr object) {
3386 PcDescriptorsPtr desc = PcDescriptors::RawCast(object);
3387 objects_.Add(desc);
3388 }
3389
3390 void WriteAlloc(Serializer* s) {
3391 const intptr_t count = objects_.length();
3392 s->WriteUnsigned(count);
3393 for (intptr_t i = 0; i < count; i++) {
3394 PcDescriptorsPtr desc = objects_[i];
3395 s->AssignRef(desc);
3396 AutoTraceObject(desc);
3397 const intptr_t length = desc->untag()->length_;
3398 s->WriteUnsigned(length);
3399 target_memory_size_ +=
3400 compiler::target::PcDescriptors::InstanceSize(length);
3401 }
3402 }
3403
3404 void WriteFill(Serializer* s) {
3405 const intptr_t count = objects_.length();
3406 for (intptr_t i = 0; i < count; i++) {
3407 PcDescriptorsPtr desc = objects_[i];
3408 AutoTraceObject(desc);
3409 const intptr_t length = desc->untag()->length_;
3410 s->WriteUnsigned(length);
3411 uint8_t* cdata = reinterpret_cast<uint8_t*>(desc->untag()->data());
3412 s->WriteBytes(cdata, length);
3413 }
3414 }
3415
3416 private:
3417 GrowableArray<PcDescriptorsPtr> objects_;
3418};
3419#endif // !DART_PRECOMPILED_RUNTIME
3420
3421class PcDescriptorsDeserializationCluster : public DeserializationCluster {
3422 public:
3423 PcDescriptorsDeserializationCluster()
3424 : DeserializationCluster("PcDescriptors") {}
3425 ~PcDescriptorsDeserializationCluster() {}
3426
3427 void ReadAlloc(Deserializer* d) override {
3428 start_index_ = d->next_index();
3429 const intptr_t count = d->ReadUnsigned();
3430 for (intptr_t i = 0; i < count; i++) {
3431 const intptr_t length = d->ReadUnsigned();
3432 d->AssignRef(d->Allocate(PcDescriptors::InstanceSize(length)));
3433 }
3434 stop_index_ = d->next_index();
3435 }
3436
3437 void ReadFill(Deserializer* d_) override {
3438 Deserializer::Local d(d_);
3439
3440 ASSERT(!is_canonical()); // Never canonical.
3441 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3442 const intptr_t length = d.ReadUnsigned();
3443 PcDescriptorsPtr desc = static_cast<PcDescriptorsPtr>(d.Ref(id));
3444 Deserializer::InitializeHeader(desc, kPcDescriptorsCid,
3445 PcDescriptors::InstanceSize(length));
3446 desc->untag()->length_ = length;
3447 uint8_t* cdata = reinterpret_cast<uint8_t*>(desc->untag()->data());
3448 d.ReadBytes(cdata, length);
3449 }
3450 }
3451};
3452
3453#if !defined(DART_PRECOMPILED_RUNTIME)
3454class CodeSourceMapSerializationCluster : public SerializationCluster {
3455 public:
3456 CodeSourceMapSerializationCluster()
3457 : SerializationCluster("CodeSourceMap", kCodeSourceMapCid) {}
3458 ~CodeSourceMapSerializationCluster() {}
3459
3460 void Trace(Serializer* s, ObjectPtr object) {
3461 CodeSourceMapPtr map = CodeSourceMap::RawCast(object);
3462 objects_.Add(map);
3463 }
3464
3465 void WriteAlloc(Serializer* s) {
3466 const intptr_t count = objects_.length();
3467 s->WriteUnsigned(count);
3468 for (intptr_t i = 0; i < count; i++) {
3469 CodeSourceMapPtr map = objects_[i];
3470 s->AssignRef(map);
3471 AutoTraceObject(map);
3472 const intptr_t length = map->untag()->length_;
3473 s->WriteUnsigned(length);
3474 target_memory_size_ +=
3475 compiler::target::CodeSourceMap::InstanceSize(length);
3476 }
3477 }
3478
3479 void WriteFill(Serializer* s) {
3480 const intptr_t count = objects_.length();
3481 for (intptr_t i = 0; i < count; i++) {
3482 CodeSourceMapPtr map = objects_[i];
3483 AutoTraceObject(map);
3484 const intptr_t length = map->untag()->length_;
3485 s->WriteUnsigned(length);
3486 uint8_t* cdata = reinterpret_cast<uint8_t*>(map->untag()->data());
3487 s->WriteBytes(cdata, length);
3488 }
3489 }
3490
3491 private:
3492 GrowableArray<CodeSourceMapPtr> objects_;
3493};
3494#endif // !DART_PRECOMPILED_RUNTIME
3495
3496class CodeSourceMapDeserializationCluster : public DeserializationCluster {
3497 public:
3498 CodeSourceMapDeserializationCluster()
3499 : DeserializationCluster("CodeSourceMap") {}
3500 ~CodeSourceMapDeserializationCluster() {}
3501
3502 void ReadAlloc(Deserializer* d) override {
3503 start_index_ = d->next_index();
3504 const intptr_t count = d->ReadUnsigned();
3505 for (intptr_t i = 0; i < count; i++) {
3506 const intptr_t length = d->ReadUnsigned();
3507 d->AssignRef(d->Allocate(CodeSourceMap::InstanceSize(length)));
3508 }
3509 stop_index_ = d->next_index();
3510 }
3511
3512 void ReadFill(Deserializer* d_) override {
3513 Deserializer::Local d(d_);
3514
3515 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3516 const intptr_t length = d.ReadUnsigned();
3517 CodeSourceMapPtr map = static_cast<CodeSourceMapPtr>(d.Ref(id));
3518 Deserializer::InitializeHeader(map, kCodeSourceMapCid,
3519 CodeSourceMap::InstanceSize(length));
3520 map->untag()->length_ = length;
3521 uint8_t* cdata = reinterpret_cast<uint8_t*>(map->untag()->data());
3522 d.ReadBytes(cdata, length);
3523 }
3524 }
3525};
3526
3527#if !defined(DART_PRECOMPILED_RUNTIME)
3528class CompressedStackMapsSerializationCluster : public SerializationCluster {
3529 public:
3530 CompressedStackMapsSerializationCluster()
3531 : SerializationCluster("CompressedStackMaps", kCompressedStackMapsCid) {}
3532 ~CompressedStackMapsSerializationCluster() {}
3533
3534 void Trace(Serializer* s, ObjectPtr object) {
3535 CompressedStackMapsPtr desc = CompressedStackMaps::RawCast(object);
3536 objects_.Add(desc);
3537 }
3538
3539 void WriteAlloc(Serializer* s) {
3540 const intptr_t count = objects_.length();
3541 s->WriteUnsigned(count);
3542 for (intptr_t i = 0; i < count; i++) {
3543 CompressedStackMapsPtr map = objects_[i];
3544 s->AssignRef(map);
3545 AutoTraceObject(map);
3546 const intptr_t length = UntaggedCompressedStackMaps::SizeField::decode(
3547 map->untag()->payload()->flags_and_size());
3548 s->WriteUnsigned(length);
3549 target_memory_size_ +=
3550 compiler::target::CompressedStackMaps::InstanceSize(length);
3551 }
3552 }
3553
3554 void WriteFill(Serializer* s) {
3555 const intptr_t count = objects_.length();
3556 for (intptr_t i = 0; i < count; i++) {
3557 CompressedStackMapsPtr map = objects_[i];
3558 AutoTraceObject(map);
3559 s->WriteUnsigned(map->untag()->payload()->flags_and_size());
3560 const intptr_t length = UntaggedCompressedStackMaps::SizeField::decode(
3561 map->untag()->payload()->flags_and_size());
3562 uint8_t* cdata =
3563 reinterpret_cast<uint8_t*>(map->untag()->payload()->data());
3564 s->WriteBytes(cdata, length);
3565 }
3566 }
3567
3568 private:
3569 GrowableArray<CompressedStackMapsPtr> objects_;
3570};
3571#endif // !DART_PRECOMPILED_RUNTIME
3572
3573class CompressedStackMapsDeserializationCluster
3574 : public DeserializationCluster {
3575 public:
3576 CompressedStackMapsDeserializationCluster()
3577 : DeserializationCluster("CompressedStackMaps") {}
3578 ~CompressedStackMapsDeserializationCluster() {}
3579
3580 void ReadAlloc(Deserializer* d) override {
3581 start_index_ = d->next_index();
3582 const intptr_t count = d->ReadUnsigned();
3583 for (intptr_t i = 0; i < count; i++) {
3584 const intptr_t length = d->ReadUnsigned();
3585 d->AssignRef(d->Allocate(CompressedStackMaps::InstanceSize(length)));
3586 }
3587 stop_index_ = d->next_index();
3588 }
3589
3590 void ReadFill(Deserializer* d_) override {
3591 Deserializer::Local d(d_);
3592
3593 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3594 const intptr_t flags_and_size = d.ReadUnsigned();
3595 const intptr_t length =
3596 UntaggedCompressedStackMaps::SizeField::decode(flags_and_size);
3597 CompressedStackMapsPtr map =
3598 static_cast<CompressedStackMapsPtr>(d.Ref(id));
3599 Deserializer::InitializeHeader(map, kCompressedStackMapsCid,
3600 CompressedStackMaps::InstanceSize(length));
3601 map->untag()->payload()->set_flags_and_size(flags_and_size);
3602 uint8_t* cdata =
3603 reinterpret_cast<uint8_t*>(map->untag()->payload()->data());
3604 d.ReadBytes(cdata, length);
3605 }
3606 }
3607};
3608
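// Note on the CompressedStackMaps encoding above: the payload size and the
// flag bits live in a single packed `flags_and_size` word. WriteAlloc streams
// only the decoded byte length (so ReadAlloc can size the allocation), while
// WriteFill streams the whole packed word; ReadFill re-derives the byte count
// before copying the payload, roughly:
//
//   const intptr_t flags_and_size = d.ReadUnsigned();
//   const intptr_t length =
//       UntaggedCompressedStackMaps::SizeField::decode(flags_and_size);
//   map->untag()->payload()->set_flags_and_size(flags_and_size);
//   // followed by reading `length` raw payload bytes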
3609#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_COMPRESSED_POINTERS)
3610// PcDescriptor, CompressedStackMaps, OneByteString, TwoByteString
3611class RODataSerializationCluster
3612 : public CanonicalSetSerializationCluster<CanonicalStringSet,
3613 String,
3614 ObjectPtr> {
3615 public:
3616 RODataSerializationCluster(Zone* zone,
3617 const char* type,
3618 intptr_t cid,
3619 bool is_canonical)
3620 : CanonicalSetSerializationCluster(
3621 cid,
3622 is_canonical,
3623 is_canonical && IsStringClassId(cid),
3624 ImageWriter::TagObjectTypeAsReadOnly(zone, type)),
3625 zone_(zone),
3626 cid_(cid),
3627 type_(type) {}
3628 ~RODataSerializationCluster() {}
3629
3630 void Trace(Serializer* s, ObjectPtr object) {
3631 // A string's hash must already be computed when we write it because it
3632 // will be loaded into read-only memory. Extra bytes due to allocation
3633 // rounding need to be deterministically set for reliable deduplication in
3634 // shared images.
3635 if (object->untag()->InVMIsolateHeap() ||
3636 s->heap()->old_space()->IsObjectFromImagePages(object)) {
3637 // This object is already read-only.
3638 } else {
3639 Object::FinalizeReadOnlyObject(object);
3640 }
3641
3642 objects_.Add(object);
3643 }
3644
3645 void WriteAlloc(Serializer* s) {
3646 const bool is_string_cluster = IsStringClassId(cid_);
3647
3648 intptr_t count = objects_.length();
3649 s->WriteUnsigned(count);
3650 ReorderObjects(s);
3651
3652 uint32_t running_offset = 0;
3653 for (intptr_t i = 0; i < count; i++) {
3654 ObjectPtr object = objects_[i];
3655 s->AssignRef(object);
3656 const StringPtr name =
3657 is_string_cluster ? String::RawCast(object) : nullptr;
3658 Serializer::WritingObjectScope scope(s, type_, object, name);
3659 uint32_t offset = s->GetDataOffset(object);
3660 s->TraceDataOffset(offset);
3661 ASSERT(Utils::IsAligned(
3662 offset, compiler::target::ObjectAlignment::kObjectAlignment));
3663 ASSERT(offset > running_offset);
3664 s->WriteUnsigned((offset - running_offset) >>
3665 compiler::target::ObjectAlignment::kObjectAlignmentLog2);
3666 running_offset = offset;
3667 }
3668 WriteCanonicalSetLayout(s);
3669 }
3670
3671 void WriteFill(Serializer* s) {
3672 // No-op.
3673 }
3674
3675 private:
3676 Zone* zone_;
3677 const intptr_t cid_;
3678 const char* const type_;
3679};
3680#endif // !DART_PRECOMPILED_RUNTIME && !DART_COMPRESSED_POINTERS
3681
3682#if !defined(DART_COMPRESSED_POINTERS)
3683class RODataDeserializationCluster
3684 : public CanonicalSetDeserializationCluster<CanonicalStringSet> {
3685 public:
3686 explicit RODataDeserializationCluster(intptr_t cid,
3687 bool is_canonical,
3688 bool is_root_unit)
3689 : CanonicalSetDeserializationCluster(is_canonical,
3690 is_root_unit,
3691 "ROData"),
3692 cid_(cid) {}
3693 ~RODataDeserializationCluster() {}
3694
3695 void ReadAlloc(Deserializer* d) override {
3696 start_index_ = d->next_index();
3697 intptr_t count = d->ReadUnsigned();
3698 uint32_t running_offset = 0;
3699 for (intptr_t i = 0; i < count; i++) {
3700 running_offset += d->ReadUnsigned() << kObjectAlignmentLog2;
3701 ObjectPtr object = d->GetObjectAt(running_offset);
3702 d->AssignRef(object);
3703 }
3704 stop_index_ = d->next_index();
3705 if (cid_ == kStringCid) {
3706 BuildCanonicalSetFromLayout(d);
3707 }
3708 }
3709
3710 void ReadFill(Deserializer* d_) override {
3711 Deserializer::Local d(d_);
3712
3713 // No-op.
3714 }
3715
3716 void PostLoad(Deserializer* d, const Array& refs) override {
3717 if (!table_.IsNull()) {
3718 auto object_store = d->isolate_group()->object_store();
3719 VerifyCanonicalSet(d, refs,
3720 WeakArray::Handle(object_store->symbol_table()));
3721 object_store->set_symbol_table(table_);
3722 if (d->isolate_group() == Dart::vm_isolate_group()) {
3723 Symbols::InitFromSnapshot(d->isolate_group());
3724 }
3725 } else if (!is_root_unit_ && is_canonical()) {
3726 FATAL("Cannot recanonicalize RO objects.");
3727 }
3728 }
3729
3730 private:
3731 const intptr_t cid_;
3732};
3733#endif // !DART_COMPRESSED_POINTERS
3734
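// The ROData clusters above never copy object contents into the cluster
// stream. Each entry is just a delta-encoded, alignment-shifted offset into
// the read-only image, which is why their WriteFill/ReadFill are no-ops:
//
//   writer: s->WriteUnsigned((offset - running_offset) >> kObjectAlignmentLog2);
//   reader: running_offset += d->ReadUnsigned() << kObjectAlignmentLog2;
//           d->AssignRef(d->GetObjectAt(running_offset));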
3735#if !defined(DART_PRECOMPILED_RUNTIME)
3736class ExceptionHandlersSerializationCluster : public SerializationCluster {
3737 public:
3738 ExceptionHandlersSerializationCluster()
3739 : SerializationCluster("ExceptionHandlers", kExceptionHandlersCid) {}
3740 ~ExceptionHandlersSerializationCluster() {}
3741
3742 void Trace(Serializer* s, ObjectPtr object) {
3743 ExceptionHandlersPtr handlers = ExceptionHandlers::RawCast(object);
3744 objects_.Add(handlers);
3745
3746 s->Push(handlers->untag()->handled_types_data());
3747 }
3748
3749 void WriteAlloc(Serializer* s) {
3750 const intptr_t count = objects_.length();
3751 s->WriteUnsigned(count);
3752 for (intptr_t i = 0; i < count; i++) {
3753 ExceptionHandlersPtr handlers = objects_[i];
3754 s->AssignRef(handlers);
3755 AutoTraceObject(handlers);
3756 const intptr_t length = handlers->untag()->num_entries();
3757 s->WriteUnsigned(length);
3758 target_memory_size_ +=
3759 compiler::target::ExceptionHandlers::InstanceSize(length);
3760 }
3761 }
3762
3763 void WriteFill(Serializer* s) {
3764 const intptr_t count = objects_.length();
3765 for (intptr_t i = 0; i < count; i++) {
3766 ExceptionHandlersPtr handlers = objects_[i];
3767 AutoTraceObject(handlers);
3768 const intptr_t packed_fields = handlers->untag()->packed_fields_;
3769 const intptr_t length =
3770 UntaggedExceptionHandlers::NumEntriesBits::decode(packed_fields);
3771 s->WriteUnsigned(packed_fields);
3772 WriteCompressedField(handlers, handled_types_data);
3773 for (intptr_t j = 0; j < length; j++) {
3774 const ExceptionHandlerInfo& info = handlers->untag()->data()[j];
3775 s->Write<uint32_t>(info.handler_pc_offset);
3776 s->Write<int16_t>(info.outer_try_index);
3777 s->Write<int8_t>(info.needs_stacktrace);
3778 s->Write<int8_t>(info.has_catch_all);
3779 s->Write<int8_t>(info.is_generated);
3780 }
3781 }
3782 }
3783
3784 private:
3785 GrowableArray<ExceptionHandlersPtr> objects_;
3786};
3787#endif // !DART_PRECOMPILED_RUNTIME
3788
3789class ExceptionHandlersDeserializationCluster : public DeserializationCluster {
3790 public:
3791 ExceptionHandlersDeserializationCluster()
3792 : DeserializationCluster("ExceptionHandlers") {}
3793 ~ExceptionHandlersDeserializationCluster() {}
3794
3795 void ReadAlloc(Deserializer* d) override {
3796 start_index_ = d->next_index();
3797 const intptr_t count = d->ReadUnsigned();
3798 for (intptr_t i = 0; i < count; i++) {
3799 const intptr_t length = d->ReadUnsigned();
3800 d->AssignRef(d->Allocate(ExceptionHandlers::InstanceSize(length)));
3801 }
3802 stop_index_ = d->next_index();
3803 }
3804
3805 void ReadFill(Deserializer* d_) override {
3806 Deserializer::Local d(d_);
3807
3808 ASSERT(!is_canonical()); // Never canonical.
3809 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3810 ExceptionHandlersPtr handlers =
3811 static_cast<ExceptionHandlersPtr>(d.Ref(id));
3812 const intptr_t packed_fields = d.ReadUnsigned();
3813 const intptr_t length =
3814 UntaggedExceptionHandlers::NumEntriesBits::decode(packed_fields);
3815 Deserializer::InitializeHeader(handlers, kExceptionHandlersCid,
3816 ExceptionHandlers::InstanceSize(length));
3817 handlers->untag()->packed_fields_ = packed_fields;
3818 handlers->untag()->handled_types_data_ =
3819 static_cast<ArrayPtr>(d.ReadRef());
3820 for (intptr_t j = 0; j < length; j++) {
3821 ExceptionHandlerInfo& info = handlers->untag()->data()[j];
3822 info.handler_pc_offset = d.Read<uint32_t>();
3823 info.outer_try_index = d.Read<int16_t>();
3824 info.needs_stacktrace = d.Read<int8_t>();
3825 info.has_catch_all = d.Read<int8_t>();
3826 info.is_generated = d.Read<int8_t>();
3827 }
3828 }
3829 }
3830};
3831
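// Per-entry layout streamed by the ExceptionHandlers clusters above, written
// and read in exactly this order (the struct form is illustrative only):
//
//   uint32_t handler_pc_offset;
//   int16_t  outer_try_index;
//   int8_t   needs_stacktrace;
//   int8_t   has_catch_all;
//   int8_t   is_generated;
//
// The entry count itself travels inside `packed_fields`, recovered with
// UntaggedExceptionHandlers::NumEntriesBits::decode().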
3832#if !defined(DART_PRECOMPILED_RUNTIME)
3833class ContextSerializationCluster : public SerializationCluster {
3834 public:
3835 ContextSerializationCluster()
3836 : SerializationCluster("Context", kContextCid) {}
3837 ~ContextSerializationCluster() {}
3838
3839 void Trace(Serializer* s, ObjectPtr object) {
3840 ContextPtr context = Context::RawCast(object);
3841 objects_.Add(context);
3842
3843 s->Push(context->untag()->parent());
3844 const intptr_t length = context->untag()->num_variables_;
3845 for (intptr_t i = 0; i < length; i++) {
3846 s->Push(context->untag()->element(i));
3847 }
3848 }
3849
3850 void WriteAlloc(Serializer* s) {
3851 const intptr_t count = objects_.length();
3852 s->WriteUnsigned(count);
3853 for (intptr_t i = 0; i < count; i++) {
3854 ContextPtr context = objects_[i];
3855 s->AssignRef(context);
3856 AutoTraceObject(context);
3857 const intptr_t length = context->untag()->num_variables_;
3858 s->WriteUnsigned(length);
3859 target_memory_size_ += compiler::target::Context::InstanceSize(length);
3860 }
3861 }
3862
3863 void WriteFill(Serializer* s) {
3864 const intptr_t count = objects_.length();
3865 for (intptr_t i = 0; i < count; i++) {
3866 ContextPtr context = objects_[i];
3867 AutoTraceObject(context);
3868 const intptr_t length = context->untag()->num_variables_;
3869 s->WriteUnsigned(length);
3870 WriteField(context, parent());
3871 for (intptr_t j = 0; j < length; j++) {
3872 s->WriteElementRef(context->untag()->element(j), j);
3873 }
3874 }
3875 }
3876
3877 private:
3878 GrowableArray<ContextPtr> objects_;
3879};
3880#endif // !DART_PRECOMPILED_RUNTIME
3881
3882class ContextDeserializationCluster : public DeserializationCluster {
3883 public:
3884 ContextDeserializationCluster() : DeserializationCluster("Context") {}
3885 ~ContextDeserializationCluster() {}
3886
3887 void ReadAlloc(Deserializer* d) override {
3888 start_index_ = d->next_index();
3889 const intptr_t count = d->ReadUnsigned();
3890 for (intptr_t i = 0; i < count; i++) {
3891 const intptr_t length = d->ReadUnsigned();
3892 d->AssignRef(d->Allocate(Context::InstanceSize(length)));
3893 }
3894 stop_index_ = d->next_index();
3895 }
3896
3897 void ReadFill(Deserializer* d_) override {
3898 Deserializer::Local d(d_);
3899
3900 ASSERT(!is_canonical()); // Never canonical.
3901 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3902 ContextPtr context = static_cast<ContextPtr>(d.Ref(id));
3903 const intptr_t length = d.ReadUnsigned();
3904 Deserializer::InitializeHeader(context, kContextCid,
3905 Context::InstanceSize(length));
3906 context->untag()->num_variables_ = length;
3907 context->untag()->parent_ = static_cast<ContextPtr>(d.ReadRef());
3908 for (intptr_t j = 0; j < length; j++) {
3909 context->untag()->data()[j] = d.ReadRef();
3910 }
3911 }
3912 }
3913};
3914
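// A Context travels as its variable count (alloc pass), then the count again,
// the parent ref, and one ref per slot (fill pass). A loader therefore runs
// roughly:
//
//   const intptr_t length = d.ReadUnsigned();
//   context->untag()->parent_ = static_cast<ContextPtr>(d.ReadRef());
//   for (intptr_t j = 0; j < length; j++) {
//     context->untag()->data()[j] = d.ReadRef();
//   }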
3915#if !defined(DART_PRECOMPILED_RUNTIME)
3916class ContextScopeSerializationCluster : public SerializationCluster {
3917 public:
3918 ContextScopeSerializationCluster()
3919 : SerializationCluster("ContextScope", kContextScopeCid) {}
3920 ~ContextScopeSerializationCluster() {}
3921
3922 void Trace(Serializer* s, ObjectPtr object) {
3923 ContextScopePtr scope = ContextScope::RawCast(object);
3924 objects_.Add(scope);
3925
3926 const intptr_t length = scope->untag()->num_variables_;
3927 PushFromTo(scope, length);
3928 }
3929
3930 void WriteAlloc(Serializer* s) {
3931 const intptr_t count = objects_.length();
3932 s->WriteUnsigned(count);
3933 for (intptr_t i = 0; i < count; i++) {
3934 ContextScopePtr scope = objects_[i];
3935 s->AssignRef(scope);
3936 AutoTraceObject(scope);
3937 const intptr_t length = scope->untag()->num_variables_;
3938 s->WriteUnsigned(length);
3939 target_memory_size_ +=
3940 compiler::target::ContextScope::InstanceSize(length);
3941 }
3942 }
3943
3944 void WriteFill(Serializer* s) {
3945 const intptr_t count = objects_.length();
3946 for (intptr_t i = 0; i < count; i++) {
3947 ContextScopePtr scope = objects_[i];
3948 AutoTraceObject(scope);
3949 const intptr_t length = scope->untag()->num_variables_;
3950 s->WriteUnsigned(length);
3951 s->Write<bool>(scope->untag()->is_implicit_);
3952 WriteFromTo(scope, length);
3953 }
3954 }
3955
3956 private:
3957 GrowableArray<ContextScopePtr> objects_;
3958};
3959#endif // !DART_PRECOMPILED_RUNTIME
3960
3961class ContextScopeDeserializationCluster : public DeserializationCluster {
3962 public:
3963 ContextScopeDeserializationCluster()
3964 : DeserializationCluster("ContextScope") {}
3965 ~ContextScopeDeserializationCluster() {}
3966
3967 void ReadAlloc(Deserializer* d) override {
3968 start_index_ = d->next_index();
3969 const intptr_t count = d->ReadUnsigned();
3970 for (intptr_t i = 0; i < count; i++) {
3971 const intptr_t length = d->ReadUnsigned();
3972 d->AssignRef(d->Allocate(ContextScope::InstanceSize(length)));
3973 }
3974 stop_index_ = d->next_index();
3975 }
3976
3977 void ReadFill(Deserializer* d_) override {
3978 Deserializer::Local d(d_);
3979
3980 ASSERT(!is_canonical()); // Never canonical.
3981 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
3982 ContextScopePtr scope = static_cast<ContextScopePtr>(d.Ref(id));
3983 const intptr_t length = d.ReadUnsigned();
3984 Deserializer::InitializeHeader(scope, kContextScopeCid,
3985 ContextScope::InstanceSize(length));
3986 scope->untag()->num_variables_ = length;
3987 scope->untag()->is_implicit_ = d.Read<bool>();
3988 d.ReadFromTo(scope, length);
3989 }
3990 }
3991};
3992
3993#if !defined(DART_PRECOMPILED_RUNTIME)
3994class UnlinkedCallSerializationCluster : public SerializationCluster {
3995 public:
3996 UnlinkedCallSerializationCluster()
3997 : SerializationCluster("UnlinkedCall",
3998 kUnlinkedCallCid,
3999 compiler::target::UnlinkedCall::InstanceSize()) {}
4000 ~UnlinkedCallSerializationCluster() {}
4001
4002 void Trace(Serializer* s, ObjectPtr object) {
4003 UnlinkedCallPtr unlinked = UnlinkedCall::RawCast(object);
4004 objects_.Add(unlinked);
4005 PushFromTo(unlinked);
4006 }
4007
4008 void WriteAlloc(Serializer* s) {
4009 const intptr_t count = objects_.length();
4010 s->WriteUnsigned(count);
4011 for (intptr_t i = 0; i < count; i++) {
4012 UnlinkedCallPtr unlinked = objects_[i];
4013 s->AssignRef(unlinked);
4014 }
4015 }
4016
4017 void WriteFill(Serializer* s) {
4018 const intptr_t count = objects_.length();
4019 for (intptr_t i = 0; i < count; i++) {
4020 UnlinkedCallPtr unlinked = objects_[i];
4021 AutoTraceObjectName(unlinked, unlinked->untag()->target_name_);
4022 WriteFromTo(unlinked);
4023 s->Write<bool>(unlinked->untag()->can_patch_to_monomorphic_);
4024 }
4025 }
4026
4027 private:
4028 GrowableArray<UnlinkedCallPtr> objects_;
4029};
4030#endif // !DART_PRECOMPILED_RUNTIME
4031
4032class UnlinkedCallDeserializationCluster : public DeserializationCluster {
4033 public:
4034 UnlinkedCallDeserializationCluster()
4035 : DeserializationCluster("UnlinkedCall") {}
4036 ~UnlinkedCallDeserializationCluster() {}
4037
4038 void ReadAlloc(Deserializer* d) override {
4039 ReadAllocFixedSize(d, UnlinkedCall::InstanceSize());
4040 }
4041
4042 void ReadFill(Deserializer* d_) override {
4043 Deserializer::Local d(d_);
4044
4045 ASSERT(!is_canonical()); // Never canonical.
4046 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4047 UnlinkedCallPtr unlinked = static_cast<UnlinkedCallPtr>(d.Ref(id));
4048 Deserializer::InitializeHeader(unlinked, kUnlinkedCallCid,
4049 UnlinkedCall::InstanceSize());
4050 d.ReadFromTo(unlinked);
4051 unlinked->untag()->can_patch_to_monomorphic_ = d.Read<bool>();
4052 }
4053 }
4054};
4055
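// UnlinkedCall above, and most of the clusters that follow, hold fixed-size
// objects: the serializer passes compiler::target::<Class>::InstanceSize() to
// the SerializationCluster constructor, so no per-object length is streamed,
// and the matching deserializer just carves out `count` identical slots:
//
//   void ReadAlloc(Deserializer* d) override {
//     ReadAllocFixedSize(d, UnlinkedCall::InstanceSize());
//   }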
4056#if !defined(DART_PRECOMPILED_RUNTIME)
4057class ICDataSerializationCluster : public SerializationCluster {
4058 public:
4059 ICDataSerializationCluster()
4060 : SerializationCluster("ICData",
4061 kICDataCid,
4062 compiler::target::ICData::InstanceSize()) {}
4063 ~ICDataSerializationCluster() {}
4064
4065 void Trace(Serializer* s, ObjectPtr object) {
4066 ICDataPtr ic = ICData::RawCast(object);
4067 objects_.Add(ic);
4068 PushFromTo(ic);
4069 }
4070
4071 void WriteAlloc(Serializer* s) {
4072 const intptr_t count = objects_.length();
4073 s->WriteUnsigned(count);
4074 for (intptr_t i = 0; i < count; i++) {
4075 ICDataPtr ic = objects_[i];
4076 s->AssignRef(ic);
4077 }
4078 }
4079
4080 void WriteFill(Serializer* s) {
4081 Snapshot::Kind kind = s->kind();
4082 const intptr_t count = objects_.length();
4083 for (intptr_t i = 0; i < count; i++) {
4084 ICDataPtr ic = objects_[i];
4085 AutoTraceObjectName(ic, ic->untag()->target_name_);
4086 WriteFromTo(ic);
4087 if (kind != Snapshot::kFullAOT) {
4088 NOT_IN_PRECOMPILED(s->Write<int32_t>(ic->untag()->deopt_id_));
4089 }
4090 s->Write<uint32_t>(ic->untag()->state_bits_);
4091 }
4092 }
4093
4094 private:
4095 GrowableArray<ICDataPtr> objects_;
4096};
4097#endif // !DART_PRECOMPILED_RUNTIME
4098
4099class ICDataDeserializationCluster : public DeserializationCluster {
4100 public:
4101 ICDataDeserializationCluster() : DeserializationCluster("ICData") {}
4102 ~ICDataDeserializationCluster() {}
4103
4104 void ReadAlloc(Deserializer* d) override {
4105 ReadAllocFixedSize(d, ICData::InstanceSize());
4106 }
4107
4108 void ReadFill(Deserializer* d_) override {
4109 Deserializer::Local d(d_);
4110
4111 ASSERT(!is_canonical()); // Never canonical.
4112 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4113 ICDataPtr ic = static_cast<ICDataPtr>(d.Ref(id));
4114 Deserializer::InitializeHeader(ic, kICDataCid, ICData::InstanceSize());
4115 d.ReadFromTo(ic);
4116 NOT_IN_PRECOMPILED(ic->untag()->deopt_id_ = d.Read<int32_t>());
4117 ic->untag()->state_bits_ = d.Read<uint32_t>();
4118 }
4119 }
4120};
4121
4122#if !defined(DART_PRECOMPILED_RUNTIME)
4123class MegamorphicCacheSerializationCluster : public SerializationCluster {
4124 public:
4125 MegamorphicCacheSerializationCluster()
4126 : SerializationCluster(
4127 "MegamorphicCache",
4128 kMegamorphicCacheCid,
4129 compiler::target::MegamorphicCache::InstanceSize()) {}
4130 ~MegamorphicCacheSerializationCluster() {}
4131
4132 void Trace(Serializer* s, ObjectPtr object) {
4133 MegamorphicCachePtr cache = MegamorphicCache::RawCast(object);
4134 objects_.Add(cache);
4135 PushFromTo(cache);
4136 }
4137
4138 void WriteAlloc(Serializer* s) {
4139 const intptr_t count = objects_.length();
4140 s->WriteUnsigned(count);
4141 for (intptr_t i = 0; i < count; i++) {
4142 MegamorphicCachePtr cache = objects_[i];
4143 s->AssignRef(cache);
4144 }
4145 }
4146
4147 void WriteFill(Serializer* s) {
4148 const intptr_t count = objects_.length();
4149 for (intptr_t i = 0; i < count; i++) {
4150 MegamorphicCachePtr cache = objects_[i];
4151 AutoTraceObjectName(cache, cache->untag()->target_name_);
4152 WriteFromTo(cache);
4153 s->Write<int32_t>(cache->untag()->filled_entry_count_);
4154 }
4155 }
4156
4157 private:
4158 GrowableArray<MegamorphicCachePtr> objects_;
4159};
4160#endif // !DART_PRECOMPILED_RUNTIME
4161
4162class MegamorphicCacheDeserializationCluster : public DeserializationCluster {
4163 public:
4164 MegamorphicCacheDeserializationCluster()
4165 : DeserializationCluster("MegamorphicCache") {}
4166 ~MegamorphicCacheDeserializationCluster() {}
4167
4168 void ReadAlloc(Deserializer* d) override {
4169 ReadAllocFixedSize(d, MegamorphicCache::InstanceSize());
4170 }
4171
4172 void ReadFill(Deserializer* d_) override {
4173 Deserializer::Local d(d_);
4174
4175 ASSERT(!is_canonical()); // Never canonical.
4176 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4177 MegamorphicCachePtr cache = static_cast<MegamorphicCachePtr>(d.Ref(id));
4178 Deserializer::InitializeHeader(cache, kMegamorphicCacheCid,
4179 MegamorphicCache::InstanceSize());
4180 d.ReadFromTo(cache);
4181 cache->untag()->filled_entry_count_ = d.Read<int32_t>();
4182 }
4183 }
4184};
4185
4186#if !defined(DART_PRECOMPILED_RUNTIME)
4187class SubtypeTestCacheSerializationCluster : public SerializationCluster {
4188 public:
4189 SubtypeTestCacheSerializationCluster()
4190 : SerializationCluster(
4191 "SubtypeTestCache",
4192 kSubtypeTestCacheCid,
4193 compiler::target::SubtypeTestCache::InstanceSize()) {}
4194 ~SubtypeTestCacheSerializationCluster() {}
4195
4196 void Trace(Serializer* s, ObjectPtr object) {
4197 SubtypeTestCachePtr cache = SubtypeTestCache::RawCast(object);
4198 objects_.Add(cache);
4199 s->Push(cache->untag()->cache_);
4200 }
4201
4202 void WriteAlloc(Serializer* s) {
4203 const intptr_t count = objects_.length();
4204 s->WriteUnsigned(count);
4205 for (intptr_t i = 0; i < count; i++) {
4206 SubtypeTestCachePtr cache = objects_[i];
4207 s->AssignRef(cache);
4208 }
4209 }
4210
4211 void WriteFill(Serializer* s) {
4212 const intptr_t count = objects_.length();
4213 for (intptr_t i = 0; i < count; i++) {
4214 SubtypeTestCachePtr cache = objects_[i];
4215 AutoTraceObject(cache);
4216 WriteField(cache, cache_);
4217 s->Write<uint32_t>(cache->untag()->num_inputs_);
4218 s->Write<uint32_t>(cache->untag()->num_occupied_);
4219 }
4220 }
4221
4222 private:
4223 GrowableArray<SubtypeTestCachePtr> objects_;
4224};
4225#endif // !DART_PRECOMPILED_RUNTIME
4226
4227class SubtypeTestCacheDeserializationCluster : public DeserializationCluster {
4228 public:
4229 SubtypeTestCacheDeserializationCluster()
4230 : DeserializationCluster("SubtypeTestCache") {}
4231 ~SubtypeTestCacheDeserializationCluster() {}
4232
4233 void ReadAlloc(Deserializer* d) override {
4234 ReadAllocFixedSize(d, SubtypeTestCache::InstanceSize());
4235 }
4236
4237 void ReadFill(Deserializer* d_) override {
4238 Deserializer::Local d(d_);
4239
4240 ASSERT(!is_canonical()); // Never canonical.
4241 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4242 SubtypeTestCachePtr cache = static_cast<SubtypeTestCachePtr>(d.Ref(id));
4243 Deserializer::InitializeHeader(cache, kSubtypeTestCacheCid,
4244 SubtypeTestCache::InstanceSize());
4245 cache->untag()->cache_ = static_cast<ArrayPtr>(d.ReadRef());
4246 cache->untag()->num_inputs_ = d.Read<uint32_t>();
4247 cache->untag()->num_occupied_ = d.Read<uint32_t>();
4248 }
4249 }
4250};
4251
4252#if !defined(DART_PRECOMPILED_RUNTIME)
4253class LoadingUnitSerializationCluster : public SerializationCluster {
4254 public:
4255 LoadingUnitSerializationCluster()
4256 : SerializationCluster("LoadingUnit",
4257 kLoadingUnitCid,
4258 compiler::target::LoadingUnit::InstanceSize()) {}
4259 ~LoadingUnitSerializationCluster() {}
4260
4261 void Trace(Serializer* s, ObjectPtr object) {
4262 LoadingUnitPtr unit = LoadingUnit::RawCast(object);
4263 objects_.Add(unit);
4264 s->Push(unit->untag()->parent());
4265 }
4266
4267 void WriteAlloc(Serializer* s) {
4268 const intptr_t count = objects_.length();
4269 s->WriteUnsigned(count);
4270 for (intptr_t i = 0; i < count; i++) {
4271 LoadingUnitPtr unit = objects_[i];
4272 s->AssignRef(unit);
4273 }
4274 }
4275
4276 void WriteFill(Serializer* s) {
4277 const intptr_t count = objects_.length();
4278 for (intptr_t i = 0; i < count; i++) {
4279 LoadingUnitPtr unit = objects_[i];
4280 AutoTraceObject(unit);
4281 WriteCompressedField(unit, parent);
4282 s->Write<intptr_t>(
4283 unit->untag()->packed_fields_.Read<UntaggedLoadingUnit::IdBits>());
4284 }
4285 }
4286
4287 private:
4288 GrowableArray<LoadingUnitPtr> objects_;
4289};
4290#endif // !DART_PRECOMPILED_RUNTIME
4291
4292class LoadingUnitDeserializationCluster : public DeserializationCluster {
4293 public:
4294 LoadingUnitDeserializationCluster() : DeserializationCluster("LoadingUnit") {}
4295 ~LoadingUnitDeserializationCluster() {}
4296
4297 void ReadAlloc(Deserializer* d) override {
4298 ReadAllocFixedSize(d, LoadingUnit::InstanceSize());
4299 }
4300
4301 void ReadFill(Deserializer* d_) override {
4302 Deserializer::Local d(d_);
4303
4304 ASSERT(!is_canonical()); // Never canonical.
4305 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4306 LoadingUnitPtr unit = static_cast<LoadingUnitPtr>(d.Ref(id));
4307 Deserializer::InitializeHeader(unit, kLoadingUnitCid,
4308 LoadingUnit::InstanceSize());
4309 unit->untag()->parent_ = static_cast<LoadingUnitPtr>(d.ReadRef());
4310 unit->untag()->base_objects_ = Array::null();
4311 unit->untag()->instructions_image_ = nullptr;
4312 unit->untag()->packed_fields_ =
4313 UntaggedLoadingUnit::LoadStateBits::encode(
4314 UntaggedLoadingUnit::kNotLoaded) |
4315 UntaggedLoadingUnit::IdBits::encode(d.Read<intptr_t>());
4316 }
4317 }
4318};
4319
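// A LoadingUnit's id survives the snapshot but its load state does not: the
// deserializer above rebuilds packed_fields_ as
//
//   UntaggedLoadingUnit::LoadStateBits::encode(UntaggedLoadingUnit::kNotLoaded) |
//       UntaggedLoadingUnit::IdBits::encode(/* id read from the stream */)
//
// so every deferred unit starts out "not loaded" after the snapshot is read.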
4320#if !defined(DART_PRECOMPILED_RUNTIME)
4321class LanguageErrorSerializationCluster : public SerializationCluster {
4322 public:
4323 LanguageErrorSerializationCluster()
4324 : SerializationCluster("LanguageError",
4325 kLanguageErrorCid,
4326 compiler::target::LanguageError::InstanceSize()) {}
4327 ~LanguageErrorSerializationCluster() {}
4328
4329 void Trace(Serializer* s, ObjectPtr object) {
4330 LanguageErrorPtr error = LanguageError::RawCast(object);
4331 objects_.Add(error);
4332 PushFromTo(error);
4333 }
4334
4335 void WriteAlloc(Serializer* s) {
4336 const intptr_t count = objects_.length();
4337 s->WriteUnsigned(count);
4338 for (intptr_t i = 0; i < count; i++) {
4339 LanguageErrorPtr error = objects_[i];
4340 s->AssignRef(error);
4341 }
4342 }
4343
4344 void WriteFill(Serializer* s) {
4345 const intptr_t count = objects_.length();
4346 for (intptr_t i = 0; i < count; i++) {
4347 LanguageErrorPtr error = objects_[i];
4348 AutoTraceObject(error);
4349 WriteFromTo(error);
4350 s->WriteTokenPosition(error->untag()->token_pos_);
4351 s->Write<bool>(error->untag()->report_after_token_);
4352 s->Write<int8_t>(error->untag()->kind_);
4353 }
4354 }
4355
4356 private:
4357 GrowableArray<LanguageErrorPtr> objects_;
4358};
4359#endif // !DART_PRECOMPILED_RUNTIME
4360
4361class LanguageErrorDeserializationCluster : public DeserializationCluster {
4362 public:
4363 LanguageErrorDeserializationCluster()
4364 : DeserializationCluster("LanguageError") {}
4365 ~LanguageErrorDeserializationCluster() {}
4366
4367 void ReadAlloc(Deserializer* d) override {
4368 ReadAllocFixedSize(d, LanguageError::InstanceSize());
4369 }
4370
4371 void ReadFill(Deserializer* d_) override {
4372 Deserializer::Local d(d_);
4373
4374 ASSERT(!is_canonical()); // Never canonical.
4375 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4376 LanguageErrorPtr error = static_cast<LanguageErrorPtr>(d.Ref(id));
4377 Deserializer::InitializeHeader(error, kLanguageErrorCid,
4378 LanguageError::InstanceSize());
4379 d.ReadFromTo(error);
4380 error->untag()->token_pos_ = d.ReadTokenPosition();
4381 error->untag()->report_after_token_ = d.Read<bool>();
4382 error->untag()->kind_ = d.Read<int8_t>();
4383 }
4384 }
4385};
4386
4387#if !defined(DART_PRECOMPILED_RUNTIME)
4388class UnhandledExceptionSerializationCluster : public SerializationCluster {
4389 public:
4390 UnhandledExceptionSerializationCluster()
4391 : SerializationCluster(
4392 "UnhandledException",
4393 kUnhandledExceptionCid,
4394 compiler::target::UnhandledException::InstanceSize()) {}
4395 ~UnhandledExceptionSerializationCluster() {}
4396
4397 void Trace(Serializer* s, ObjectPtr object) {
4398 UnhandledExceptionPtr exception = UnhandledException::RawCast(object);
4399 objects_.Add(exception);
4400 PushFromTo(exception);
4401 }
4402
4403 void WriteAlloc(Serializer* s) {
4404 const intptr_t count = objects_.length();
4405 s->WriteUnsigned(count);
4406 for (intptr_t i = 0; i < count; i++) {
4407 UnhandledExceptionPtr exception = objects_[i];
4408 s->AssignRef(exception);
4409 }
4410 }
4411
4412 void WriteFill(Serializer* s) {
4413 const intptr_t count = objects_.length();
4414 for (intptr_t i = 0; i < count; i++) {
4415 UnhandledExceptionPtr exception = objects_[i];
4416 AutoTraceObject(exception);
4417 WriteFromTo(exception);
4418 }
4419 }
4420
4421 private:
4422 GrowableArray<UnhandledExceptionPtr> objects_;
4423};
4424#endif // !DART_PRECOMPILED_RUNTIME
4425
4426class UnhandledExceptionDeserializationCluster : public DeserializationCluster {
4427 public:
4428 UnhandledExceptionDeserializationCluster()
4429 : DeserializationCluster("UnhandledException") {}
4430 ~UnhandledExceptionDeserializationCluster() {}
4431
4432 void ReadAlloc(Deserializer* d) override {
4433 ReadAllocFixedSize(d, UnhandledException::InstanceSize());
4434 }
4435
4436 void ReadFill(Deserializer* d_) override {
4437 Deserializer::Local d(d_);
4438
4439 ASSERT(!is_canonical()); // Never canonical.
4440 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4441 UnhandledExceptionPtr exception =
4442 static_cast<UnhandledExceptionPtr>(d.Ref(id));
4443 Deserializer::InitializeHeader(exception, kUnhandledExceptionCid,
4444 UnhandledException::InstanceSize());
4445 d.ReadFromTo(exception);
4446 }
4447 }
4448};
4449
4450#if !defined(DART_PRECOMPILED_RUNTIME)
4451class InstanceSerializationCluster : public SerializationCluster {
4452 public:
4453 InstanceSerializationCluster(bool is_canonical, intptr_t cid)
4454 : SerializationCluster("Instance", cid, kSizeVaries, is_canonical) {
4455 ClassPtr cls = IsolateGroup::Current()->class_table()->At(cid);
4456 host_next_field_offset_in_words_ =
4457 cls->untag()->host_next_field_offset_in_words_;
4458 ASSERT(host_next_field_offset_in_words_ > 0);
4459#if defined(DART_PRECOMPILER)
4460 target_next_field_offset_in_words_ =
4461 cls->untag()->target_next_field_offset_in_words_;
4462 target_instance_size_in_words_ =
4463 cls->untag()->target_instance_size_in_words_;
4464#else
4465 target_next_field_offset_in_words_ =
4466 cls->untag()->host_next_field_offset_in_words_;
4467 target_instance_size_in_words_ = cls->untag()->host_instance_size_in_words_;
4468#endif // defined(DART_PRECOMPILER)
4469 ASSERT(target_next_field_offset_in_words_ > 0);
4470 ASSERT(target_instance_size_in_words_ > 0);
4471 }
4472 ~InstanceSerializationCluster() {}
4473
4474 void Trace(Serializer* s, ObjectPtr object) {
4475 InstancePtr instance = Instance::RawCast(object);
4476 objects_.Add(instance);
4477 const intptr_t next_field_offset = host_next_field_offset_in_words_
4478 << kCompressedWordSizeLog2;
4479 const auto unboxed_fields_bitmap =
4480 s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(cid_);
4481 intptr_t offset = Instance::NextFieldOffset();
4482 while (offset < next_field_offset) {
4483 // Skips unboxed fields
4484 if (!unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
4485 ObjectPtr raw_obj =
4486 reinterpret_cast<CompressedObjectPtr*>(
4487 reinterpret_cast<uword>(instance->untag()) + offset)
4488 ->Decompress(instance->untag()->heap_base());
4489 s->Push(raw_obj);
4490 }
4491 offset += kCompressedWordSize;
4492 }
4493 }
4494
4495 void WriteAlloc(Serializer* s) {
4496 const intptr_t count = objects_.length();
4497 s->WriteUnsigned(count);
4498
4499 s->Write<int32_t>(target_next_field_offset_in_words_);
4500 s->Write<int32_t>(target_instance_size_in_words_);
4501
4502 for (intptr_t i = 0; i < count; i++) {
4503 InstancePtr instance = objects_[i];
4504 s->AssignRef(instance);
4505 }
4506
4507 const intptr_t instance_size = compiler::target::RoundedAllocationSize(
4508 target_instance_size_in_words_ * compiler::target::kCompressedWordSize);
4509 target_memory_size_ += instance_size * count;
4510 }
4511
4512 void WriteFill(Serializer* s) {
4513 intptr_t next_field_offset = host_next_field_offset_in_words_
4514 << kCompressedWordSizeLog2;
4515 const intptr_t count = objects_.length();
4516 s->WriteUnsigned64(CalculateTargetUnboxedFieldsBitmap(s, cid_).Value());
4517 const auto unboxed_fields_bitmap =
4518 s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(cid_);
4519
4520 for (intptr_t i = 0; i < count; i++) {
4521 InstancePtr instance = objects_[i];
4522 AutoTraceObject(instance);
4523#if defined(DART_PRECOMPILER)
4524 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
4525 ClassPtr cls = s->isolate_group()->class_table()->At(cid_);
4526 s->AttributePropertyRef(cls, "<class>");
4527 }
4528#endif
4529 intptr_t offset = Instance::NextFieldOffset();
4530 while (offset < next_field_offset) {
4531 if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
4532 // Writes 32 bits of the unboxed value at a time.
4533 const compressed_uword value = *reinterpret_cast<compressed_uword*>(
4534 reinterpret_cast<uword>(instance->untag()) + offset);
4535 s->WriteWordWith32BitWrites(value);
4536 } else {
4537 ObjectPtr raw_obj =
4538 reinterpret_cast<CompressedObjectPtr*>(
4539 reinterpret_cast<uword>(instance->untag()) + offset)
4540 ->Decompress(instance->untag()->heap_base());
4541 s->WriteElementRef(raw_obj, offset);
4542 }
4543 offset += kCompressedWordSize;
4544 }
4545 }
4546 }
4547
4548 private:
4549 intptr_t host_next_field_offset_in_words_;
4550 intptr_t target_next_field_offset_in_words_;
4551 intptr_t target_instance_size_in_words_;
4552 GrowableArray<InstancePtr> objects_;
4553};
4554#endif // !DART_PRECOMPILED_RUNTIME
4555
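// The instance clusters walk every object one compressed word at a time and
// consult the class's unboxed-fields bitmap to decide how each slot travels
// (sketch of the branch used on both the writer and reader side; word_at and
// ref_at are descriptive names only):
//
//   if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
//     // raw machine word, streamed 32 bits at a time
//     s->WriteWordWith32BitWrites(word_at(offset));
//   } else {
//     // object reference, streamed as a ref id
//     s->WriteElementRef(ref_at(offset), offset);
//   }
//
// The deserializer mirrors this and null-fills any slots between the last
// declared field and the rounded instance size.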
4556class AbstractInstanceDeserializationCluster : public DeserializationCluster {
4557 protected:
4558 explicit AbstractInstanceDeserializationCluster(const char* name,
4559 bool is_canonical,
4560 bool is_root_unit)
4561 : DeserializationCluster(name, is_canonical),
4562 is_root_unit_(is_root_unit) {}
4563
4564 const bool is_root_unit_;
4565
4566 public:
4567#if defined(DART_PRECOMPILED_RUNTIME)
4568 void PostLoad(Deserializer* d, const Array& refs) override {
4569 if (!is_root_unit_ && is_canonical()) {
4570 SafepointMutexLocker ml(
4571 d->isolate_group()->constant_canonicalization_mutex());
4572 Instance& instance = Instance::Handle(d->zone());
4573 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
4574 instance ^= refs.At(i);
4575 instance = instance.CanonicalizeLocked(d->thread());
4576 refs.SetAt(i, instance);
4577 }
4578 }
4579 }
4580#endif
4581};
4582
4583class InstanceDeserializationCluster
4584 : public AbstractInstanceDeserializationCluster {
4585 public:
4586 explicit InstanceDeserializationCluster(intptr_t cid,
4587 bool is_canonical,
4588 bool is_immutable,
4589 bool is_root_unit)
4590 : AbstractInstanceDeserializationCluster("Instance",
4591 is_canonical,
4592 is_root_unit),
4593 cid_(cid),
4594 is_immutable_(is_immutable) {}
4595 ~InstanceDeserializationCluster() {}
4596
4597 void ReadAlloc(Deserializer* d) override {
4598 start_index_ = d->next_index();
4599 const intptr_t count = d->ReadUnsigned();
4600 next_field_offset_in_words_ = d->Read<int32_t>();
4601 instance_size_in_words_ = d->Read<int32_t>();
4602 intptr_t instance_size = Object::RoundedAllocationSize(
4603 instance_size_in_words_ * kCompressedWordSize);
4604 for (intptr_t i = 0; i < count; i++) {
4605 d->AssignRef(d->Allocate(instance_size));
4606 }
4607 stop_index_ = d->next_index();
4608 }
4609
4610 void ReadFill(Deserializer* d_) override {
4611 Deserializer::Local d(d_);
4612
4613 const intptr_t cid = cid_;
4614 const bool mark_canonical = is_root_unit_ && is_canonical();
4615 const bool is_immutable = is_immutable_;
4616 intptr_t next_field_offset = next_field_offset_in_words_
4617 << kCompressedWordSizeLog2;
4618 intptr_t instance_size = Object::RoundedAllocationSize(
4619 instance_size_in_words_ * kCompressedWordSize);
4620 const UnboxedFieldBitmap unboxed_fields_bitmap(d.ReadUnsigned64());
4621
4622 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4623 InstancePtr instance = static_cast<InstancePtr>(d.Ref(id));
4624 Deserializer::InitializeHeader(instance, cid, instance_size,
4625 mark_canonical, is_immutable);
4626 intptr_t offset = Instance::NextFieldOffset();
4627 while (offset < next_field_offset) {
4628 if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
4629 compressed_uword* p = reinterpret_cast<compressed_uword*>(
4630 reinterpret_cast<uword>(instance->untag()) + offset);
4631 // Reads 32 bits of the unboxed value at a time
4632 *p = d.ReadWordWith32BitReads();
4633 } else {
4634 CompressedObjectPtr* p = reinterpret_cast<CompressedObjectPtr*>(
4635 reinterpret_cast<uword>(instance->untag()) + offset);
4636 *p = d.ReadRef();
4637 }
4638 offset += kCompressedWordSize;
4639 }
4640 while (offset < instance_size) {
4641 CompressedObjectPtr* p = reinterpret_cast<CompressedObjectPtr*>(
4642 reinterpret_cast<uword>(instance->untag()) + offset);
4643 *p = Object::null();
4644 offset += kCompressedWordSize;
4645 }
4646 ASSERT(offset == instance_size);
4647 }
4648 }
4649
4650 private:
4651 const intptr_t cid_;
4652 const bool is_immutable_;
4653 intptr_t next_field_offset_in_words_;
4654 intptr_t instance_size_in_words_;
4655};
4656
4657#if !defined(DART_PRECOMPILED_RUNTIME)
4658class LibraryPrefixSerializationCluster : public SerializationCluster {
4659 public:
4660 LibraryPrefixSerializationCluster()
4661 : SerializationCluster("LibraryPrefix",
4662 kLibraryPrefixCid,
4663 compiler::target::LibraryPrefix::InstanceSize()) {}
4664 ~LibraryPrefixSerializationCluster() {}
4665
4666 void Trace(Serializer* s, ObjectPtr object) {
4667 LibraryPrefixPtr prefix = LibraryPrefix::RawCast(object);
4668 objects_.Add(prefix);
4669 PushFromTo(prefix);
4670 }
4671
4672 void WriteAlloc(Serializer* s) {
4673 const intptr_t count = objects_.length();
4674 s->WriteUnsigned(count);
4675 for (intptr_t i = 0; i < count; i++) {
4676 LibraryPrefixPtr prefix = objects_[i];
4677 s->AssignRef(prefix);
4678 }
4679 }
4680
4681 void WriteFill(Serializer* s) {
4682 const intptr_t count = objects_.length();
4683 for (intptr_t i = 0; i < count; i++) {
4684 LibraryPrefixPtr prefix = objects_[i];
4685 AutoTraceObject(prefix);
4686 WriteFromTo(prefix);
4687 s->Write<uint16_t>(prefix->untag()->num_imports_);
4688 s->Write<bool>(prefix->untag()->is_deferred_load_);
4689 }
4690 }
4691
4692 private:
4693 GrowableArray<LibraryPrefixPtr> objects_;
4694};
4695#endif // !DART_PRECOMPILED_RUNTIME
4696
4697class LibraryPrefixDeserializationCluster : public DeserializationCluster {
4698 public:
4699 LibraryPrefixDeserializationCluster()
4700 : DeserializationCluster("LibraryPrefix") {}
4701 ~LibraryPrefixDeserializationCluster() {}
4702
4703 void ReadAlloc(Deserializer* d) override {
4704 ReadAllocFixedSize(d, LibraryPrefix::InstanceSize());
4705 }
4706
4707 void ReadFill(Deserializer* d_) override {
4708 Deserializer::Local d(d_);
4709
4710 ASSERT(!is_canonical()); // Never canonical.
4711 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4712 LibraryPrefixPtr prefix = static_cast<LibraryPrefixPtr>(d.Ref(id));
4713 Deserializer::InitializeHeader(prefix, kLibraryPrefixCid,
4714 LibraryPrefix::InstanceSize());
4715 d.ReadFromTo(prefix);
4716 prefix->untag()->num_imports_ = d.Read<uint16_t>();
4717 prefix->untag()->is_deferred_load_ = d.Read<bool>();
4718 }
4719 }
4720};
4721
4722#if !defined(DART_PRECOMPILED_RUNTIME)
4723class TypeSerializationCluster
4724 : public CanonicalSetSerializationCluster<
4725 CanonicalTypeSet,
4726 Type,
4727 TypePtr,
4728 /*kAllCanonicalObjectsAreIncludedIntoSet=*/false> {
4729 public:
4730 TypeSerializationCluster(bool is_canonical, bool represents_canonical_set)
4731 : CanonicalSetSerializationCluster(
4732 kTypeCid,
4733 is_canonical,
4734 represents_canonical_set,
4735 "Type",
4736 compiler::target::Type::InstanceSize()) {}
4737 ~TypeSerializationCluster() {}
4738
4739 void Trace(Serializer* s, ObjectPtr object) {
4740 TypePtr type = Type::RawCast(object);
4741 objects_.Add(type);
4742
4743 PushFromTo(type);
4744
4745 ASSERT(type->untag()->type_class_id() != kIllegalCid);
4746 ClassPtr type_class =
4747 s->isolate_group()->class_table()->At(type->untag()->type_class_id());
4748 s->Push(type_class);
4749 }
4750
4751 void WriteAlloc(Serializer* s) {
4752 intptr_t count = objects_.length();
4753 s->WriteUnsigned(count);
4754 ReorderObjects(s);
4755 for (intptr_t i = 0; i < count; i++) {
4756 TypePtr type = objects_[i];
4757 s->AssignRef(type);
4758 }
4759 WriteCanonicalSetLayout(s);
4760 }
4761
4762 void WriteFill(Serializer* s) {
4763 intptr_t count = objects_.length();
4764 for (intptr_t i = 0; i < count; i++) {
4765 WriteType(s, objects_[i]);
4766 }
4767 }
4768
4769 private:
4770 Type& type_ = Type::Handle();
4771 Class& cls_ = Class::Handle();
4772
4773 // Type::Canonicalize does not actually put all canonical Type objects into
4774 // canonical_types set. Some of the canonical declaration types (but not all
4775 // of them) are simply cached in UntaggedClass::declaration_type_ and are not
4776 // inserted into the canonical_types set.
4777 // Keep in sync with Type::Canonicalize.
4778 virtual bool IsInCanonicalSet(Serializer* s, TypePtr type) {
4779 ClassPtr type_class =
4780 s->isolate_group()->class_table()->At(type->untag()->type_class_id());
4781 if (type_class->untag()->declaration_type() != type) {
4782 return true;
4783 }
4784
4785 type_ = type;
4786 cls_ = type_class;
4787 return !type_.IsDeclarationTypeOf(cls_);
4788 }
4789
4790 void WriteType(Serializer* s, TypePtr type) {
4791 AutoTraceObject(type);
4792#if defined(DART_PRECOMPILER)
4793 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
4794 ClassPtr type_class =
4795 s->isolate_group()->class_table()->At(type->untag()->type_class_id());
4796 s->AttributePropertyRef(type_class, "<type_class>");
4797 }
4798#endif
4799 WriteFromTo(type);
4800 s->WriteUnsigned(type->untag()->flags());
4801 }
4802};
4803#endif // !DART_PRECOMPILED_RUNTIME
4804
4805class TypeDeserializationCluster
4806 : public CanonicalSetDeserializationCluster<
4807 CanonicalTypeSet,
4808 /*kAllCanonicalObjectsAreIncludedIntoSet=*/false> {
4809 public:
4810 explicit TypeDeserializationCluster(bool is_canonical, bool is_root_unit)
4811 : CanonicalSetDeserializationCluster(is_canonical, is_root_unit, "Type") {
4812 }
4813 ~TypeDeserializationCluster() {}
4814
4815 void ReadAlloc(Deserializer* d) override {
4816 ReadAllocFixedSize(d, Type::InstanceSize());
4817 BuildCanonicalSetFromLayout(d);
4818 }
4819
4820 void ReadFill(Deserializer* d_) override {
4821 Deserializer::Local d(d_);
4822
4823 const bool mark_canonical = is_root_unit_ && is_canonical();
4824 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4825 TypePtr type = static_cast<TypePtr>(d.Ref(id));
4826 Deserializer::InitializeHeader(type, kTypeCid, Type::InstanceSize(),
4827 mark_canonical);
4828 d.ReadFromTo(type);
4829 type->untag()->set_flags(d.ReadUnsigned());
4830 }
4831 }
4832
4833 void PostLoad(Deserializer* d, const Array& refs) override {
4834 if (!table_.IsNull()) {
4835 auto object_store = d->isolate_group()->object_store();
4836 VerifyCanonicalSet(d, refs,
4837 Array::Handle(object_store->canonical_types()));
4838 object_store->set_canonical_types(table_);
4839 } else if (!is_root_unit_ && is_canonical()) {
4840 AbstractType& type = AbstractType::Handle(d->zone());
4841 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
4842 type ^= refs.At(i);
4843 type = type.Canonicalize(d->thread());
4844 refs.SetAt(i, type);
4845 }
4846 }
4847
4848 Type& type = Type::Handle(d->zone());
4849 Code& stub = Code::Handle(d->zone());
4850
4851 if (Snapshot::IncludesCode(d->kind())) {
4852 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4853 type ^= refs.At(id);
4854 type.UpdateTypeTestingStubEntryPoint();
4855 }
4856 } else {
4857 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4858 type ^= refs.At(id);
4859 stub = TypeTestingStubGenerator::DefaultCodeForType(type);
4860 type.InitializeTypeTestingStubNonAtomic(stub);
4861 }
4862 }
4863 }
4864};
4865
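// The PostLoad shape above recurs for FunctionType, RecordType and
// TypeParameter below: when the snapshot carries code, only the cached type
// testing stub entry points are refreshed; otherwise each type is given a
// default stub:
//
//   if (Snapshot::IncludesCode(d->kind())) {
//     type.UpdateTypeTestingStubEntryPoint();
//   } else {
//     stub = TypeTestingStubGenerator::DefaultCodeForType(type);
//     type.InitializeTypeTestingStubNonAtomic(stub);
//   }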
4866#if !defined(DART_PRECOMPILED_RUNTIME)
4867class FunctionTypeSerializationCluster
4868 : public CanonicalSetSerializationCluster<CanonicalFunctionTypeSet,
4869 FunctionType,
4870 FunctionTypePtr> {
4871 public:
4872 explicit FunctionTypeSerializationCluster(bool is_canonical,
4873 bool represents_canonical_set)
4874 : CanonicalSetSerializationCluster(
4875 kFunctionTypeCid,
4876 is_canonical,
4877 represents_canonical_set,
4878 "FunctionType",
4879 compiler::target::FunctionType::InstanceSize()) {}
4880 ~FunctionTypeSerializationCluster() {}
4881
4882 void Trace(Serializer* s, ObjectPtr object) {
4883 FunctionTypePtr type = FunctionType::RawCast(object);
4884 objects_.Add(type);
4885 PushFromTo(type);
4886 }
4887
4888 void WriteAlloc(Serializer* s) {
4889 intptr_t count = objects_.length();
4890 s->WriteUnsigned(count);
4891 ReorderObjects(s);
4892
4893 for (intptr_t i = 0; i < count; i++) {
4894 FunctionTypePtr type = objects_[i];
4895 s->AssignRef(type);
4896 }
4897 WriteCanonicalSetLayout(s);
4898 }
4899
4900 void WriteFill(Serializer* s) {
4901 intptr_t count = objects_.length();
4902 for (intptr_t i = 0; i < count; i++) {
4903 WriteFunctionType(s, objects_[i]);
4904 }
4905 }
4906
4907 private:
4908 void WriteFunctionType(Serializer* s, FunctionTypePtr type) {
4909 AutoTraceObject(type);
4910 WriteFromTo(type);
4911 ASSERT(Utils::IsUint(8, type->untag()->flags()));
4912 s->Write<uint8_t>(type->untag()->flags());
4913 s->Write<uint32_t>(type->untag()->packed_parameter_counts_);
4914 s->Write<uint16_t>(type->untag()->packed_type_parameter_counts_);
4915 }
4916};
4917#endif // !DART_PRECOMPILED_RUNTIME
4918
4919class FunctionTypeDeserializationCluster
4920 : public CanonicalSetDeserializationCluster<CanonicalFunctionTypeSet> {
4921 public:
4922 explicit FunctionTypeDeserializationCluster(bool is_canonical,
4923 bool is_root_unit)
4924 : CanonicalSetDeserializationCluster(is_canonical,
4925 is_root_unit,
4926 "FunctionType") {}
4927 ~FunctionTypeDeserializationCluster() {}
4928
4929 void ReadAlloc(Deserializer* d) override {
4930 ReadAllocFixedSize(d, FunctionType::InstanceSize());
4931 BuildCanonicalSetFromLayout(d);
4932 }
4933
4934 void ReadFill(Deserializer* d_) override {
4935 Deserializer::Local d(d_);
4936
4937 const bool mark_canonical = is_root_unit_ && is_canonical();
4938 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4939 FunctionTypePtr type = static_cast<FunctionTypePtr>(d.Ref(id));
4940 Deserializer::InitializeHeader(
4941 type, kFunctionTypeCid, FunctionType::InstanceSize(), mark_canonical);
4942 d.ReadFromTo(type);
4943 type->untag()->set_flags(d.Read<uint8_t>());
4944 type->untag()->packed_parameter_counts_ = d.Read<uint32_t>();
4945 type->untag()->packed_type_parameter_counts_ = d.Read<uint16_t>();
4946 }
4947 }
4948
4949 void PostLoad(Deserializer* d, const Array& refs) override {
4950 if (!table_.IsNull()) {
4951 auto object_store = d->isolate_group()->object_store();
4952 VerifyCanonicalSet(
4953 d, refs, Array::Handle(object_store->canonical_function_types()));
4954 object_store->set_canonical_function_types(table_);
4955 } else if (!is_root_unit_ && is_canonical()) {
4956 AbstractType& type = AbstractType::Handle(d->zone());
4957 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
4958 type ^= refs.At(i);
4959 type = type.Canonicalize(d->thread());
4960 refs.SetAt(i, type);
4961 }
4962 }
4963
4964 FunctionType& type = FunctionType::Handle(d->zone());
4965 Code& stub = Code::Handle(d->zone());
4966
4967 if (Snapshot::IncludesCode(d->kind())) {
4968 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4969 type ^= refs.At(id);
4970 type.UpdateTypeTestingStubEntryPoint();
4971 }
4972 } else {
4973 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
4974 type ^= refs.At(id);
4975 stub = TypeTestingStubGenerator::DefaultCodeForType(type);
4976 type.InitializeTypeTestingStubNonAtomic(stub);
4977 }
4978 }
4979 }
4980};
4981
4982#if !defined(DART_PRECOMPILED_RUNTIME)
4983class RecordTypeSerializationCluster
4984 : public CanonicalSetSerializationCluster<CanonicalRecordTypeSet,
4985 RecordType,
4986 RecordTypePtr> {
4987 public:
4988 RecordTypeSerializationCluster(bool is_canonical,
4989 bool represents_canonical_set)
4990 : CanonicalSetSerializationCluster(
4991 kRecordTypeCid,
4992 is_canonical,
4993 represents_canonical_set,
4994 "RecordType",
4995 compiler::target::RecordType::InstanceSize()) {}
4996 ~RecordTypeSerializationCluster() {}
4997
4998 void Trace(Serializer* s, ObjectPtr object) {
4999 RecordTypePtr type = RecordType::RawCast(object);
5000 objects_.Add(type);
5001 PushFromTo(type);
5002 }
5003
5004 void WriteAlloc(Serializer* s) {
5005 intptr_t count = objects_.length();
5006 s->WriteUnsigned(count);
5007 ReorderObjects(s);
5008
5009 for (intptr_t i = 0; i < count; i++) {
5010 RecordTypePtr type = objects_[i];
5011 s->AssignRef(type);
5012 }
5013 WriteCanonicalSetLayout(s);
5014 }
5015
5016 void WriteFill(Serializer* s) {
5017 intptr_t count = objects_.length();
5018 for (intptr_t i = 0; i < count; i++) {
5019 WriteRecordType(s, objects_[i]);
5020 }
5021 }
5022
5023 private:
5024 void WriteRecordType(Serializer* s, RecordTypePtr type) {
5025 AutoTraceObject(type);
5026 WriteFromTo(type);
5027 ASSERT(Utils::IsUint(8, type->untag()->flags()));
5028 s->Write<uint8_t>(type->untag()->flags());
5029 }
5030};
5031#endif // !DART_PRECOMPILED_RUNTIME
5032
5033class RecordTypeDeserializationCluster
5034 : public CanonicalSetDeserializationCluster<CanonicalRecordTypeSet> {
5035 public:
5036 RecordTypeDeserializationCluster(bool is_canonical, bool is_root_unit)
5037 : CanonicalSetDeserializationCluster(is_canonical,
5038 is_root_unit,
5039 "RecordType") {}
5040 ~RecordTypeDeserializationCluster() {}
5041
5042 void ReadAlloc(Deserializer* d) override {
5043 ReadAllocFixedSize(d, RecordType::InstanceSize());
5044 BuildCanonicalSetFromLayout(d);
5045 }
5046
5047 void ReadFill(Deserializer* d_) override {
5048 Deserializer::Local d(d_);
5049
5050 const bool mark_canonical = is_root_unit_ && is_canonical();
5051 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5052 RecordTypePtr type = static_cast<RecordTypePtr>(d.Ref(id));
5053 Deserializer::InitializeHeader(
5054 type, kRecordTypeCid, RecordType::InstanceSize(), mark_canonical);
5055 d.ReadFromTo(type);
5056 type->untag()->set_flags(d.Read<uint8_t>());
5057 }
5058 }
5059
5060 void PostLoad(Deserializer* d, const Array& refs) override {
5061 if (!table_.IsNull()) {
5062 auto object_store = d->isolate_group()->object_store();
5063 VerifyCanonicalSet(d, refs,
5064 Array::Handle(object_store->canonical_record_types()));
5065 object_store->set_canonical_record_types(table_);
5066 } else if (!is_root_unit_ && is_canonical()) {
5067 AbstractType& type = AbstractType::Handle(d->zone());
5068 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
5069 type ^= refs.At(i);
5070 type = type.Canonicalize(d->thread());
5071 refs.SetAt(i, type);
5072 }
5073 }
5074
5075 RecordType& type = RecordType::Handle(d->zone());
5076 Code& stub = Code::Handle(d->zone());
5077
5078 if (Snapshot::IncludesCode(d->kind())) {
5079 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5080 type ^= refs.At(id);
5081 type.UpdateTypeTestingStubEntryPoint();
5082 }
5083 } else {
5084 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5085 type ^= refs.At(id);
5086 stub = TypeTestingStubGenerator::DefaultCodeForType(type);
5087 type.InitializeTypeTestingStubNonAtomic(stub);
5088 }
5089 }
5090 }
5091};
5092
5093#if !defined(DART_PRECOMPILED_RUNTIME)
5094class TypeParameterSerializationCluster
5095 : public CanonicalSetSerializationCluster<CanonicalTypeParameterSet,
5096 TypeParameter,
5097 TypeParameterPtr> {
5098 public:
5099 TypeParameterSerializationCluster(bool is_canonical,
5100 bool cluster_represents_canonical_set)
5101 : CanonicalSetSerializationCluster(
5102 kTypeParameterCid,
5103 is_canonical,
5104 cluster_represents_canonical_set,
5105 "TypeParameter",
5106 compiler::target::TypeParameter::InstanceSize()) {}
5107 ~TypeParameterSerializationCluster() {}
5108
5109 void Trace(Serializer* s, ObjectPtr object) {
5110 TypeParameterPtr type = TypeParameter::RawCast(object);
5111 objects_.Add(type);
5112
5113 PushFromTo(type);
5114 }
5115
5116 void WriteAlloc(Serializer* s) {
5117 intptr_t count = objects_.length();
5118 s->WriteUnsigned(count);
5119 ReorderObjects(s);
5120 for (intptr_t i = 0; i < count; i++) {
5121 TypeParameterPtr type = objects_[i];
5122 s->AssignRef(type);
5123 }
5124 WriteCanonicalSetLayout(s);
5125 }
5126
5127 void WriteFill(Serializer* s) {
5128 intptr_t count = objects_.length();
5129 for (intptr_t i = 0; i < count; i++) {
5130 WriteTypeParameter(s, objects_[i]);
5131 }
5132 }
5133
5134 private:
5135 void WriteTypeParameter(Serializer* s, TypeParameterPtr type) {
5136 AutoTraceObject(type);
5137 WriteFromTo(type);
5138 s->Write<uint16_t>(type->untag()->base_);
5139 s->Write<uint16_t>(type->untag()->index_);
5140 ASSERT(Utils::IsUint(8, type->untag()->flags()));
5141 s->Write<uint8_t>(type->untag()->flags());
5142 }
5143};
5144#endif // !DART_PRECOMPILED_RUNTIME
5145
5146class TypeParameterDeserializationCluster
5147 : public CanonicalSetDeserializationCluster<CanonicalTypeParameterSet> {
5148 public:
5149 explicit TypeParameterDeserializationCluster(bool is_canonical,
5150 bool is_root_unit)
5151 : CanonicalSetDeserializationCluster(is_canonical,
5152 is_root_unit,
5153 "TypeParameter") {}
5154 ~TypeParameterDeserializationCluster() {}
5155
5156 void ReadAlloc(Deserializer* d) override {
5157 ReadAllocFixedSize(d, TypeParameter::InstanceSize());
5158 BuildCanonicalSetFromLayout(d);
5159 }
5160
5161 void ReadFill(Deserializer* d_) override {
5162 Deserializer::Local d(d_);
5163
5164 const bool mark_canonical = is_root_unit_ && is_canonical();
5165 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5166 TypeParameterPtr type = static_cast<TypeParameterPtr>(d.Ref(id));
5167 Deserializer::InitializeHeader(type, kTypeParameterCid,
5168 TypeParameter::InstanceSize(),
5169 mark_canonical);
5170 d.ReadFromTo(type);
5171 type->untag()->base_ = d.Read<uint16_t>();
5172 type->untag()->index_ = d.Read<uint16_t>();
5173 type->untag()->set_flags(d.Read<uint8_t>());
5174 }
5175 }
5176
5177 void PostLoad(Deserializer* d, const Array& refs) override {
5178 if (!table_.IsNull()) {
5179 auto object_store = d->isolate_group()->object_store();
5180 VerifyCanonicalSet(
5181 d, refs, Array::Handle(object_store->canonical_type_parameters()));
5182 object_store->set_canonical_type_parameters(table_);
5183 } else if (!is_root_unit_ && is_canonical()) {
5184 TypeParameter& type_param = TypeParameter::Handle(d->zone());
5185 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
5186 type_param ^= refs.At(i);
5187 type_param ^= type_param.Canonicalize(d->thread());
5188 refs.SetAt(i, type_param);
5189 }
5190 }
5191
5192 TypeParameter& type_param = TypeParameter::Handle(d->zone());
5193 Code& stub = Code::Handle(d->zone());
5194
5195 if (Snapshot::IncludesCode(d->kind())) {
5196 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5197 type_param ^= refs.At(id);
5198 type_param.UpdateTypeTestingStubEntryPoint();
5199 }
5200 } else {
5201 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5202 type_param ^= refs.At(id);
5203 stub = TypeTestingStubGenerator::DefaultCodeForType(type_param);
5204 type_param.InitializeTypeTestingStubNonAtomic(stub);
5205 }
5206 }
5207 }
5208};
5209
5210#if !defined(DART_PRECOMPILED_RUNTIME)
5211class ClosureSerializationCluster : public SerializationCluster {
5212 public:
5213 explicit ClosureSerializationCluster(bool is_canonical)
5214 : SerializationCluster("Closure",
5215 kClosureCid,
5216 compiler::target::Closure::InstanceSize(),
5217 is_canonical) {}
5218 ~ClosureSerializationCluster() {}
5219
5220 void Trace(Serializer* s, ObjectPtr object) {
5221 ClosurePtr closure = Closure::RawCast(object);
5222 objects_.Add(closure);
5223 PushFromTo(closure);
5224 }
5225
5226 void WriteAlloc(Serializer* s) {
5227 const intptr_t count = objects_.length();
5228 s->WriteUnsigned(count);
5229 for (intptr_t i = 0; i < count; i++) {
5230 ClosurePtr closure = objects_[i];
5231 s->AssignRef(closure);
5232 }
5233 }
5234
5235 void WriteFill(Serializer* s) {
5236 const intptr_t count = objects_.length();
5237 for (intptr_t i = 0; i < count; i++) {
5238 ClosurePtr closure = objects_[i];
5239 AutoTraceObject(closure);
5240 WriteFromTo(closure);
5241 }
5242 }
5243
5244 private:
5245 GrowableArray<ClosurePtr> objects_;
5246};
5247#endif // !DART_PRECOMPILED_RUNTIME
5248
5249class ClosureDeserializationCluster
5250 : public AbstractInstanceDeserializationCluster {
5251 public:
5252 explicit ClosureDeserializationCluster(bool is_canonical, bool is_root_unit)
5253 : AbstractInstanceDeserializationCluster("Closure",
5254 is_canonical,
5255 is_root_unit) {}
5256 ~ClosureDeserializationCluster() {}
5257
5258 void ReadAlloc(Deserializer* d) override {
5259 ReadAllocFixedSize(d, Closure::InstanceSize());
5260 }
5261
5262 void ReadFill(Deserializer* d_) override {
5263 Deserializer::Local d(d_);
5264
5265 const bool mark_canonical = is_root_unit_ && is_canonical();
5266 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5267 ClosurePtr closure = static_cast<ClosurePtr>(d.Ref(id));
5268 Deserializer::InitializeHeader(closure, kClosureCid,
5269 Closure::InstanceSize(), mark_canonical);
5270 d.ReadFromTo(closure);
5271#if defined(DART_PRECOMPILED_RUNTIME)
5272 closure->untag()->entry_point_ = 0;
5273#endif
5274 }
5275 }
5276
5277#if defined(DART_PRECOMPILED_RUNTIME)
5278 void PostLoad(Deserializer* d, const Array& refs) override {
5279 // We only cache the entry point in bare instructions mode (as we need
5280 // to load the function anyway otherwise).
5281 ASSERT(d->kind() == Snapshot::kFullAOT);
5282 auto& closure = Closure::Handle(d->zone());
5283 auto& func = Function::Handle(d->zone());
5284 for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
5285 closure ^= refs.At(i);
5286 func = closure.function();
5287 uword entry_point = func.entry_point();
5288 ASSERT(entry_point != 0);
5289 closure.ptr()->untag()->entry_point_ = entry_point;
5290 }
5291 }
5292#endif
5293};
5294
5295#if !defined(DART_PRECOMPILED_RUNTIME)
5296class MintSerializationCluster : public SerializationCluster {
5297 public:
5298 explicit MintSerializationCluster(bool is_canonical)
5299 : SerializationCluster("int", kMintCid, kSizeVaries, is_canonical) {}
5300 ~MintSerializationCluster() {}
5301
5302 void Trace(Serializer* s, ObjectPtr object) {
5303 if (!object->IsHeapObject()) {
5304 SmiPtr smi = Smi::RawCast(object);
5305 smis_.Add(smi);
5306 } else {
5307 MintPtr mint = Mint::RawCast(object);
5308 mints_.Add(mint);
5309 }
5310 }
5311
5312 void WriteAlloc(Serializer* s) {
5313 s->WriteUnsigned(smis_.length() + mints_.length());
5314 for (intptr_t i = 0; i < smis_.length(); i++) {
5315 SmiPtr smi = smis_[i];
5316 s->AssignRef(smi);
5317 AutoTraceObject(smi);
5318 const int64_t value = Smi::Value(smi);
5319 s->Write<int64_t>(value);
5320 if (!Smi::IsValid(value)) {
5321 // This Smi will become a Mint when loaded.
5322 target_memory_size_ += compiler::target::Mint::InstanceSize();
5323 }
5324 }
5325 for (intptr_t i = 0; i < mints_.length(); i++) {
5326 MintPtr mint = mints_[i];
5327 s->AssignRef(mint);
5328 AutoTraceObject(mint);
5329 s->Write<int64_t>(mint->untag()->value_);
5330 // All Mints on the host should be Mints on the target.
5331 ASSERT(!Smi::IsValid(mint->untag()->value_));
5332 target_memory_size_ += compiler::target::Mint::InstanceSize();
5333 }
5334 }
5335
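// The Smi/Mint payload is written during WriteAlloc, because the value
// decides whether the reader allocates a Smi or a Mint; nothing is left
// for the fill phase.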
5336 void WriteFill(Serializer* s) {}
5337
5338 private:
5339 GrowableArray<SmiPtr> smis_;
5340 GrowableArray<MintPtr> mints_;
5341};
5342#endif // !DART_PRECOMPILED_RUNTIME
5343
5344class MintDeserializationCluster
5345 : public AbstractInstanceDeserializationCluster {
5346 public:
5347 explicit MintDeserializationCluster(bool is_canonical, bool is_root_unit)
5348 : AbstractInstanceDeserializationCluster("int",
5349 is_canonical,
5350 is_root_unit) {}
5351 ~MintDeserializationCluster() {}
5352
5353 void ReadAlloc(Deserializer* d) override {
5354 start_index_ = d->next_index();
5355 const intptr_t count = d->ReadUnsigned();
5356 const bool mark_canonical = is_canonical();
5357 for (intptr_t i = 0; i < count; i++) {
5358 int64_t value = d->Read<int64_t>();
5359 if (Smi::IsValid(value)) {
5360 d->AssignRef(Smi::New(value));
5361 } else {
5362 MintPtr mint = static_cast<MintPtr>(d->Allocate(Mint::InstanceSize()));
5363 Deserializer::InitializeHeader(mint, kMintCid, Mint::InstanceSize(),
5364 mark_canonical);
5365 mint->untag()->value_ = value;
5366 d->AssignRef(mint);
5367 }
5368 }
5369 stop_index_ = d->next_index();
5370 }
5371
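// Nothing to fill: the integer values were fully consumed in ReadAlloc.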
5372 void ReadFill(Deserializer* d_) override { Deserializer::Local d(d_); }
5373};
5374
5375#if !defined(DART_PRECOMPILED_RUNTIME)
5376class DoubleSerializationCluster : public SerializationCluster {
5377 public:
5378 explicit DoubleSerializationCluster(bool is_canonical)
5379 : SerializationCluster("double",
5380 kDoubleCid,
5381 compiler::target::Double::InstanceSize(),
5382 is_canonical) {}
5383 ~DoubleSerializationCluster() {}
5384
5385 void Trace(Serializer* s, ObjectPtr object) {
5386 DoublePtr dbl = Double::RawCast(object);
5387 objects_.Add(dbl);
5388 }
5389
5390 void WriteAlloc(Serializer* s) {
5391 const intptr_t count = objects_.length();
5392 s->WriteUnsigned(count);
5393 for (intptr_t i = 0; i < count; i++) {
5394 DoublePtr dbl = objects_[i];
5395 s->AssignRef(dbl);
5396 }
5397 }
5398
5399 void WriteFill(Serializer* s) {
5400 const intptr_t count = objects_.length();
5401 for (intptr_t i = 0; i < count; i++) {
5402 DoublePtr dbl = objects_[i];
5403 AutoTraceObject(dbl);
5404 s->Write<double>(dbl->untag()->value_);
5405 }
5406 }
5407
5408 private:
5409 GrowableArray<DoublePtr> objects_;
5410};
5411#endif // !DART_PRECOMPILED_RUNTIME
5412
5413class DoubleDeserializationCluster
5414 : public AbstractInstanceDeserializationCluster {
5415 public:
5416 explicit DoubleDeserializationCluster(bool is_canonical, bool is_root_unit)
5417 : AbstractInstanceDeserializationCluster("double",
5418 is_canonical,
5419 is_root_unit) {}
5420 ~DoubleDeserializationCluster() {}
5421
5422 void ReadAlloc(Deserializer* d) override {
5423 ReadAllocFixedSize(d, Double::InstanceSize());
5424 }
5425
5426 void ReadFill(Deserializer* d_) override {
5427 Deserializer::Local d(d_);
5428 const bool mark_canonical = is_root_unit_ && is_canonical();
5429 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5430 DoublePtr dbl = static_cast<DoublePtr>(d.Ref(id));
5431 Deserializer::InitializeHeader(dbl, kDoubleCid, Double::InstanceSize(),
5432 mark_canonical);
5433 dbl->untag()->value_ = d.Read<double>();
5434 }
5435 }
5436};
5437
5438#if !defined(DART_PRECOMPILED_RUNTIME)
5439class Simd128SerializationCluster : public SerializationCluster {
5440 public:
5441 explicit Simd128SerializationCluster(intptr_t cid, bool is_canonical)
5442 : SerializationCluster("Simd128",
5443 cid,
5444 compiler::target::Int32x4::InstanceSize(),
5445 is_canonical) {
5446 ASSERT_EQUAL(compiler::target::Int32x4::InstanceSize(),
5447 compiler::target::Float32x4::InstanceSize());
5448 ASSERT_EQUAL(compiler::target::Int32x4::InstanceSize(),
5449 compiler::target::Float64x2::InstanceSize());
5450 }
5451 ~Simd128SerializationCluster() {}
5452
5453 void Trace(Serializer* s, ObjectPtr object) { objects_.Add(object); }
5454
5455 void WriteAlloc(Serializer* s) {
5456 const intptr_t count = objects_.length();
5457 s->WriteUnsigned(count);
5458 for (intptr_t i = 0; i < count; i++) {
5459 ObjectPtr vector = objects_[i];
5460 s->AssignRef(vector);
5461 }
5462 }
5463
5464 void WriteFill(Serializer* s) {
5465 const intptr_t count = objects_.length();
5466 for (intptr_t i = 0; i < count; i++) {
5467 ObjectPtr vector = objects_[i];
5468 AutoTraceObject(vector);
5469 ASSERT_EQUAL(Int32x4::value_offset(), Float32x4::value_offset());
5470 ASSERT_EQUAL(Int32x4::value_offset(), Float64x2::value_offset());
5471 s->WriteBytes(&(static_cast<Int32x4Ptr>(vector)->untag()->value_),
5472 sizeof(simd128_value_t));
5473 }
5474 }
5475
5476 private:
5477 GrowableArray<ObjectPtr> objects_;
5478};
5479#endif // !DART_PRECOMPILED_RUNTIME
5480
5481class Simd128DeserializationCluster
5482 : public AbstractInstanceDeserializationCluster {
5483 public:
5484 explicit Simd128DeserializationCluster(intptr_t cid,
5485 bool is_canonical,
5486 bool is_root_unit)
5487 : AbstractInstanceDeserializationCluster("Simd128",
5488 is_canonical,
5489 is_root_unit),
5490 cid_(cid) {}
5491 ~Simd128DeserializationCluster() {}
5492
5493 void ReadAlloc(Deserializer* d) override {
5494 ASSERT_EQUAL(Int32x4::InstanceSize(), Float32x4::InstanceSize());
5495 ASSERT_EQUAL(Int32x4::InstanceSize(), Float64x2::InstanceSize());
5496 ReadAllocFixedSize(d, Int32x4::InstanceSize());
5497 }
5498
5499 void ReadFill(Deserializer* d_) override {
5500 Deserializer::Local d(d_);
5501 const intptr_t cid = cid_;
5502 const bool mark_canonical = is_root_unit_ && is_canonical();
5503 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5504 ObjectPtr vector = d.Ref(id);
5505 Deserializer::InitializeHeader(vector, cid, Int32x4::InstanceSize(),
5506 mark_canonical);
5507 d.ReadBytes(&(static_cast<Int32x4Ptr>(vector)->untag()->value_),
5508 sizeof(simd128_value_t));
5509 }
5510 }
5511
5512 private:
5513 intptr_t cid_;
5514};
5515
5516#if !defined(DART_PRECOMPILED_RUNTIME)
5517class GrowableObjectArraySerializationCluster : public SerializationCluster {
5518 public:
5519 GrowableObjectArraySerializationCluster()
5520 : SerializationCluster(
5521 "GrowableObjectArray",
5522 kGrowableObjectArrayCid,
5523 compiler::target::GrowableObjectArray::InstanceSize()) {}
5524 ~GrowableObjectArraySerializationCluster() {}
5525
5526 void Trace(Serializer* s, ObjectPtr object) {
5527 GrowableObjectArrayPtr array = GrowableObjectArray::RawCast(object);
5528 objects_.Add(array);
5529 PushFromTo(array);
5530 }
5531
5532 void WriteAlloc(Serializer* s) {
5533 const intptr_t count = objects_.length();
5534 s->WriteUnsigned(count);
5535 for (intptr_t i = 0; i < count; i++) {
5536 GrowableObjectArrayPtr array = objects_[i];
5537 s->AssignRef(array);
5538 }
5539 }
5540
5541 void WriteFill(Serializer* s) {
5542 const intptr_t count = objects_.length();
5543 for (intptr_t i = 0; i < count; i++) {
5544 GrowableObjectArrayPtr array = objects_[i];
5545 AutoTraceObject(array);
5546 WriteFromTo(array);
5547 }
5548 }
5549
5550 private:
5551 GrowableArray<GrowableObjectArrayPtr> objects_;
5552};
5553#endif // !DART_PRECOMPILED_RUNTIME
5554
5555class GrowableObjectArrayDeserializationCluster
5556 : public DeserializationCluster {
5557 public:
5558 GrowableObjectArrayDeserializationCluster()
5559 : DeserializationCluster("GrowableObjectArray") {}
5560 ~GrowableObjectArrayDeserializationCluster() {}
5561
5562 void ReadAlloc(Deserializer* d) override {
5563 ReadAllocFixedSize(d, GrowableObjectArray::InstanceSize());
5564 }
5565
5566 void ReadFill(Deserializer* d_) override {
5567 Deserializer::Local d(d_);
5568
5569 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5570 GrowableObjectArrayPtr list =
5571 static_cast<GrowableObjectArrayPtr>(d.Ref(id));
5572 Deserializer::InitializeHeader(list, kGrowableObjectArrayCid,
5573 GrowableObjectArray::InstanceSize());
5574 d.ReadFromTo(list);
5575 }
5576 }
5577};
5578
5579#if !defined(DART_PRECOMPILED_RUNTIME)
5580class RecordSerializationCluster : public SerializationCluster {
5581 public:
5582 explicit RecordSerializationCluster(bool is_canonical)
5583 : SerializationCluster("Record", kRecordCid, kSizeVaries, is_canonical) {}
5584 ~RecordSerializationCluster() {}
5585
5586 void Trace(Serializer* s, ObjectPtr object) {
5587 RecordPtr record = Record::RawCast(object);
5588 objects_.Add(record);
5589
5590 const intptr_t num_fields = Record::NumFields(record);
5591 for (intptr_t i = 0; i < num_fields; ++i) {
5592 s->Push(record->untag()->field(i));
5593 }
5594 }
5595
5596 void WriteAlloc(Serializer* s) {
5597 const intptr_t count = objects_.length();
5598 s->WriteUnsigned(count);
5599 for (intptr_t i = 0; i < count; ++i) {
5600 RecordPtr record = objects_[i];
5601 s->AssignRef(record);
5602 AutoTraceObject(record);
5603 const intptr_t num_fields = Record::NumFields(record);
5604 s->WriteUnsigned(num_fields);
5605 target_memory_size_ += compiler::target::Record::InstanceSize(num_fields);
5606 }
5607 }
5608
5609 void WriteFill(Serializer* s) {
5610 const intptr_t count = objects_.length();
5611 for (intptr_t i = 0; i < count; ++i) {
5612 RecordPtr record = objects_[i];
5613 AutoTraceObject(record);
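// The shape is written first; the reader recovers the field count from it
// (RecordShape::num_fields) before reading the field references.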
5614 const RecordShape shape(record->untag()->shape());
5615 s->WriteUnsigned(shape.AsInt());
5616 const intptr_t num_fields = shape.num_fields();
5617 for (intptr_t j = 0; j < num_fields; ++j) {
5618 s->WriteElementRef(record->untag()->field(j), j);
5619 }
5620 }
5621 }
5622
5623 private:
5624 GrowableArray<RecordPtr> objects_;
5625};
5626#endif // !DART_PRECOMPILED_RUNTIME
5627
5628class RecordDeserializationCluster
5629 : public AbstractInstanceDeserializationCluster {
5630 public:
5631 explicit RecordDeserializationCluster(bool is_canonical, bool is_root_unit)
5632 : AbstractInstanceDeserializationCluster("Record",
5633 is_canonical,
5634 is_root_unit) {}
5635 ~RecordDeserializationCluster() {}
5636
5637 void ReadAlloc(Deserializer* d) override {
5638 start_index_ = d->next_index();
5639 const intptr_t count = d->ReadUnsigned();
5640 for (intptr_t i = 0; i < count; i++) {
5641 const intptr_t num_fields = d->ReadUnsigned();
5642 d->AssignRef(d->Allocate(Record::InstanceSize(num_fields)));
5643 }
5644 stop_index_ = d->next_index();
5645 }
5646
5647 void ReadFill(Deserializer* d_) override {
5648 Deserializer::Local d(d_);
5649
5650 const bool stamp_canonical = is_root_unit_ && is_canonical();
5651 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5652 RecordPtr record = static_cast<RecordPtr>(d.Ref(id));
5653 const intptr_t shape = d.ReadUnsigned();
5654 const intptr_t num_fields = RecordShape(shape).num_fields();
5655 Deserializer::InitializeHeader(record, kRecordCid,
5656 Record::InstanceSize(num_fields),
5657 stamp_canonical);
5658 record->untag()->shape_ = Smi::New(shape);
5659 for (intptr_t j = 0; j < num_fields; ++j) {
5660 record->untag()->data()[j] = d.ReadRef();
5661 }
5662 }
5663 }
5664};
5665
5666#if !defined(DART_PRECOMPILED_RUNTIME)
5667class TypedDataSerializationCluster : public SerializationCluster {
5668 public:
5669 explicit TypedDataSerializationCluster(intptr_t cid)
5670 : SerializationCluster("TypedData", cid) {}
5671 ~TypedDataSerializationCluster() {}
5672
5673 void Trace(Serializer* s, ObjectPtr object) {
5674 TypedDataPtr data = TypedData::RawCast(object);
5675 objects_.Add(data);
5676 }
5677
5678 void WriteAlloc(Serializer* s) {
5679 const intptr_t count = objects_.length();
5680 s->WriteUnsigned(count);
5681 const intptr_t element_size = TypedData::ElementSizeInBytes(cid_);
5682 for (intptr_t i = 0; i < count; i++) {
5683 TypedDataPtr data = objects_[i];
5684 s->AssignRef(data);
5685 AutoTraceObject(data);
5686 const intptr_t length = Smi::Value(data->untag()->length());
5687 s->WriteUnsigned(length);
5688 target_memory_size_ +=
5689 compiler::target::TypedData::InstanceSize(length * element_size);
5690 }
5691 }
5692
5693 void WriteFill(Serializer* s) {
5694 const intptr_t count = objects_.length();
5695 intptr_t element_size = TypedData::ElementSizeInBytes(cid_);
5696 for (intptr_t i = 0; i < count; i++) {
5697 TypedDataPtr data = objects_[i];
5698 AutoTraceObject(data);
5699 const intptr_t length = Smi::Value(data->untag()->length());
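// The length was already written in WriteAlloc for allocation; it is written
// again here so ReadFill can size the byte copy from the fill stream alone.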
5700 s->WriteUnsigned(length);
5701 uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data());
5702 s->WriteBytes(cdata, length * element_size);
5703 }
5704 }
5705
5706 private:
5707 GrowableArray<TypedDataPtr> objects_;
5708};
5709#endif // !DART_PRECOMPILED_RUNTIME
5710
5711class TypedDataDeserializationCluster : public DeserializationCluster {
5712 public:
5713 explicit TypedDataDeserializationCluster(intptr_t cid)
5714 : DeserializationCluster("TypedData"), cid_(cid) {}
5715 ~TypedDataDeserializationCluster() {}
5716
5717 void ReadAlloc(Deserializer* d) override {
5718 start_index_ = d->next_index();
5719 const intptr_t count = d->ReadUnsigned();
5720 intptr_t element_size = TypedData::ElementSizeInBytes(cid_);
5721 for (intptr_t i = 0; i < count; i++) {
5722 const intptr_t length = d->ReadUnsigned();
5723 d->AssignRef(d->Allocate(TypedData::InstanceSize(length * element_size)));
5724 }
5725 stop_index_ = d->next_index();
5726 }
5727
5728 void ReadFill(Deserializer* d_) override {
5729 Deserializer::Local d(d_);
5730
5731 ASSERT(!is_canonical()); // Never canonical.
5732 intptr_t element_size = TypedData::ElementSizeInBytes(cid_);
5733
5734 const intptr_t cid = cid_;
5735 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5736 TypedDataPtr data = static_cast<TypedDataPtr>(d.Ref(id));
5737 const intptr_t length = d.ReadUnsigned();
5738 const intptr_t length_in_bytes = length * element_size;
5739 Deserializer::InitializeHeader(data, cid,
5740 TypedData::InstanceSize(length_in_bytes));
5741 data->untag()->length_ = Smi::New(length);
5742 data->untag()->RecomputeDataField();
5743 uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data());
5744 d.ReadBytes(cdata, length_in_bytes);
5745 }
5746 }
5747
5748 private:
5749 const intptr_t cid_;
5750};
5751
5752#if !defined(DART_PRECOMPILED_RUNTIME)
5753class TypedDataViewSerializationCluster : public SerializationCluster {
5754 public:
5755 explicit TypedDataViewSerializationCluster(intptr_t cid)
5756 : SerializationCluster("TypedDataView",
5757 cid,
5758 compiler::target::TypedDataView::InstanceSize()) {}
5759 ~TypedDataViewSerializationCluster() {}
5760
5761 void Trace(Serializer* s, ObjectPtr object) {
5762 TypedDataViewPtr view = TypedDataView::RawCast(object);
5763 objects_.Add(view);
5764
5765 PushFromTo(view);
5766 }
5767
5768 void WriteAlloc(Serializer* s) {
5769 const intptr_t count = objects_.length();
5770 s->WriteUnsigned(count);
5771 for (intptr_t i = 0; i < count; i++) {
5772 TypedDataViewPtr view = objects_[i];
5773 s->AssignRef(view);
5774 }
5775 }
5776
5777 void WriteFill(Serializer* s) {
5778 const intptr_t count = objects_.length();
5779 for (intptr_t i = 0; i < count; i++) {
5780 TypedDataViewPtr view = objects_[i];
5781 AutoTraceObject(view);
5782 WriteFromTo(view);
5783 }
5784 }
5785
5786 private:
5787 GrowableArray<TypedDataViewPtr> objects_;
5788};
5789#endif // !DART_PRECOMPILED_RUNTIME
5790
5791class TypedDataViewDeserializationCluster : public DeserializationCluster {
5792 public:
5793 explicit TypedDataViewDeserializationCluster(intptr_t cid)
5794 : DeserializationCluster("TypedDataView"), cid_(cid) {}
5795 ~TypedDataViewDeserializationCluster() {}
5796
5797 void ReadAlloc(Deserializer* d) override {
5798 ReadAllocFixedSize(d, TypedDataView::InstanceSize());
5799 }
5800
5801 void ReadFill(Deserializer* d_) override {
5802 Deserializer::Local d(d_);
5803
5804 const intptr_t cid = cid_;
5805 ASSERT(!is_canonical()); // Never canonical.
5806 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5807 TypedDataViewPtr view = static_cast<TypedDataViewPtr>(d.Ref(id));
5808 Deserializer::InitializeHeader(view, cid, TypedDataView::InstanceSize());
5809 d.ReadFromTo(view);
5810 }
5811 }
5812
5813 void PostLoad(Deserializer* d, const Array& refs) override {
5814 auto& view = TypedDataView::Handle(d->zone());
5815 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5816 view ^= refs.At(id);
5817 view.RecomputeDataField();
5818 }
5819 }
5820
5821 private:
5822 const intptr_t cid_;
5823};
5824
5825#if !defined(DART_PRECOMPILED_RUNTIME)
5826class ExternalTypedDataSerializationCluster : public SerializationCluster {
5827 public:
5828 explicit ExternalTypedDataSerializationCluster(intptr_t cid)
5829 : SerializationCluster(
5830 "ExternalTypedData",
5831 cid,
5832 compiler::target::ExternalTypedData::InstanceSize()) {}
5833 ~ExternalTypedDataSerializationCluster() {}
5834
5835 void Trace(Serializer* s, ObjectPtr object) {
5836 ExternalTypedDataPtr data = ExternalTypedData::RawCast(object);
5837 objects_.Add(data);
5838 }
5839
5840 void WriteAlloc(Serializer* s) {
5841 const intptr_t count = objects_.length();
5842 s->WriteUnsigned(count);
5843 for (intptr_t i = 0; i < count; i++) {
5844 ExternalTypedDataPtr data = objects_[i];
5845 s->AssignRef(data);
5846 }
5847 }
5848
5849 void WriteFill(Serializer* s) {
5850 const intptr_t count = objects_.length();
5851 intptr_t element_size = ExternalTypedData::ElementSizeInBytes(cid_);
5852 for (intptr_t i = 0; i < count; i++) {
5853 ExternalTypedDataPtr data = objects_[i];
5854 AutoTraceObject(data);
5855 const intptr_t length = Smi::Value(data->untag()->length());
5856 s->WriteUnsigned(length);
5857 uint8_t* cdata = reinterpret_cast<uint8_t*>(data->untag()->data_);
5858 s->Align(ExternalTypedData::kDataSerializationAlignment);
5859 s->WriteBytes(cdata, length * element_size);
5860 }
5861 }
5862
5863 private:
5864 GrowableArray<ExternalTypedDataPtr> objects_;
5865};
5866#endif // !DART_PRECOMPILED_RUNTIME
5867
5868class ExternalTypedDataDeserializationCluster : public DeserializationCluster {
5869 public:
5870 explicit ExternalTypedDataDeserializationCluster(intptr_t cid)
5871 : DeserializationCluster("ExternalTypedData"), cid_(cid) {}
5872 ~ExternalTypedDataDeserializationCluster() {}
5873
5874 void ReadAlloc(Deserializer* d) override {
5875 ReadAllocFixedSize(d, ExternalTypedData::InstanceSize());
5876 }
5877
5878 void ReadFill(Deserializer* d_) override {
5879 Deserializer::Local d(d_);
5880
5881 ASSERT(!is_canonical()); // Never canonical.
5882 const intptr_t cid = cid_;
5883 intptr_t element_size = ExternalTypedData::ElementSizeInBytes(cid);
5884 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5885 ExternalTypedDataPtr data = static_cast<ExternalTypedDataPtr>(d.Ref(id));
5886 const intptr_t length = d.ReadUnsigned();
5887 Deserializer::InitializeHeader(data, cid,
5888 ExternalTypedData::InstanceSize());
5889 data->untag()->length_ = Smi::New(length);
5890 d.Align(ExternalTypedData::kDataSerializationAlignment);
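// The payload is not copied: data_ aliases the snapshot buffer directly,
// which is why the writer aligned it to kDataSerializationAlignment.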
5891 data->untag()->data_ = const_cast<uint8_t*>(d.AddressOfCurrentPosition());
5892 d.Advance(length * element_size);
5893 // No finalizer / external size 0.
5894 }
5895 }
5896
5897 private:
5898 const intptr_t cid_;
5899};
5900
5901#if !defined(DART_PRECOMPILED_RUNTIME)
5902class DeltaEncodedTypedDataSerializationCluster : public SerializationCluster {
5903 public:
5904 DeltaEncodedTypedDataSerializationCluster()
5905 : SerializationCluster("DeltaEncodedTypedData",
5906 kDeltaEncodedTypedDataCid) {}
5907 ~DeltaEncodedTypedDataSerializationCluster() {}
5908
5909 void Trace(Serializer* s, ObjectPtr object) {
5910 TypedDataPtr data = TypedData::RawCast(object);
5911 objects_.Add(data);
5912 }
5913
5914 void WriteAlloc(Serializer* s) {
5915 const intptr_t count = objects_.length();
5916 s->WriteUnsigned(count);
5917 for (intptr_t i = 0; i < count; i++) {
5918 const TypedDataPtr data = objects_[i];
5919 const intptr_t element_size =
5920 TypedData::ElementSizeInBytes(data->GetClassId());
5921 s->AssignRef(data);
5922 AutoTraceObject(data);
5923 const intptr_t length_in_bytes =
5924 Smi::Value(data->untag()->length()) * element_size;
5925 s->WriteUnsigned(length_in_bytes);
5926 target_memory_size_ +=
5927 compiler::target::TypedData::InstanceSize(length_in_bytes);
5928 }
5929 }
5930
5931 void WriteFill(Serializer* s) {
5932 const intptr_t count = objects_.length();
5933 TypedData& typed_data = TypedData::Handle(s->zone());
5934 for (intptr_t i = 0; i < count; i++) {
5935 const TypedDataPtr data = objects_[i];
5936 AutoTraceObject(data);
5937 const intptr_t cid = data->GetClassId();
5938 // Only Uint16 and Uint32 typed data is supported at the moment, so encode
5939 // which one it is in the low bit of the length: Uint16 is 0, Uint32 is 1.
5940 ASSERT(cid == kTypedDataUint16ArrayCid ||
5941 cid == kTypedDataUint32ArrayCid);
5942 const intptr_t cid_flag = cid == kTypedDataUint16ArrayCid ? 0 : 1;
5943 const intptr_t length = Smi::Value(data->untag()->length());
5944 const intptr_t encoded_length = (length << 1) | cid_flag;
5945 s->WriteUnsigned(encoded_length);
5946 intptr_t prev = 0;
5947 typed_data = data;
5948 for (intptr_t j = 0; j < length; ++j) {
5949 const intptr_t value = (cid == kTypedDataUint16ArrayCid)
5950 ? typed_data.GetUint16(j << 1)
5951 : typed_data.GetUint32(j << 2);
5952 ASSERT(value >= prev);
5953 s->WriteUnsigned(value - prev);
5954 prev = value;
5955 }
5956 }
5957 }
5958
5959 private:
5960 GrowableArray<TypedDataPtr> objects_;
5961};
5962#endif // !DART_PRECOMPILED_RUNTIME
5963
5964class DeltaEncodedTypedDataDeserializationCluster
5965 : public DeserializationCluster {
5966 public:
5967 DeltaEncodedTypedDataDeserializationCluster()
5968 : DeserializationCluster("DeltaEncodedTypedData") {}
5969 ~DeltaEncodedTypedDataDeserializationCluster() {}
5970
5971 void ReadAlloc(Deserializer* d) override {
5972 start_index_ = d->next_index();
5973 const intptr_t count = d->ReadUnsigned();
5974 for (intptr_t i = 0; i < count; i++) {
5975 const intptr_t length_in_bytes = d->ReadUnsigned();
5976 d->AssignRef(d->Allocate(TypedData::InstanceSize(length_in_bytes)));
5977 }
5978 stop_index_ = d->next_index();
5979 }
5980
5981 void ReadFill(Deserializer* d_) override {
5982 Deserializer::Local d(d_);
5983 TypedData& typed_data = TypedData::Handle(d_->zone());
5984
5985 ASSERT(!is_canonical()); // Never canonical.
5986
5987 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
5988 TypedDataPtr data = static_cast<TypedDataPtr>(d.Ref(id));
5989 const intptr_t encoded_length = d.ReadUnsigned();
5990 const intptr_t length = encoded_length >> 1;
5991 const intptr_t cid = (encoded_length & 0x1) == 0
5992 ? kTypedDataUint16ArrayCid
5993 : kTypedDataUint32ArrayCid;
5994 const intptr_t element_size = TypedData::ElementSizeInBytes(cid);
5995 const intptr_t length_in_bytes = length * element_size;
5996 Deserializer::InitializeHeader(data, cid,
5997 TypedData::InstanceSize(length_in_bytes));
5998 data->untag()->length_ = Smi::New(length);
5999 data->untag()->RecomputeDataField();
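// Undo the delta encoding: each stored value is the non-negative difference
// from the previous element, so a running sum restores the original sequence.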
6000 intptr_t value = 0;
6001 typed_data = data;
6002 for (intptr_t j = 0; j < length; ++j) {
6003 value += d.ReadUnsigned();
6004 if (cid == kTypedDataUint16ArrayCid) {
6005 typed_data.SetUint16(j << 1, static_cast<uint16_t>(value));
6006 } else {
6007 typed_data.SetUint32(j << 2, value);
6008 }
6009 }
6010 }
6011 }
6012};
6013
6014#if !defined(DART_PRECOMPILED_RUNTIME)
6015class StackTraceSerializationCluster : public SerializationCluster {
6016 public:
6017 StackTraceSerializationCluster()
6018 : SerializationCluster("StackTrace",
6019 kStackTraceCid,
6020 compiler::target::StackTrace::InstanceSize()) {}
6021 ~StackTraceSerializationCluster() {}
6022
6023 void Trace(Serializer* s, ObjectPtr object) {
6024 StackTracePtr trace = StackTrace::RawCast(object);
6025 objects_.Add(trace);
6026 PushFromTo(trace);
6027 }
6028
6029 void WriteAlloc(Serializer* s) {
6030 const intptr_t count = objects_.length();
6031 s->WriteUnsigned(count);
6032 for (intptr_t i = 0; i < count; i++) {
6033 StackTracePtr trace = objects_[i];
6034 s->AssignRef(trace);
6035 }
6036 }
6037
6038 void WriteFill(Serializer* s) {
6039 const intptr_t count = objects_.length();
6040 for (intptr_t i = 0; i < count; i++) {
6041 StackTracePtr trace = objects_[i];
6042 AutoTraceObject(trace);
6043 WriteFromTo(trace);
6044 }
6045 }
6046
6047 private:
6048 GrowableArray<StackTracePtr> objects_;
6049};
6050#endif // !DART_PRECOMPILED_RUNTIME
6051
6052class StackTraceDeserializationCluster : public DeserializationCluster {
6053 public:
6054 StackTraceDeserializationCluster() : DeserializationCluster("StackTrace") {}
6055 ~StackTraceDeserializationCluster() {}
6056
6057 void ReadAlloc(Deserializer* d) override {
6058 ReadAllocFixedSize(d, StackTrace::InstanceSize());
6059 }
6060
6061 void ReadFill(Deserializer* d_) override {
6062 Deserializer::Local d(d_);
6063
6064 ASSERT(!is_canonical()); // Never canonical.
6065 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6066 StackTracePtr trace = static_cast<StackTracePtr>(d.Ref(id));
6067 Deserializer::InitializeHeader(trace, kStackTraceCid,
6068 StackTrace::InstanceSize());
6069 d.ReadFromTo(trace);
6070 }
6071 }
6072};
6073
6074#if !defined(DART_PRECOMPILED_RUNTIME)
6075class RegExpSerializationCluster : public SerializationCluster {
6076 public:
6077 RegExpSerializationCluster()
6078 : SerializationCluster("RegExp",
6079 kRegExpCid,
6080 compiler::target::RegExp::InstanceSize()) {}
6081 ~RegExpSerializationCluster() {}
6082
6083 void Trace(Serializer* s, ObjectPtr object) {
6084 RegExpPtr regexp = RegExp::RawCast(object);
6085 objects_.Add(regexp);
6086 PushFromTo(regexp);
6087 }
6088
6089 void WriteAlloc(Serializer* s) {
6090 const intptr_t count = objects_.length();
6091 s->WriteUnsigned(count);
6092 for (intptr_t i = 0; i < count; i++) {
6093 RegExpPtr regexp = objects_[i];
6094 s->AssignRef(regexp);
6095 }
6096 }
6097
6098 void WriteFill(Serializer* s) {
6099 const intptr_t count = objects_.length();
6100 for (intptr_t i = 0; i < count; i++) {
6101 RegExpPtr regexp = objects_[i];
6102 AutoTraceObject(regexp);
6103 WriteFromTo(regexp);
6104 s->Write<int32_t>(regexp->untag()->num_one_byte_registers_);
6105 s->Write<int32_t>(regexp->untag()->num_two_byte_registers_);
6106 s->Write<int8_t>(regexp->untag()->type_flags_);
6107 }
6108 }
6109
6110 private:
6111 GrowableArray<RegExpPtr> objects_;
6112};
6113#endif // !DART_PRECOMPILED_RUNTIME
6114
6115class RegExpDeserializationCluster : public DeserializationCluster {
6116 public:
6117 RegExpDeserializationCluster() : DeserializationCluster("RegExp") {}
6118 ~RegExpDeserializationCluster() {}
6119
6120 void ReadAlloc(Deserializer* d) override {
6121 ReadAllocFixedSize(d, RegExp::InstanceSize());
6122 }
6123
6124 void ReadFill(Deserializer* d_) override {
6125 Deserializer::Local d(d_);
6126
6127 ASSERT(!is_canonical()); // Never canonical.
6128 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6129 RegExpPtr regexp = static_cast<RegExpPtr>(d.Ref(id));
6130 Deserializer::InitializeHeader(regexp, kRegExpCid,
6131 RegExp::InstanceSize());
6132 d.ReadFromTo(regexp);
6133 regexp->untag()->num_one_byte_registers_ = d.Read<int32_t>();
6134 regexp->untag()->num_two_byte_registers_ = d.Read<int32_t>();
6135 regexp->untag()->type_flags_ = d.Read<int8_t>();
6136 }
6137 }
6138};
6139
6140#if !defined(DART_PRECOMPILED_RUNTIME)
6141class WeakPropertySerializationCluster : public SerializationCluster {
6142 public:
6143 WeakPropertySerializationCluster()
6144 : SerializationCluster("WeakProperty",
6145 kWeakPropertyCid,
6146 compiler::target::WeakProperty::InstanceSize()) {}
6147 ~WeakPropertySerializationCluster() {}
6148
6149 void Trace(Serializer* s, ObjectPtr object) {
6150 WeakPropertyPtr property = WeakProperty::RawCast(object);
6151 objects_.Add(property);
6152
6153 s->PushWeak(property->untag()->key());
6154 }
6155
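// Ephemeron handling: the value is only pushed once the key is known to be
// reachable; if the key never becomes reachable, WriteFill emits null for
// both key and value.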
6156 void RetraceEphemerons(Serializer* s) {
6157 for (intptr_t i = 0; i < objects_.length(); i++) {
6158 WeakPropertyPtr property = objects_[i];
6159 if (s->IsReachable(property->untag()->key())) {
6160 s->Push(property->untag()->value());
6161 }
6162 }
6163 }
6164
6165 void WriteAlloc(Serializer* s) {
6166 const intptr_t count = objects_.length();
6167 s->WriteUnsigned(count);
6168 for (intptr_t i = 0; i < count; i++) {
6169 WeakPropertyPtr property = objects_[i];
6170 s->AssignRef(property);
6171 }
6172 }
6173
6174 void WriteFill(Serializer* s) {
6175 const intptr_t count = objects_.length();
6176 for (intptr_t i = 0; i < count; i++) {
6177 WeakPropertyPtr property = objects_[i];
6178 AutoTraceObject(property);
6179 if (s->HasRef(property->untag()->key())) {
6180 s->WriteOffsetRef(property->untag()->key(), WeakProperty::key_offset());
6181 s->WriteOffsetRef(property->untag()->value(),
6182 WeakProperty::value_offset());
6183 } else {
6184 s->WriteOffsetRef(Object::null(), WeakProperty::key_offset());
6185 s->WriteOffsetRef(Object::null(), WeakProperty::value_offset());
6186 }
6187 }
6188 }
6189
6190 private:
6191 GrowableArray<WeakPropertyPtr> objects_;
6192};
6193#endif // !DART_PRECOMPILED_RUNTIME
6194
6195class WeakPropertyDeserializationCluster : public DeserializationCluster {
6196 public:
6197 WeakPropertyDeserializationCluster()
6198 : DeserializationCluster("WeakProperty") {}
6199 ~WeakPropertyDeserializationCluster() {}
6200
6201 void ReadAlloc(Deserializer* d) override {
6202 ReadAllocFixedSize(d, WeakProperty::InstanceSize());
6203 }
6204
6205 void ReadFill(Deserializer* d_) override {
6206 Deserializer::Local d(d_);
6207
6208 ASSERT(!is_canonical()); // Never canonical.
6209 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6210 WeakPropertyPtr property = static_cast<WeakPropertyPtr>(d.Ref(id));
6211 Deserializer::InitializeHeader(property, kWeakPropertyCid,
6212 WeakProperty::InstanceSize());
6213 d.ReadFromTo(property);
6214 property->untag()->next_seen_by_gc_ = WeakProperty::null();
6215 }
6216 }
6217};
6218
6219#if !defined(DART_PRECOMPILED_RUNTIME)
6220class MapSerializationCluster : public SerializationCluster {
6221 public:
6222 MapSerializationCluster(bool is_canonical, intptr_t cid)
6223 : SerializationCluster("Map",
6224 cid,
6225 compiler::target::Map::InstanceSize(),
6226 is_canonical) {}
6227 ~MapSerializationCluster() {}
6228
6229 void Trace(Serializer* s, ObjectPtr object) {
6230 MapPtr map = Map::RawCast(object);
6231 // We never have mutable hashmaps in snapshots.
6232 ASSERT(map->untag()->IsCanonical());
6233 ASSERT_EQUAL(map.GetClassId(), kConstMapCid);
6234 objects_.Add(map);
6235 PushFromTo(map);
6236 }
6237
6238 void WriteAlloc(Serializer* s) {
6239 const intptr_t count = objects_.length();
6240 s->WriteUnsigned(count);
6241 for (intptr_t i = 0; i < count; i++) {
6242 MapPtr map = objects_[i];
6243 s->AssignRef(map);
6244 }
6245 }
6246
6247 void WriteFill(Serializer* s) {
6248 const intptr_t count = objects_.length();
6249 for (intptr_t i = 0; i < count; i++) {
6250 MapPtr map = objects_[i];
6251 AutoTraceObject(map);
6252 WriteFromTo(map);
6253 }
6254 }
6255
6256 private:
6257 GrowableArray<MapPtr> objects_;
6258};
6259#endif // !DART_PRECOMPILED_RUNTIME
6260
6261class MapDeserializationCluster
6262 : public AbstractInstanceDeserializationCluster {
6263 public:
6264 explicit MapDeserializationCluster(intptr_t cid,
6265 bool is_canonical,
6266 bool is_root_unit)
6267 : AbstractInstanceDeserializationCluster("Map",
6268 is_canonical,
6269 is_root_unit),
6270 cid_(cid) {}
6271 ~MapDeserializationCluster() {}
6272
6273 void ReadAlloc(Deserializer* d) override {
6274 ReadAllocFixedSize(d, Map::InstanceSize());
6275 }
6276
6277 void ReadFill(Deserializer* d_) override {
6278 Deserializer::Local d(d_);
6279
6280 const intptr_t cid = cid_;
6281 const bool mark_canonical = is_root_unit_ && is_canonical();
6282 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6283 MapPtr map = static_cast<MapPtr>(d.Ref(id));
6284 Deserializer::InitializeHeader(map, cid, Map::InstanceSize(),
6285 mark_canonical);
6286 d.ReadFromTo(map);
6287 }
6288 }
6289
6290 private:
6291 const intptr_t cid_;
6292};
6293
6294#if !defined(DART_PRECOMPILED_RUNTIME)
6295class SetSerializationCluster : public SerializationCluster {
6296 public:
6297 SetSerializationCluster(bool is_canonical, intptr_t cid)
6298 : SerializationCluster("Set",
6299 cid,
6300 compiler::target::Set::InstanceSize(),
6301 is_canonical) {}
6302 ~SetSerializationCluster() {}
6303
6304 void Trace(Serializer* s, ObjectPtr object) {
6305 SetPtr set = Set::RawCast(object);
6306 // We never have mutable hashsets in snapshots.
6307 ASSERT(set->untag()->IsCanonical());
6308 ASSERT_EQUAL(set.GetClassId(), kConstSetCid);
6309 objects_.Add(set);
6310 PushFromTo(set);
6311 }
6312
6313 void WriteAlloc(Serializer* s) {
6314 const intptr_t count = objects_.length();
6315 s->WriteUnsigned(count);
6316 for (intptr_t i = 0; i < count; i++) {
6317 SetPtr set = objects_[i];
6318 s->AssignRef(set);
6319 }
6320 }
6321
6322 void WriteFill(Serializer* s) {
6323 const intptr_t count = objects_.length();
6324 for (intptr_t i = 0; i < count; i++) {
6325 SetPtr set = objects_[i];
6326 AutoTraceObject(set);
6327 WriteFromTo(set);
6328 }
6329 }
6330
6331 private:
6332 GrowableArray<SetPtr> objects_;
6333};
6334#endif // !DART_PRECOMPILED_RUNTIME
6335
6336class SetDeserializationCluster
6337 : public AbstractInstanceDeserializationCluster {
6338 public:
6339 explicit SetDeserializationCluster(intptr_t cid,
6340 bool is_canonical,
6341 bool is_root_unit)
6342 : AbstractInstanceDeserializationCluster("Set",
6343 is_canonical,
6344 is_root_unit),
6345 cid_(cid) {}
6346 ~SetDeserializationCluster() {}
6347
6348 void ReadAlloc(Deserializer* d) override {
6349 ReadAllocFixedSize(d, Set::InstanceSize());
6350 }
6351
6352 void ReadFill(Deserializer* d_) override {
6353 Deserializer::Local d(d_);
6354
6355 const intptr_t cid = cid_;
6356 const bool mark_canonical = is_root_unit_ && is_canonical();
6357 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6358 SetPtr set = static_cast<SetPtr>(d.Ref(id));
6359 Deserializer::InitializeHeader(set, cid, Set::InstanceSize(),
6360 mark_canonical);
6361 d.ReadFromTo(set);
6362 }
6363 }
6364
6365 private:
6366 const intptr_t cid_;
6367};
6368
6369#if !defined(DART_PRECOMPILED_RUNTIME)
6370class ArraySerializationCluster : public SerializationCluster {
6371 public:
6372 ArraySerializationCluster(bool is_canonical, intptr_t cid)
6373 : SerializationCluster("Array", cid, kSizeVaries, is_canonical) {}
6374 ~ArraySerializationCluster() {}
6375
6376 void Trace(Serializer* s, ObjectPtr object) {
6377 ArrayPtr array = Array::RawCast(object);
6378 objects_.Add(array);
6379
6380 s->Push(array->untag()->type_arguments());
6381 const intptr_t length = Smi::Value(array->untag()->length());
6382 for (intptr_t i = 0; i < length; i++) {
6383 s->Push(array->untag()->element(i));
6384 }
6385 }
6386
6387#if defined(DART_PRECOMPILER)
6388 static bool IsReadOnlyCid(intptr_t cid) {
6389 switch (cid) {
6390 case kPcDescriptorsCid:
6391 case kCodeSourceMapCid:
6392 case kCompressedStackMapsCid:
6393 case kOneByteStringCid:
6394 case kTwoByteStringCid:
6395 return true;
6396 default:
6397 return false;
6398 }
6399 }
6400#endif // defined(DART_PRECOMPILER)
6401
6402 void WriteAlloc(Serializer* s) {
6403#if defined(DART_PRECOMPILER)
6404 if (FLAG_print_array_optimization_candidates) {
6405 intptr_t array_count = objects_.length();
6406 intptr_t array_count_allsmi = 0;
6407 intptr_t array_count_allro = 0;
6408 intptr_t array_count_empty = 0;
6409 intptr_t element_count = 0;
6410 intptr_t element_count_allsmi = 0;
6411 intptr_t element_count_allro = 0;
6412 for (intptr_t i = 0; i < array_count; i++) {
6413 ArrayPtr array = objects_[i];
6414 bool allsmi = true;
6415 bool allro = true;
6416 const intptr_t length = Smi::Value(array->untag()->length());
6417 for (intptr_t i = 0; i < length; i++) {
6418 ObjectPtr element = array->untag()->element(i);
6419 intptr_t cid = element->GetClassIdMayBeSmi();
6420 if (!IsReadOnlyCid(cid)) allro = false;
6421 if (cid != kSmiCid) allsmi = false;
6422 }
6423 element_count += length;
6424 if (length == 0) {
6425 array_count_empty++;
6426 } else if (allsmi) {
6427 array_count_allsmi++;
6428 element_count_allsmi += length;
6429 } else if (allro) {
6430 array_count_allro++;
6431 element_count_allro += length;
6432 }
6433 }
6434 OS::PrintErr("Arrays\n");
6435 OS::PrintErr(" total: %" Pd ", % " Pd " elements\n", array_count,
6436 element_count);
6437 OS::PrintErr(" smi-only:%" Pd ", % " Pd " elements\n",
6438 array_count_allsmi, element_count_allsmi);
6439 OS::PrintErr(" ro-only:%" Pd " , % " Pd " elements\n", array_count_allro,
6440 element_count_allro);
6441 OS::PrintErr(" empty:%" Pd "\n", array_count_empty);
6442 }
6443#endif // defined(DART_PRECOMPILER)
6444
6445 const intptr_t count = objects_.length();
6446 s->WriteUnsigned(count);
6447 for (intptr_t i = 0; i < count; i++) {
6448 ArrayPtr array = objects_[i];
6449 s->AssignRef(array);
6450 AutoTraceObject(array);
6451 const intptr_t length = Smi::Value(array->untag()->length());
6452 s->WriteUnsigned(length);
6453 target_memory_size_ += compiler::target::Array::InstanceSize(length);
6454 }
6455 }
6456
6457 void WriteFill(Serializer* s) {
6458 const intptr_t count = objects_.length();
6459 for (intptr_t i = 0; i < count; i++) {
6460 ArrayPtr array = objects_[i];
6461 AutoTraceObject(array);
6462 const intptr_t length = Smi::Value(array->untag()->length());
6463 s->WriteUnsigned(length);
6464 WriteCompressedField(array, type_arguments);
6465 for (intptr_t j = 0; j < length; j++) {
6466 s->WriteElementRef(array->untag()->element(j), j);
6467 }
6468 }
6469 }
6470
6471 private:
6472 GrowableArray<ArrayPtr> objects_;
6473};
6474#endif // !DART_PRECOMPILED_RUNTIME
6475
6476class ArrayDeserializationCluster
6477 : public AbstractInstanceDeserializationCluster {
6478 public:
6479 explicit ArrayDeserializationCluster(intptr_t cid,
6480 bool is_canonical,
6481 bool is_root_unit)
6482 : AbstractInstanceDeserializationCluster("Array",
6483 is_canonical,
6484 is_root_unit),
6485 cid_(cid) {}
6486 ~ArrayDeserializationCluster() {}
6487
6488 void ReadAlloc(Deserializer* d) override {
6489 start_index_ = d->next_index();
6490 const intptr_t count = d->ReadUnsigned();
6491 for (intptr_t i = 0; i < count; i++) {
6492 const intptr_t length = d->ReadUnsigned();
6493 d->AssignRef(d->Allocate(Array::InstanceSize(length)));
6494 }
6495 stop_index_ = d->next_index();
6496 }
6497
6498 void ReadFill(Deserializer* d_) override {
6499 Deserializer::Local d(d_);
6500
6501 const intptr_t cid = cid_;
6502 const bool stamp_canonical = is_root_unit_ && is_canonical();
6503 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6504 ArrayPtr array = static_cast<ArrayPtr>(d.Ref(id));
6505 const intptr_t length = d.ReadUnsigned();
6506 Deserializer::InitializeHeader(array, cid, Array::InstanceSize(length),
6507 stamp_canonical);
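// Arrays long enough to use card marking get the card-remembered header bit
// so the write barrier tracks stores into them via the card table.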
6508 if (Array::UseCardMarkingForAllocation(length)) {
6509 array->untag()->SetCardRememberedBitUnsynchronized();
6510 }
6511 array->untag()->type_arguments_ =
6512 static_cast<TypeArgumentsPtr>(d.ReadRef());
6513 array->untag()->length_ = Smi::New(length);
6514 for (intptr_t j = 0; j < length; j++) {
6515 array->untag()->data()[j] = d.ReadRef();
6516 }
6517 }
6518 }
6519
6520 private:
6521 const intptr_t cid_;
6522};
6523
6524#if !defined(DART_PRECOMPILED_RUNTIME)
6525class WeakArraySerializationCluster : public SerializationCluster {
6526 public:
6527 WeakArraySerializationCluster()
6528 : SerializationCluster("WeakArray", kWeakArrayCid, kSizeVaries) {}
6529 ~WeakArraySerializationCluster() {}
6530
6531 void Trace(Serializer* s, ObjectPtr object) {
6532 WeakArrayPtr array = WeakArray::RawCast(object);
6533 objects_.Add(array);
6534
6535 const intptr_t length = Smi::Value(array->untag()->length());
6536 for (intptr_t i = 0; i < length; i++) {
6537 s->PushWeak(array->untag()->element(i));
6538 }
6539 }
6540
6541 void WriteAlloc(Serializer* s) {
6542 const intptr_t count = objects_.length();
6543 s->WriteUnsigned(count);
6544 for (intptr_t i = 0; i < count; i++) {
6545 WeakArrayPtr array = objects_[i];
6546 s->AssignRef(array);
6547 AutoTraceObject(array);
6548 const intptr_t length = Smi::Value(array->untag()->length());
6549 s->WriteUnsigned(length);
6550 target_memory_size_ += compiler::target::WeakArray::InstanceSize(length);
6551 }
6552 }
6553
6554 void WriteFill(Serializer* s) {
6555 const intptr_t count = objects_.length();
6556 for (intptr_t i = 0; i < count; i++) {
6557 WeakArrayPtr array = objects_[i];
6558 AutoTraceObject(array);
6559 const intptr_t length = Smi::Value(array->untag()->length());
6560 s->WriteUnsigned(length);
6561 for (intptr_t j = 0; j < length; j++) {
6562 if (s->HasRef(array->untag()->element(j))) {
6563 s->WriteElementRef(array->untag()->element(j), j);
6564 } else {
6565 s->WriteElementRef(Object::null(), j);
6566 }
6567 }
6568 }
6569 }
6570
6571 private:
6572 GrowableArray<WeakArrayPtr> objects_;
6573};
6574#endif // !DART_PRECOMPILED_RUNTIME
6575
6576class WeakArrayDeserializationCluster : public DeserializationCluster {
6577 public:
6578 WeakArrayDeserializationCluster() : DeserializationCluster("WeakArray") {}
6579 ~WeakArrayDeserializationCluster() {}
6580
6581 void ReadAlloc(Deserializer* d) override {
6582 start_index_ = d->next_index();
6583 const intptr_t count = d->ReadUnsigned();
6584 for (intptr_t i = 0; i < count; i++) {
6585 const intptr_t length = d->ReadUnsigned();
6586 d->AssignRef(d->Allocate(WeakArray::InstanceSize(length)));
6587 }
6588 stop_index_ = d->next_index();
6589 }
6590
6591 void ReadFill(Deserializer* d_) override {
6592 Deserializer::Local d(d_);
6593
6594 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6595 WeakArrayPtr array = static_cast<WeakArrayPtr>(d.Ref(id));
6596 const intptr_t length = d.ReadUnsigned();
6597 Deserializer::InitializeHeader(array, kWeakArrayCid,
6598 WeakArray::InstanceSize(length), false);
6599 array->untag()->next_seen_by_gc_ = WeakArray::null();
6600 array->untag()->length_ = Smi::New(length);
6601 for (intptr_t j = 0; j < length; j++) {
6602 array->untag()->data()[j] = d.ReadRef();
6603 }
6604 }
6605 }
6606};
6607
6608#if !defined(DART_PRECOMPILED_RUNTIME)
6609class StringSerializationCluster
6610 : public CanonicalSetSerializationCluster<CanonicalStringSet,
6611 String,
6612 StringPtr> {
6613 public:
6614 // To distinguish one and two byte strings, we put a bit in the length to
6615 // indicate which it is. The length is an unsigned SMI, so we actually have
6616 // two spare bits available. Keep in sync with DecodeLengthAndCid.
6617 static intptr_t EncodeLengthAndCid(intptr_t length, intptr_t cid) {
6618 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
6619 ASSERT(length <= compiler::target::kSmiMax);
6620 return (length << 1) | (cid == kTwoByteStringCid ? 0x1 : 0x0);
6621 }
6622
6623 explicit StringSerializationCluster(bool is_canonical,
6624 bool represents_canonical_set)
6625 : CanonicalSetSerializationCluster(kStringCid,
6626 is_canonical,
6627 represents_canonical_set,
6628 "String",
6629 kSizeVaries) {}
6630 ~StringSerializationCluster() {}
6631
6632 void Trace(Serializer* s, ObjectPtr object) {
6633 StringPtr str = static_cast<StringPtr>(object);
6634 objects_.Add(str);
6635 }
6636
6637 void WriteAlloc(Serializer* s) {
6638 const intptr_t count = objects_.length();
6639 s->WriteUnsigned(count);
6640 ReorderObjects(s);
6641 for (intptr_t i = 0; i < count; i++) {
6642 StringPtr str = objects_[i];
6643 s->AssignRef(str);
6644 AutoTraceObject(str);
6645 const intptr_t cid = str->GetClassId();
6646 const intptr_t length = Smi::Value(str->untag()->length());
6647 const intptr_t encoded = EncodeLengthAndCid(length, cid);
6648 s->WriteUnsigned(encoded);
6649 target_memory_size_ +=
6650 cid == kOneByteStringCid
6651 ? compiler::target::OneByteString::InstanceSize(length)
6652 : compiler::target::TwoByteString::InstanceSize(length);
6653 }
6654 WriteCanonicalSetLayout(s);
6655 }
6656
6657 void WriteFill(Serializer* s) {
6658 const intptr_t count = objects_.length();
6659 for (intptr_t i = 0; i < count; i++) {
6660 StringPtr str = objects_[i];
6661 AutoTraceObject(str);
6662 const intptr_t cid = str->GetClassId();
6663 const intptr_t length = Smi::Value(str->untag()->length());
6664 const intptr_t encoded = EncodeLengthAndCid(length, cid);
6665 s->WriteUnsigned(encoded);
6666 if (cid == kOneByteStringCid) {
6667 s->WriteBytes(static_cast<OneByteStringPtr>(str)->untag()->data(),
6668 length);
6669 } else {
6670 s->WriteBytes(reinterpret_cast<uint8_t*>(
6671 static_cast<TwoByteStringPtr>(str)->untag()->data()),
6672 length * 2);
6673 }
6674 }
6675 }
6676};
6677#endif // !DART_PRECOMPILED_RUNTIME
6678
6679class StringDeserializationCluster
6680 : public CanonicalSetDeserializationCluster<CanonicalStringSet> {
6681 public:
6682 static intptr_t DecodeLengthAndCid(intptr_t encoded, intptr_t* out_cid) {
6683 *out_cid = (encoded & 0x1) != 0 ? kTwoByteStringCid : kOneByteStringCid;
6684 return encoded >> 1;
6685 }
6686
6687 static intptr_t InstanceSize(intptr_t length, intptr_t cid) {
6688 return cid == kOneByteStringCid ? OneByteString::InstanceSize(length)
6689 : TwoByteString::InstanceSize(length);
6690 }
6691
6692 explicit StringDeserializationCluster(bool is_canonical, bool is_root_unit)
6693 : CanonicalSetDeserializationCluster(is_canonical,
6694 is_root_unit,
6695 "String") {}
6696 ~StringDeserializationCluster() {}
6697
6698 void ReadAlloc(Deserializer* d) override {
6699 start_index_ = d->next_index();
6700 const intptr_t count = d->ReadUnsigned();
6701 for (intptr_t i = 0; i < count; i++) {
6702 const intptr_t encoded = d->ReadUnsigned();
6703 intptr_t cid = 0;
6704 const intptr_t length = DecodeLengthAndCid(encoded, &cid);
6705 d->AssignRef(d->Allocate(InstanceSize(length, cid)));
6706 }
6707 stop_index_ = d->next_index();
6708 BuildCanonicalSetFromLayout(d);
6709 }
6710
6711 void ReadFill(Deserializer* d_) override {
6712 Deserializer::Local d(d_);
6713
6714 for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
6715 StringPtr str = static_cast<StringPtr>(d.Ref(id));
6716 const intptr_t encoded = d.ReadUnsigned();
6717 intptr_t cid = 0;
6718 const intptr_t length = DecodeLengthAndCid(encoded, &cid);
6719 const intptr_t instance_size = InstanceSize(length, cid);
6720 // Clean up last two words of the string object to simplify future
6721 // string comparisons.
6722 // Objects are rounded up to two-word size boundary.
6723 *reinterpret_cast<word*>(reinterpret_cast<uint8_t*>(str->untag()) +
6724 instance_size - 1 * kWordSize) = 0;
6725 *reinterpret_cast<word*>(reinterpret_cast<uint8_t*>(str->untag()) +
6726 instance_size - 2 * kWordSize) = 0;
6727 Deserializer::InitializeHeader(str, cid, instance_size, is_canonical());
6728#if DART_COMPRESSED_POINTERS
6729 // Gap caused by less-than-a-word length_ smi sitting before data_.
6730 const intptr_t length_offset =
6731 reinterpret_cast<intptr_t>(&str->untag()->length_);
6732 const intptr_t data_offset =
6733 cid == kOneByteStringCid
6734 ? reinterpret_cast<intptr_t>(
6735 static_cast<OneByteStringPtr>(str)->untag()->data())
6736 : reinterpret_cast<intptr_t>(
6737 static_cast<TwoByteStringPtr>(str)->untag()->data());
6738 const intptr_t length_with_gap = data_offset - length_offset;
6739 ASSERT(length_with_gap > kCompressedWordSize);
6740 ASSERT(length_with_gap == kWordSize);
6741 memset(reinterpret_cast<void*>(length_offset), 0, length_with_gap);
6742#endif
6743 str->untag()->length_ = Smi::New(length);
6744
6745 StringHasher hasher;
6746 if (cid == kOneByteStringCid) {
6747 for (intptr_t j = 0; j < length; j++) {
6748 uint8_t code_unit = d.Read<uint8_t>();
6749 static_cast<OneByteStringPtr>(str)->untag()->data()[j] = code_unit;
6750 hasher.Add(code_unit);
6751 }
6752
6753 } else {
6754 for (intptr_t j = 0; j < length; j++) {
6755 uint16_t code_unit = d.Read<uint8_t>();
6756 code_unit = code_unit | (d.Read<uint8_t>() << 8);
6757 static_cast<TwoByteStringPtr>(str)->untag()->data()[j] = code_unit;
6758 hasher.Add(code_unit);
6759 }
6760 }
6761 String::SetCachedHash(str, hasher.Finalize());
6762 }
6763 }
6764
6765 void PostLoad(Deserializer* d, const Array& refs) override {
6766 if (!table_.IsNull()) {
6767 auto object_store = d->isolate_group()->object_store();
6768 VerifyCanonicalSet(d, refs,
6769 WeakArray::Handle(object_store->symbol_table()));
6770 object_store->set_symbol_table(table_);
6771 if (d->isolate_group() == Dart::vm_isolate_group()) {
6772 Symbols::InitFromSnapshot(d->isolate_group());
6773 }
6774#if defined(DEBUG)
6775 Symbols::New(Thread::Current(), ":some:new:symbol:");
6776 ASSERT(object_store->symbol_table() == table_.ptr()); // Did not rehash.
6777#endif
6778 }
6779 }
6780};
6781
6782#if !defined(DART_PRECOMPILED_RUNTIME)
6783class FakeSerializationCluster : public SerializationCluster {
6784 public:
6785 FakeSerializationCluster(const char* name,
6786 intptr_t num_objects,
6787 intptr_t size,
6788 intptr_t target_memory_size = 0)
6789 : SerializationCluster(name, -1) {
6790 num_objects_ = num_objects;
6791 size_ = size;
6792 target_memory_size_ = target_memory_size;
6793 }
6794 ~FakeSerializationCluster() {}
6795
6796 void Trace(Serializer* s, ObjectPtr object) { UNREACHABLE(); }
6797 void WriteAlloc(Serializer* s) { UNREACHABLE(); }
6798 void WriteFill(Serializer* s) { UNREACHABLE(); }
6799};
6800#endif // !DART_PRECOMPILED_RUNTIME
6801
6802#if !defined(DART_PRECOMPILED_RUNTIME)
6803class VMSerializationRoots : public SerializationRoots {
6804 public:
6805 explicit VMSerializationRoots(const WeakArray& symbols,
6806 bool should_write_symbols)
6807 : symbols_(symbols),
6808 should_write_symbols_(should_write_symbols),
6809 zone_(Thread::Current()->zone()) {}
6810
6811 void AddBaseObjects(Serializer* s) {
6812 // These objects are always allocated by Object::InitOnce, so they are not
6813 // written into the snapshot.
6814
6815 s->AddBaseObject(Object::null(), "Null", "null");
6816 s->AddBaseObject(Object::sentinel().ptr(), "Null", "sentinel");
6817 s->AddBaseObject(Object::transition_sentinel().ptr(), "Null",
6818 "transition_sentinel");
6819 s->AddBaseObject(Object::optimized_out().ptr(), "Null", "<optimized out>");
6820 s->AddBaseObject(Object::empty_array().ptr(), "Array", "<empty_array>");
6821 s->AddBaseObject(Object::empty_instantiations_cache_array().ptr(), "Array",
6822 "<empty_instantiations_cache_array>");
6823 s->AddBaseObject(Object::empty_subtype_test_cache_array().ptr(), "Array",
6824 "<empty_subtype_test_cache_array>");
6825 s->AddBaseObject(Object::dynamic_type().ptr(), "Type", "<dynamic type>");
6826 s->AddBaseObject(Object::void_type().ptr(), "Type", "<void type>");
6827 s->AddBaseObject(Object::empty_type_arguments().ptr(), "TypeArguments",
6828 "[]");
6829 s->AddBaseObject(Bool::True().ptr(), "bool", "true");
6830 s->AddBaseObject(Bool::False().ptr(), "bool", "false");
6831 ASSERT(Object::synthetic_getter_parameter_types().ptr() != Object::null());
6832 s->AddBaseObject(Object::synthetic_getter_parameter_types().ptr(), "Array",
6833 "<synthetic getter parameter types>");
6834 ASSERT(Object::synthetic_getter_parameter_names().ptr() != Object::null());
6835 s->AddBaseObject(Object::synthetic_getter_parameter_names().ptr(), "Array",
6836 "<synthetic getter parameter names>");
6837 s->AddBaseObject(Object::empty_context_scope().ptr(), "ContextScope",
6838 "<empty>");
6839 s->AddBaseObject(Object::empty_object_pool().ptr(), "ObjectPool",
6840 "<empty>");
6841 s->AddBaseObject(Object::empty_compressed_stackmaps().ptr(),
6842 "CompressedStackMaps", "<empty>");
6843 s->AddBaseObject(Object::empty_descriptors().ptr(), "PcDescriptors",
6844 "<empty>");
6845 s->AddBaseObject(Object::empty_var_descriptors().ptr(),
6846 "LocalVarDescriptors", "<empty>");
6847 s->AddBaseObject(Object::empty_exception_handlers().ptr(),
6848 "ExceptionHandlers", "<empty>");
6849 s->AddBaseObject(Object::empty_async_exception_handlers().ptr(),
6850 "ExceptionHandlers", "<empty async>");
6851
6852 for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) {
6853 s->AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i],
6854 "ArgumentsDescriptor", "<cached arguments descriptor>");
6855 }
6856 for (intptr_t i = 0; i < ICData::kCachedICDataArrayCount; i++) {
6857 s->AddBaseObject(ICData::cached_icdata_arrays_[i], "Array",
6858 "<empty icdata entries>");
6859 }
6860
6861 ClassTable* table = s->isolate_group()->class_table();
6862 for (intptr_t cid = kFirstInternalOnlyCid; cid <= kLastInternalOnlyCid;
6863 cid++) {
6864 // Error and CallSiteData have no class objects.
6865 if (cid != kErrorCid && cid != kCallSiteDataCid) {
6866 ASSERT(table->HasValidClassAt(cid));
6867 s->AddBaseObject(
6868 table->At(cid), "Class",
6869 Class::Handle(table->At(cid))
6870 .NameCString(Object::NameVisibility::kInternalName));
6871 }
6872 }
6873 s->AddBaseObject(table->At(kDynamicCid), "Class", "dynamic");
6874 s->AddBaseObject(table->At(kVoidCid), "Class", "void");
6875
6876 if (!Snapshot::IncludesCode(s->kind())) {
6877 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6878 s->AddBaseObject(StubCode::EntryAt(i).ptr());
6879 }
6880 }
6881 }
6882
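// When the symbol table itself is not written, each symbol is still pushed
// individually so that symbols referenced elsewhere end up in the snapshot.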
6883 void PushRoots(Serializer* s) {
6884 if (should_write_symbols_) {
6885 s->Push(symbols_.ptr());
6886 } else {
6887 for (intptr_t i = 0; i < symbols_.Length(); i++) {
6888 s->Push(symbols_.At(i));
6889 }
6890 }
6891 if (Snapshot::IncludesCode(s->kind())) {
6892 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6893 s->Push(StubCode::EntryAt(i).ptr());
6894 }
6895 }
6896 }
6897
6898 void WriteRoots(Serializer* s) {
6899 s->WriteRootRef(should_write_symbols_ ? symbols_.ptr() : Object::null(),
6900 "symbol-table");
6901 if (Snapshot::IncludesCode(s->kind())) {
6902 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6903 s->WriteRootRef(StubCode::EntryAt(i).ptr(),
6904 zone_->PrintToString("Stub:%s", StubCode::NameAt(i)));
6905 }
6906 }
6907
6908 if (!should_write_symbols_ && s->profile_writer() != nullptr) {
6909 // If writing a V8 snapshot profile, create an artificial node representing
6910 // the VM isolate symbol table.
6911 ASSERT(!s->IsReachable(symbols_.ptr()));
6912 s->AssignArtificialRef(symbols_.ptr());
6913 const auto& symbols_snapshot_id = s->GetProfileId(symbols_.ptr());
6914 s->profile_writer()->SetObjectTypeAndName(symbols_snapshot_id, "Symbols",
6915 "vm_symbols");
6916 s->profile_writer()->AddRoot(symbols_snapshot_id);
6917 for (intptr_t i = 0; i < symbols_.Length(); i++) {
6918 s->profile_writer()->AttributeReferenceTo(
6919 symbols_snapshot_id, V8SnapshotProfileWriter::Reference::Element(i),
6920 s->GetProfileId(symbols_.At(i)));
6921 }
6922 }
6923 }
6924
6925 private:
6926 const WeakArray& symbols_;
6927 const bool should_write_symbols_;
6928 Zone* zone_;
6929};
6930#endif // !DART_PRECOMPILED_RUNTIME
6931
6932class VMDeserializationRoots : public DeserializationRoots {
6933 public:
6934 VMDeserializationRoots() : symbol_table_(WeakArray::Handle()) {}
6935
6936 void AddBaseObjects(Deserializer* d) override {
6937 // These objects are always allocated by Object::InitOnce, so they are not
6938 // written into the snapshot.
6939
6940 d->AddBaseObject(Object::null());
6941 d->AddBaseObject(Object::sentinel().ptr());
6942 d->AddBaseObject(Object::transition_sentinel().ptr());
6943 d->AddBaseObject(Object::optimized_out().ptr());
6944 d->AddBaseObject(Object::empty_array().ptr());
6945 d->AddBaseObject(Object::empty_instantiations_cache_array().ptr());
6946 d->AddBaseObject(Object::empty_subtype_test_cache_array().ptr());
6947 d->AddBaseObject(Object::dynamic_type().ptr());
6948 d->AddBaseObject(Object::void_type().ptr());
6949 d->AddBaseObject(Object::empty_type_arguments().ptr());
6950 d->AddBaseObject(Bool::True().ptr());
6951 d->AddBaseObject(Bool::False().ptr());
6952 ASSERT(Object::synthetic_getter_parameter_types().ptr() != Object::null());
6953 d->AddBaseObject(Object::synthetic_getter_parameter_types().ptr());
6954 ASSERT(Object::synthetic_getter_parameter_names().ptr() != Object::null());
6955 d->AddBaseObject(Object::synthetic_getter_parameter_names().ptr());
6956 d->AddBaseObject(Object::empty_context_scope().ptr());
6957 d->AddBaseObject(Object::empty_object_pool().ptr());
6958 d->AddBaseObject(Object::empty_compressed_stackmaps().ptr());
6959 d->AddBaseObject(Object::empty_descriptors().ptr());
6960 d->AddBaseObject(Object::empty_var_descriptors().ptr());
6961 d->AddBaseObject(Object::empty_exception_handlers().ptr());
6962 d->AddBaseObject(Object::empty_async_exception_handlers().ptr());
6963
6964 for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) {
6965 d->AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i]);
6966 }
6967 for (intptr_t i = 0; i < ICData::kCachedICDataArrayCount; i++) {
6968 d->AddBaseObject(ICData::cached_icdata_arrays_[i]);
6969 }
6970
6971 ClassTable* table = d->isolate_group()->class_table();
6972 for (intptr_t cid = kFirstInternalOnlyCid; cid <= kLastInternalOnlyCid;
6973 cid++) {
6974 // Error, CallSiteData has no class object.
6975 if (cid != kErrorCid && cid != kCallSiteDataCid) {
6976 ASSERT(table->HasValidClassAt(cid));
6977 d->AddBaseObject(table->At(cid));
6978 }
6979 }
6980 d->AddBaseObject(table->At(kDynamicCid));
6981 d->AddBaseObject(table->At(kVoidCid));
6982
6983 if (!Snapshot::IncludesCode(d->kind())) {
6984 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6985 d->AddBaseObject(StubCode::EntryAt(i).ptr());
6986 }
6987 }
6988 }
6989
6990 void ReadRoots(Deserializer* d) override {
6991 symbol_table_ ^= d->ReadRef();
6992 if (!symbol_table_.IsNull()) {
6993 d->isolate_group()->object_store()->set_symbol_table(symbol_table_);
6994 }
6995 if (Snapshot::IncludesCode(d->kind())) {
6996 for (intptr_t i = 0; i < StubCode::NumEntries(); i++) {
6997 Code* code = Code::ReadOnlyHandle();
6998 *code ^= d->ReadRef();
6999 StubCode::EntryAtPut(i, code);
7000 }
7001 StubCode::InitializationDone();
7002 }
7003 }
7004
7005 void PostLoad(Deserializer* d, const Array& refs) override {
7006 // Move remaining bump allocation space to the freelist so it is used by C++
7007 // allocations (e.g., FinalizeVMIsolate) before allocating new pages.
7008 d->heap()->old_space()->ReleaseBumpAllocation();
7009
7010 if (!symbol_table_.IsNull()) {
7011 Symbols::InitFromSnapshot(d->isolate_group());
7012 }
7013
7014 Object::set_vm_isolate_snapshot_object_table(refs);
7015 }
7016
7017 private:
7018 WeakArray& symbol_table_;
7019};
7020
7021#if !defined(DART_PRECOMPILED_RUNTIME)
7022static const char* const kObjectStoreFieldNames[] = {
7023#define DECLARE_OBJECT_STORE_FIELD(Type, Name) #Name,
7033#undef DECLARE_OBJECT_STORE_FIELD
7034};
7035
7036class ProgramSerializationRoots : public SerializationRoots {
7037 public:
7038#define RESET_ROOT_LIST(V) \
7039 V(symbol_table, WeakArray, HashTables::New<CanonicalStringSet>(4)) \
7040 V(canonical_types, Array, HashTables::New<CanonicalTypeSet>(4)) \
7041 V(canonical_function_types, Array, \
7042 HashTables::New<CanonicalFunctionTypeSet>(4)) \
7043 V(canonical_record_types, Array, HashTables::New<CanonicalRecordTypeSet>(4)) \
7044 V(canonical_type_arguments, Array, \
7045 HashTables::New<CanonicalTypeArgumentsSet>(4)) \
7046 V(canonical_type_parameters, Array, \
7047 HashTables::New<CanonicalTypeParameterSet>(4)) \
7048 ONLY_IN_PRODUCT(ONLY_IN_AOT( \
7049 V(closure_functions, GrowableObjectArray, GrowableObjectArray::null()))) \
7050 ONLY_IN_AOT(V(closure_functions_table, Array, Array::null())) \
7051 ONLY_IN_AOT(V(canonicalized_stack_map_entries, CompressedStackMaps, \
7052 CompressedStackMaps::null()))
7053
7054 ProgramSerializationRoots(ZoneGrowableArray<Object*>* base_objects,
7055 ObjectStore* object_store,
7056 Snapshot::Kind snapshot_kind)
7057 : base_objects_(base_objects),
7058 object_store_(object_store),
7059 snapshot_kind_(snapshot_kind) {
7060#define ONLY_IN_AOT(code) \
7061 if (snapshot_kind_ == Snapshot::kFullAOT) { \
7062 code \
7063 }
7064#define SAVE_AND_RESET_ROOT(name, Type, init) \
7065 do { \
7066 saved_##name##_ = object_store->name(); \
7067 object_store->set_##name(Type::Handle(init)); \
7068 } while (0);
7069
7070 RESET_ROOT_LIST(SAVE_AND_RESET_ROOT)
7071#undef SAVE_AND_RESET_ROOT
7072#undef ONLY_IN_AOT
7073 }
7074 ~ProgramSerializationRoots() {
7075#define ONLY_IN_AOT(code) \
7076 if (snapshot_kind_ == Snapshot::kFullAOT) { \
7077 code \
7078 }
7079#define RESTORE_ROOT(name, Type, init) \
7080 object_store_->set_##name(saved_##name##_);
7081 RESET_ROOT_LIST(RESTORE_ROOT)
7082#undef RESTORE_ROOT
7083#undef ONLY_IN_AOT
7084 }
7085
7086 void AddBaseObjects(Serializer* s) {
7087 if (base_objects_ == nullptr) {
7088 // Not writing a new vm isolate: use the one this VM was loaded from.
7089 const Array& base_objects = Object::vm_isolate_snapshot_object_table();
7090 for (intptr_t i = kFirstReference; i < base_objects.Length(); i++) {
7091 s->AddBaseObject(base_objects.At(i));
7092 }
7093 } else {
7094 // Base objects carried over from WriteVMSnapshot.
7095 for (intptr_t i = 0; i < base_objects_->length(); i++) {
7096 s->AddBaseObject((*base_objects_)[i]->ptr());
7097 }
7098 }
7099 }
7100
7101 void PushRoots(Serializer* s) {
7102 ObjectPtr* from = object_store_->from();
7103 ObjectPtr* to = object_store_->to_snapshot(s->kind());
7104 for (ObjectPtr* p = from; p <= to; p++) {
7105 s->Push(*p);
7106 }
7107
7108 FieldTable* initial_field_table =
7109 s->thread()->isolate_group()->initial_field_table();
7110 for (intptr_t i = 0, n = initial_field_table->NumFieldIds(); i < n; i++) {
7111 s->Push(initial_field_table->At(i));
7112 }
7113
7114 dispatch_table_entries_ = object_store_->dispatch_table_code_entries();
7115 // We should only have a dispatch table in precompiled mode.
7116 ASSERT(dispatch_table_entries_.IsNull() || s->kind() == Snapshot::kFullAOT);
7117
7118#if defined(DART_PRECOMPILER)
7119 // We treat the dispatch table as a root object and trace the Code objects
7120 // it references. Otherwise, a non-empty entry could be invalid on
7121 // deserialization if the corresponding Code object was not reachable from
7122 // the existing snapshot roots.
7123 if (!dispatch_table_entries_.IsNull()) {
7124 for (intptr_t i = 0; i < dispatch_table_entries_.Length(); i++) {
7125 s->Push(dispatch_table_entries_.At(i));
7126 }
7127 }
7128#endif
7129 }
7130
7131 void WriteRoots(Serializer* s) {
7132 ObjectPtr* from = object_store_->from();
7133 ObjectPtr* to = object_store_->to_snapshot(s->kind());
7134 for (ObjectPtr* p = from; p <= to; p++) {
7135 s->WriteRootRef(*p, kObjectStoreFieldNames[p - from]);
7136 }
7137
7138 FieldTable* initial_field_table =
7139 s->thread()->isolate_group()->initial_field_table();
7140 intptr_t n = initial_field_table->NumFieldIds();
7141 s->WriteUnsigned(n);
7142 for (intptr_t i = 0; i < n; i++) {
7143 s->WriteRootRef(initial_field_table->At(i), "some-static-field");
7144 }
7145
7146 // The dispatch table is serialized only for precompiled snapshots.
7147 s->WriteDispatchTable(dispatch_table_entries_);
7148 }
7149
7150 virtual const CompressedStackMaps& canonicalized_stack_map_entries() const {
7151 return saved_canonicalized_stack_map_entries_;
7152 }
7153
7154 private:
7155 ZoneGrowableArray<Object*>* const base_objects_;
7156 ObjectStore* const object_store_;
7157 const Snapshot::Kind snapshot_kind_;
7158 Array& dispatch_table_entries_ = Array::Handle();
7159
7160#define ONLY_IN_AOT(code) code
7161#define DECLARE_FIELD(name, Type, init) Type& saved_##name##_ = Type::Handle();
7162 RESET_ROOT_LIST(DECLARE_FIELD)
7163#undef DECLARE_FIELD
7164#undef ONLY_IN_AOT
7165};
7166#endif // !DART_PRECOMPILED_RUNTIME
7167
7168class ProgramDeserializationRoots : public DeserializationRoots {
7169 public:
7170 explicit ProgramDeserializationRoots(ObjectStore* object_store)
7171 : object_store_(object_store) {}
7172
7173 void AddBaseObjects(Deserializer* d) override {
7174 // N.B.: Skipping index 0 because ref 0 is illegal.
7175 const Array& base_objects = Object::vm_isolate_snapshot_object_table();
7176 for (intptr_t i = kFirstReference; i < base_objects.Length(); i++) {
7177 d->AddBaseObject(base_objects.At(i));
7178 }
7179 }
7180
7181 void ReadRoots(Deserializer* d) override {
7182 // Read roots.
7183 ObjectPtr* from = object_store_->from();
7184 ObjectPtr* to = object_store_->to_snapshot(d->kind());
7185 for (ObjectPtr* p = from; p <= to; p++) {
7186 *p = d->ReadRef();
7187 }
7188
7189 FieldTable* initial_field_table =
7190 d->thread()->isolate_group()->initial_field_table();
7191 intptr_t n = d->ReadUnsigned();
7192 initial_field_table->AllocateIndex(n - 1);
7193 for (intptr_t i = 0; i < n; i++) {
7194 initial_field_table->SetAt(i, d->ReadRef());
7195 }
7196
7197 // Deserialize dispatch table (when applicable)
7198 d->ReadDispatchTable();
7199 }
7200
7201 void PostLoad(Deserializer* d, const Array& refs) override {
7202 auto isolate_group = d->isolate_group();
7203 { isolate_group->class_table()->CopySizesFromClassObjects(); }
7204 d->heap()->old_space()->EvaluateAfterLoading();
7205
7206 auto object_store = isolate_group->object_store();
7207 const Array& units = Array::Handle(object_store->loading_units());
7208 if (!units.IsNull()) {
7209 LoadingUnit& unit = LoadingUnit::Handle();
7210 unit ^= units.At(LoadingUnit::kRootId);
7211 unit.set_base_objects(refs);
7212 }
7213
7214 // Setup native resolver for bootstrap impl.
7215 Bootstrap::SetupNativeResolver();
7216 }
7217
7218 private:
7219 ObjectStore* object_store_;
7220};
7221
7222#if !defined(DART_PRECOMPILED_RUNTIME)
7223class UnitSerializationRoots : public SerializationRoots {
7224 public:
7225 explicit UnitSerializationRoots(LoadingUnitSerializationData* unit)
7226 : unit_(unit) {}
7227
7228 void AddBaseObjects(Serializer* s) {
7229 ZoneGrowableArray<Object*>* objects = unit_->parent()->objects();
7230 for (intptr_t i = 0; i < objects->length(); i++) {
7231 s->AddBaseObject(objects->At(i)->ptr());
7232 }
7233 }
7234
7235 void PushRoots(Serializer* s) {
7236 for (auto deferred_object : *unit_->deferred_objects()) {
7237 ASSERT(deferred_object->IsCode());
7238 CodePtr code = static_cast<CodePtr>(deferred_object->ptr());
7239 ObjectPoolPtr pool = code->untag()->object_pool_;
7240 if (pool != ObjectPool::null()) {
7241 const intptr_t length = pool->untag()->length_;
7242 uint8_t* entry_bits = pool->untag()->entry_bits();
7243 for (intptr_t i = 0; i < length; i++) {
7244 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
7245 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
7246 s->Push(pool->untag()->data()[i].raw_obj_);
7247 }
7248 }
7249 }
7250 s->Push(code->untag()->code_source_map_);
7251 }
7252 }
7253
7254 void WriteRoots(Serializer* s) {
7255#if defined(DART_PRECOMPILER)
7256 intptr_t start_index = 0;
7257 intptr_t num_deferred_objects = unit_->deferred_objects()->length();
7258 if (num_deferred_objects != 0) {
7259 start_index = s->RefId(unit_->deferred_objects()->At(0)->ptr());
7260 ASSERT(start_index > 0);
7261 }
7262 s->WriteUnsigned(start_index);
7263 s->WriteUnsigned(num_deferred_objects);
7264 for (intptr_t i = 0; i < num_deferred_objects; i++) {
7265 const Object* deferred_object = (*unit_->deferred_objects())[i];
7266 ASSERT(deferred_object->IsCode());
7267 CodePtr code = static_cast<CodePtr>(deferred_object->ptr());
7268 ASSERT(s->RefId(code) == (start_index + i));
7269 ASSERT(!Code::IsDiscarded(code));
7270 s->WriteInstructions(code->untag()->instructions_,
7271 code->untag()->unchecked_offset_, code, false);
7272 s->WriteRootRef(code->untag()->code_source_map_, "deferred-code");
7273 }
7274
7275 ObjectPoolPtr pool =
7276 s->isolate_group()->object_store()->global_object_pool();
7277 const intptr_t length = pool->untag()->length_;
7278 uint8_t* entry_bits = pool->untag()->entry_bits();
7279 intptr_t last_write = 0;
7280 for (intptr_t i = 0; i < length; i++) {
7281 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
7282 if (entry_type == ObjectPool::EntryType::kTaggedObject) {
7283 if (s->IsWritten(pool->untag()->data()[i].raw_obj_)) {
7284 intptr_t skip = i - last_write;
7285 s->WriteUnsigned(skip);
7286 s->WriteRootRef(pool->untag()->data()[i].raw_obj_,
7287 "deferred-literal");
7288 last_write = i;
7289 }
7290 }
7291 }
7292 s->WriteUnsigned(length - last_write);
7293#endif
7294 }
7295
7296 private:
7297 LoadingUnitSerializationData* unit_;
7298};
7299#endif // !DART_PRECOMPILED_RUNTIME
7300
7301class UnitDeserializationRoots : public DeserializationRoots {
7302 public:
7303 explicit UnitDeserializationRoots(const LoadingUnit& unit) : unit_(unit) {}
7304
7305 void AddBaseObjects(Deserializer* d) override {
7306 const Array& base_objects =
7307 Array::Handle(LoadingUnit::Handle(unit_.parent()).base_objects());
7308 for (intptr_t i = kFirstReference; i < base_objects.Length(); i++) {
7309 d->AddBaseObject(base_objects.At(i));
7310 }
7311 }
7312
7313 void ReadRoots(Deserializer* d) override {
7314 deferred_start_index_ = d->ReadUnsigned();
7315 deferred_stop_index_ = deferred_start_index_ + d->ReadUnsigned();
7316 for (intptr_t id = deferred_start_index_; id < deferred_stop_index_; id++) {
7317 CodePtr code = static_cast<CodePtr>(d->Ref(id));
7318 ASSERT(!Code::IsUnknownDartCode(code));
7319 d->ReadInstructions(code, /*deferred=*/false);
7320 if (code->untag()->owner_->IsHeapObject() &&
7321 code->untag()->owner_->IsFunction()) {
7322 FunctionPtr func = static_cast<FunctionPtr>(code->untag()->owner_);
7323 uword entry_point = code->untag()->entry_point_;
7324 ASSERT(entry_point != 0);
7325 func->untag()->entry_point_ = entry_point;
7326 uword unchecked_entry_point = code->untag()->unchecked_entry_point_;
7327 ASSERT(unchecked_entry_point != 0);
7328 func->untag()->unchecked_entry_point_ = unchecked_entry_point;
7329#if defined(DART_PRECOMPILED_RUNTIME)
7330 if (func->untag()->data()->IsHeapObject() &&
7331 func->untag()->data()->IsClosureData()) {
7332 // For closure functions in bare instructions mode, also update the
7333 // cache inside the static implicit closure object, if any.
7334 auto data = static_cast<ClosureDataPtr>(func->untag()->data());
7335 if (data->untag()->closure() != Closure::null()) {
7336 // Closure functions only have one entry point.
7337 ASSERT_EQUAL(entry_point, unchecked_entry_point);
7338 data->untag()->closure()->untag()->entry_point_ = entry_point;
7339 }
7340 }
7341#endif
7342 }
7343 code->untag()->code_source_map_ =
7344 static_cast<CodeSourceMapPtr>(d->ReadRef());
7345 }
7346
7347 ObjectPoolPtr pool =
7348 d->isolate_group()->object_store()->global_object_pool();
7349 const intptr_t length = pool->untag()->length_;
7350 uint8_t* entry_bits = pool->untag()->entry_bits();
7351 for (intptr_t i = d->ReadUnsigned(); i < length; i += d->ReadUnsigned()) {
7352 auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
7353 ASSERT(entry_type == ObjectPool::EntryType::kTaggedObject);
7354 // The existing entry will usually be null, but it might also be an
7355 // equivalent object that was duplicated in another loading unit.
7356 pool->untag()->data()[i].raw_obj_ = d->ReadRef();
7357 }
7358
7359 // Reinitialize the dispatch table by rereading the table's serialization
7360 // in the root snapshot.
7361 auto isolate_group = d->isolate_group();
7362 if (isolate_group->dispatch_table_snapshot() != nullptr) {
7363 ReadStream stream(isolate_group->dispatch_table_snapshot(),
7364 isolate_group->dispatch_table_snapshot_size());
7365 const GrowableObjectArray& tables = GrowableObjectArray::Handle(
7366 isolate_group->object_store()->instructions_tables());
7367 InstructionsTable& root_table = InstructionsTable::Handle();
7368 root_table ^= tables.At(0);
7369 d->ReadDispatchTable(&stream, /*deferred=*/true, root_table,
7370 deferred_start_index_, deferred_stop_index_);
7371 }
7372 }
7373
7374 void PostLoad(Deserializer* d, const Array& refs) override {
7375 d->EndInstructions();
7376 unit_.set_base_objects(refs);
7377 }
7378
7379 private:
7380 const LoadingUnit& unit_;
7381 intptr_t deferred_start_index_;
7382 intptr_t deferred_stop_index_;
7383};
7384
7385#if defined(DEBUG)
7386static constexpr int32_t kSectionMarker = 0xABAB;
7387#endif
7388
7389Serializer::Serializer(Thread* thread,
7390 Snapshot::Kind kind,
7391 NonStreamingWriteStream* stream,
7392 ImageWriter* image_writer,
7393 bool vm,
7394 V8SnapshotProfileWriter* profile_writer)
7395 : ThreadStackResource(thread),
7396 heap_(thread->isolate_group()->heap()),
7397 zone_(thread->zone()),
7398 kind_(kind),
7399 stream_(stream),
7400 image_writer_(image_writer),
7401 canonical_clusters_by_cid_(nullptr),
7402 clusters_by_cid_(nullptr),
7403 stack_(),
7404 num_cids_(0),
7405 num_tlc_cids_(0),
7406 num_base_objects_(0),
7407 num_written_objects_(0),
7408 next_ref_index_(kFirstReference),
7409 vm_(vm),
7410 profile_writer_(profile_writer)
7411#if defined(SNAPSHOT_BACKTRACE)
7412 ,
7413 current_parent_(Object::null()),
7414 parent_pairs_()
7415#endif
7416#if defined(DART_PRECOMPILER)
7417 ,
7418 deduped_instructions_sources_(zone_)
7419#endif
7420{
7421 num_cids_ = thread->isolate_group()->class_table()->NumCids();
7422 num_tlc_cids_ = thread->isolate_group()->class_table()->NumTopLevelCids();
7423 canonical_clusters_by_cid_ = new SerializationCluster*[num_cids_];
7424 for (intptr_t i = 0; i < num_cids_; i++) {
7425 canonical_clusters_by_cid_[i] = nullptr;
7426 }
7427 clusters_by_cid_ = new SerializationCluster*[num_cids_];
7428 for (intptr_t i = 0; i < num_cids_; i++) {
7429 clusters_by_cid_[i] = nullptr;
7430 }
7431 if (profile_writer_ != nullptr) {
7432 offsets_table_ = new (zone_) OffsetsTable(zone_);
7433 }
7434}
7435
7436Serializer::~Serializer() {
7437 delete[] canonical_clusters_by_cid_;
7438 delete[] clusters_by_cid_;
7439}
7440
7441void Serializer::AddBaseObject(ObjectPtr base_object,
7442 const char* type,
7443 const char* name) {
7444 // Don't assign references to the discarded code.
7445 const bool is_discarded_code = base_object->IsHeapObject() &&
7446 base_object->IsCode() &&
7447 Code::IsDiscarded(Code::RawCast(base_object));
7448 if (!is_discarded_code) {
7449 AssignRef(base_object);
7450 }
7451 num_base_objects_++;
7452
7453 if ((profile_writer_ != nullptr) && (type != nullptr)) {
7454 const auto& profile_id = GetProfileId(base_object);
7455 profile_writer_->SetObjectTypeAndName(profile_id, type, name);
7456 profile_writer_->AddRoot(profile_id);
7457 }
7458}
7459
7460intptr_t Serializer::AssignRef(ObjectPtr object) {
7461 ASSERT(IsAllocatedReference(next_ref_index_));
7462
7463 // The object id weak table holds image offsets for Instructions instead
7464 // of ref indices.
7465 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7466 heap_->SetObjectId(object, next_ref_index_);
7467 ASSERT(heap_->GetObjectId(object) == next_ref_index_);
7468
7469 objects_->Add(&Object::ZoneHandle(object));
7470
7471 return next_ref_index_++;
7472}
7473
7474intptr_t Serializer::AssignArtificialRef(ObjectPtr object) {
7475 const intptr_t ref = -(next_ref_index_++);
7477 if (object != nullptr) {
7478 ASSERT(!object.IsHeapObject() || !object.IsInstructions());
7479 ASSERT(heap_->GetObjectId(object) == kUnreachableReference);
7480 heap_->SetObjectId(object, ref);
7481 ASSERT(heap_->GetObjectId(object) == ref);
7482 }
7483 return ref;
7484}
7485
7486void Serializer::FlushProfile() {
7487 if (profile_writer_ == nullptr) return;
7488 const intptr_t bytes =
7489 stream_->Position() - object_currently_writing_.last_stream_position_;
7490 profile_writer_->AttributeBytesTo(object_currently_writing_.id_, bytes);
7491 object_currently_writing_.last_stream_position_ = stream_->Position();
7492}
7493
7494V8SnapshotProfileWriter::ObjectId Serializer::GetProfileId(
7495 ObjectPtr object) const {
7496 // Instructions are handled separately.
7497 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7498 return GetProfileId(UnsafeRefId(object));
7499}
7500
7501V8SnapshotProfileWriter::ObjectId Serializer::GetProfileId(
7502 intptr_t heap_id) const {
7503 if (IsArtificialReference(heap_id)) {
7504 return {IdSpace::kArtificial, -heap_id};
7505 }
7506 ASSERT(IsAllocatedReference(heap_id));
7507 return {IdSpace::kSnapshot, heap_id};
7508}
7509
7510void Serializer::AttributeReference(
7511 ObjectPtr object,
7512 const V8SnapshotProfileWriter::Reference& reference) {
7513 if (profile_writer_ == nullptr) return;
7514 const auto& object_id = GetProfileId(object);
7515#if defined(DART_PRECOMPILER)
7516 if (object->IsHeapObject() && object->IsWeakSerializationReference()) {
7517 auto const wsr = WeakSerializationReference::RawCast(object);
7518 auto const target = wsr->untag()->target();
7519 const auto& target_id = GetProfileId(target);
7520 if (object_id != target_id) {
7521 const auto& replacement_id = GetProfileId(wsr->untag()->replacement());
7522 ASSERT(object_id == replacement_id);
7523 // The target of the WSR will be replaced in the snapshot, so write
7524 // attributions for both the dropped target and for the replacement.
7525 profile_writer_->AttributeDroppedReferenceTo(
7526 object_currently_writing_.id_, reference, target_id, replacement_id);
7527 return;
7528 }
7529 // The replacement isn't used for this WSR in the snapshot, as either the
7530 // target is strongly referenced or the WSR itself is unreachable, so fall
7531 // through to attributing a reference to the WSR (which shares the profile
7532 // ID of the target).
7533 }
7534#endif
7535 profile_writer_->AttributeReferenceTo(object_currently_writing_.id_,
7536 reference, object_id);
7537}
7538
7539Serializer::WritingObjectScope::WritingObjectScope(
7540 Serializer* serializer,
7541 const V8SnapshotProfileWriter::ObjectId& id,
7542 ObjectPtr object)
7543 : serializer_(serializer),
7544 old_object_(serializer->object_currently_writing_.object_),
7545 old_id_(serializer->object_currently_writing_.id_),
7546 old_cid_(serializer->object_currently_writing_.cid_) {
7547 if (serializer_->profile_writer_ == nullptr) return;
7548 // The ID should correspond to one already added appropriately to the
7549 // profile writer.
7550 ASSERT(serializer_->profile_writer_->HasId(id));
7551 serializer_->FlushProfile();
7552 serializer_->object_currently_writing_.object_ = object;
7553 serializer_->object_currently_writing_.id_ = id;
7554 serializer_->object_currently_writing_.cid_ =
7555 object == nullptr ? -1 : object->GetClassIdMayBeSmi();
7556}
7557
7558Serializer::WritingObjectScope::~WritingObjectScope() {
7559 if (serializer_->profile_writer_ == nullptr) return;
7560 serializer_->FlushProfile();
7561 serializer_->object_currently_writing_.object_ = old_object_;
7562 serializer_->object_currently_writing_.id_ = old_id_;
7563 serializer_->object_currently_writing_.cid_ = old_cid_;
7564}
7565
7566V8SnapshotProfileWriter::ObjectId Serializer::WritingObjectScope::ReserveId(
7567 Serializer* s,
7568 const char* type,
7569 ObjectPtr obj,
7570 const char* name) {
7571 if (s->profile_writer_ == nullptr) {
7572 return V8SnapshotProfileWriter::kArtificialRootId;
7573 }
7574 if (name == nullptr) {
7575 // Handle some cases where there are obvious names to assign.
7576 switch (obj->GetClassIdMayBeSmi()) {
7577 case kSmiCid: {
7578 name = OS::SCreate(s->zone(), "%" Pd "", Smi::Value(Smi::RawCast(obj)));
7579 break;
7580 }
7581 case kMintCid: {
7582 name = OS::SCreate(s->zone(), "%" Pd64 "",
7583 Mint::RawCast(obj)->untag()->value_);
7584 break;
7585 }
7586 case kOneByteStringCid:
7587 case kTwoByteStringCid: {
7588 name = String::ToCString(s->thread(), String::RawCast(obj));
7589 break;
7590 }
7591 }
7592 }
7593 const auto& obj_id = s->GetProfileId(obj);
7594 s->profile_writer_->SetObjectTypeAndName(obj_id, type, name);
7595 return obj_id;
7596}
7597
7598#if !defined(DART_PRECOMPILED_RUNTIME)
7599bool Serializer::CreateArtificialNodeIfNeeded(ObjectPtr obj) {
7600 ASSERT(profile_writer() != nullptr);
7601
7602 // UnsafeRefId will do lazy reference allocation for WSRs.
7603 intptr_t id = UnsafeRefId(obj);
7604 ASSERT(id != kUnallocatedReference);
7605 if (id != kUnreachableReference) {
7606 return IsArtificialReference(id);
7607 }
7608 if (obj->IsHeapObject() && obj->IsWeakSerializationReference()) {
7609 auto const target =
7610 WeakSerializationReference::RawCast(obj)->untag()->target();
7611 CreateArtificialNodeIfNeeded(target);
7612 // Since the WSR is unreachable, we can replace its id with whatever the
7613 // ID of the target is, whether real or artificial.
7614 id = heap_->GetObjectId(target);
7615 heap_->SetObjectId(obj, id);
7616 return IsArtificialReference(id);
7617 }
7618
7619 const char* type = nullptr;
7620 const char* name = nullptr;
7621 GrowableArray<std::pair<ObjectPtr, V8SnapshotProfileWriter::Reference>> links;
7622 const classid_t cid = obj->GetClassIdMayBeSmi();
7623 switch (cid) {
7624 // For profiling static call target tables in AOT mode.
7625 case kSmiCid: {
7626 type = "Smi";
7627 break;
7628 }
7629 // For profiling per-code object pools in bare instructions mode.
7630 case kObjectPoolCid: {
7631 type = "ObjectPool";
7632 auto const pool = ObjectPool::RawCast(obj);
7633 for (intptr_t i = 0; i < pool->untag()->length_; i++) {
7634 uint8_t bits = pool->untag()->entry_bits()[i];
7635 if (ObjectPool::TypeBits::decode(bits) ==
7636 ObjectPool::EntryType::kTaggedObject) {
7637 auto const elem = pool->untag()->data()[i].raw_obj_;
7638 // Elements should be reachable from the global object pool.
7639 ASSERT(HasRef(elem));
7640 links.Add({elem, V8SnapshotProfileWriter::Reference::Element(i)});
7641 }
7642 }
7643 break;
7644 }
7645 // For profiling static call target tables and the dispatch table in AOT.
7646 case kImmutableArrayCid:
7647 case kArrayCid: {
7648 type = "Array";
7649 auto const array = Array::RawCast(obj);
7650 for (intptr_t i = 0, n = Smi::Value(array->untag()->length()); i < n;
7651 i++) {
7652 ObjectPtr elem = array->untag()->element(i);
7653 links.Add({elem, V8SnapshotProfileWriter::Reference::Element(i)});
7654 }
7655 break;
7656 }
7657 // For profiling the dispatch table.
7658 case kCodeCid: {
7659 type = "Code";
7660 auto const code = Code::RawCast(obj);
7661 name = CodeSerializationCluster::MakeDisambiguatedCodeName(this, code);
7662 links.Add({code->untag()->owner(),
7663 V8SnapshotProfileWriter::Reference::Property("owner_")});
7664 break;
7665 }
7666 case kFunctionCid: {
7667 FunctionPtr func = static_cast<FunctionPtr>(obj);
7668 type = "Function";
7669 name = FunctionSerializationCluster::MakeDisambiguatedFunctionName(this,
7670 func);
7671 links.Add({func->untag()->owner(),
7672 V8SnapshotProfileWriter::Reference::Property("owner_")});
7673 ObjectPtr data = func->untag()->data();
7674 if (data->GetClassId() == kClosureDataCid) {
7675 links.Add(
7676 {data, V8SnapshotProfileWriter::Reference::Property("data_")});
7677 }
7678 break;
7679 }
7680 case kClosureDataCid: {
7681 auto data = static_cast<ClosureDataPtr>(obj);
7682 type = "ClosureData";
7683 links.Add(
7684 {data->untag()->parent_function(),
7685 V8SnapshotProfileWriter::Reference::Property("parent_function_")});
7686 break;
7687 }
7688 case kClassCid: {
7689 ClassPtr cls = static_cast<ClassPtr>(obj);
7690 type = "Class";
7691 name = String::ToCString(thread(), cls->untag()->name());
7692 links.Add({cls->untag()->library(),
7693 V8SnapshotProfileWriter::Reference::Property("library_")});
7694 break;
7695 }
7696 case kPatchClassCid: {
7697 PatchClassPtr patch_cls = static_cast<PatchClassPtr>(obj);
7698 type = "PatchClass";
7699 links.Add(
7700 {patch_cls->untag()->wrapped_class(),
7701 V8SnapshotProfileWriter::Reference::Property("wrapped_class_")});
7702 break;
7703 }
7704 case kLibraryCid: {
7705 LibraryPtr lib = static_cast<LibraryPtr>(obj);
7706 type = "Library";
7707 name = String::ToCString(thread(), lib->untag()->url());
7708 break;
7709 }
7710 case kFunctionTypeCid: {
7711 type = "FunctionType";
7712 break;
7713 };
7714 case kRecordTypeCid: {
7715 type = "RecordType";
7716 break;
7717 };
7718 default:
7719 FATAL("Request to create artificial node for object with cid %d", cid);
7720 }
7721
7722 id = AssignArtificialRef(obj);
7723 Serializer::WritingObjectScope scope(this, type, obj, name);
7724 for (const auto& link : links) {
7725 CreateArtificialNodeIfNeeded(link.first);
7726 AttributeReference(link.first, link.second);
7727 }
7728 return true;
7729}
7730#endif // !defined(DART_PRECOMPILED_RUNTIME)
7731
7732intptr_t Serializer::RefId(ObjectPtr object) const {
7733 auto const id = UnsafeRefId(object);
7734 if (IsAllocatedReference(id)) {
7735 return id;
7736 }
7737 ASSERT(id == kUnreachableReference || IsArtificialReference(id));
7739 auto& handle = thread()->ObjectHandle();
7740 handle = object;
7741 FATAL("Reference to unreachable object %s", handle.ToCString());
7742}
7743
7744intptr_t Serializer::UnsafeRefId(ObjectPtr object) const {
7745 // The object id weak table holds image offsets for Instructions instead
7746 // of ref indices.
7747 ASSERT(!object->IsHeapObject() || !object->IsInstructions());
7748 if (!Snapshot::IncludesCode(kind_) &&
7749 object->GetClassIdMayBeSmi() == kCodeCid) {
7750 return RefId(Object::null());
7751 }
7752 auto id = heap_->GetObjectId(object);
7753 if (id != kUnallocatedReference) {
7754 return id;
7755 }
7756 // This is the only case where we may still see unallocated references after
7757 // WriteAlloc is finished.
7758 if (object->IsWeakSerializationReference()) {
7759 // Lazily set the object ID of the WSR to the object which will replace
7760 // it in the snapshot.
7761 auto const wsr = static_cast<WeakSerializationReferencePtr>(object);
7762 // Either the target or the replacement must be allocated, since the
7763 // WSR is reachable.
7764 id = HasRef(wsr->untag()->target()) ? RefId(wsr->untag()->target())
7765 : RefId(wsr->untag()->replacement());
7766 heap_->SetObjectId(wsr, id);
7767 return id;
7768 }
7770 auto& handle = thread()->ObjectHandle();
7771 handle = object;
7772 FATAL("Reference for object %s is unallocated", handle.ToCString());
7773}
7774
7775const char* Serializer::ReadOnlyObjectType(intptr_t cid) {
7776 switch (cid) {
7777 case kPcDescriptorsCid:
7778 return "PcDescriptors";
7779 case kCodeSourceMapCid:
7780 return "CodeSourceMap";
7781 case kCompressedStackMapsCid:
7782 return "CompressedStackMaps";
7783 case kStringCid:
7784 return current_loading_unit_id_ <= LoadingUnit::kRootId
7785 ? "CanonicalString"
7786 : nullptr;
7787 case kOneByteStringCid:
7788 return current_loading_unit_id_ <= LoadingUnit::kRootId
7789 ? "OneByteStringCid"
7790 : nullptr;
7791 case kTwoByteStringCid:
7792 return current_loading_unit_id_ <= LoadingUnit::kRootId
7793 ? "TwoByteStringCid"
7794 : nullptr;
7795 default:
7796 return nullptr;
7797 }
7798}
7799
7800SerializationCluster* Serializer::NewClusterForClass(intptr_t cid,
7801 bool is_canonical) {
7802#if defined(DART_PRECOMPILED_RUNTIME)
7803 UNREACHABLE();
7804 return nullptr;
7805#else
7806 Zone* Z = zone_;
7807 if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
7808 Push(isolate_group()->class_table()->At(cid));
7809 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7810 }
7811 if (IsTypedDataViewClassId(cid)) {
7812 return new (Z) TypedDataViewSerializationCluster(cid);
7813 }
7814 if (IsExternalTypedDataClassId(cid)) {
7815 return new (Z) ExternalTypedDataSerializationCluster(cid);
7816 }
7817 if (IsTypedDataClassId(cid)) {
7818 return new (Z) TypedDataSerializationCluster(cid);
7819 }
7820
7821#if !defined(DART_COMPRESSED_POINTERS)
7822 // Sometimes we write memory images for read-only objects that contain no
7823 // pointers. These can be mmapped directly, needing no relocation, and added
7824 // to the list of heap pages. This gives us lazy/demand paging from the OS.
7825 // We do not do this for snapshots without code to keep snapshots portable
7826 // between machines with different word sizes. We do not do this when we use
7827 // compressed pointers because we cannot always control the load address of
7828 // the memory image, and it might be outside the 4GB region addressable by
7829 // compressed pointers.
7830 if (Snapshot::IncludesCode(kind_)) {
7831 if (auto const type = ReadOnlyObjectType(cid)) {
7832 return new (Z) RODataSerializationCluster(Z, type, cid, is_canonical);
7833 }
7834 }
7835#endif
7836
7837 const bool cluster_represents_canonical_set =
7838 current_loading_unit_id_ <= LoadingUnit::kRootId && is_canonical;
7839
7840 switch (cid) {
7841 case kClassCid:
7842 return new (Z) ClassSerializationCluster(num_cids_ + num_tlc_cids_);
7843 case kTypeParametersCid:
7844 return new (Z) TypeParametersSerializationCluster();
7845 case kTypeArgumentsCid:
7846 return new (Z) TypeArgumentsSerializationCluster(
7847 is_canonical, cluster_represents_canonical_set);
7848 case kPatchClassCid:
7849 return new (Z) PatchClassSerializationCluster();
7850 case kFunctionCid:
7851 return new (Z) FunctionSerializationCluster();
7852 case kClosureDataCid:
7853 return new (Z) ClosureDataSerializationCluster();
7854 case kFfiTrampolineDataCid:
7855 return new (Z) FfiTrampolineDataSerializationCluster();
7856 case kFieldCid:
7857 return new (Z) FieldSerializationCluster();
7858 case kScriptCid:
7859 return new (Z) ScriptSerializationCluster();
7860 case kLibraryCid:
7861 return new (Z) LibrarySerializationCluster();
7862 case kNamespaceCid:
7863 return new (Z) NamespaceSerializationCluster();
7864 case kKernelProgramInfoCid:
7865 return new (Z) KernelProgramInfoSerializationCluster();
7866 case kCodeCid:
7867 return new (Z) CodeSerializationCluster(heap_);
7868 case kObjectPoolCid:
7869 return new (Z) ObjectPoolSerializationCluster();
7870 case kPcDescriptorsCid:
7871 return new (Z) PcDescriptorsSerializationCluster();
7872 case kCodeSourceMapCid:
7873 return new (Z) CodeSourceMapSerializationCluster();
7874 case kCompressedStackMapsCid:
7875 return new (Z) CompressedStackMapsSerializationCluster();
7876 case kExceptionHandlersCid:
7877 return new (Z) ExceptionHandlersSerializationCluster();
7878 case kContextCid:
7879 return new (Z) ContextSerializationCluster();
7880 case kContextScopeCid:
7881 return new (Z) ContextScopeSerializationCluster();
7882 case kUnlinkedCallCid:
7883 return new (Z) UnlinkedCallSerializationCluster();
7884 case kICDataCid:
7885 return new (Z) ICDataSerializationCluster();
7886 case kMegamorphicCacheCid:
7887 return new (Z) MegamorphicCacheSerializationCluster();
7888 case kSubtypeTestCacheCid:
7889 return new (Z) SubtypeTestCacheSerializationCluster();
7890 case kLoadingUnitCid:
7891 return new (Z) LoadingUnitSerializationCluster();
7892 case kLanguageErrorCid:
7893 return new (Z) LanguageErrorSerializationCluster();
7894 case kUnhandledExceptionCid:
7895 return new (Z) UnhandledExceptionSerializationCluster();
7896 case kLibraryPrefixCid:
7897 return new (Z) LibraryPrefixSerializationCluster();
7898 case kTypeCid:
7899 return new (Z) TypeSerializationCluster(is_canonical,
7900 cluster_represents_canonical_set);
7901 case kFunctionTypeCid:
7902 return new (Z) FunctionTypeSerializationCluster(
7903 is_canonical, cluster_represents_canonical_set);
7904 case kRecordTypeCid:
7905 return new (Z) RecordTypeSerializationCluster(
7906 is_canonical, cluster_represents_canonical_set);
7907 case kTypeParameterCid:
7908 return new (Z) TypeParameterSerializationCluster(
7909 is_canonical, cluster_represents_canonical_set);
7910 case kClosureCid:
7911 return new (Z) ClosureSerializationCluster(is_canonical);
7912 case kMintCid:
7913 return new (Z) MintSerializationCluster(is_canonical);
7914 case kDoubleCid:
7915 return new (Z) DoubleSerializationCluster(is_canonical);
7916 case kInt32x4Cid:
7917 case kFloat32x4Cid:
7918 case kFloat64x2Cid:
7919 return new (Z) Simd128SerializationCluster(cid, is_canonical);
7920 case kGrowableObjectArrayCid:
7921 return new (Z) GrowableObjectArraySerializationCluster();
7922 case kRecordCid:
7923 return new (Z) RecordSerializationCluster(is_canonical);
7924 case kStackTraceCid:
7925 return new (Z) StackTraceSerializationCluster();
7926 case kRegExpCid:
7927 return new (Z) RegExpSerializationCluster();
7928 case kWeakPropertyCid:
7929 return new (Z) WeakPropertySerializationCluster();
7930 case kMapCid:
7931 // We do not have mutable hash maps in snapshots.
7932 UNREACHABLE();
7933 case kConstMapCid:
7934 return new (Z) MapSerializationCluster(is_canonical, kConstMapCid);
7935 case kSetCid:
7936 // We do not have mutable hash sets in snapshots.
7937 UNREACHABLE();
7938 case kConstSetCid:
7939 return new (Z) SetSerializationCluster(is_canonical, kConstSetCid);
7940 case kArrayCid:
7941 return new (Z) ArraySerializationCluster(is_canonical, kArrayCid);
7942 case kImmutableArrayCid:
7943 return new (Z)
7944 ArraySerializationCluster(is_canonical, kImmutableArrayCid);
7945 case kWeakArrayCid:
7946 return new (Z) WeakArraySerializationCluster();
7947 case kStringCid:
7948 return new (Z) StringSerializationCluster(
7949 is_canonical, cluster_represents_canonical_set && !vm_);
7950#define CASE_FFI_CID(name) case kFfi##name##Cid:
7951 CLASS_LIST_FFI_TYPE_MARKER(CASE_FFI_CID)
7952#undef CASE_FFI_CID
7953 return new (Z) InstanceSerializationCluster(is_canonical, cid);
7954 case kDeltaEncodedTypedDataCid:
7955 return new (Z) DeltaEncodedTypedDataSerializationCluster();
7956 case kWeakSerializationReferenceCid:
7957#if defined(DART_PRECOMPILER)
7958 ASSERT(kind_ == Snapshot::kFullAOT);
7959 return new (Z) WeakSerializationReferenceSerializationCluster();
7960#endif
7961 default:
7962 break;
7963 }
7964
7965 // The caller will check for nullptr and provide an error with more context
7966 // than is available here.
7967 return nullptr;
7968#endif // !DART_PRECOMPILED_RUNTIME
7969}
7970
7971bool Serializer::InCurrentLoadingUnitOrRoot(ObjectPtr obj) {
7972 if (loading_units_ == nullptr) return true;
7973
7974 intptr_t unit_id = heap_->GetLoadingUnit(obj);
7975 if (unit_id == WeakTable::kNoValue) {
7976 FATAL("Missing loading unit assignment: %s\n",
7977 Object::Handle(obj).ToCString());
7978 }
7979 return unit_id == LoadingUnit::kRootId || unit_id == current_loading_unit_id_;
7980}
7981
7982void Serializer::RecordDeferredCode(CodePtr code) {
7983 const intptr_t unit_id = heap_->GetLoadingUnit(code);
7984 ASSERT(unit_id != WeakTable::kNoValue && unit_id != LoadingUnit::kRootId);
7985 (*loading_units_)[unit_id]->AddDeferredObject(code);
7986}
7987
7988#if !defined(DART_PRECOMPILED_RUNTIME)
7989#if defined(DART_PRECOMPILER)
7990// We use the following encoding schemes when encoding references to Code
7991// objects.
7992//
7993// In AOT mode:
7994//
7995// 0 -- LazyCompile stub
7996// 1 -+
7997// | for non-root-unit/non-VM snapshots
7998// ... > reference into parent snapshot objects
7999// | (base is num_base_objects_ in this case, 0 otherwise).
8000// base -+
8001// base + 1 -+
8002// | for non-deferred Code objects (those with instructions)
8003 // > index into the instructions table (code_index_).
8004// | (L is code_index_.Length()).
8005// base + L -+
8006// ... -+
8007// | for deferred Code objects (those without instructions)
8008// > index of this Code object in the deferred part of the
8009// | Code cluster.
8010//
8011// Note that this encoding has the following property: non-discarded
8012// non-deferred Code objects form the tail of the instruction table
8013// which makes indices assigned to non-discarded non-deferred Code objects
8014// and deferred Code objects continuous. This means when decoding
8015// code_index - (base + 1) - first_entry_with_code yields an index of the
8016// Code object in the Code cluster both for non-deferred and deferred
8017// Code objects.
8018//
8019// For JIT snapshots we do:
8020//
8021// 0 -- LazyCompile stub
8022// 1 -+
8023// |
8024// ... > index of the Code object in the Code cluster.
8025// |
8026//
8027intptr_t Serializer::GetCodeIndex(CodePtr code) {
8028 // In precompiled mode a Code object is uniquely identified by its
8029 // instructions (because ProgramVisitor::DedupInstructions will dedup Code
8030 // objects with the same instructions).
8031 if (code == StubCode::LazyCompile().ptr() && !vm_) {
8032 return 0;
8033 } else if (FLAG_precompiled_mode) {
8034 const intptr_t ref = heap_->GetObjectId(code);
8035 ASSERT(!IsReachableReference(ref) == Code::IsDiscarded(code));
8036
8037 const intptr_t base =
8038 (vm_ || current_loading_unit_id() == LoadingUnit::kRootId)
8039 ? 0
8040 : num_base_objects_;
8041
8042 // Check if we are referring to a Code object that originates from the
8043 // parent loading unit. In this case we write out the reference to this
8044 // object.
8045 if (!Code::IsDiscarded(code) && ref < base) {
8046 RELEASE_ASSERT(current_loading_unit_id() != LoadingUnit::kRootId);
8047 return 1 + ref;
8048 }
8049
8050 // Otherwise the code object must either be discarded or originate from
8051 // the Code cluster.
8052 ASSERT(Code::IsDiscarded(code) || (code_cluster_->first_ref() <= ref &&
8053 ref <= code_cluster_->last_ref()));
8054
8055 // If the Code object is non-deferred then simply write out the index of its
8056 // entry point, otherwise write out the index of the deferred code object.
8057 if (ref < code_cluster_->first_deferred_ref()) {
8058 const intptr_t key = static_cast<intptr_t>(code->untag()->instructions_);
8059 ASSERT(code_index_.HasKey(key));
8060 const intptr_t result = code_index_.Lookup(key);
8061 ASSERT(0 < result && result <= code_index_.Length());
8062 // Note: result already has + 1.
8063 return base + result;
8064 } else {
8065 // Note: only the root snapshot can have deferred Code objects in the
8066 // cluster.
8067 const intptr_t cluster_index = ref - code_cluster_->first_deferred_ref();
8068 return 1 + base + code_index_.Length() + cluster_index;
8069 }
8070 } else {
8071 const intptr_t ref = heap_->GetObjectId(code);
8073 ASSERT(code_cluster_->first_ref() <= ref &&
8074 ref <= code_cluster_->last_ref());
8075 return 1 + (ref - code_cluster_->first_ref());
8076 }
8077}
8078#endif // defined(DART_PRECOMPILER)
8079
8080void Serializer::PrepareInstructions(
8081 const CompressedStackMaps& canonical_stack_map_entries) {
8082 if (!Snapshot::IncludesCode(kind())) return;
8083
8084 // Code objects that have identical/duplicate instructions must be adjacent in
8085 // the order that Code objects are written because the encoding of the
8086 // reference from the Code to the Instructions assumes monotonically
8087 // increasing offsets as part of a delta encoding. Also the code order table
8088 // that allows for mapping return addresses back to Code objects depends on
8089 // this sorting.
8090 if (code_cluster_ != nullptr) {
8091 CodeSerializationCluster::Sort(this, code_cluster_->objects());
8092 }
8093 if ((loading_units_ != nullptr) &&
8094 (current_loading_unit_id_ == LoadingUnit::kRootId)) {
8095 for (intptr_t i = LoadingUnit::kRootId + 1; i < loading_units_->length();
8096 i++) {
8097 auto unit_objects = loading_units_->At(i)->deferred_objects();
8098 CodeSerializationCluster::Sort(this, unit_objects);
8099 ASSERT(unit_objects->length() == 0 || code_cluster_ != nullptr);
8100 for (intptr_t j = 0; j < unit_objects->length(); j++) {
8101 code_cluster_->deferred_objects()->Add(unit_objects->At(j)->ptr());
8102 }
8103 }
8104 }
8105
8106#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8107 if (kind() == Snapshot::kFullAOT) {
8108 // Group the code objects whose instructions are not being deferred in this
8109 // snapshot unit in the order they will be written: first the code objects
8110 // encountered for the first time in this unit, which are written by the
8111 // CodeSerializationCluster, then code objects previously deferred whose
8112 // instructions are now written by UnitSerializationRoots. This order needs
8113 // to be known to finalize bare-instructions-mode's PC-relative calls.
8114 GrowableArray<CodePtr> code_objects;
8115 if (code_cluster_ != nullptr) {
8116 auto in = code_cluster_->objects();
8117 for (intptr_t i = 0; i < in->length(); i++) {
8118 code_objects.Add(in->At(i));
8119 }
8120 }
8121 if (loading_units_ != nullptr) {
8122 auto in =
8123 loading_units_->At(current_loading_unit_id_)->deferred_objects();
8124 for (intptr_t i = 0; i < in->length(); i++) {
8125 code_objects.Add(in->At(i)->ptr());
8126 }
8127 }
8128
8129 GrowableArray<ImageWriterCommand> writer_commands;
8130 RelocateCodeObjects(vm_, &code_objects, &writer_commands);
8131 image_writer_->PrepareForSerialization(&writer_commands);
8132
8133 if (code_objects.length() == 0) {
8134 return;
8135 }
8136
8137 // Build UntaggedInstructionsTable::Data object to be added to the
8138 // read-only data section of the snapshot. It contains:
8139 //
8140 // - a binary search table mapping an Instructions entry point to its
8141 // stack maps (by offset from the beginning of the Data object);
8142 // - followed by stack maps bytes;
8143 // - followed by canonical stack map entries.
8144 //
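// Illustrative layout sketch (hypothetical counts, not actual sizes): with
// total = 3 Instructions entries and two distinct stack maps A (used twice)
// and B (used once), the Data object is written roughly as
//   [Data header][DataEntry 0][DataEntry 1][DataEntry 2][A][B][canonical]
// where each DataEntry pairs the text offset of its Instructions with the
// offset of its stack map (A or B) inside this Data object, and the header
// records the offset of the canonical stack map entries appended at the end.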
8145 struct StackMapInfo : public ZoneAllocated {
8146 CompressedStackMapsPtr map;
8147 intptr_t use_count;
8148 uint32_t offset;
8149 };
8150
8151 GrowableArray<StackMapInfo*> stack_maps;
8152 IntMap<StackMapInfo*> stack_maps_info;
8153
8154 // Build code_index_ (which maps each Instructions object to the order in
8155 // which it appears in the final code section) and collect all
8156 // stack maps.
8157 // We also find the first Instructions object which is going to have a
8158 // Code object associated with it. This allows us to reduce the binary
8159 // search space when searching specifically for a Code object at runtime.
8160 uint32_t total = 0;
8161 intptr_t not_discarded_count = 0;
8162 uint32_t first_entry_with_code = 0;
8163 for (auto& cmd : writer_commands) {
8164 if (cmd.op == ImageWriterCommand::InsertInstructionOfCode) {
8165 RELEASE_ASSERT(code_objects[total] ==
8166 cmd.insert_instruction_of_code.code);
8167 ASSERT(!Code::IsDiscarded(cmd.insert_instruction_of_code.code) ||
8168 (not_discarded_count == 0));
8169 if (!Code::IsDiscarded(cmd.insert_instruction_of_code.code)) {
8170 if (not_discarded_count == 0) {
8171 first_entry_with_code = total;
8172 }
8173 not_discarded_count++;
8174 }
8175 total++;
8176
8177 // Update code_index_.
8178 {
8179 const intptr_t instr = static_cast<intptr_t>(
8180 cmd.insert_instruction_of_code.code->untag()->instructions_);
8181 ASSERT(!code_index_.HasKey(instr));
8182 code_index_.Insert(instr, total);
8183 }
8184
8185 // Collect stack maps.
8186 CompressedStackMapsPtr stack_map =
8187 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8188 const intptr_t key = static_cast<intptr_t>(stack_map);
8189
8190 if (stack_maps_info.HasKey(key)) {
8191 stack_maps_info.Lookup(key)->use_count++;
8192 } else {
8193 auto info = new StackMapInfo();
8194 info->map = stack_map;
8195 info->use_count = 1;
8196 stack_maps.Add(info);
8197 stack_maps_info.Insert(key, info);
8198 }
8199 }
8200 }
8201 ASSERT(static_cast<intptr_t>(total) == code_index_.Length());
8202 instructions_table_len_ = not_discarded_count;
8203
8204 // Sort stack maps by usage so that most commonly used stack maps are
8205 // together at the start of the Data object.
8206 stack_maps.Sort([](StackMapInfo* const* a, StackMapInfo* const* b) {
8207 if ((*a)->use_count < (*b)->use_count) return 1;
8208 if ((*a)->use_count > (*b)->use_count) return -1;
8209 return 0;
8210 });
8211
8212 // Build Data object.
8213 MallocWriteStream pc_mapping(4 * KB);
8214
8215 // Write the header out.
8216 {
8217 UntaggedInstructionsTable::Data header;
8218 memset(&header, 0, sizeof(header));
8219 header.length = total;
8220 header.first_entry_with_code = first_entry_with_code;
8221 pc_mapping.WriteFixed<UntaggedInstructionsTable::Data>(header);
8222 }
8223
8224 // Reserve space for the binary search table.
8225 for (auto& cmd : writer_commands) {
8226 if (cmd.op == ImageWriterCommand::InsertInstructionOfCode) {
8227 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>({0, 0});
8228 }
8229 }
8230
8231 // Now write collected stack maps after the binary search table.
8232 auto write_stack_map = [&](CompressedStackMapsPtr smap) {
8233 const auto flags_and_size = smap->untag()->payload()->flags_and_size();
8234 const auto payload_size =
8235 UntaggedCompressedStackMaps::SizeField::decode(flags_and_size);
8236 pc_mapping.WriteFixed<uint32_t>(flags_and_size);
8237 pc_mapping.WriteBytes(smap->untag()->payload()->data(), payload_size);
8238 };
8239
8240 for (auto sm : stack_maps) {
8241 sm->offset = pc_mapping.bytes_written();
8242 write_stack_map(sm->map);
8243 }
8244
8245 // Write canonical entries (if any).
8246 if (!canonical_stack_map_entries.IsNull()) {
8247 auto header = reinterpret_cast<UntaggedInstructionsTable::Data*>(
8248 pc_mapping.buffer());
8249 header->canonical_stack_map_entries_offset = pc_mapping.bytes_written();
8250 write_stack_map(canonical_stack_map_entries.ptr());
8251 }
8252 const auto total_bytes = pc_mapping.bytes_written();
8253
8254 // Now that we have offsets to all stack maps we can write the binary
8255 // search table.
8256 pc_mapping.SetPosition(
8257 sizeof(UntaggedInstructionsTable::Data)); // Skip the header.
8258 for (auto& cmd : writer_commands) {
8259 if (cmd.op == ImageWriterCommand::InsertInstructionOfCode) {
8260 CompressedStackMapsPtr smap =
8261 cmd.insert_instruction_of_code.code->untag()->compressed_stackmaps_;
8262 const auto offset =
8263 stack_maps_info.Lookup(static_cast<intptr_t>(smap))->offset;
8264 const auto entry = image_writer_->GetTextOffsetFor(
8265 Code::InstructionsOf(cmd.insert_instruction_of_code.code),
8266 cmd.insert_instruction_of_code.code);
8267
8268 pc_mapping.WriteFixed<UntaggedInstructionsTable::DataEntry>(
8269 {static_cast<uint32_t>(entry), offset});
8270 }
8271 }
8272 // Restore position so that Steal does not truncate the buffer.
8273 pc_mapping.SetPosition(total_bytes);
8274
8275 intptr_t length = 0;
8276 uint8_t* bytes = pc_mapping.Steal(&length);
8277
8278 instructions_table_rodata_offset_ =
8279 image_writer_->AddBytesToData(bytes, length);
8280 // Attribute all bytes in this object to the root for simplicity.
8281 if (profile_writer_ != nullptr) {
8282 const auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8283 profile_writer_->AttributeReferenceTo(
8284 V8SnapshotProfileWriter::kArtificialRootId,
8285 V8SnapshotProfileWriter::Reference::Property(
8286 "<instructions-table-rodata>"),
8287 {offset_space, instructions_table_rodata_offset_});
8288 }
8289 }
8290#endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
8291}
8292
8293void Serializer::WriteInstructions(InstructionsPtr instr,
8294 uint32_t unchecked_offset,
8295 CodePtr code,
8296 bool deferred) {
8297 ASSERT(code != Code::null());
8298
8299 ASSERT(InCurrentLoadingUnitOrRoot(code) != deferred);
8300 if (deferred) {
8301 return;
8302 }
8303
8304 const intptr_t offset = image_writer_->GetTextOffsetFor(instr, code);
8305#if defined(DART_PRECOMPILER)
8306 if (profile_writer_ != nullptr) {
8307 ASSERT(object_currently_writing_.id_ !=
8308 V8SnapshotProfileWriter::kArtificialRootId);
8309 const auto offset_space = vm_ ? IdSpace::kVmText : IdSpace::kIsolateText;
8310 profile_writer_->AttributeReferenceTo(
8311 object_currently_writing_.id_,
8312 V8SnapshotProfileWriter::Reference::Property("<instructions>"),
8313 {offset_space, offset});
8314 }
8315
8316 if (Code::IsDiscarded(code)) {
8317 // Discarded Code objects are not supported in the vm isolate snapshot.
8318 ASSERT(!vm_);
8319 return;
8320 }
8321
8322 if (FLAG_precompiled_mode) {
8323 const uint32_t payload_info =
8324 (unchecked_offset << 1) | (Code::HasMonomorphicEntry(code) ? 0x1 : 0x0);
8325 WriteUnsigned(payload_info);
8326 return;
8327 }
8328#endif
8329 Write<uint32_t>(offset);
8330 WriteUnsigned(unchecked_offset);
8331}
8332
8333void Serializer::TraceDataOffset(uint32_t offset) {
8334 if (profile_writer_ == nullptr) return;
8335 // ROData cannot be roots.
8336 ASSERT(object_currently_writing_.id_ !=
8337 V8SnapshotProfileWriter::kArtificialRootId);
8338 auto offset_space = vm_ ? IdSpace::kVmData : IdSpace::kIsolateData;
8339 // TODO(sjindel): Give this edge a more appropriate type than element
8340 // (internal, maybe?).
8341 profile_writer_->AttributeReferenceTo(
8342 object_currently_writing_.id_,
8343 V8SnapshotProfileWriter::Reference::Element(0), {offset_space, offset});
8344}
8345
8346uint32_t Serializer::GetDataOffset(ObjectPtr object) const {
8347#if defined(SNAPSHOT_BACKTRACE)
8348 return image_writer_->GetDataOffsetFor(object, ParentOf(object));
8349#else
8350 return image_writer_->GetDataOffsetFor(object);
8351#endif
8352}
8353
8354intptr_t Serializer::GetDataSize() const {
8355 if (image_writer_ == nullptr) {
8356 return 0;
8357 }
8358 return image_writer_->data_size();
8359}
8360#endif // !defined(DART_PRECOMPILED_RUNTIME)
8361
8362void Serializer::Push(ObjectPtr object, intptr_t cid_override) {
8363 const bool is_code = object->IsHeapObject() && object->IsCode();
8364 if (is_code && !Snapshot::IncludesCode(kind_)) {
8365 return; // Do not trace, will write null.
8366 }
8367
8368 intptr_t id = heap_->GetObjectId(object);
8369 if (id == kUnreachableReference) {
8370 // When discovering the transitive closure of objects reachable from the
8371 // roots we do not trace references, e.g. inside [RawCode], to
8372 // [RawInstructions], since [RawInstructions] doesn't contain any references
8373 // and the serialization code uses an [ImageWriter] for those.
8374 if (object->IsHeapObject() && object->IsInstructions()) {
8375 UnexpectedObject(object,
8376 "Instructions should only be reachable from Code");
8377 }
8378
8379 heap_->SetObjectId(object, kUnallocatedReference);
8380 ASSERT(IsReachableReference(heap_->GetObjectId(object)));
8381 stack_.Add({object, cid_override});
8382 if (!(is_code && Code::IsDiscarded(Code::RawCast(object)))) {
8383 num_written_objects_++;
8384 }
8385#if defined(SNAPSHOT_BACKTRACE)
8386 parent_pairs_.Add(&Object::Handle(zone_, object));
8387 parent_pairs_.Add(&Object::Handle(zone_, current_parent_));
8388#endif
8389 }
8390}
8391
8392void Serializer::PushWeak(ObjectPtr object) {
8393 // The GC considers immediate objects to always be alive. This doesn't happen
8394 // automatically in the serializer because the serializer does not have
8395 // immediate objects: it handles Smis as ref indices like all other objects.
8396 // This visit causes the serializer to reproduce the GC's semantics for
8397 // weakness, which in particular allows the templates in hash_table.h to work
8398 // with weak arrays because the metadata Smis always survive.
8399 if (!object->IsHeapObject() || vm_) {
8400 Push(object);
8401 }
8402}
8403
8404void Serializer::Trace(ObjectPtr object, intptr_t cid_override) {
8405 intptr_t cid;
8406 bool is_canonical;
8407 if (!object->IsHeapObject()) {
8408 // Smis are merged into the Mint cluster because Smis for the writer might
8409 // become Mints for the reader and vice versa.
8410 cid = kMintCid;
8411 is_canonical = true;
8412 } else {
8413 cid = object->GetClassId();
8414 is_canonical = object->untag()->IsCanonical();
8415 }
8416 if (cid_override != kIllegalCid) {
8417 cid = cid_override;
8418 } else if (IsStringClassId(cid)) {
8419 cid = kStringCid;
8420 }
8421
8422 SerializationCluster** cluster_ref =
8423 is_canonical ? &canonical_clusters_by_cid_[cid] : &clusters_by_cid_[cid];
8424 if (*cluster_ref == nullptr) {
8425 *cluster_ref = NewClusterForClass(cid, is_canonical);
8426 if (*cluster_ref == nullptr) {
8427 UnexpectedObject(object, "No serialization cluster defined");
8428 }
8429 }
8430 SerializationCluster* cluster = *cluster_ref;
8431 ASSERT(cluster != nullptr);
8432 if (cluster->is_canonical() != is_canonical) {
8433 FATAL("cluster for %s (cid %" Pd ") %s as canonical, but %s",
8434 cluster->name(), cid,
8435 cluster->is_canonical() ? "marked" : "not marked",
8436 is_canonical ? "should be" : "should not be");
8437 }
8438
8439#if defined(SNAPSHOT_BACKTRACE)
8440 current_parent_ = object;
8441#endif
8442
8443 cluster->Trace(this, object);
8444
8445#if defined(SNAPSHOT_BACKTRACE)
8446 current_parent_ = Object::null();
8447#endif
8448}
8449
8450void Serializer::UnexpectedObject(ObjectPtr raw_object, const char* message) {
8451 // Exit the no safepoint scope so we can allocate while printing.
8452 while (thread()->no_safepoint_scope_depth() > 0) {
8453 thread()->DecrementNoSafepointScopeDepth();
8454 }
8455 Object& object = Object::Handle(raw_object);
8456 OS::PrintErr("Unexpected object (%s, %s): 0x%" Px " %s\n", message,
8457 Snapshot::KindToCString(kind_), static_cast<uword>(object.ptr()),
8458 object.ToCString());
8459#if defined(SNAPSHOT_BACKTRACE)
8460 while (!object.IsNull()) {
8461 object = ParentOf(object);
8462 OS::PrintErr("referenced by 0x%" Px " %s\n",
8463 static_cast<uword>(object.ptr()), object.ToCString());
8464 }
8465#endif
8466 OS::Abort();
8467}
8468
8469#if defined(SNAPSHOT_BACKTRACE)
8470ObjectPtr Serializer::ParentOf(ObjectPtr object) const {
8471 for (intptr_t i = 0; i < parent_pairs_.length(); i += 2) {
8472 if (parent_pairs_[i]->ptr() == object) {
8473 return parent_pairs_[i + 1]->ptr();
8474 }
8475 }
8476 return Object::null();
8477}
8478
8479ObjectPtr Serializer::ParentOf(const Object& object) const {
8480 for (intptr_t i = 0; i < parent_pairs_.length(); i += 2) {
8481 if (parent_pairs_[i]->ptr() == object.ptr()) {
8482 return parent_pairs_[i + 1]->ptr();
8483 }
8484 }
8485 return Object::null();
8486}
8487#endif // SNAPSHOT_BACKTRACE
8488
8489void Serializer::WriteVersionAndFeatures(bool is_vm_snapshot) {
8490 const char* expected_version = Version::SnapshotString();
8491 ASSERT(expected_version != nullptr);
8492 const intptr_t version_len = strlen(expected_version);
8493 WriteBytes(reinterpret_cast<const uint8_t*>(expected_version), version_len);
8494
8495 char* expected_features =
8496 Dart::FeaturesString(IsolateGroup::Current(), is_vm_snapshot, kind_);
8497 ASSERT(expected_features != nullptr);
8498 const intptr_t features_len = strlen(expected_features);
8499 WriteBytes(reinterpret_cast<const uint8_t*>(expected_features),
8500 features_len + 1);
8501 free(expected_features);
8502}
8503
8504#if !defined(DART_PRECOMPILED_RUNTIME)
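// Comparator used by PrintSnapshotSizes below to sort clusters by descending
// serialized size.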
8505static int CompareClusters(SerializationCluster* const* a,
8506 SerializationCluster* const* b) {
8507 if ((*a)->size() > (*b)->size()) {
8508 return -1;
8509 } else if ((*a)->size() < (*b)->size()) {
8510 return 1;
8511 } else {
8512 return 0;
8513 }
8514}
8515
8516#define CID_CLUSTER(Type) \
8517 reinterpret_cast<Type##SerializationCluster*>(clusters_by_cid_[k##Type##Cid])
8518
8519const CompressedStackMaps& SerializationRoots::canonicalized_stack_map_entries()
8520 const {
8521 return CompressedStackMaps::Handle();
8522}
8523
8524ZoneGrowableArray<Object*>* Serializer::Serialize(SerializationRoots* roots) {
8525 // While object_currently_writing_ is initialized to the artificial root, we
8526 // set up a scope to ensure proper flushing to the profile.
8527 Serializer::WritingObjectScope scope(
8528 this, V8SnapshotProfileWriter::kArtificialRootId);
8529 roots->AddBaseObjects(this);
8530
8531 NoSafepointScope no_safepoint;
8532
8533 roots->PushRoots(this);
8534
8535 // Resolving WeakSerializationReferences and WeakProperties may cause new
8536 // objects to be pushed on the stack, and handling the changes to the stack
8537 // may cause the targets of WeakSerializationReferences and keys of
8538 // WeakProperties to become reachable, so we do this as a fixed point
8539 // computation. Note that reachability is computed monotonically (an object
8540 // can change from not reachable to reachable, but never the reverse), which
8541 // is technically a conservative approximation for WSRs, but doing a strict
8542 // analysis that allows non-monotonic reachability may not halt.
8543 //
8544 // To see this, take a WSR whose replacement causes the target of another WSR
8545 // to become reachable, which then causes the target of the first WSR to
8546 // become reachable, but the only way to reach the target is through the
8547 // target of the second WSR, which was only reachable via the replacement
8548 // of the first.
8549 //
8550 // In practice, this case doesn't come up as replacements tend to be either
8551 // null, smis, or singleton objects that do not contain WSRs currently.
8552 while (stack_.length() > 0) {
8553 // Strong references.
8554 while (stack_.length() > 0) {
8555 StackEntry entry = stack_.RemoveLast();
8556 Trace(entry.obj, entry.cid_override);
8557 }
8558
8559 // Ephemeron references.
8560#if defined(DART_PRECOMPILER)
8561 if (auto const cluster = CID_CLUSTER(WeakSerializationReference)) {
8562 cluster->RetraceEphemerons(this);
8563 }
8564#endif
8565 if (auto const cluster = CID_CLUSTER(WeakProperty)) {
8566 cluster->RetraceEphemerons(this);
8567 }
8568 }
8569
8570#if defined(DART_PRECOMPILER)
8571 auto const wsr_cluster = CID_CLUSTER(WeakSerializationReference);
8572 if (wsr_cluster != nullptr) {
8573 // Now that we have computed the reachability fixpoint, we remove the
8574 // count of now-reachable WSRs as they are not actually serialized.
8575 num_written_objects_ -= wsr_cluster->Count(this);
8576 // We don't need to write this cluster, so remove it from consideration.
8577 clusters_by_cid_[kWeakSerializationReferenceCid] = nullptr;
8578 }
8579 ASSERT(clusters_by_cid_[kWeakSerializationReferenceCid] == nullptr);
8580#endif
8581
8582 code_cluster_ = CID_CLUSTER(Code);
8583
8584 GrowableArray<SerializationCluster*> clusters;
8585 // The order in which PostLoad runs matters for some classes because of
8586 // assumptions made during canonicalization, read filling, or post-load
8587 // filling about what has already been read and/or canonicalized.
8588 // Explicitly add these clusters first, then add the rest ordered by class id.
8589#define ADD_CANONICAL_NEXT(cid) \
8590 if (auto const cluster = canonical_clusters_by_cid_[cid]) { \
8591 clusters.Add(cluster); \
8592 canonical_clusters_by_cid_[cid] = nullptr; \
8593 }
8594#define ADD_NON_CANONICAL_NEXT(cid) \
8595 if (auto const cluster = clusters_by_cid_[cid]) { \
8596 clusters.Add(cluster); \
8597 clusters_by_cid_[cid] = nullptr; \
8598 }
8599 ADD_CANONICAL_NEXT(kOneByteStringCid)
8600 ADD_CANONICAL_NEXT(kTwoByteStringCid)
8601 ADD_CANONICAL_NEXT(kStringCid)
8602 ADD_CANONICAL_NEXT(kMintCid)
8603 ADD_CANONICAL_NEXT(kDoubleCid)
8604 ADD_CANONICAL_NEXT(kTypeParameterCid)
8605 ADD_CANONICAL_NEXT(kTypeCid)
8606 ADD_CANONICAL_NEXT(kTypeArgumentsCid)
8607 // Code cluster should be deserialized before Function as
8608 // FunctionDeserializationCluster::ReadFill uses instructions table
8609 // which is filled in CodeDeserializationCluster::ReadFill.
8610 // Code cluster should also precede ObjectPool as its ReadFill uses
8611 // entry points of stubs.
8612 ADD_NON_CANONICAL_NEXT(kCodeCid)
8613 // The function cluster should be deserialized before any closures, as
8614 // PostLoad for closures caches the entry point found in the function.
8615 ADD_NON_CANONICAL_NEXT(kFunctionCid)
8616 ADD_CANONICAL_NEXT(kClosureCid)
8617#undef ADD_CANONICAL_NEXT
8618#undef ADD_NON_CANONICAL_NEXT
8619 const intptr_t out_of_order_clusters = clusters.length();
8620 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8621 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8622 clusters.Add(cluster);
8623 }
8624 }
8625 for (intptr_t cid = 0; cid < num_cids_; cid++) {
8626 if (auto const cluster = clusters_by_cid_[cid]) {
8627 clusters.Add(clusters_by_cid_[cid]);
8628 }
8629 }
8630 // Put back the clusters temporarily removed above so the per-cid loops did not re-add them.
8631 for (intptr_t i = 0; i < out_of_order_clusters; i++) {
8632 const auto& cluster = clusters.At(i);
8633 const intptr_t cid = cluster->cid();
8634 auto const cid_clusters =
8635 cluster->is_canonical() ? canonical_clusters_by_cid_ : clusters_by_cid_;
8636 ASSERT(cid_clusters[cid] == nullptr);
8637 cid_clusters[cid] = cluster;
8638 }
8639
8640 PrepareInstructions(roots->canonicalized_stack_map_entries());
8641
8642 intptr_t num_objects = num_base_objects_ + num_written_objects_;
8643#if defined(ARCH_IS_64_BIT)
8644 if (!Utils::IsInt(32, num_objects)) {
8645 FATAL("Ref overflow");
8646 }
8647#endif
8648
8649 WriteUnsigned(num_base_objects_);
8650 WriteUnsigned(num_objects);
8651 WriteUnsigned(clusters.length());
8652 ASSERT((instructions_table_len_ == 0) || FLAG_precompiled_mode);
8653 WriteUnsigned(instructions_table_len_);
8654 WriteUnsigned(instructions_table_rodata_offset_);
8655
8656 for (SerializationCluster* cluster : clusters) {
8657 cluster->WriteAndMeasureAlloc(this);
8658 bytes_heap_allocated_ += cluster->target_memory_size();
8659#if defined(DEBUG)
8660 Write<int32_t>(next_ref_index_);
8661#endif
8662 }
8663
8664 // We should have assigned a ref to every object we pushed.
8665 ASSERT((next_ref_index_ - 1) == num_objects);
8666 // And recorded them all in [objects_].
8667 ASSERT(objects_->length() == num_objects);
8668
8669#if defined(DART_PRECOMPILER)
8670 if (profile_writer_ != nullptr && wsr_cluster != nullptr) {
8671 // Post-WriteAlloc, we eagerly create artificial nodes for any unreachable
8672 // targets in reachable WSRs if writing a v8 snapshot profile, since they
8673 // will be used in AttributeReference().
8674 //
8675 // Unreachable WSRs may also need artificial nodes, as they may be members
8676 // of other unreachable objects that have artificial nodes in the profile,
8677 // but they are instead lazily handled in CreateArtificialNodeIfNeeded().
8678 wsr_cluster->CreateArtificialTargetNodesIfNeeded(this);
8679 }
8680#endif
8681
8682 for (SerializationCluster* cluster : clusters) {
8683 cluster->WriteAndMeasureFill(this);
8684#if defined(DEBUG)
8685 Write<int32_t>(kSectionMarker);
8686#endif
8687 }
8688
8689 roots->WriteRoots(this);
8690
8691#if defined(DEBUG)
8692 Write<int32_t>(kSectionMarker);
8693#endif
8694
8695 PrintSnapshotSizes();
8696
8697 heap()->ResetObjectIdTable();
8698
8699 return objects_;
8700}
8701#endif // !defined(DART_PRECOMPILED_RUNTIME)
8702
8703#if defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
8704// The serialized format of the dispatch table is a sequence of variable-length
8705// integers (the built-in variable-length integer encoding/decoding of
8706// the stream). Each encoded integer e is interpreted thus:
8707// -kRecentCount .. -1 Pick value from the recent values buffer at index -1-e.
8708// 0 Empty (unused) entry.
8709// 1 .. kMaxRepeat Repeat previous entry e times.
8710// kIndexBase or higher Pick entry point from the object at index e-kIndexBase
8711// in the snapshot code cluster. Also put it in the recent
8712// values buffer at the next round-robin index.
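//
// Illustrative example (using the constants defined below): for a hypothetical
// Code object C at index 5 of the code cluster, the table
//   [C, C, C, <empty>, C]
// would be written as 69 (kDispatchTableIndexBase + 5, which also makes C the
// most recent value), 2 (repeat C twice more), 0 (empty entry), and -1 (recent
// slot 0, which still holds C).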
8713
8714// Constants for serialization format. Chosen such that repeats and recent
8715// values are encoded as single bytes in SLEB128 encoding.
8716static constexpr intptr_t kDispatchTableSpecialEncodingBits = 6;
8717static constexpr intptr_t kDispatchTableRecentCount =
8718 1 << kDispatchTableSpecialEncodingBits;
8719static constexpr intptr_t kDispatchTableRecentMask =
8720 (1 << kDispatchTableSpecialEncodingBits) - 1;
8721static constexpr intptr_t kDispatchTableMaxRepeat =
8722 (1 << kDispatchTableSpecialEncodingBits) - 1;
8723static constexpr intptr_t kDispatchTableIndexBase = kDispatchTableMaxRepeat + 1;
8724#endif // defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME)
8725
8726void Serializer::WriteDispatchTable(const Array& entries) {
8727#if defined(DART_PRECOMPILER)
8728 if (kind() != Snapshot::kFullAOT) return;
8729
8730 // Create an artificial node to which the bytes should be attributed. We
8731 // don't attribute them to entries.ptr(), as we don't want to attribute the
8732 // bytes for printing out a length of 0 to Object::null() when the dispatch
8733 // table is empty.
8734 const intptr_t profile_ref = AssignArtificialRef();
8735 const auto& dispatch_table_profile_id = GetProfileId(profile_ref);
8736 if (profile_writer_ != nullptr) {
8737 profile_writer_->SetObjectTypeAndName(dispatch_table_profile_id,
8738 "DispatchTable", "dispatch_table");
8739 profile_writer_->AddRoot(dispatch_table_profile_id);
8740 }
8741 WritingObjectScope scope(this, dispatch_table_profile_id);
8742 if (profile_writer_ != nullptr) {
8743 // We'll write the Array object as a property of the artificial dispatch
8744 // table node, so Code objects otherwise unreferenced will have it as an
8745 // ancestor.
8746 CreateArtificialNodeIfNeeded(entries.ptr());
8747 AttributePropertyRef(entries.ptr(), "<code entries>");
8748 }
8749
8750 const intptr_t bytes_before = bytes_written();
8751 const intptr_t table_length = entries.IsNull() ? 0 : entries.Length();
8752
8753 ASSERT(table_length <= compiler::target::kWordMax);
8754 WriteUnsigned(table_length);
8755 if (table_length == 0) {
8756 dispatch_table_size_ = bytes_written() - bytes_before;
8757 return;
8758 }
8759
8760 ASSERT(code_cluster_ != nullptr);
8761 // If instructions can be deduped, the code order table in the deserializer
8762 // may not contain all Code objects in the snapshot. Thus, we write the ID
8763 // for the first code object here so we can retrieve it during deserialization
8764 // and calculate the snapshot ID for Code objects from the cluster index.
8765 //
8766 // We could just use the snapshot reference ID of the Code object itself
8767 // instead of the cluster index and avoid this. However, since entries are
8768 // SLEB128 encoded, the size delta for serializing the first ID once is less
8769 // than the size delta of serializing the ID plus kIndexBase for each entry,
8770 // even when Code objects are allocated before all other non-base objects.
8771 //
8772 // We could also map Code objects to the first Code object in the cluster with
8773 // the same entry point and serialize that ID instead, but that loses
8774 // information about which Code object was originally referenced.
8775 WriteUnsigned(code_cluster_->first_ref());
8776
8777 CodePtr previous_code = nullptr;
8778 CodePtr recent[kDispatchTableRecentCount] = {nullptr};
8779 intptr_t recent_index = 0;
8780 intptr_t repeat_count = 0;
8781 for (intptr_t i = 0; i < table_length; i++) {
8782 auto const code = Code::RawCast(entries.At(i));
8783 // First, see if we're repeating the previous entry (invalid, recent, or
8784 // encoded).
8785 if (code == previous_code) {
8786 if (++repeat_count == kDispatchTableMaxRepeat) {
8787 Write(kDispatchTableMaxRepeat);
8788 repeat_count = 0;
8789 }
8790 continue;
8791 }
8792 // Emit any outstanding repeat count before handling the new code value.
8793 if (repeat_count > 0) {
8794 Write(repeat_count);
8795 repeat_count = 0;
8796 }
8797 previous_code = code;
8798 // The invalid entry can be repeated, but is never part of the recent list
8799 // since it already encodes to a single byte.
8800 if (code == Code::null()) {
8801 Write(0);
8802 continue;
8803 }
8804 // Check against the recent entries, and write an encoded reference to
8805 // the recent entry if found.
8806 intptr_t found_index = 0;
8807 for (; found_index < kDispatchTableRecentCount; found_index++) {
8808 if (recent[found_index] == code) break;
8809 }
8810 if (found_index < kDispatchTableRecentCount) {
8811 Write(~found_index);
8812 continue;
8813 }
8814 // We have a non-repeated, non-recent entry, so encode the reference ID of
8815 // the code object and emit that.
8816 auto const code_index = GetCodeIndex(code);
8817 // Use the index in the code cluster, not in the snapshot.
8818 auto const encoded = kDispatchTableIndexBase + code_index;
8819 ASSERT(encoded <= compiler::target::kWordMax);
8820 Write(encoded);
8821 recent[recent_index] = code;
8822 recent_index = (recent_index + 1) & kDispatchTableRecentMask;
8823 }
8824 if (repeat_count > 0) {
8825 Write(repeat_count);
8826 }
8827 dispatch_table_size_ = bytes_written() - bytes_before;
8828#endif // defined(DART_PRECOMPILER)
8829}
8830
8831void Serializer::PrintSnapshotSizes() {
8832#if !defined(DART_PRECOMPILED_RUNTIME)
8833 if (FLAG_print_snapshot_sizes_verbose) {
8834 TextBuffer buffer(1024);
8835 // Header, using format sizes matching those below to ensure alignment.
8836 buffer.Printf("%25s", "Cluster");
8837 buffer.Printf(" %6s", "Objs");
8838 buffer.Printf(" %8s", "Size");
8839 buffer.Printf(" %8s", "Fraction");
8840 buffer.Printf(" %10s", "Cumulative");
8841 buffer.Printf(" %8s", "HeapSize");
8842 buffer.Printf(" %5s", "Cid");
8843 buffer.Printf(" %9s", "Canonical");
8844 buffer.AddString("\n");
8845 GrowableArray<SerializationCluster*> clusters_by_size;
8846 for (intptr_t cid = 1; cid < num_cids_; cid++) {
8847 if (auto const cluster = canonical_clusters_by_cid_[cid]) {
8848 clusters_by_size.Add(cluster);
8849 }
8850 if (auto const cluster = clusters_by_cid_[cid]) {
8851 clusters_by_size.Add(cluster);
8852 }
8853 }
8854 intptr_t text_size = 0;
8855 if (image_writer_ != nullptr) {
8856 auto const text_object_count = image_writer_->GetTextObjectCount();
8857 text_size = image_writer_->text_size();
8858 intptr_t trampoline_count, trampoline_size;
8859 image_writer_->GetTrampolineInfo(&trampoline_count, &trampoline_size);
8860 auto const instructions_count = text_object_count - trampoline_count;
8861 auto const instructions_size = text_size - trampoline_size;
8862 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8863 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Instructions"),
8864 instructions_count, instructions_size));
8865 if (trampoline_size > 0) {
8866 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8867 ImageWriter::TagObjectTypeAsReadOnly(zone_, "Trampoline"),
8868 trampoline_count, trampoline_size));
8869 }
8870 }
8871 // The dispatch_table_size_ will be 0 if the snapshot did not include a
8872 // dispatch table (i.e., the VM snapshot). For a precompiled isolate
8873 // snapshot, we always serialize at least _one_ byte for the DispatchTable.
8874 if (dispatch_table_size_ > 0) {
8875 const auto& dispatch_table_entries = Array::Handle(
8876 zone_,
8877 isolate_group()->object_store()->dispatch_table_code_entries());
8878 auto const entry_count =
8879 dispatch_table_entries.IsNull() ? 0 : dispatch_table_entries.Length();
8880 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8881 "DispatchTable", entry_count, dispatch_table_size_));
8882 }
8883 if (instructions_table_len_ > 0) {
8884 const intptr_t memory_size =
8885 compiler::target::InstructionsTable::InstanceSize() +
8886 compiler::target::Array::InstanceSize(instructions_table_len_);
8887 clusters_by_size.Add(new (zone_) FakeSerializationCluster(
8888 "InstructionsTable", instructions_table_len_, 0, memory_size));
8889 }
8890 clusters_by_size.Sort(CompareClusters);
8891 double total_size =
8892 static_cast<double>(bytes_written() + GetDataSize() + text_size);
8893 double cumulative_fraction = 0.0;
8894 for (intptr_t i = 0; i < clusters_by_size.length(); i++) {
8895 SerializationCluster* cluster = clusters_by_size[i];
8896 double fraction = static_cast<double>(cluster->size()) / total_size;
8897 cumulative_fraction += fraction;
8898 buffer.Printf("%25s", cluster->name());
8899 buffer.Printf(" %6" Pd "", cluster->num_objects());
8900 buffer.Printf(" %8" Pd "", cluster->size());
8901 buffer.Printf(" %1.6lf", fraction);
8902 buffer.Printf(" %1.8lf", cumulative_fraction);
8903 buffer.Printf(" %8" Pd "", cluster->target_memory_size());
8904 if (cluster->cid() != -1) {
8905 buffer.Printf(" %5" Pd "", cluster->cid());
8906 } else {
8907 buffer.Printf(" %5s", "");
8908 }
8909 if (cluster->is_canonical()) {
8910 buffer.Printf(" %9s", "canonical");
8911 } else {
8912 buffer.Printf(" %9s", "");
8913 }
8914 buffer.AddString("\n");
8915 }
8916 OS::PrintErr("%s", buffer.buffer());
8917 }
8918#endif // !defined(DART_PRECOMPILED_RUNTIME)
8919}
8920
8921Deserializer::Deserializer(Thread* thread,
8922 Snapshot::Kind kind,
8923 const uint8_t* buffer,
8924 intptr_t size,
8925 const uint8_t* data_buffer,
8926 const uint8_t* instructions_buffer,
8927 bool is_non_root_unit,
8928 intptr_t offset)
8929 : ThreadStackResource(thread),
8930 heap_(thread->isolate_group()->heap()),
8931 old_space_(heap_->old_space()),
8932 freelist_(old_space_->DataFreeList()),
8933 zone_(thread->zone()),
8934 kind_(kind),
8935 stream_(buffer, size),
8936 image_reader_(nullptr),
8937 refs_(nullptr),
8938 next_ref_index_(kFirstReference),
8939 clusters_(nullptr),
8940 is_non_root_unit_(is_non_root_unit),
8941 instructions_table_(InstructionsTable::Handle(thread->zone())) {
8942 if (Snapshot::IncludesCode(kind)) {
8943 ASSERT(instructions_buffer != nullptr);
8944 ASSERT(data_buffer != nullptr);
8945 image_reader_ = new (zone_) ImageReader(data_buffer, instructions_buffer);
8946 }
8947 stream_.SetPosition(offset);
8948}
8949
8950Deserializer::~Deserializer() {
8951 delete[] clusters_;
8952}
8953
8954DeserializationCluster* Deserializer::ReadCluster() {
8955 const uint32_t tags = Read<uint32_t>();
8956 const intptr_t cid = UntaggedObject::ClassIdTag::decode(tags);
8957 const bool is_canonical = UntaggedObject::CanonicalBit::decode(tags);
8958 const bool is_immutable = UntaggedObject::ImmutableBit::decode(tags);
8959 Zone* Z = zone_;
8960 if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
8961 return new (Z) InstanceDeserializationCluster(
8962 cid, is_canonical, is_immutable, !is_non_root_unit_);
8963 }
8964 if (IsTypedDataViewClassId(cid)) {
8965 ASSERT(!is_canonical);
8966 return new (Z) TypedDataViewDeserializationCluster(cid);
8967 }
8968 if (IsExternalTypedDataClassId(cid)) {
8969 ASSERT(!is_canonical);
8970 return new (Z) ExternalTypedDataDeserializationCluster(cid);
8971 }
8972 if (IsTypedDataClassId(cid)) {
8973 ASSERT(!is_canonical);
8974 return new (Z) TypedDataDeserializationCluster(cid);
8975 }
8976
8977#if !defined(DART_COMPRESSED_POINTERS)
8978 if (Snapshot::IncludesCode(kind_)) {
8979 switch (cid) {
8980 case kPcDescriptorsCid:
8981 case kCodeSourceMapCid:
8982 case kCompressedStackMapsCid:
8983 return new (Z)
8984 RODataDeserializationCluster(cid, is_canonical, !is_non_root_unit_);
8985 case kOneByteStringCid:
8986 case kTwoByteStringCid:
8987 case kStringCid:
8988 if (!is_non_root_unit_) {
8989 return new (Z) RODataDeserializationCluster(cid, is_canonical,
8990 !is_non_root_unit_);
8991 }
8992 break;
8993 }
8994 }
8995#endif
8996
8997 switch (cid) {
8998 case kClassCid:
8999 ASSERT(!is_canonical);
9000 return new (Z) ClassDeserializationCluster();
9001 case kTypeParametersCid:
9002 return new (Z) TypeParametersDeserializationCluster();
9003 case kTypeArgumentsCid:
9004 return new (Z)
9005 TypeArgumentsDeserializationCluster(is_canonical, !is_non_root_unit_);
9006 case kPatchClassCid:
9007 ASSERT(!is_canonical);
9008 return new (Z) PatchClassDeserializationCluster();
9009 case kFunctionCid:
9010 ASSERT(!is_canonical);
9011 return new (Z) FunctionDeserializationCluster();
9012 case kClosureDataCid:
9013 ASSERT(!is_canonical);
9014 return new (Z) ClosureDataDeserializationCluster();
9015 case kFfiTrampolineDataCid:
9016 ASSERT(!is_canonical);
9017 return new (Z) FfiTrampolineDataDeserializationCluster();
9018 case kFieldCid:
9019 ASSERT(!is_canonical);
9020 return new (Z) FieldDeserializationCluster();
9021 case kScriptCid:
9022 ASSERT(!is_canonical);
9023 return new (Z) ScriptDeserializationCluster();
9024 case kLibraryCid:
9025 ASSERT(!is_canonical);
9026 return new (Z) LibraryDeserializationCluster();
9027 case kNamespaceCid:
9028 ASSERT(!is_canonical);
9029 return new (Z) NamespaceDeserializationCluster();
9030#if !defined(DART_PRECOMPILED_RUNTIME)
9031 case kKernelProgramInfoCid:
9032 ASSERT(!is_canonical);
9033 return new (Z) KernelProgramInfoDeserializationCluster();
9034#endif // !DART_PRECOMPILED_RUNTIME
9035 case kCodeCid:
9036 ASSERT(!is_canonical);
9037 return new (Z) CodeDeserializationCluster();
9038 case kObjectPoolCid:
9039 ASSERT(!is_canonical);
9040 return new (Z) ObjectPoolDeserializationCluster();
9041 case kPcDescriptorsCid:
9042 ASSERT(!is_canonical);
9043 return new (Z) PcDescriptorsDeserializationCluster();
9044 case kCodeSourceMapCid:
9045 ASSERT(!is_canonical);
9046 return new (Z) CodeSourceMapDeserializationCluster();
9047 case kCompressedStackMapsCid:
9048 ASSERT(!is_canonical);
9049 return new (Z) CompressedStackMapsDeserializationCluster();
9050 case kExceptionHandlersCid:
9051 ASSERT(!is_canonical);
9052 return new (Z) ExceptionHandlersDeserializationCluster();
9053 case kContextCid:
9054 ASSERT(!is_canonical);
9055 return new (Z) ContextDeserializationCluster();
9056 case kContextScopeCid:
9057 ASSERT(!is_canonical);
9058 return new (Z) ContextScopeDeserializationCluster();
9059 case kUnlinkedCallCid:
9060 ASSERT(!is_canonical);
9061 return new (Z) UnlinkedCallDeserializationCluster();
9062 case kICDataCid:
9063 ASSERT(!is_canonical);
9064 return new (Z) ICDataDeserializationCluster();
9065 case kMegamorphicCacheCid:
9066 ASSERT(!is_canonical);
9067 return new (Z) MegamorphicCacheDeserializationCluster();
9068 case kSubtypeTestCacheCid:
9069 ASSERT(!is_canonical);
9070 return new (Z) SubtypeTestCacheDeserializationCluster();
9071 case kLoadingUnitCid:
9072 ASSERT(!is_canonical);
9073 return new (Z) LoadingUnitDeserializationCluster();
9074 case kLanguageErrorCid:
9075 ASSERT(!is_canonical);
9076 return new (Z) LanguageErrorDeserializationCluster();
9077 case kUnhandledExceptionCid:
9078 ASSERT(!is_canonical);
9079 return new (Z) UnhandledExceptionDeserializationCluster();
9080 case kLibraryPrefixCid:
9081 ASSERT(!is_canonical);
9082 return new (Z) LibraryPrefixDeserializationCluster();
9083 case kTypeCid:
9084 return new (Z)
9085 TypeDeserializationCluster(is_canonical, !is_non_root_unit_);
9086 case kFunctionTypeCid:
9087 return new (Z)
9088 FunctionTypeDeserializationCluster(is_canonical, !is_non_root_unit_);
9089 case kRecordTypeCid:
9090 return new (Z)
9091 RecordTypeDeserializationCluster(is_canonical, !is_non_root_unit_);
9092 case kTypeParameterCid:
9093 return new (Z)
9094 TypeParameterDeserializationCluster(is_canonical, !is_non_root_unit_);
9095 case kClosureCid:
9096 return new (Z)
9097 ClosureDeserializationCluster(is_canonical, !is_non_root_unit_);
9098 case kMintCid:
9099 return new (Z)
9100 MintDeserializationCluster(is_canonical, !is_non_root_unit_);
9101 case kDoubleCid:
9102 return new (Z)
9103 DoubleDeserializationCluster(is_canonical, !is_non_root_unit_);
9104 case kInt32x4Cid:
9105 case kFloat32x4Cid:
9106 case kFloat64x2Cid:
9107 return new (Z)
9108 Simd128DeserializationCluster(cid, is_canonical, !is_non_root_unit_);
9109 case kGrowableObjectArrayCid:
9110 ASSERT(!is_canonical);
9111 return new (Z) GrowableObjectArrayDeserializationCluster();
9112 case kRecordCid:
9113 return new (Z)
9114 RecordDeserializationCluster(is_canonical, !is_non_root_unit_);
9115 case kStackTraceCid:
9116 ASSERT(!is_canonical);
9117 return new (Z) StackTraceDeserializationCluster();
9118 case kRegExpCid:
9119 ASSERT(!is_canonical);
9120 return new (Z) RegExpDeserializationCluster();
9121 case kWeakPropertyCid:
9122 ASSERT(!is_canonical);
9123 return new (Z) WeakPropertyDeserializationCluster();
9124 case kMapCid:
9125 // We do not have mutable hash maps in snapshots.
9126 UNREACHABLE();
9127 case kConstMapCid:
9128 return new (Z) MapDeserializationCluster(kConstMapCid, is_canonical,
9129 !is_non_root_unit_);
9130 case kSetCid:
9131 // We do not have mutable hash sets in snapshots.
9132 UNREACHABLE();
9133 case kConstSetCid:
9134 return new (Z) SetDeserializationCluster(kConstSetCid, is_canonical,
9135 !is_non_root_unit_);
9136 case kArrayCid:
9137 return new (Z) ArrayDeserializationCluster(kArrayCid, is_canonical,
9138 !is_non_root_unit_);
9139 case kImmutableArrayCid:
9140 return new (Z) ArrayDeserializationCluster(
9141 kImmutableArrayCid, is_canonical, !is_non_root_unit_);
9142 case kWeakArrayCid:
9143 return new (Z) WeakArrayDeserializationCluster();
9144 case kStringCid:
9145 return new (Z) StringDeserializationCluster(
9146 is_canonical,
9147 !is_non_root_unit_ && isolate_group() != Dart::vm_isolate_group());
9148#define CASE_FFI_CID(name) case kFfi##name##Cid:
9149 CLASS_LIST_FFI_TYPE_MARKER(CASE_FFI_CID)
9150#undef CASE_FFI_CID
9151 return new (Z) InstanceDeserializationCluster(
9152 cid, is_canonical, is_immutable, !is_non_root_unit_);
9153 case kDeltaEncodedTypedDataCid:
9154 return new (Z) DeltaEncodedTypedDataDeserializationCluster();
9155 default:
9156 break;
9157 }
9158 FATAL("No cluster defined for cid %" Pd, cid);
9159 return nullptr;
9160}
9161
9162void Deserializer::ReadDispatchTable(
9163 ReadStream* stream,
9164 bool deferred,
9165 const InstructionsTable& root_instruction_table,
9166 intptr_t deferred_code_start_index,
9167 intptr_t deferred_code_end_index) {
9168#if defined(DART_PRECOMPILED_RUNTIME)
9169 const uint8_t* table_snapshot_start = stream->AddressOfCurrentPosition();
9170 const intptr_t length = stream->ReadUnsigned();
9171 if (length == 0) return;
9172
9173 const intptr_t first_code_id = stream->ReadUnsigned();
9174 deferred_code_start_index -= first_code_id;
9175 deferred_code_end_index -= first_code_id;
9176
9177 auto const IG = isolate_group();
9178 auto code = IG->object_store()->dispatch_table_null_error_stub();
9179 ASSERT(code != Code::null());
9180 uword null_entry = Code::EntryPointOf(code);
9181
9182 DispatchTable* table;
9183 if (deferred) {
9184 table = IG->dispatch_table();
9185 ASSERT(table != nullptr && table->length() == length);
9186 } else {
9187 ASSERT(IG->dispatch_table() == nullptr);
9188 table = new DispatchTable(length);
9189 }
9190 auto const array = table->array();
9191 uword value = 0;
9192 uword recent[kDispatchTableRecentCount] = {0};
9193 intptr_t recent_index = 0;
9194 intptr_t repeat_count = 0;
9195 for (intptr_t i = 0; i < length; i++) {
9196 if (repeat_count > 0) {
9197 array[i] = value;
9198 repeat_count--;
9199 continue;
9200 }
9201 auto const encoded = stream->Read<intptr_t>();
9202 if (encoded == 0) {
9203 value = null_entry;
9204 } else if (encoded < 0) {
9205 intptr_t r = ~encoded;
9206 ASSERT(r < kDispatchTableRecentCount);
9207 value = recent[r];
9208 } else if (encoded <= kDispatchTableMaxRepeat) {
9209 repeat_count = encoded - 1;
9210 } else {
9211 const intptr_t code_index = encoded - kDispatchTableIndexBase;
9212 if (deferred) {
9213 const intptr_t code_id =
9214 CodeIndexToClusterIndex(root_instruction_table, code_index);
9215 if ((deferred_code_start_index <= code_id) &&
9216 (code_id < deferred_code_end_index)) {
9217 auto code = static_cast<CodePtr>(Ref(first_code_id + code_id));
9218 value = Code::EntryPointOf(code);
9219 } else {
9220 // Reuse old value from the dispatch table.
9221 value = array[i];
9222 }
9223 } else {
9224 value = GetEntryPointByCodeIndex(code_index);
9225 }
9226 recent[recent_index] = value;
9227 recent_index = (recent_index + 1) & kDispatchTableRecentMask;
9228 }
9229 array[i] = value;
9230 }
9231 ASSERT(repeat_count == 0);
9232
9233 if (!deferred) {
9234 IG->set_dispatch_table(table);
9235 intptr_t table_snapshot_size =
9236 stream->AddressOfCurrentPosition() - table_snapshot_start;
9237 IG->set_dispatch_table_snapshot(table_snapshot_start);
9238 IG->set_dispatch_table_snapshot_size(table_snapshot_size);
9239 }
9240#endif
9241}
9242
9243ApiErrorPtr Deserializer::VerifyImageAlignment() {
9244 if (image_reader_ != nullptr) {
9245 return image_reader_->VerifyAlignment();
9246 }
9247 return ApiError::null();
9248}
9249
9250char* SnapshotHeaderReader::VerifyVersionAndFeatures(
9251 IsolateGroup* isolate_group,
9252 intptr_t* offset) {
9253 char* error = VerifyVersion();
9254 if (error == nullptr) {
9255 error = VerifyFeatures(isolate_group);
9256 }
9257 if (error == nullptr) {
9258 *offset = stream_.Position();
9259 }
9260 return error;
9261}
9262
9263char* SnapshotHeaderReader::VerifyVersion() {
9264 // If the version string doesn't match, return an error.
9265 // Note: New things are allocated only if we're going to return an error.
9266
9267 const char* expected_version = Version::SnapshotString();
9268 ASSERT(expected_version != nullptr);
9269 const intptr_t version_len = strlen(expected_version);
9270 if (stream_.PendingBytes() < version_len) {
9271 const intptr_t kMessageBufferSize = 128;
9272 char message_buffer[kMessageBufferSize];
9273 Utils::SNPrint(message_buffer, kMessageBufferSize,
9274 "No full snapshot version found, expected '%s'",
9275 expected_version);
9276 return BuildError(message_buffer);
9277 }
9278
9279 const char* version =
9280 reinterpret_cast<const char*>(stream_.AddressOfCurrentPosition());
9281 ASSERT(version != nullptr);
9282 if (strncmp(version, expected_version, version_len) != 0) {
9283 const intptr_t kMessageBufferSize = 256;
9284 char message_buffer[kMessageBufferSize];
9285 char* actual_version = Utils::StrNDup(version, version_len);
9286 Utils::SNPrint(message_buffer, kMessageBufferSize,
9287 "Wrong %s snapshot version, expected '%s' found '%s'",
9288 (Snapshot::IsFull(kind_)) ? "full" : "script",
9289 expected_version, actual_version);
9290 free(actual_version);
9291 return BuildError(message_buffer);
9292 }
9293 stream_.Advance(version_len);
9294
9295 return nullptr;
9296}
9297
9298char* SnapshotHeaderReader::VerifyFeatures(IsolateGroup* isolate_group) {
9299 const char* expected_features =
9300 Dart::FeaturesString(isolate_group, (isolate_group == nullptr), kind_);
9301 ASSERT(expected_features != nullptr);
9302 const intptr_t expected_len = strlen(expected_features);
9303
9304 const char* features = nullptr;
9305 intptr_t features_length = 0;
9306
9307 auto error = ReadFeatures(&features, &features_length);
9308 if (error != nullptr) {
9309 return error;
9310 }
9311
9312 if (features_length != expected_len ||
9313 (strncmp(features, expected_features, expected_len) != 0)) {
9314 const intptr_t kMessageBufferSize = 1024;
9315 char message_buffer[kMessageBufferSize];
9316 char* actual_features = Utils::StrNDup(
9317 features, features_length < 1024 ? features_length : 1024);
9318 Utils::SNPrint(message_buffer, kMessageBufferSize,
9319 "Snapshot not compatible with the current VM configuration: "
9320 "the snapshot requires '%s' but the VM has '%s'",
9321 actual_features, expected_features);
9322 free(const_cast<char*>(expected_features));
9323 free(actual_features);
9324 return BuildError(message_buffer);
9325 }
9326 free(const_cast<char*>(expected_features));
9327 return nullptr;
9328}
9329
9330char* SnapshotHeaderReader::ReadFeatures(const char** features,
9331 intptr_t* features_length) {
9332 const char* cursor =
9333 reinterpret_cast<const char*>(stream_.AddressOfCurrentPosition());
9334 const intptr_t length = Utils::StrNLen(cursor, stream_.PendingBytes());
9335 if (length == stream_.PendingBytes()) {
9336 return BuildError(
9337 "The features string in the snapshot was not '\\0'-terminated.");
9338 }
9339 *features = cursor;
9340 *features_length = length;
9341 stream_.Advance(length + 1);
9342 return nullptr;
9343}
9344
9345char* SnapshotHeaderReader::BuildError(const char* message) {
9346 return Utils::StrDup(message);
9347}
9348
9349ApiErrorPtr FullSnapshotReader::ConvertToApiError(char* message) {
9350 // This can also fail while bringing up the VM isolate, so make sure to
9351 // allocate the error message in old space.
9352 const String& msg = String::Handle(String::New(message, Heap::kOld));
9353
9354 // The [message] was constructed with [BuildError] and needs to be freed.
9355 free(message);
9356
9357 return ApiError::New(msg, Heap::kOld);
9358}
9359
9360void Deserializer::ReadInstructions(CodePtr code, bool deferred) {
9361#if defined(DART_PRECOMPILED_RUNTIME)
9362 if (deferred) {
9363 uword entry_point = StubCode::NotLoaded().EntryPoint();
9364 code->untag()->entry_point_ = entry_point;
9365 code->untag()->unchecked_entry_point_ = entry_point;
9366 code->untag()->monomorphic_entry_point_ = entry_point;
9367 code->untag()->monomorphic_unchecked_entry_point_ = entry_point;
9368 code->untag()->instructions_length_ = 0;
9369 return;
9370 }
9371
9372 const uword payload_start = instructions_table_.EntryPointAt(
9373 instructions_table_.rodata()->first_entry_with_code +
9374 instructions_index_);
9375 const uint32_t payload_info = ReadUnsigned();
9376 const uint32_t unchecked_offset = payload_info >> 1;
9377 const bool has_monomorphic_entrypoint = (payload_info & 0x1) == 0x1;
9378
9379 const uword entry_offset =
9380 has_monomorphic_entrypoint ? Instructions::kPolymorphicEntryOffsetAOT : 0;
9381 const uword monomorphic_entry_offset =
9382 has_monomorphic_entrypoint ? Instructions::kMonomorphicEntryOffsetAOT : 0;
9383
9384 const uword entry_point = payload_start + entry_offset;
9385 const uword monomorphic_entry_point =
9386 payload_start + monomorphic_entry_offset;
9387
9388 instructions_table_.SetCodeAt(instructions_index_++, code);
9389
9390 // There are no serialized RawInstructions objects in this mode.
9391 code->untag()->instructions_ = Instructions::null();
9392 code->untag()->entry_point_ = entry_point;
9393 code->untag()->unchecked_entry_point_ = entry_point + unchecked_offset;
9394 code->untag()->monomorphic_entry_point_ = monomorphic_entry_point;
9395 code->untag()->monomorphic_unchecked_entry_point_ =
9396 monomorphic_entry_point + unchecked_offset;
9397#else
9398 ASSERT(!deferred);
9399 InstructionsPtr instr = image_reader_->GetInstructionsAt(Read<uint32_t>());
9400 uint32_t unchecked_offset = ReadUnsigned();
9401 code->untag()->instructions_ = instr;
9402 code->untag()->unchecked_offset_ = unchecked_offset;
9403 ASSERT(kind() == Snapshot::kFullJIT);
9404 const uint32_t active_offset = Read<uint32_t>();
9405 instr = image_reader_->GetInstructionsAt(active_offset);
9406 unchecked_offset = ReadUnsigned();
9407 code->untag()->active_instructions_ = instr;
9408 Code::InitializeCachedEntryPointsFrom(code, instr, unchecked_offset);
9409#endif // defined(DART_PRECOMPILED_RUNTIME)
9410}
9411
9412void Deserializer::EndInstructions() {
9413#if defined(DART_PRECOMPILED_RUNTIME)
9414 if (instructions_table_.IsNull()) {
9415 ASSERT(instructions_index_ == 0);
9416 return;
9417 }
9418
9419 const auto& code_objects =
9420 Array::Handle(instructions_table_.ptr()->untag()->code_objects());
9421 ASSERT(code_objects.Length() == instructions_index_);
9422
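  // In this mode Code objects do not carry Instructions objects, so recover
  // each payload's length by walking the Code objects backwards from the end
  // of the text section: each payload ends where the following one begins.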
9423 uword previous_end = image_reader_->GetBareInstructionsEnd();
9424 for (intptr_t i = instructions_index_ - 1; i >= 0; --i) {
9425 CodePtr code = Code::RawCast(code_objects.At(i));
9426 uword start = Code::PayloadStartOf(code);
9427 ASSERT(start <= previous_end);
9428 code->untag()->instructions_length_ = previous_end - start;
9429 previous_end = start;
9430 }
9431
9432 ObjectStore* object_store = IsolateGroup::Current()->object_store();
9433 GrowableObjectArray& tables =
9434 GrowableObjectArray::Handle(zone_, object_store->instructions_tables());
9435 if (tables.IsNull()) {
9436 tables = GrowableObjectArray::New(Heap::kOld);
9437 object_store->set_instructions_tables(tables);
9438 }
9439 if ((tables.Length() == 0) ||
9440 (tables.At(tables.Length() - 1) != instructions_table_.ptr())) {
9441 ASSERT((!is_non_root_unit_ && tables.Length() == 0) ||
9442 (is_non_root_unit_ && tables.Length() > 0));
9443 tables.Add(instructions_table_, Heap::kOld);
9444 }
9445#endif
9446}
9447
9448ObjectPtr Deserializer::GetObjectAt(uint32_t offset) const {
9449 return image_reader_->GetObjectAt(offset);
9450}
9451
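// RAII helper that holds the data freelist lock of a page space for the
// duration of the scope; used in Deserializer::Deserialize below to enable
// bump-pointer allocation in old space.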
9452class HeapLocker : public StackResource {
9453 public:
9454 HeapLocker(Thread* thread, PageSpace* page_space)
9455 : StackResource(thread),
9456 page_space_(page_space),
9457 freelist_(page_space->DataFreeList()) {
9458 page_space_->AcquireLock(freelist_);
9459 }
9460 ~HeapLocker() { page_space_->ReleaseLock(freelist_); }
9461
9462 private:
9463 PageSpace* page_space_;
9464 FreeList* freelist_;
9465};
9466
9467void Deserializer::Deserialize(DeserializationRoots* roots) {
9468 const void* clustered_start = AddressOfCurrentPosition();
9469
9470 Array& refs = Array::Handle(zone_);
9471 num_base_objects_ = ReadUnsigned();
9472 num_objects_ = ReadUnsigned();
9473 num_clusters_ = ReadUnsigned();
9474 const intptr_t instructions_table_len = ReadUnsigned();
9475 const uint32_t instruction_table_data_offset = ReadUnsigned();
9476 USE(instruction_table_data_offset);
9477
9478 clusters_ = new DeserializationCluster*[num_clusters_];
9479 refs = Array::New(num_objects_ + kFirstReference, Heap::kOld);
9480
9481#if defined(DART_PRECOMPILED_RUNTIME)
9482 if (instructions_table_len > 0) {
9483 ASSERT(FLAG_precompiled_mode);
9484 const uword start_pc = image_reader_->GetBareInstructionsAt(0);
9485 const uword end_pc = image_reader_->GetBareInstructionsEnd();
9486 uword instruction_table_data = 0;
9487 if (instruction_table_data_offset != 0) {
9488 // NoSafepointScope to satisfy assertion in DataStart. InstructionsTable
9489 // data resides in RO memory and is immovable and immortal making it
9490 // safe to use DataStart result outside of NoSafepointScope.
9491 NoSafepointScope no_safepoint;
9492 instruction_table_data = reinterpret_cast<uword>(
9493 OneByteString::DataStart(String::Handle(static_cast<StringPtr>(
9494 image_reader_->GetObjectAt(instruction_table_data_offset)))));
9495 }
9496 instructions_table_ = InstructionsTable::New(
9497 instructions_table_len, start_pc, end_pc, instruction_table_data);
9498 }
9499#else
9500 ASSERT(instructions_table_len == 0);
9501#endif // defined(DART_PRECOMPILED_RUNTIME)
9502
9503 {
9504 // The deserializer initializes objects without using the write barrier,
9505 // partly for speed since we know all the deserialized objects will be
9506 // long-lived and partly because the target objects can be not yet
9507 // initialized at the time of the write. To make this safe, we must ensure
9508 // there are no other threads mutating this heap, and that incremental
9509 // marking is not in progress. This is normally the case anyway for the
9510 // main snapshot being deserialized at isolate load, but checks are needed when
9511 // loading secondary snapshots as part of deferred loading.
9512 HeapIterationScope iter(thread());
9513 // For bump-pointer allocation in old-space.
9514 HeapLocker hl(thread(), heap_->old_space());
9515 // Must not perform any other type of allocation, which might trigger GC
9516 // while there are still uninitialized objects.
9517 NoSafepointScope no_safepoint;
9518 refs_ = refs.ptr();
9519
9520 roots->AddBaseObjects(this);
9521
9522 if (num_base_objects_ != (next_ref_index_ - kFirstReference)) {
9523 FATAL("Snapshot expects %" Pd
9524 " base objects, but deserializer provided %" Pd,
9525 num_base_objects_, next_ref_index_ - kFirstReference);
9526 }
9527
9528 {
9529 TIMELINE_DURATION(thread(), Isolate, "ReadAlloc");
9530 for (intptr_t i = 0; i < num_clusters_; i++) {
9531 clusters_[i] = ReadCluster();
9532 clusters_[i]->ReadAlloc(this);
9533#if defined(DEBUG)
9534 intptr_t serializers_next_ref_index_ = Read<int32_t>();
9535 ASSERT_EQUAL(serializers_next_ref_index_, next_ref_index_);
9536#endif
9537 }
9538 }
9539
9540 // We should have completely filled the ref array.
9541 ASSERT_EQUAL(next_ref_index_ - kFirstReference, num_objects_);
9542
9543 {
9544 TIMELINE_DURATION(thread(), Isolate, "ReadFill");
9545 for (intptr_t i = 0; i < num_clusters_; i++) {
9546 clusters_[i]->ReadFill(this);
9547#if defined(DEBUG)
9548 int32_t section_marker = Read<int32_t>();
9549 ASSERT(section_marker == kSectionMarker);
9550#endif
9551 }
9552 }
9553
9554 roots->ReadRoots(this);
9555
9556#if defined(DEBUG)
9557 int32_t section_marker = Read<int32_t>();
9558 ASSERT(section_marker == kSectionMarker);
9559#endif
9560
9561 refs_ = nullptr;
9562 }
9563
9564 roots->PostLoad(this, refs);
9565
9566 auto isolate_group = thread()->isolate_group();
9567#if defined(DEBUG)
9568 isolate_group->ValidateClassTable();
9569 if (isolate_group != Dart::vm_isolate()->group()) {
9570 isolate_group->heap()->Verify("Deserializer::Deserialize");
9571 }
9572#endif
9573
9574 {
9575 TIMELINE_DURATION(thread(), Isolate, "PostLoad");
9576 for (intptr_t i = 0; i < num_clusters_; i++) {
9577 clusters_[i]->PostLoad(this, refs);
9578 }
9579 }
9580
9581 if (isolate_group->snapshot_is_dontneed_safe()) {
9582 size_t clustered_length =
9583 reinterpret_cast<uword>(AddressOfCurrentPosition()) -
9584 reinterpret_cast<uword>(clustered_start);
9585 VirtualMemory::DontNeed(const_cast<void*>(clustered_start),
9586 clustered_length);
9587 }
9588}
9589
9590#if !defined(DART_PRECOMPILED_RUNTIME)
9591FullSnapshotWriter::FullSnapshotWriter(
9592 Snapshot::Kind kind,
9593 NonStreamingWriteStream* vm_snapshot_data,
9594 NonStreamingWriteStream* isolate_snapshot_data,
9595 ImageWriter* vm_image_writer,
9596 ImageWriter* isolate_image_writer)
9597 : thread_(Thread::Current()),
9598 kind_(kind),
9599 vm_snapshot_data_(vm_snapshot_data),
9600 isolate_snapshot_data_(isolate_snapshot_data),
9601 vm_isolate_snapshot_size_(0),
9602 isolate_snapshot_size_(0),
9603 vm_image_writer_(vm_image_writer),
9604 isolate_image_writer_(isolate_image_writer) {
9605 ASSERT(isolate_group() != nullptr);
9606 ASSERT(heap() != nullptr);
9607 ObjectStore* object_store = isolate_group()->object_store();
9608 ASSERT(object_store != nullptr);
9609
9610#if defined(DEBUG)
9611 isolate_group()->ValidateClassTable();
9612#endif // DEBUG
9613
9614#if defined(DART_PRECOMPILER)
9615 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
9616 profile_writer_ = new (zone()) V8SnapshotProfileWriter(zone());
9617 }
9618#endif
9619}
9620
9621FullSnapshotWriter::~FullSnapshotWriter() {}
9622
9623ZoneGrowableArray<Object*>* FullSnapshotWriter::WriteVMSnapshot() {
9624 TIMELINE_DURATION(thread(), Isolate, "WriteVMSnapshot");
9625
9626 ASSERT(vm_snapshot_data_ != nullptr);
9627 Serializer serializer(thread(), kind_, vm_snapshot_data_, vm_image_writer_,
9628 /*vm=*/true, profile_writer_);
9629
9630 serializer.ReserveHeader();
9631 serializer.WriteVersionAndFeatures(true);
9632 VMSerializationRoots roots(
9633 WeakArray::Handle(
9634 Dart::vm_isolate_group()->object_store()->symbol_table()),
9635 /*should_write_symbols=*/!Snapshot::IncludesStringsInROData(kind_));
9636 ZoneGrowableArray<Object*>* objects = serializer.Serialize(&roots);
9637 serializer.FillHeader(serializer.kind());
9638 clustered_vm_size_ = serializer.bytes_written();
9639 heap_vm_size_ = serializer.bytes_heap_allocated();
9640
9641 if (Snapshot::IncludesCode(kind_)) {
9642 vm_image_writer_->SetProfileWriter(profile_writer_);
9643 vm_image_writer_->Write(serializer.stream(), true);
9644 mapped_data_size_ += vm_image_writer_->data_size();
9645 mapped_text_size_ += vm_image_writer_->text_size();
9646 vm_image_writer_->ResetOffsets();
9647 vm_image_writer_->ClearProfileWriter();
9648 }
9649
9650 // The clustered part + the direct mapped data part.
9651 vm_isolate_snapshot_size_ = serializer.bytes_written();
9652 return objects;
9653}
9654
9655void FullSnapshotWriter::WriteProgramSnapshot(
9656 ZoneGrowableArray<Object*>* objects,
9657 GrowableArray<LoadingUnitSerializationData*>* units) {
9658 TIMELINE_DURATION(thread(), Isolate, "WriteProgramSnapshot");
9659
9660 ASSERT(isolate_snapshot_data_ != nullptr);
9661 Serializer serializer(thread(), kind_, isolate_snapshot_data_,
9662 isolate_image_writer_, /*vm=*/false, profile_writer_);
9663 serializer.set_loading_units(units);
9664 serializer.set_current_loading_unit_id(LoadingUnit::kRootId);
9665 ObjectStore* object_store = isolate_group()->object_store();
9666 ASSERT(object_store != nullptr);
9667
9668 // These type arguments must always be retained.
9669 ASSERT(object_store->type_argument_int()->untag()->IsCanonical());
9670 ASSERT(object_store->type_argument_double()->untag()->IsCanonical());
9671 ASSERT(object_store->type_argument_string()->untag()->IsCanonical());
9672 ASSERT(object_store->type_argument_string_dynamic()->untag()->IsCanonical());
9673 ASSERT(object_store->type_argument_string_string()->untag()->IsCanonical());
9674
9675 serializer.ReserveHeader();
9676 serializer.WriteVersionAndFeatures(false);
9677 ProgramSerializationRoots roots(objects, object_store, kind_);
9678 objects = serializer.Serialize(&roots);
9679 if (units != nullptr) {
9680 (*units)[LoadingUnit::kRootId]->set_objects(objects);
9681 }
9682 serializer.FillHeader(serializer.kind());
9683 clustered_isolate_size_ = serializer.bytes_written();
9684 heap_isolate_size_ = serializer.bytes_heap_allocated();
9685
9686 if (Snapshot::IncludesCode(kind_)) {
9687 isolate_image_writer_->SetProfileWriter(profile_writer_);
9688 isolate_image_writer_->Write(serializer.stream(), false);
9689#if defined(DART_PRECOMPILER)
9690 isolate_image_writer_->DumpStatistics();
9691#endif
9692
9693 mapped_data_size_ += isolate_image_writer_->data_size();
9694 mapped_text_size_ += isolate_image_writer_->text_size();
9695 isolate_image_writer_->ResetOffsets();
9696 isolate_image_writer_->ClearProfileWriter();
9697 }
9698
9699 // The clustered part + the direct mapped data part.
9700 isolate_snapshot_size_ = serializer.bytes_written();
9701}
9702
9703void FullSnapshotWriter::WriteUnitSnapshot(
9704 GrowableArray<LoadingUnitSerializationData*>* units,
9705 LoadingUnitSerializationData* unit,
9706 uint32_t program_hash) {
9707 TIMELINE_DURATION(thread(), Isolate, "WriteUnitSnapshot");
9708
9709 Serializer serializer(thread(), kind_, isolate_snapshot_data_,
9710 isolate_image_writer_, /*vm=*/false, profile_writer_);
9711 serializer.set_loading_units(units);
9712 serializer.set_current_loading_unit_id(unit->id());
9713
9714 serializer.ReserveHeader();
9715 serializer.WriteVersionAndFeatures(false);
9716 serializer.Write(program_hash);
9717
9718 UnitSerializationRoots roots(unit);
9719 unit->set_objects(serializer.Serialize(&roots));
9720
9721 serializer.FillHeader(serializer.kind());
9722 clustered_isolate_size_ = serializer.bytes_written();
9723
9724 if (Snapshot::IncludesCode(kind_)) {
9725 isolate_image_writer_->SetProfileWriter(profile_writer_);
9726 isolate_image_writer_->Write(serializer.stream(), false);
9727#if defined(DART_PRECOMPILER)
9728 isolate_image_writer_->DumpStatistics();
9729#endif
9730
9731 mapped_data_size_ += isolate_image_writer_->data_size();
9732 mapped_text_size_ += isolate_image_writer_->text_size();
9733 isolate_image_writer_->ResetOffsets();
9734 isolate_image_writer_->ClearProfileWriter();
9735 }
9736
9737 // The clustered part + the direct mapped data part.
9738 isolate_snapshot_size_ = serializer.bytes_written();
9739}
9740
9741void FullSnapshotWriter::WriteFullSnapshot(
9742 GrowableArray<LoadingUnitSerializationData*>* data) {
9743 ZoneGrowableArray<Object*>* objects;
9744 if (vm_snapshot_data_ != nullptr) {
9745 objects = WriteVMSnapshot();
9746 } else {
9747 objects = nullptr;
9748 }
9749
9750 if (isolate_snapshot_data_ != nullptr) {
9751 WriteProgramSnapshot(objects, data);
9752 }
9753
9754 if (FLAG_print_snapshot_sizes) {
9755 OS::Print("VMIsolate(CodeSize): %" Pd "\n", clustered_vm_size_);
9756 OS::Print("Isolate(CodeSize): %" Pd "\n", clustered_isolate_size_);
9757 OS::Print("ReadOnlyData(CodeSize): %" Pd "\n", mapped_data_size_);
9758 OS::Print("Instructions(CodeSize): %" Pd "\n", mapped_text_size_);
9759 OS::Print("Total(CodeSize): %" Pd "\n",
9760 clustered_vm_size_ + clustered_isolate_size_ + mapped_data_size_ +
9761 mapped_text_size_);
9762 OS::Print("VMIsolate(HeapSize): %" Pd "\n", heap_vm_size_);
9763 OS::Print("Isolate(HeapSize): %" Pd "\n", heap_isolate_size_);
9764 OS::Print("Total(HeapSize): %" Pd "\n", heap_vm_size_ + heap_isolate_size_);
9765 }
9766
9767#if defined(DART_PRECOMPILER)
9768 if (FLAG_write_v8_snapshot_profile_to != nullptr) {
9769 profile_writer_->Write(FLAG_write_v8_snapshot_profile_to);
9770 }
9771#endif
9772}
9773#endif // !defined(DART_PRECOMPILED_RUNTIME)
9774
9775FullSnapshotReader::FullSnapshotReader(const Snapshot* snapshot,
9776 const uint8_t* instructions_buffer,
9777 Thread* thread)
9778 : kind_(snapshot->kind()),
9779 thread_(thread),
9780 buffer_(snapshot->Addr()),
9781 size_(snapshot->length()),
9782 data_image_(snapshot->DataImage()),
9783 instructions_image_(instructions_buffer) {}
9784
9785char* SnapshotHeaderReader::InitializeGlobalVMFlagsFromSnapshot(
9786 const Snapshot* snapshot) {
9787 SnapshotHeaderReader header_reader(snapshot);
9788
9789 char* error = header_reader.VerifyVersion();
9790 if (error != nullptr) {
9791 return error;
9792 }
9793
9794 const char* features = nullptr;
9795 intptr_t features_length = 0;
9796 error = header_reader.ReadFeatures(&features, &features_length);
9797 if (error != nullptr) {
9798 return error;
9799 }
9800
9801 ASSERT(features[features_length] == '\0');
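  // The features string is a '\0'-terminated, space-separated list of flag
  // names, each optionally prefixed with "no-"; e.g. a hypothetical
  // "foo no-bar" sets FLAG_foo = true and FLAG_bar = false in the loop below.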
9802 const char* cursor = features;
9803 while (*cursor != '\0') {
9804 while (*cursor == ' ') {
9805 cursor++;
9806 }
9807
9808 const char* end = strstr(cursor, " ");
9809 if (end == nullptr) {
9810 end = features + features_length;
9811 }
9812
9813#define SET_FLAG(name) \
9814 if (strncmp(cursor, #name, end - cursor) == 0) { \
9815 FLAG_##name = true; \
9816 cursor = end; \
9817 continue; \
9818 } \
9819 if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \
9820 FLAG_##name = false; \
9821 cursor = end; \
9822 continue; \
9823 }
9824
9825#define CHECK_FLAG(name, mode) \
9826 if (strncmp(cursor, #name, end - cursor) == 0) { \
9827 if (!FLAG_##name) { \
9828 return header_reader.BuildError("Flag " #name \
9829 " is true in snapshot, " \
9830 "but " #name \
9831 " is always false in " mode); \
9832 } \
9833 cursor = end; \
9834 continue; \
9835 } \
9836 if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \
9837 if (FLAG_##name) { \
9838 return header_reader.BuildError("Flag " #name \
9839 " is false in snapshot, " \
9840 "but " #name \
9841 " is always true in " mode); \
9842 } \
9843 cursor = end; \
9844 continue; \
9845 }
9846
9847#define SET_P(name, T, DV, C) SET_FLAG(name)
9848
9849#if defined(PRODUCT)
9850#define SET_OR_CHECK_R(name, PV, T, DV, C) CHECK_FLAG(name, "product mode")
9851#else
9852#define SET_OR_CHECK_R(name, PV, T, DV, C) SET_FLAG(name)
9853#endif
9854
9855#if defined(PRODUCT)
9856#define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) CHECK_FLAG(name, "product mode")
9857#elif defined(DART_PRECOMPILED_RUNTIME)
9858#define SET_OR_CHECK_C(name, PCV, PV, T, DV, C) \
9859 CHECK_FLAG(name, "the precompiled runtime")
9860#else
9861#define SET_OR_CHECK_C(name, PV, T, DV, C) SET_FLAG(name)
9862#endif
9863
9864#if !defined(DEBUG)
9865#define SET_OR_CHECK_D(name, T, DV, C) CHECK_FLAG(name, "non-debug mode")
9866#else
9867#define SET_OR_CHECK_D(name, T, DV, C) SET_FLAG(name)
9868#endif
9869
9871
9872#undef SET_OR_CHECK_D
9873#undef SET_OR_CHECK_C
9874#undef SET_OR_CHECK_R
9875#undef SET_P
9876#undef CHECK_FLAG
9877#undef SET_FLAG
9878
9879 cursor = end;
9880 }
9881
9882 return nullptr;
9883}
9884
9885ApiErrorPtr FullSnapshotReader::ReadVMSnapshot() {
9886 SnapshotHeaderReader header_reader(kind_, buffer_, size_);
9887
9888 intptr_t offset = 0;
9889 char* error = header_reader.VerifyVersionAndFeatures(
9890 /*isolate_group=*/nullptr, &offset);
9891 if (error != nullptr) {
9892 return ConvertToApiError(error);
9893 }
9894
9895 // Even though there are no concurrent threads we have to guard against, some
9896 // logic we do in deserialization triggers common code that asserts the
9897 // program lock is held.
9898 SafepointWriteRwLocker ml(thread_, isolate_group()->program_lock());
9899
9900 Deserializer deserializer(thread_, kind_, buffer_, size_, data_image_,
9901 instructions_image_, /*is_non_root_unit=*/false,
9902 offset);
9903 ApiErrorPtr api_error = deserializer.VerifyImageAlignment();
9904 if (api_error != ApiError::null()) {
9905 return api_error;
9906 }
9907
9908 if (Snapshot::IncludesCode(kind_)) {
9909 ASSERT(data_image_ != nullptr);
9910 thread_->isolate_group()->SetupImagePage(data_image_,
9911 /* is_executable */ false);
9912 ASSERT(instructions_image_ != nullptr);
9913 thread_->isolate_group()->SetupImagePage(instructions_image_,
9914 /* is_executable */ true);
9915 }
9916
9917 VMDeserializationRoots roots;
9918 deserializer.Deserialize(&roots);
9919
9920#if defined(DART_PRECOMPILED_RUNTIME)
9921 // Initialize entries in the VM portion of the BSS segment.
9922 ASSERT(Snapshot::IncludesCode(kind_));
9923 Image image(instructions_image_);
9924 if (auto const bss = image.bss()) {
9925 BSS::Initialize(thread_, bss, /*vm=*/true);
9926 }
9927#endif // defined(DART_PRECOMPILED_RUNTIME)
9928
9929 return ApiError::null();
9930}
9931
9932ApiErrorPtr FullSnapshotReader::ReadProgramSnapshot() {
9933 SnapshotHeaderReader header_reader(kind_, buffer_, size_);
9934 intptr_t offset = 0;
9935 char* error =
9936 header_reader.VerifyVersionAndFeatures(thread_->isolate_group(), &offset);
9937 if (error != nullptr) {
9938 return ConvertToApiError(error);
9939 }
9940
9941 // Even though there are no concurrent threads to guard against, some
9942 // logic we run during deserialization triggers common code that asserts
9943 // the program lock is held.
9944 SafepointWriteRwLocker ml(thread_, isolate_group()->program_lock());
9945
9946 Deserializer deserializer(thread_, kind_, buffer_, size_, data_image_,
9947 instructions_image_, /*is_non_root_unit=*/false,
9948 offset);
9949 ApiErrorPtr api_error = deserializer.VerifyImageAlignment();
9950 if (api_error != ApiError::null()) {
9951 return api_error;
9952 }
9953
9954 if (Snapshot::IncludesCode(kind_)) {
9955 ASSERT(data_image_ != nullptr);
9956 thread_->isolate_group()->SetupImagePage(data_image_,
9957 /* is_executable */ false);
9958 ASSERT(instructions_image_ != nullptr);
9959 thread_->isolate_group()->SetupImagePage(instructions_image_,
9960 /* is_executable */ true);
9961 }
9962
9963 ProgramDeserializationRoots roots(thread_->isolate_group()->object_store());
9964 deserializer.Deserialize(&roots);
9965
9966 if (Snapshot::IncludesCode(kind_)) {
9967 const auto& units = Array::Handle(
9968 thread_->isolate_group()->object_store()->loading_units());
9969 if (!units.IsNull()) {
9970 const auto& unit = LoadingUnit::Handle(
9971 LoadingUnit::RawCast(units.At(LoadingUnit::kRootId)));
9972 // Unlike other units, we don't explicitly load the root loading unit,
9973 // so we mark it as loaded here, setting the instructions image as well.
9974 unit.set_load_outstanding();
9975 unit.set_instructions_image(instructions_image_);
9976 unit.set_loaded(true);
9977 }
9978 }
9979
9980 InitializeBSS();
9981
9982 return ApiError::null();
9983}
9984
9985ApiErrorPtr FullSnapshotReader::ReadUnitSnapshot(const LoadingUnit& unit) {
9986 SnapshotHeaderReader header_reader(kind_, buffer_, size_);
9987 intptr_t offset = 0;
9988 char* error =
9989 header_reader.VerifyVersionAndFeatures(thread_->isolate_group(), &offset);
9990 if (error != nullptr) {
9991 return ConvertToApiError(error);
9992 }
9993
9994 Deserializer deserializer(
9995 thread_, kind_, buffer_, size_, data_image_, instructions_image_,
9996 /*is_non_root_unit=*/unit.id() != LoadingUnit::kRootId, offset);
9997 ApiErrorPtr api_error = deserializer.VerifyImageAlignment();
9998 if (api_error != ApiError::null()) {
9999 return api_error;
10000 }
10001 {
10002 Array& units =
10003 Array::Handle(isolate_group()->object_store()->loading_units());
10004 uint32_t main_program_hash = Smi::Value(Smi::RawCast(units.At(0)));
10005 uint32_t unit_program_hash = deserializer.Read<uint32_t>();
10006 if (main_program_hash != unit_program_hash) {
10007 return ApiError::New(String::Handle(
10008 String::New("Deferred loading unit is from a different "
10009 "program than the main loading unit")));
10010 }
10011 }
10012
10013 if (Snapshot::IncludesCode(kind_)) {
10014 ASSERT(data_image_ != nullptr);
10015 thread_->isolate_group()->SetupImagePage(data_image_,
10016 /* is_executable */ false);
10017 ASSERT(instructions_image_ != nullptr);
10018 thread_->isolate_group()->SetupImagePage(instructions_image_,
10019 /* is_executable */ true);
10020 unit.set_instructions_image(instructions_image_);
10021 }
10022
10023 UnitDeserializationRoots roots(unit);
10024 deserializer.Deserialize(&roots);
10025
10026 InitializeBSS();
10027
10028 return ApiError::null();
10029}
10030
10031void FullSnapshotReader::InitializeBSS() {
10032#if defined(DART_PRECOMPILED_RUNTIME)
10033 // Initialize entries in the isolate portion of the BSS segment.
10034 ASSERT(Snapshot::IncludesCode(kind_));
10035 Image image(instructions_image_);
10036 if (auto const bss = image.bss()) {
10037 BSS::Initialize(thread_, bss, /*vm=*/false);
10038 }
10039#endif // defined(DART_PRECOMPILED_RUNTIME)
10040}
10041
10042} // namespace dart

◆ AutoTraceObjectName

#define AutoTraceObjectName(obj, str)    Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, str)

Definition at line 652 of file app_snapshot.cc.
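
This macro opens a Serializer::WritingObjectScope for the remainder of the enclosing block, so every byte written while the scope is alive is attributed to the given object and name when a V8 snapshot profile is collected. A hedged sketch of a typical call site, using a hypothetical FooSerializationCluster and assuming a WritingObjectScope overload that accepts a C-string name:

// Hypothetical cluster; illustrates the scope's lifetime only.
void WriteFill(Serializer* s) {
  for (intptr_t i = 0; i < objects_.length(); i++) {
    FooPtr foo = objects_[i];
    // Attributes everything written in this iteration to `foo` under the
    // name "foo"; the scope closes at the end of the loop body.
    AutoTraceObjectName(foo, "foo");
    WriteFromTo(foo);
  }
}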

◆ CASE_FFI_CID [1/2]

#define CASE_FFI_CID(name)    case kFfi##name##Cid:

◆ CASE_FFI_CID [2/2]

#define CASE_FFI_CID(name)    case kFfi##name##Cid:
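
The macro expands to a single case label, so it is meant to be handed to an FFI class-id list macro. A hedged sketch of the usual pattern (the surrounding switch is illustrative, not lifted from this file):

// Expands one `case kFfi...Cid:` label per entry of the FFI type list.
static bool IsFfiTypeMarkerCid(intptr_t cid) {
  switch (cid) {
    CLASS_LIST_FFI_TYPE_MARKER(CASE_FFI_CID)
    return true;
    default:
      return false;
  }
}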

◆ CHECK_FLAG

#define CHECK_FLAG(name, mode)
Value:
if (strncmp(cursor, #name, end - cursor) == 0) { \
if (!FLAG_##name) { \
return header_reader.BuildError("Flag " #name \
" is true in snapshot, " \
"but " #name \
" is always false in " mode); \
} \
cursor = end; \
continue; \
} \
if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \
if (FLAG_##name) { \
return header_reader.BuildError("Flag " #name \
" is false in snapshot, " \
"but " #name \
" is always true in " mode); \
} \
cursor = end; \
continue; \
}
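
SET_FLAG and CHECK_FLAG drive the feature-string scan in SnapshotHeaderReader::VerifyVersionAndFeatures: each space-separated token either sets a VM_GLOBAL_FLAG_LIST flag or is checked against the value the flag is hard-wired to in this build. A standalone sketch of that tokenization, using a toy flag table rather than the VM's flag list, may make the control flow easier to follow:

// Toy re-implementation of the feature-string scan; flag names are made up.
#include <cstring>
#include <map>
#include <string>

static void ApplyFeatures(const char* features,
                          std::map<std::string, bool>* flags) {
  const char* cursor = features;
  while (*cursor != '\0') {
    while (*cursor == ' ') cursor++;            // skip token separators
    if (*cursor == '\0') break;
    const char* end = strchr(cursor, ' ');      // tokens are space-delimited
    if (end == nullptr) end = cursor + strlen(cursor);
    std::string token(cursor, end - cursor);
    const bool negated = token.rfind("no-", 0) == 0;  // "no-foo" clears foo
    const std::string name = negated ? token.substr(3) : token;
    auto it = flags->find(name);
    if (it != flags->end()) it->second = !negated;    // unknown tokens ignored
    cursor = end;
  }
}
// ApplyFeatures("use_osr no-asserts", &flags) leaves use_osr true and
// asserts false; the real code additionally rejects mismatches with
// header_reader.BuildError(...).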

◆ CID_CLUSTER

#define CID_CLUSTER(Type)    reinterpret_cast<Type##SerializationCluster*>(clusters_by_cid_[k##Type##Cid])

Definition at line 8517 of file app_snapshot.cc.
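
The macro looks up the serializer's per-class-id cluster table and casts the entry to the concrete cluster type, so it is only meaningful inside Serializer code where clusters_by_cid_ is in scope. A hedged sketch of a call site:

// Inside a Serializer member function (illustrative):
CodeSerializationCluster* code_cluster = CID_CLUSTER(Code);
if (code_cluster != nullptr) {
  // e.g. inspect or reorder the Code objects the cluster has collected.
}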

◆ DECLARE_FIELD

#define DECLARE_FIELD(name, Type, init)    Type& saved_##name##_ = Type::Handle();

Definition at line 7162 of file app_snapshot.cc.

◆ DECLARE_OBJECT_STORE_FIELD

#define DECLARE_OBJECT_STORE_FIELD(Type, Name)    #Name,

◆ ONLY_IN_AOT [1/3]

#define ONLY_IN_AOT(code)
Value:
if (snapshot_kind_ == Snapshot::kFullAOT) { \
code \
}

Definition at line 7161 of file app_snapshot.cc.

◆ ONLY_IN_AOT [2/3]

#define ONLY_IN_AOT(code)
Value:
if (snapshot_kind_ == Snapshot::kFullAOT) { \
code \
}

Definition at line 7161 of file app_snapshot.cc.

◆ ONLY_IN_AOT [3/3]

#define ONLY_IN_AOT(code)    code

Definition at line 7161 of file app_snapshot.cc.

◆ PushFromTo

#define PushFromTo(obj, ...)    s->PushFromTo(obj, ##__VA_ARGS__);

Definition at line 659 of file app_snapshot.cc.
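
PushFromTo forwards to Serializer::PushFromTo, which pushes the object's from()..to() pointer range onto the serializer's work list so the referenced objects are assigned refs before writing. A hedged sketch of a cluster's Trace method using it (hypothetical Foo type):

// Hypothetical cluster; Trace is called once per reachable Foo.
void Trace(Serializer* s, ObjectPtr object) {
  FooPtr foo = Foo::RawCast(object);
  objects_.Add(foo);      // remember it for WriteAlloc/WriteFill later
  PushFromTo(foo);        // enqueue every pointer field foo refers to
}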

◆ RESET_ROOT_LIST

#define RESET_ROOT_LIST(V)
Value:
V(symbol_table, WeakArray, HashTables::New<CanonicalStringSet>(4)) \
V(canonical_types, Array, HashTables::New<CanonicalTypeSet>(4)) \
V(canonical_function_types, Array, \
HashTables::New<CanonicalFunctionTypeSet>(4)) \
V(canonical_record_types, Array, HashTables::New<CanonicalRecordTypeSet>(4)) \
V(canonical_type_arguments, Array, \
HashTables::New<CanonicalTypeArgumentsSet>(4)) \
V(canonical_type_parameters, Array, \
HashTables::New<CanonicalTypeParameterSet>(4)) \
ONLY_IN_PRODUCT(ONLY_IN_AOT( \
V(closure_functions, GrowableObjectArray, GrowableObjectArray::null()))) \
ONLY_IN_AOT(V(closure_functions_table, Array, Array::null())) \
ONLY_IN_AOT(V(canonicalized_stack_map_entries, CompressedStackMaps, \
CompressedStackMaps::null()))

Definition at line 7039 of file app_snapshot.cc.

◆ RESTORE_ROOT

#define RESTORE_ROOT(name, Type, init)    object_store_->set_##name(saved_##name##_);

◆ SAVE_AND_RESET_ROOT

#define SAVE_AND_RESET_ROOT(name, Type, init)
Value:
do { \
saved_##name##_ = object_store->name(); \
object_store->set_##name(Type::Handle(init)); \
} while (0);
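
DECLARE_FIELD, SAVE_AND_RESET_ROOT and RESTORE_ROOT are applied to RESET_ROOT_LIST so that mutable canonical tables are stashed and replaced with freshly initialized ones while a program snapshot is written, then put back afterwards. A minimal runnable illustration of that X-macro save/reset/restore pattern, using toy types and a toy root list rather than the VM's object store:

// Toy illustration only; not the VM's object store or root list.
#include <cassert>
#include <string>

struct ToyStore {
  std::string symbol_table = "symbols";
  std::string canonical_types = "types";
};

#define TOY_ROOT_LIST(V) V(symbol_table) V(canonical_types)

void SerializeWithRootsReset(ToyStore* store) {
#define DECLARE(name) std::string saved_##name##_;
  TOY_ROOT_LIST(DECLARE)
#undef DECLARE

#define SAVE_AND_RESET(name)      \
  saved_##name##_ = store->name;  \
  store->name = std::string();
  TOY_ROOT_LIST(SAVE_AND_RESET)
#undef SAVE_AND_RESET

  assert(store->symbol_table.empty());  // roots are reset while writing

#define RESTORE(name) store->name = saved_##name##_;
  TOY_ROOT_LIST(RESTORE)
#undef RESTORE
  assert(store->symbol_table == "symbols");  // originals are back
}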

◆ SET_FLAG

#define SET_FLAG(name)
Value:
if (strncmp(cursor, #name, end - cursor) == 0) { \
FLAG_##name = true; \
cursor = end; \
continue; \
} \
if (strncmp(cursor, "no-" #name, end - cursor) == 0) { \
FLAG_##name = false; \
cursor = end; \
continue; \
}

◆ SET_OR_CHECK_C

#define SET_OR_CHECK_C(name, PCV, PV, T, DV, C)    SET_FLAG(name)

◆ SET_OR_CHECK_D

#define SET_OR_CHECK_D(name, T, DV, C)    CHECK_FLAG(name, "non-debug mode")

◆ SET_OR_CHECK_R

#define SET_OR_CHECK_R(name, PV, T, DV, C)    SET_FLAG(name)

◆ SET_P

#define SET_P(name, T, DV, C)    SET_FLAG(name)

◆ WriteCompressedField

#define WriteCompressedField(obj, name)    s->WritePropertyRef(obj->untag()->name(), #name "_")

Definition at line 662 of file app_snapshot.cc.

◆ WriteField

#define WriteField(obj, field)    s->WritePropertyRef(obj->untag()->field, #field)

Definition at line 661 of file app_snapshot.cc.

◆ WriteFieldValue

#define WriteFieldValue(field, value)    s->WritePropertyRef(value, #field);

Definition at line 655 of file app_snapshot.cc.

◆ WriteFromTo

#define WriteFromTo(obj, ...)    s->WriteFromTo(obj, ##__VA_ARGS__);

Definition at line 657 of file app_snapshot.cc.
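
WriteField, WriteCompressedField and WriteFieldValue wrap Serializer::WritePropertyRef so that, when a V8 snapshot profile is being written, each emitted reference is labeled with the C++ field name it came from; WriteFromTo emits the whole from()..to() range at once. A hedged sketch of a WriteFill that mixes them (hypothetical Foo layout with a target_ pointer and a name accessor; mirrors the Trace sketch above):

// Hypothetical cluster; illustrates which macro labels which reference.
void WriteFill(Serializer* s) {
  for (intptr_t i = 0; i < objects_.length(); i++) {
    FooPtr foo = objects_[i];
    AutoTraceObject(foo);
    WriteField(foo, target_);         // labeled "target_" in the profile
    WriteCompressedField(foo, name);  // labeled "name_" in the profile
  }
}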