159 {
160 public:
161 IsolateGroupSource(const char* script_uri,
163 const uint8_t* snapshot_data,
164 const uint8_t* snapshot_instructions,
165 const uint8_t* kernel_buffer,
166 intptr_t kernel_buffer_size,
168 : script_uri(script_uri == nullptr ? nullptr : Utils::StrDup(script_uri)),
170 snapshot_data(snapshot_data),
171 snapshot_instructions(snapshot_instructions),
172 kernel_buffer(kernel_buffer),
173 kernel_buffer_size(kernel_buffer_size),
175 script_kernel_buffer(nullptr),
176 script_kernel_size(-1),
177 loaded_blobs_(nullptr),
178 num_blob_loads_(0) {}
179 ~IsolateGroupSource() {
180 free(script_uri);
182 }
183
184 void add_loaded_blob(Zone* zone_,
185 const ExternalTypedData& external_typed_data);
186
187
188
189 char* script_uri;
191 const uint8_t* snapshot_data;
192 const uint8_t* snapshot_instructions;
193 const uint8_t* kernel_buffer;
194 const intptr_t kernel_buffer_size;
196
197
198 const uint8_t* script_kernel_buffer;
199 intptr_t script_kernel_size;
200
201
202 ArrayPtr loaded_blobs_;
203 intptr_t num_blob_loads_;
204};
205
206
207class IdleTimeHandler : public ValueObject {
208 public:
209 IdleTimeHandler() {}
210
211
212
213 void InitializeWithHeap(Heap* heap);
214
215
216 bool ShouldCheckForIdle();
217
218
219 void UpdateStartIdleTime();
220
221
222 bool ShouldNotifyIdle(int64_t* expiry);
223
224
225
226 void NotifyIdle(int64_t deadline);
227
228
229 void NotifyIdleUsingDefaultDeadline();
230
231 private:
232 friend class DisableIdleTimerScope;
233
234 Mutex mutex_;
235 Heap* heap_ = nullptr;
236 intptr_t disabled_counter_ = 0;
237 int64_t idle_start_time_ = 0;
238};
239
240
241class DisableIdleTimerScope : public ValueObject {
242 public:
243 explicit DisableIdleTimerScope(IdleTimeHandler* handler);
244 ~DisableIdleTimerScope();
245
246 private:
247 IdleTimeHandler* handler_;
248};
249
250class MutatorThreadPool : public ThreadPool {
251 public:
252 MutatorThreadPool(IsolateGroup* isolate_group, intptr_t max_pool_size)
253 : ThreadPool(max_pool_size), isolate_group_(isolate_group) {}
254 virtual ~MutatorThreadPool() {}
255
256 protected:
257 virtual void OnEnterIdleLocked(MonitorLocker* ml);
258
259 private:
260 void NotifyIdle();
261
262 IsolateGroup* isolate_group_ = nullptr;
263};
264
265
266class IsolateGroup : public IntrusiveDListEntry<IsolateGroup> {
267 public:
268 IsolateGroup(std::shared_ptr<IsolateGroupSource>
source,
269 void* embedder_data,
270 ObjectStore* object_store,
272 bool is_vm_isolate);
273 IsolateGroup(std::shared_ptr<IsolateGroupSource>
source,
274 void* embedder_data,
276 bool is_vm_isolate);
277 ~IsolateGroup();
278
279 void RehashConstants(Become* become);
280#if defined(DEBUG)
281 void ValidateClassTable();
282#endif
283
284 IsolateGroupSource*
source()
const {
return source_.get(); }
285 std::shared_ptr<IsolateGroupSource> shareable_source() const {
286 return source_;
287 }
288 bool is_vm_isolate() const { return is_vm_isolate_; }
289 void* embedder_data() const { return embedder_data_; }
290
291 bool initial_spawn_successful() { return initial_spawn_successful_; }
292 void set_initial_spawn_successful() { initial_spawn_successful_ = true; }
293
294 Heap* heap() const { return heap_.get(); }
295
296 BackgroundCompiler* background_compiler() const {
297#if defined(DART_PRECOMPILED_RUNTIME)
298 return nullptr;
299#else
300 return background_compiler_.get();
301#endif
302 }
303#if !defined(DART_PRECOMPILED_RUNTIME)
304 intptr_t optimization_counter_threshold() const {
305 if (IsSystemIsolateGroup(this)) {
307 }
308 return FLAG_optimization_counter_threshold;
309 }
310#endif
311
312#if !defined(PRODUCT)
313 GroupDebugger* debugger() const { return debugger_; }
314#endif
315
316 IdleTimeHandler* idle_time_handler() { return &idle_time_handler_; }
317
318
319 void RegisterIsolate(Isolate* isolate);
320 void UnregisterIsolate(Isolate* isolate);
321
322
323 bool UnregisterIsolateDecrementCount();
324
325 bool ContainsOnlyOneIsolate();
326
327 void RunWithLockedGroup(std::function<void()> fun);
328
329 void ScheduleInterrupts(uword interrupt_bits);
330
331 ThreadRegistry* thread_registry() const { return thread_registry_.get(); }
332 SafepointHandler* safepoint_handler() { return safepoint_handler_.get(); }
333
334 void CreateHeap(bool is_vm_isolate, bool is_service_or_kernel_isolate);
335 void SetupImagePage(const uint8_t* snapshot_buffer, bool is_executable);
336 void Shutdown();
337
338#define ISOLATE_METRIC_ACCESSOR(type, variable, name, unit) \
339 type* Get##variable##Metric() { return &metric_##variable##_; }
341#undef ISOLATE_METRIC_ACCESSOR
342
343#if !defined(PRODUCT)
344 void UpdateLastAllocationProfileAccumulatorResetTimestamp() {
345 last_allocationprofile_accumulator_reset_timestamp_ =
346 OS::GetCurrentTimeMillis();
347 }
348
349 int64_t last_allocationprofile_accumulator_reset_timestamp() const {
350 return last_allocationprofile_accumulator_reset_timestamp_;
351 }
352
353 void UpdateLastAllocationProfileGCTimestamp() {
354 last_allocationprofile_gc_timestamp_ = OS::GetCurrentTimeMillis();
355 }
356
357 int64_t last_allocationprofile_gc_timestamp() const {
358 return last_allocationprofile_gc_timestamp_;
359 }
360#endif
361
362 DispatchTable* dispatch_table() const { return dispatch_table_.get(); }
363 void set_dispatch_table(DispatchTable*
table) {
364 dispatch_table_.reset(
table);
365 }
366 const uint8_t* dispatch_table_snapshot() const {
367 return dispatch_table_snapshot_;
368 }
369 void set_dispatch_table_snapshot(const uint8_t* snapshot) {
370 dispatch_table_snapshot_ = snapshot;
371 }
372 intptr_t dispatch_table_snapshot_size() const {
373 return dispatch_table_snapshot_size_;
374 }
375 void set_dispatch_table_snapshot_size(intptr_t size) {
376 dispatch_table_snapshot_size_ =
size;
377 }
378
379 ClassTableAllocator* class_table_allocator() {
380 return &class_table_allocator_;
381 }
382
383 static intptr_t class_table_offset() {
385 return OFFSET_OF(IsolateGroup, class_table_);
386 }
387
388 ClassPtr* cached_class_table_table() {
389 return cached_class_table_table_.load();
390 }
391 void set_cached_class_table_table(ClassPtr* cached_class_table_table) {
392 cached_class_table_table_.store(cached_class_table_table);
393 }
394 static intptr_t cached_class_table_table_offset() {
396 kWordSize);
397 return OFFSET_OF(IsolateGroup, cached_class_table_table_);
398 }
399
400 void set_object_store(ObjectStore* object_store);
401 static intptr_t object_store_offset() {
403 return OFFSET_OF(IsolateGroup, object_store_);
404 }
405
406 void set_obfuscation_map(
const char** map) { obfuscation_map_ =
map; }
407 const char** obfuscation_map() const { return obfuscation_map_; }
408
409 Random* random() { return &random_; }
410
411 bool is_system_isolate_group() const { return is_system_isolate_group_; }
412
413
417
418#if defined(DART_PRECOMPILER)
419#define FLAG_FOR_PRECOMPILER(from_field, from_flag) (from_field)
420#else
421#define FLAG_FOR_PRECOMPILER(from_field, from_flag) (from_flag)
422#endif
423
424#if !defined(PRODUCT)
425#define FLAG_FOR_NONPRODUCT(from_field, from_flag) (from_field)
426#else
427#define FLAG_FOR_NONPRODUCT(from_field, from_flag) (from_flag)
428#endif
429
430#define FLAG_FOR_PRODUCT(from_field, from_flag) (from_field)
431
432#define DECLARE_GETTER(when, name, bitname, isolate_flag_name, flag_name) \
433 bool name() const { \
434 return FLAG_FOR_##when(bitname##Bit::decode(isolate_group_flags_), \
435 flag_name); \
436 }
438#undef FLAG_FOR_NONPRODUCT
439#undef FLAG_FOR_PRECOMPILER
440#undef FLAG_FOR_PRODUCT
441#undef DECLARE_GETTER
442
443 bool should_load_vmservice() const {
444 return ShouldLoadVmServiceBit::decode(isolate_group_flags_);
445 }
446 void set_should_load_vmservice(bool value) {
447 isolate_group_flags_ =
448 ShouldLoadVmServiceBit::update(value, isolate_group_flags_);
449 }
450
451 void set_asserts(bool value) {
452 isolate_group_flags_ =
453 EnableAssertsBit::update(value, isolate_group_flags_);
454 }
455
456 void set_branch_coverage(bool value) {
457 isolate_group_flags_ =
458 BranchCoverageBit::update(value, isolate_group_flags_);
459 }
460
461#if !defined(PRODUCT)
462#if !defined(DART_PRECOMPILED_RUNTIME)
463 bool HasAttemptedReload() const {
464 return HasAttemptedReloadBit::decode(isolate_group_flags_);
465 }
466 void SetHasAttemptedReload(bool value) {
467 isolate_group_flags_ =
468 HasAttemptedReloadBit::update(value, isolate_group_flags_);
469 }
470 void MaybeIncreaseReloadEveryNStackOverflowChecks();
471 intptr_t reload_every_n_stack_overflow_checks() const {
472 return reload_every_n_stack_overflow_checks_;
473 }
474#else
475 bool HasAttemptedReload() const { return false; }
476#endif
477#endif
478
479#if defined(PRODUCT)
480 void set_use_osr(
bool use_osr) {
ASSERT(!use_osr); }
481#else
482 void set_use_osr(bool use_osr) {
483 isolate_group_flags_ = UseOsrBit::update(use_osr, isolate_group_flags_);
484 }
485#endif
486
487
488
489
490 ClassTable* class_table() const { return class_table_; }
491
492
493
494
495
496
497 ClassTable* heap_walk_class_table() const { return heap_walk_class_table_; }
498
499 void CloneClassTableForReload();
500 void RestoreOriginalClassTable();
501 void DropOriginalClassTable();
502
503 StoreBuffer* store_buffer() const { return store_buffer_.get(); }
504 ObjectStore* object_store() const { return object_store_.get(); }
505 Mutex* symbols_mutex() { return &symbols_mutex_; }
506 Mutex* type_canonicalization_mutex() { return &type_canonicalization_mutex_; }
507 Mutex* type_arguments_canonicalization_mutex() {
508 return &type_arguments_canonicalization_mutex_;
509 }
510 Mutex* subtype_test_cache_mutex() { return &subtype_test_cache_mutex_; }
511 Mutex* megamorphic_table_mutex() { return &megamorphic_table_mutex_; }
512 Mutex* type_feedback_mutex() { return &type_feedback_mutex_; }
513 Mutex* patchable_call_mutex() { return &patchable_call_mutex_; }
514 Mutex* constant_canonicalization_mutex() {
515 return &constant_canonicalization_mutex_;
516 }
517 Mutex* kernel_data_lib_cache_mutex() { return &kernel_data_lib_cache_mutex_; }
518 Mutex* kernel_data_class_cache_mutex() {
519 return &kernel_data_class_cache_mutex_;
520 }
521 Mutex* kernel_constants_mutex() { return &kernel_constants_mutex_; }
522
523#if defined(DART_PRECOMPILED_RUNTIME)
524 Mutex* unlinked_call_map_mutex() { return &unlinked_call_map_mutex_; }
525#endif
526
527#if !defined(DART_PRECOMPILED_RUNTIME)
528 Mutex* initializer_functions_mutex() { return &initializer_functions_mutex_; }
529#endif
530
531 SafepointRwLock* program_lock() { return program_lock_.get(); }
532
533 static inline IsolateGroup* Current() {
534 Thread* thread = Thread::Current();
535 return thread == nullptr ? nullptr : thread->isolate_group();
536 }
537
538 void IncreaseMutatorCount(Isolate* mutator, bool is_nested_reenter);
539 void DecreaseMutatorCount(Isolate* mutator, bool is_nested_exit);
540 intptr_t MutatorCount() const {
541 MonitorLocker ml(active_mutators_monitor_.get());
542 return active_mutators_;
543 }
544
545 bool HasTagHandler() const { return library_tag_handler() != nullptr; }
547 const Object& arg1,
548 const Object& arg2);
550 return library_tag_handler_;
551 }
553 library_tag_handler_ = handler;
554 }
556 return deferred_load_handler_;
557 }
559 deferred_load_handler_ = handler;
560 }
561
562
563 void ReleaseStoreBuffers();
564 void FlushMarkingStacks();
565 void EnableIncrementalBarrier(MarkingStack* marking_stack,
566 MarkingStack* deferred_marking_stack);
567 void DisableIncrementalBarrier();
568
569 MarkingStack* marking_stack() const { return marking_stack_; }
570 MarkingStack* deferred_marking_stack() const {
571 return deferred_marking_stack_;
572 }
573
574
575
576
577
578
579
580
581
582 void ForEachIsolate(std::function<void(Isolate* isolate)> function,
583 bool at_safepoint = false);
584 Isolate* FirstIsolate() const;
585 Isolate* FirstIsolateLocked() const;
586
587
588
589
590
591
592
593
594
595
596
597 void RunWithStoppedMutatorsCallable(
598 Callable* single_current_mutator,
599 Callable* otherwise,
600 bool use_force_growth_in_otherwise = false);
601
602 template <typename T, typename S>
603 void RunWithStoppedMutators(
T single_current_mutator,
604 S otherwise,
605 bool use_force_growth_in_otherwise = false) {
606 LambdaCallable<T> single_callable(single_current_mutator);
607 LambdaCallable<S> otherwise_callable(otherwise);
608 RunWithStoppedMutatorsCallable(&single_callable, &otherwise_callable,
609 use_force_growth_in_otherwise);
610 }
611
612 template <typename T>
613 void RunWithStoppedMutators(
T function,
bool use_force_growth =
false) {
614 LambdaCallable<T> callable(function);
615 RunWithStoppedMutatorsCallable(&callable, &callable, use_force_growth);
616 }
617
618#ifndef PRODUCT
619 void PrintJSON(JSONStream* stream, bool ref = true);
620 void PrintToJSONObject(JSONObject* jsobj, bool ref);
621
622
623
624 void PrintMemoryUsageJSON(JSONStream* stream);
625#endif
626
627#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
628
629
630 bool ReloadSources(JSONStream* js,
631 bool force_reload,
632 const char* root_script_url = nullptr,
633 const char* packages_url = nullptr,
634 bool dont_delete_reload_context = false);
635
636
637 bool ReloadKernel(JSONStream* js,
638 bool force_reload,
639 const uint8_t* kernel_buffer = nullptr,
640 intptr_t kernel_buffer_size = 0,
641 bool dont_delete_reload_context = false);
642
643 void set_last_reload_timestamp(int64_t value) {
644 last_reload_timestamp_ =
value;
645 }
646 int64_t last_reload_timestamp() const { return last_reload_timestamp_; }
647
648 IsolateGroupReloadContext* reload_context() {
649 return group_reload_context_.get();
650 }
651 ProgramReloadContext* program_reload_context() {
652 return program_reload_context_;
653 }
654
655 void DeleteReloadContext();
656 bool CanReload();
657#else
658 bool CanReload() { return false; }
659#endif
660
661 bool IsReloading() const {
662#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
663 return group_reload_context_ != nullptr;
664#else
665 return false;
666#endif
667 }
668
669 Become* become() const { return become_; }
670 void set_become(Become* become) { become_ = become; }
671
672 uint64_t
id()
const {
return id_; }
673
676
677 static void ForEach(std::function<
void(IsolateGroup*)>
action);
678 static void RunWithIsolateGroup(uint64_t id,
679 std::function<
void(IsolateGroup*)>
action,
680 std::function<void()> not_found);
681
682
683 static void RegisterIsolateGroup(IsolateGroup* isolate_group);
684 static void UnregisterIsolateGroup(IsolateGroup* isolate_group);
685
686 static bool HasApplicationIsolateGroups();
687 static bool HasOnlyVMIsolateGroup();
688 static bool IsSystemIsolateGroup(const IsolateGroup* group);
689
690 int64_t UptimeMicros() const;
691
692 ApiState* api_state() const { return api_state_.get(); }
693
694
695
696 void VisitObjectPointers(ObjectPointerVisitor* visitor,
697 ValidationPolicy validate_frames);
698 void VisitSharedPointers(ObjectPointerVisitor* visitor);
699 void VisitStackPointers(ObjectPointerVisitor* visitor,
700 ValidationPolicy validate_frames);
701 void VisitObjectIdRingPointers(ObjectPointerVisitor* visitor);
702 void VisitWeakPersistentHandles(HandleVisitor* visitor);
703
704
705 bool all_classes_finalized() const {
706 return AllClassesFinalizedBit::decode(isolate_group_flags_);
707 }
708 void set_all_classes_finalized(bool value) {
709 isolate_group_flags_ =
710 AllClassesFinalizedBit::update(value, isolate_group_flags_);
711 }
712
713 bool remapping_cids() const {
714 return RemappingCidsBit::decode(isolate_group_flags_);
715 }
716 void set_remapping_cids(bool value) {
717 isolate_group_flags_ =
718 RemappingCidsBit::update(value, isolate_group_flags_);
719 }
720
721 void RememberLiveTemporaries();
722 void DeferredMarkLiveTemporaries();
723
724 ArrayPtr saved_unlinked_calls() const { return saved_unlinked_calls_; }
725 void set_saved_unlinked_calls(const Array& saved_unlinked_calls);
726
727 FieldTable* initial_field_table() const { return initial_field_table_.get(); }
728 std::shared_ptr<FieldTable> initial_field_table_shareable() {
729 return initial_field_table_;
730 }
731 void set_initial_field_table(std::shared_ptr<FieldTable> field_table) {
732 initial_field_table_ = field_table;
733 }
734
735 MutatorThreadPool* thread_pool() { return thread_pool_.get(); }
736
737 void RegisterClass(const Class& cls);
738 void RegisterStaticField(const Field& field, const Object& initial_value);
739 void FreeStaticField(const Field& field);
740
741 Isolate* EnterTemporaryIsolate();
742 static void ExitTemporaryIsolate();
743
744 private:
745 friend class Dart;
746 friend class Heap;
747 friend class StackFrame;
748
749 friend class Isolate;
750
751#define ISOLATE_GROUP_FLAG_BITS(V) \
752 V(AllClassesFinalized) \
753 V(EnableAsserts) \
754 V(HasAttemptedReload) \
755 V(RemappingCids) \
756 V(ShouldLoadVmService) \
757 V(Obfuscate) \
758 V(UseFieldGuards) \
759 V(UseOsr) \
760 V(SnapshotIsDontNeedSafe) \
761 V(BranchCoverage)
762
763
764 enum FlagBits {
765#define DECLARE_BIT(Name) k##Name##Bit,
767#undef DECLARE_BIT
768 };
769
770#define DECLARE_BITFIELD(Name) \
771 class Name##Bit : public BitField<uint32_t, bool, k##Name##Bit, 1> {};
773#undef DECLARE_BITFIELD
774
775 void set_heap(std::unique_ptr<Heap> value);
776
777
778 ClassTable* class_table_;
779 AcqRelAtomic<ClassPtr*> cached_class_table_table_;
780 std::unique_ptr<ObjectStore> object_store_;
781
782
783 ClassTableAllocator class_table_allocator_;
784 ClassTable* heap_walk_class_table_;
785
786 const char** obfuscation_map_ = nullptr;
787
788 bool is_vm_isolate_ = false;
789 void* embedder_data_ = nullptr;
790
791 IdleTimeHandler idle_time_handler_;
792 std::unique_ptr<MutatorThreadPool> thread_pool_;
793 std::unique_ptr<SafepointRwLock> isolates_lock_;
794 IntrusiveDList<Isolate> isolates_;
795 intptr_t isolate_count_ = 0;
796 bool initial_spawn_successful_ = false;
799 int64_t start_time_micros_;
800 bool is_system_isolate_group_;
801 Random random_;
802
803#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
804 int64_t last_reload_timestamp_;
805 std::shared_ptr<IsolateGroupReloadContext> group_reload_context_;
806
807 RelaxedAtomic<intptr_t> reload_every_n_stack_overflow_checks_;
808 ProgramReloadContext* program_reload_context_ = nullptr;
809#endif
810 Become* become_ = nullptr;
811
812#define ISOLATE_METRIC_VARIABLE(type, variable, name, unit) \
813 type metric_##variable##_;
815#undef ISOLATE_METRIC_VARIABLE
816
817#if !defined(PRODUCT)
818
819 int64_t last_allocationprofile_accumulator_reset_timestamp_ = 0;
820 int64_t last_allocationprofile_gc_timestamp_ = 0;
821
822#endif
823
824 MarkingStack* marking_stack_ = nullptr;
825 MarkingStack* deferred_marking_stack_ = nullptr;
826 std::shared_ptr<IsolateGroupSource> source_;
827 std::unique_ptr<ApiState> api_state_;
828 std::unique_ptr<ThreadRegistry> thread_registry_;
829 std::unique_ptr<SafepointHandler> safepoint_handler_;
830
831 static RwLock* isolate_groups_rwlock_;
832 static IntrusiveDList<IsolateGroup>* isolate_groups_;
833 static Random* isolate_group_random_;
834
835 uint64_t id_ = 0;
836
837 std::unique_ptr<StoreBuffer> store_buffer_;
838 std::unique_ptr<Heap> heap_;
839 std::unique_ptr<DispatchTable> dispatch_table_;
840 const uint8_t* dispatch_table_snapshot_ = nullptr;
841 intptr_t dispatch_table_snapshot_size_ = 0;
842 ArrayPtr saved_unlinked_calls_;
843 std::shared_ptr<FieldTable> initial_field_table_;
844 uint32_t isolate_group_flags_ = 0;
845
847
848 Mutex symbols_mutex_;
849 Mutex type_canonicalization_mutex_;
850 Mutex type_arguments_canonicalization_mutex_;
851 Mutex subtype_test_cache_mutex_;
852 Mutex megamorphic_table_mutex_;
853 Mutex type_feedback_mutex_;
854 Mutex patchable_call_mutex_;
855 Mutex constant_canonicalization_mutex_;
856 Mutex kernel_data_lib_cache_mutex_;
857 Mutex kernel_data_class_cache_mutex_;
858 Mutex kernel_constants_mutex_;
859
860#if defined(DART_PRECOMPILED_RUNTIME)
861 Mutex unlinked_call_map_mutex_;
862#endif
863
864#if !defined(DART_PRECOMPILED_RUNTIME)
865 Mutex initializer_functions_mutex_;
866#endif
867
868
869 Mutex field_list_mutex_;
870
871 GrowableObjectArrayPtr boxed_field_list_;
872
873
874
875
876 std::unique_ptr<SafepointRwLock> program_lock_;
877
878
879 std::unique_ptr<Monitor> active_mutators_monitor_;
880 intptr_t active_mutators_ = 0;
881 intptr_t waiting_mutators_ = 0;
882 intptr_t max_active_mutators_ = 0;
883
885};
886
887
888
889class Bequest {
890 public:
891 Bequest(PersistentHandle* handle,
Dart_Port beneficiary)
892 :
handle_(handle), beneficiary_(beneficiary) {}
893 ~Bequest();
894
895 PersistentHandle* handle() {
return handle_; }
896 PersistentHandle* TakeHandle() {
899 return handle;
900 }
901 Dart_Port beneficiary() {
return beneficiary_; }
902
903 private:
906};
907
908class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
909 public:
910
911
912 enum LibMsgId {
913 kPauseMsg = 1,
914 kResumeMsg = 2,
915 kPingMsg = 3,
916 kKillMsg = 4,
917 kAddExitMsg = 5,
918 kDelExitMsg = 6,
919 kAddErrorMsg = 7,
920 kDelErrorMsg = 8,
921 kErrorFatalMsg = 9,
922
923
924 kInterruptMsg = 10,
925 kInternalKillMsg = 11,
926 kDrainServiceExtensionsMsg = 12,
927 kCheckForReload = 13,
928 };
929
930 enum LibMsgPriority {
931 kImmediateAction = 0,
932 kBeforeNextEventAction = 1,
933 kAsEventAction = 2
934 };
935
936 ~Isolate();
937
938 static inline Isolate* Current() {
939 Thread* thread = Thread::Current();
940 return thread == nullptr ? nullptr : thread->isolate();
941 }
942
943 bool IsScheduled() { return scheduled_mutator_thread() != nullptr; }
944 Thread* scheduled_mutator_thread() const { return scheduled_mutator_thread_; }
945
946 ThreadRegistry* thread_registry()
const {
return group()->thread_registry(); }
947
948 SafepointHandler* safepoint_handler() const {
949 return group()->safepoint_handler();
950 }
951
952 FieldTable* field_table() const { return field_table_; }
953 void set_field_table(Thread*
T, FieldTable* field_table) {
954 delete field_table_;
955 field_table_ = field_table;
956 T->field_table_values_ = field_table->table();
957 }
958
959 IsolateObjectStore* isolate_object_store() const {
960 return isolate_object_store_.get();
961 }
962
964 return message_notify_callback_.load(std::memory_order_relaxed);
965 }
966
968 message_notify_callback_.store(value, std::memory_order_release);
969 }
970
972 on_shutdown_callback_ =
value;
973 }
975 return on_shutdown_callback_;
976 }
978 on_cleanup_callback_ =
value;
979 }
981 return on_cleanup_callback_;
982 }
983
984 void bequeath(std::unique_ptr<Bequest> bequest) {
985 bequest_ = std::move(bequest);
986 }
987
988 IsolateGroupSource*
source()
const {
return isolate_group_->source(); }
989 IsolateGroup*
group()
const {
return isolate_group_; }
990
991 bool HasPendingMessages();
992
993 Thread* mutator_thread() const;
994
995 const char*
name()
const {
return name_; }
996 void set_name(
const char*
name);
997
998 int64_t UptimeMicros() const;
999
1000 Dart_Port main_port()
const {
return main_port_; }
1004 }
1007 void set_pause_capability(uint64_t value) { pause_capability_ =
value; }
1008 uint64_t pause_capability() const { return pause_capability_; }
1009 void set_terminate_capability(uint64_t value) {
1010 terminate_capability_ =
value;
1011 }
1012 uint64_t terminate_capability() const { return terminate_capability_; }
1013
1014 void SendInternalLibMessage(LibMsgId msg_id, uint64_t capability);
1015 static bool SendInternalLibMessage(
Dart_Port main_port,
1016 LibMsgId msg_id,
1017 uint64_t capability);
1018
1019 void set_init_callback_data(
void* value) { init_callback_data_ =
value; }
1020 void* init_callback_data() const { return init_callback_data_; }
1021
1022 void set_finalizers(const GrowableObjectArray& value);
1023 static intptr_t finalizers_offset() {
1025 }
1026
1028 return environment_callback_;
1029 }
1031 environment_callback_ =
value;
1032 }
1033
1034 bool HasDeferredLoadHandler() const {
1035 return group()->deferred_load_handler() !=
nullptr;
1036 }
1037 ObjectPtr CallDeferredLoadHandler(intptr_t id);
1038
1039 void ScheduleInterrupts(uword interrupt_bits);
1040
1041 const char* MakeRunnable();
1042 void MakeRunnableLocked();
1044
1046
1047 bool is_runnable() const { return LoadIsolateFlagsBit<IsRunnableBit>(); }
1048 void set_is_runnable(bool value) {
1049 UpdateIsolateFlagsBit<IsRunnableBit>(value);
1050#if !defined(PRODUCT)
1051 if (is_runnable()) {
1052 set_last_resume_timestamp();
1053 }
1054#endif
1055 }
1056
1057 Mutex* mutex() { return &mutex_; }
1058
1059#if !defined(PRODUCT)
1060 Debugger* debugger() const { return debugger_; }
1061
1062
1063 SampleBlock* current_sample_block() const { return current_sample_block_; }
1064 void set_current_sample_block(SampleBlock* block) {
1065 current_sample_block_ = block;
1066 }
1067 void ProcessFreeSampleBlocks(Thread* thread);
1068
1069
1070 SampleBlock* current_allocation_sample_block() const {
1071 return current_allocation_sample_block_;
1072 }
1073 void set_current_allocation_sample_block(SampleBlock* block) {
1074 current_allocation_sample_block_ = block;
1075 }
1076
1077 bool TakeHasCompletedBlocks() {
1078 return has_completed_blocks_.exchange(0) != 0;
1079 }
1080 bool TrySetHasCompletedBlocks() {
1081 return has_completed_blocks_.exchange(1) == 0;
1082 }
1083
1084 void set_single_step(
bool value) { single_step_ =
value; }
1085 bool single_step() const { return single_step_; }
1086 static intptr_t single_step_offset() {
1087 return OFFSET_OF(Isolate, single_step_);
1088 }
1089
1090 void set_has_resumption_breakpoints(bool value) {
1091 has_resumption_breakpoints_ =
value;
1092 }
1093 bool has_resumption_breakpoints() const {
1094 return has_resumption_breakpoints_;
1095 }
1096 static intptr_t has_resumption_breakpoints_offset() {
1097 return OFFSET_OF(Isolate, has_resumption_breakpoints_);
1098 }
1099
1100 bool ResumeRequest() const { return LoadIsolateFlagsBit<ResumeRequestBit>(); }
1101
1102 void SetResumeRequest() {
1103 UpdateIsolateFlagsBit<ResumeRequestBit>(true);
1104 set_last_resume_timestamp();
1105 }
1106
1107 void set_last_resume_timestamp() {
1108 last_resume_timestamp_ = OS::GetCurrentTimeMillis();
1109 }
1110
1111 int64_t last_resume_timestamp() const { return last_resume_timestamp_; }
1112
1113
1114
1115 bool GetAndClearResumeRequest() {
1116 return UpdateIsolateFlagsBit<ResumeRequestBit>(false);
1117 }
1118#endif
1119
1120
1121
1122 bool VerifyPauseCapability(const Object& capability) const;
1123 bool VerifyTerminateCapability(const Object& capability) const;
1124
1125
1126
1127 bool AddResumeCapability(const Capability& capability);
1128 bool RemoveResumeCapability(const Capability& capability);
1129
1130 void AddExitListener(const SendPort& listener, const Instance& response);
1131 void RemoveExitListener(const SendPort& listener);
1132 void NotifyExitListeners();
1133
1134 void AddErrorListener(const SendPort& listener);
1135 void RemoveErrorListener(const SendPort& listener);
1136 bool NotifyErrorListeners(const char* msg, const char* stacktrace);
1137
1138 bool ErrorsFatal() const { return LoadIsolateFlagsBit<ErrorsFatalBit>(); }
1139 void SetErrorsFatal(bool value) {
1140 UpdateIsolateFlagsBit<ErrorsFatalBit>(value);
1141 }
1142
1143 Random* random() { return &random_; }
1144
1145 Simulator* simulator() const { return simulator_; }
1146 void set_simulator(Simulator* value) { simulator_ =
value; }
1147
1148 void IncrementSpawnCount();
1149 void DecrementSpawnCount();
1150 void WaitForOutstandingSpawns();
1151
1153 create_group_callback_ = cb;
1154 }
1156 return create_group_callback_;
1157 }
1158
1160 initialize_callback_ = cb;
1161 }
1163 return initialize_callback_;
1164 }
1165
1167 shutdown_callback_ = cb;
1168 }
1170 return shutdown_callback_;
1171 }
1172
1174 cleanup_callback_ = cb;
1175 }
1177 return cleanup_callback_;
1178 }
1179
1181 cleanup_group_callback_ = cb;
1182 }
1184 return cleanup_group_callback_;
1185 }
1186 static void SetRegisterKernelBlobCallback(
1188 register_kernel_blob_callback_ = cb;
1189 }
1191 return register_kernel_blob_callback_;
1192 }
1193 static void SetUnregisterKernelBlobCallback(
1195 unregister_kernel_blob_callback_ = cb;
1196 }
1198 return unregister_kernel_blob_callback_;
1199 }
1200
1201#if !defined(PRODUCT)
1202 ObjectIdRing* object_id_ring() const { return object_id_ring_; }
1203 ObjectIdRing* EnsureObjectIdRing();
1204#endif
1205
1206 bool IsDeoptimizing() const { return deopt_context_ != nullptr; }
1207 DeoptContext* deopt_context() const { return deopt_context_; }
1208 void set_deopt_context(DeoptContext* value) {
1209 ASSERT(value ==
nullptr || deopt_context_ ==
nullptr);
1210 deopt_context_ =
value;
1211 }
1212
1213 FfiCallbackMetadata::Trampoline CreateAsyncFfiCallback(
1214 Zone* zone,
1215 const Function& send_function,
1217 FfiCallbackMetadata::Trampoline CreateIsolateLocalFfiCallback(
1218 Zone* zone,
1219 const Function& trampoline,
1221 bool keep_isolate_alive);
1222 void DeleteFfiCallback(FfiCallbackMetadata::Trampoline
callback);
1223 void UpdateNativeCallableKeepIsolateAliveCounter(intptr_t delta);
1224 bool HasOpenNativeCallables();
1225
1226 bool HasLivePorts();
1227 ReceivePortPtr CreateReceivePort(const String& debug_name);
1228 void SetReceivePortKeepAliveState(const ReceivePort& receive_port,
1229 bool keep_isolate_alive);
1230 void CloseReceivePort(const ReceivePort& receive_port);
1231
1232
1233 FfiCallbackMetadata::Metadata* ffi_callback_list_head() {
1234 return ffi_callback_list_head_;
1235 }
1236
1237 intptr_t BlockClassFinalization() {
1238 ASSERT(defer_finalization_count_ >= 0);
1239 return defer_finalization_count_++;
1240 }
1241
1242 intptr_t UnblockClassFinalization() {
1243 ASSERT(defer_finalization_count_ > 0);
1244 return defer_finalization_count_--;
1245 }
1246
1247 bool AllowClassFinalization() {
1248 ASSERT(defer_finalization_count_ >= 0);
1249 return defer_finalization_count_ == 0;
1250 }
1251
1252#ifndef PRODUCT
1253 void PrintJSON(JSONStream* stream, bool ref = true);
1254
1255
1256
1257 void PrintMemoryUsageJSON(JSONStream* stream);
1258
1259 void PrintPauseEventJSON(JSONStream* stream);
1260#endif
1261
1262#if !defined(PRODUCT)
1263 VMTagCounters* vm_tag_counters() { return &vm_tag_counters_; }
1264#endif
1265
1266 bool IsPaused() const;
1267
1268#if !defined(PRODUCT)
1269 bool should_pause_post_service_request() const {
1270 return LoadIsolateFlagsBit<ShouldPausePostServiceRequestBit>();
1271 }
1272 void set_should_pause_post_service_request(bool value) {
1273 UpdateIsolateFlagsBit<ShouldPausePostServiceRequestBit>(value);
1274 }
1275#endif
1276
1277 ErrorPtr PausePostRequest();
1278
1279 uword user_tag()
const {
return user_tag_; }
1280 static intptr_t user_tag_offset() {
return OFFSET_OF(Isolate, user_tag_); }
1281 static intptr_t current_tag_offset() {
1282 return OFFSET_OF(Isolate, current_tag_);
1283 }
1284 static intptr_t default_tag_offset() {
1285 return OFFSET_OF(Isolate, default_tag_);
1286 }
1287
1288#if !defined(PRODUCT)
1289#define ISOLATE_METRIC_ACCESSOR(type, variable, name, unit) \
1290 type* Get##variable##Metric() { return &metric_##variable##_; }
1292#undef ISOLATE_METRIC_ACCESSOR
1293#endif
1294
// Number of isolates currently in the global isolate list.
1295 static intptr_t IsolateListLength();
1296
// User-tag bookkeeping: the table of all tags plus the current/default tag.
1297 GrowableObjectArrayPtr tag_table() const { return tag_table_; }
1298 void set_tag_table(const GrowableObjectArray& value);
1299
1300 UserTagPtr current_tag() const { return current_tag_; }
1301 void set_current_tag(const UserTag& tag);
1302
1303 UserTagPtr default_tag() const { return default_tag_; }
1304 void set_default_tag(const UserTag& tag);
1305
1306
// Accessors for the isolate's sticky error (sticky_error_).
1307 void SetStickyError(ErrorPtr sticky_error);
1308
1309 ErrorPtr sticky_error() const { return sticky_error_; }
1311
// --- Service-extension plumbing (non-PRODUCT): queued calls and the
// name -> closure handler registry. ---
1312#ifndef PRODUCT
1313 ErrorPtr InvokePendingServiceExtensionCalls();
1314 void AppendServiceExtensionCall(const Instance& closure,
1315 const String& method_name,
1316 const Array& parameter_keys,
1317 const Array& parameter_values,
1318 const Instance& reply_port,
1319 const Instance& id);
1320 void RegisterServiceExtensionHandler(
const String&
name,
1321 const Instance& closure);
1322 InstancePtr LookupServiceExtensionHandler(
const String&
name);
1323#endif
1324
// Applies |visitor| to every isolate in the global list.
1325 static void VisitIsolates(IsolateVisitor* visitor);
1326
1327#if !defined(PRODUCT)
1328
1329 void PauseEventHandler();
1330#endif
1331
// Flag-bit accessors backed by the isolate_flags_ atomic bit vector.
1332 bool is_vm_isolate() const { return LoadIsolateFlagsBit<IsVMIsolateBit>(); }
1333 void set_is_vm_isolate(bool value) {
1334 UpdateIsolateFlagsBit<IsVMIsolateBit>(value);
1335 }
1336
1337 bool is_service_registered() const {
1338 return LoadIsolateFlagsBit<IsServiceRegisteredBit>();
1339 }
1340 void set_is_service_registered(bool value) {
1341 UpdateIsolateFlagsBit<IsServiceRegisteredBit>(value);
1342 }
1343
1344
1348
1349#if defined(DART_PRECOMPILER)
1350#define FLAG_FOR_PRECOMPILER(from_field, from_flag) (from_field)
1351#else
1352#define FLAG_FOR_PRECOMPILER(from_field, from_flag) (from_flag)
1353#endif
1354
1355#if !defined(PRODUCT)
1356#define FLAG_FOR_NONPRODUCT(from_field, from_flag) (from_field)
1357#else
1358#define FLAG_FOR_NONPRODUCT(from_field, from_flag) (from_flag)
1359#endif
1360
1361#define FLAG_FOR_PRODUCT(from_field, from_flag) (from_field)
1362
1363#define DECLARE_GETTER(when, name, bitname, isolate_flag_name, flag_name) \
1364 bool name() const { \
1365 return FLAG_FOR_##when(LoadIsolateFlagsBit<bitname##Bit>(), flag_name); \
1366 }
1368#undef FLAG_FOR_NONPRODUCT
1369#undef FLAG_FOR_PRECOMPILER
1370#undef FLAG_FOR_PRODUCT
1371#undef DECLARE_GETTER
1372
// Debugger bookkeeping: whether single-stepping was ever attempted.
1373 bool has_attempted_stepping() const {
1374 return LoadIsolateFlagsBit<HasAttemptedSteppingBit>();
1375 }
1376 void set_has_attempted_stepping(bool value) {
1377 UpdateIsolateFlagsBit<HasAttemptedSteppingBit>(value);
1378 }
1379
1380
// Requests termination of isolates, identified by a library message id.
1381 static void KillAllIsolates(LibMsgId msg_id);
1382
1383 static void KillAllSystemIsolates(LibMsgId msg_id);
1384 static void KillIfExists(Isolate* isolate, LibMsgId msg_id);
1385
1386
1387
// NOTE(review): presumably returns nullptr when no isolate matches |port| —
// confirm against the definition.
1388 static Isolate* LookupIsolateByPort(
Dart_Port port);
1389
1390
1391
// Returns a heap-allocated copy of the matching isolate's name.
1392 static std::unique_ptr<char[]> LookupIsolateNameByPort(
Dart_Port port);
1393
// Global gate for creating new isolates (see creation_enabled_ below).
1394 static void DisableIsolateCreation();
1395 static void EnableIsolateCreation();
1396 static bool IsolateCreationEnabled();
1397 static bool IsSystemIsolate(const Isolate* isolate) {
1398 return IsolateGroup::IsSystemIsolateGroup(isolate->group());
1399 }
1400 static bool IsVMInternalIsolate(const Isolate* isolate);
1401
1403
1405 return &catch_entry_moves_cache_;
1406 }
1407
1408
1409
// Forwarding tables (new/old space); ownership stays with the unique_ptr
// members — callers receive raw observer pointers.
1410 WeakTable* forward_table_new() { return forward_table_new_.get(); }
1411 void set_forward_table_new(WeakTable*
table);
1412
1413 WeakTable* forward_table_old() { return forward_table_old_.get(); }
1414 void set_forward_table_old(WeakTable*
table);
1415
1416 void RememberLiveTemporaries();
1417 void DeferredMarkLiveTemporaries();
1418
// Hands the cached regexp backtracking stack to the caller (the member is
// left null by the move); CacheRegexpBacktrackStack puts one back.
1419 std::unique_ptr<VirtualMemory> TakeRegexpBacktrackStack() {
1420 return std::move(regexp_backtracking_stack_cache_);
1421 }
1422
1423 void CacheRegexpBacktrackStack(std::unique_ptr<VirtualMemory> stack) {
1424 regexp_backtracking_stack_cache_ = std::move(stack);
1425 }
1426
// Tracks which deferred library prefixes have been loaded
// (backed by loaded_prefixes_set_storage_).
1427 void init_loaded_prefixes_set_storage();
1428 bool IsPrefixLoaded(const LibraryPrefix& prefix) const;
1429 void SetPrefixIsLoaded(const LibraryPrefix& prefix);
1430
1431 MallocGrowableArray<ObjectPtr>* pointers_to_verify_at_exit() {
1432 return &pointers_to_verify_at_exit_;
1433 }
1434
1435 private:
1436 friend class Dart;
1437 friend class IsolateKillerVisitor;
// NOTE(review): the declaration ending on the next two lines lost its first
// line during extraction (original line 1438) — its signature start is not
// visible in this excerpt.
1439 const char* n,
1440 char** e);
1441
1443
// Isolate lifecycle: creation, shutdown, and low-level cleanup.
1444 static void InitVM();
1445 static Isolate* InitIsolate(const char* name_prefix,
1446 IsolateGroup* isolate_group,
1448 bool is_vm_isolate = false);
1449
1450
1451 void KillLocked(LibMsgId msg_id);
1452
1453 void Shutdown();
1454 void RunAndCleanupFinalizersOnShutdown();
1455 void LowLevelShutdown();
1456
1457
1458
1459
1460 static void LowLevelCleanup(Isolate* isolate);
1461
// Builds the isolate's name_ from |name_prefix|.
1462 void BuildName(const char* name_prefix);
1463
1464 void ProfileIdle();
1465
1466
// GC support: visit object pointers held by the isolate / its stack.
1467
1468 void VisitObjectPointers(ObjectPointerVisitor* visitor,
1469 ValidationPolicy validate_frames);
1470 void VisitStackPointers(ObjectPointerVisitor* visitor,
1471 ValidationPolicy validate_frames);
1472
1473 void set_user_tag(uword tag) { user_tag_ = tag; }
1474
1475 void set_is_system_isolate(bool is_system_isolate) {
1476 is_system_isolate_ = is_system_isolate;
1477 }
1478
// Private service-extension state accessors (non-PRODUCT).
1479#if !defined(PRODUCT)
1480 GrowableObjectArrayPtr GetAndClearPendingServiceExtensionCalls();
1481 GrowableObjectArrayPtr pending_service_extension_calls() const {
1482 return pending_service_extension_calls_;
1483 }
1484 void set_pending_service_extension_calls(const GrowableObjectArray& value);
1485 GrowableObjectArrayPtr registered_service_extension_handlers() const {
1486 return registered_service_extension_handlers_;
1487 }
1488 void set_registered_service_extension_handlers(
1489 const GrowableObjectArray& value);
1490#endif
1491
1492
// Only valid on the mutator thread (asserted below).
1493
1494 Zone* current_zone() const {
1495 ASSERT(Thread::Current() == mutator_thread());
1496 return mutator_thread()->zone();
1497 }
1498
1499
1500
1501
1502
1503
// Tag state read by generated code via the *_offset() accessors above.
1504
1505 uword user_tag_ = 0;
1506 UserTagPtr current_tag_;
1507 UserTagPtr default_tag_;
1508 FieldTable* field_table_ = nullptr;
1509
1510
1511 GrowableObjectArrayPtr finalizers_;
1512 bool single_step_ = false;
1513 bool has_resumption_breakpoints_ = false;
1514 bool is_system_isolate_ = false;
1515
// Owning group and idle-time bookkeeping.
1516
1517 IsolateGroup* const isolate_group_;
1518 IdleTimeHandler idle_time_handler_;
1519 std::unique_ptr<IsolateObjectStore> isolate_object_store_;
1520
// Boolean isolate flags; each V(Name) becomes one bit in isolate_flags_
// (enum + BitField declarations are generated from this list below).
1521#define ISOLATE_FLAG_BITS(V) \
1522 V(ErrorsFatal) \
1523 V(IsRunnable) \
1524 V(IsVMIsolate) \
1525 V(IsServiceIsolate) \
1526 V(IsKernelIsolate) \
1527 V(ResumeRequest) \
1528 V(HasAttemptedStepping) \
1529 V(ShouldPausePostServiceRequest) \
1530 V(IsSystemIsolate) \
1531 V(IsServiceRegistered)
1532
1533
1534 enum FlagBits {
1535#define DECLARE_BIT(Name) k##Name##Bit,
1537#undef DECLARE_BIT
1538 };
1539
1540#define DECLARE_BITFIELD(Name) \
1541 class Name##Bit : public BitField<uint32_t, bool, k##Name##Bit, 1> {};
1543#undef DECLARE_BITFIELD
1544
1545 template <class T>
1546 bool UpdateIsolateFlagsBit(bool value) {
1547 return T::decode(value ? isolate_flags_.fetch_or(T::encode(true),
1548 std::memory_order_relaxed)
1549 : isolate_flags_.fetch_and(
1551 }
// Reads the flag bit selected by T from isolate_flags_ (relaxed ordering).
1552 template <class T>
1553 bool LoadIsolateFlagsBit() const {
1554 return T::decode(isolate_flags_.load(std::memory_order_relaxed));
1555 }
// Packed boolean flags; bit layout is defined by ISOLATE_FLAG_BITS above.
1556 std::atomic<uint32_t> isolate_flags_;
1557
1558
// --- Non-PRODUCT state: debugger, profiler sample blocks, service
// extension queues. ---
1559
1560#if !defined(PRODUCT)
1561 Debugger* debugger_ = nullptr;
1562
1563
// Current profiler sample blocks (CPU and allocation); RelaxedAtomic
// suggests cross-thread access — confirm the writer against the profiler.
1564 RelaxedAtomic<SampleBlock*> current_sample_block_ = nullptr;
1565
1566
1567 RelaxedAtomic<SampleBlock*> current_allocation_sample_block_ = nullptr;
1568
1569 RelaxedAtomic<uword> has_completed_blocks_ = {0};
1570
1571 int64_t last_resume_timestamp_;
1572
1573 VMTagCounters vm_tag_counters_;
1574
// Layout of one pending service-extension call entry inside
// pending_service_extension_calls_.
1575
1576 enum {kPendingHandlerIndex = 0, kPendingMethodNameIndex, kPendingKeysIndex,
1577 kPendingValuesIndex, kPendingReplyPortIndex, kPendingIdIndex,
1578 kPendingEntrySize};
1579 GrowableObjectArrayPtr pending_service_extension_calls_;
1580
// Layout of one registered handler entry inside
// registered_service_extension_handlers_.
1581
1582 enum {kRegisteredNameIndex = 0, kRegisteredHandlerIndex,
1583 kRegisteredEntrySize};
1584 GrowableObjectArrayPtr registered_service_extension_handlers_;
1585
1586
1587 Monitor* pause_loop_monitor_ = nullptr;
1588
1589#define ISOLATE_METRIC_VARIABLE(type, variable, name, unit) \
1590 type metric_##variable##_;
1592#undef ISOLATE_METRIC_VARIABLE
1593
1594
1595 ObjectIdRing* object_id_ring_ = nullptr;
1596#endif
1597
// --- Core isolate state (all build modes). ---
1598
1599 int64_t start_time_micros_;
1600 std::atomic<Dart_MessageNotifyCallback> message_notify_callback_;
1601 char* name_ = nullptr;
1605
// Capabilities guarding pause/terminate requests from outside the isolate —
// presumably checked against ports/messages; confirm at use sites.
1607 Mutex origin_id_mutex_;
1608 uint64_t pause_capability_ = 0;
1609 uint64_t terminate_capability_ = 0;
1610 void* init_callback_data_ = nullptr;
1612 Random random_;
1613 Simulator* simulator_ = nullptr;
1614 Mutex mutex_;
1615 IsolateMessageHandler* message_handler_ = nullptr;
1616 intptr_t defer_finalization_count_ = 0;
1617 DeoptContext* deopt_context_ = nullptr;
// Intrusive list of FFI callback metadata plus a keep-alive count.
1618 FfiCallbackMetadata::Metadata* ffi_callback_list_head_ = nullptr;
1619 intptr_t ffi_callback_keep_alive_counter_ = 0;
1620
1621 GrowableObjectArrayPtr tag_table_;
1622
1623 ErrorPtr sticky_error_;
1624
1625 std::unique_ptr<Bequest> bequest_;
1627
1628
// Guards spawn_count_.
1629
1630 Monitor spawn_count_monitor_;
1631 intptr_t spawn_count_ = 0;
1632
1635
1636 DispatchTable* dispatch_table_ = nullptr;
1637
// Object forwarding tables exposed via forward_table_new()/old() above —
// used with the Become/GCCompactor friends, presumably; confirm.
1638
1639 std::unique_ptr<WeakTable> forward_table_new_;
1640 std::unique_ptr<WeakTable> forward_table_old_;
1641
1642
1643
1644
1645 bool accepts_messages_ = false;
1646
1647 std::unique_ptr<VirtualMemory> regexp_backtracking_stack_cache_ = nullptr;
1648
1649 intptr_t wake_pause_event_handler_count_;
1650
// Open receive-port counts (total and keep-alive).
1651
1652 intptr_t open_ports_ = 0;
1653
1654
1655 intptr_t open_ports_keepalive_ = 0;
1656
1664
1665#if !defined(PRODUCT)
1666 static void WakePauseEventHandler(
Dart_Isolate isolate);
1667#endif
1668
// Isolate-ready bookkeeping, all guarded by isolate_creation_monitor_
// (AcceptsMessagesLocked asserts the monitor is held).
1669
1670 static bool TryMarkIsolateReady(Isolate* isolate);
1671 static void UnMarkIsolateReady(Isolate* isolate);
1672 static void MaybeNotifyVMShutdown();
1673 bool AcceptsMessagesLocked() {
1674 ASSERT(isolate_creation_monitor_->IsOwnedByCurrentThread());
1675 return accepts_messages_;
1676 }
1677
1678
1679 static Monitor* isolate_creation_monitor_;
1680 static bool creation_enabled_;
1681
1682 ArrayPtr loaded_prefixes_set_storage_;
1683
1684 MallocGrowableArray<ObjectPtr> pointers_to_verify_at_exit_;
1685
1686#define REUSABLE_FRIEND_DECLARATION(name) \
1687 friend class Reusable##name##HandleScope;
1689#undef REUSABLE_FRIEND_DECLARATION
1690
// VM-internal collaborators that need direct access to isolate state.
1691 friend class Become;
1692 friend class GCCompactor;
1693 friend class GCMarker;
1694 friend class SafepointHandler;
1695 friend class ObjectGraph;
1696 friend class HeapSnapshotWriter;
1697 friend class Scavenger;
1698 friend class HeapIterationScope;
1699 friend class ServiceIsolate;
1700 friend class Thread;
1701 friend class Timeline;
1702 friend class IsolateGroup;
1703
1705};
1706
1707
1708
1709class StartIsolateScope {
1710 public:
1711 explicit StartIsolateScope(Isolate* new_isolate)
1712 : new_isolate_(new_isolate), saved_isolate_(Isolate::Current()) {
1713 if (new_isolate_ == nullptr) {
1714 ASSERT(Isolate::Current() ==
nullptr);
1715
1716 return;
1717 }
1718 if (saved_isolate_ != new_isolate_) {
1719 ASSERT(Isolate::Current() ==
nullptr);
1720 Thread::EnterIsolate(new_isolate_);
1721
1722 ASSERT(Thread::Current()->top_exit_frame_info() == 0);
1723 }
1724 }
1725
1726 ~StartIsolateScope() {
1727 if (new_isolate_ == nullptr) {
1728 ASSERT(Isolate::Current() ==
nullptr);
1729
1730 return;
1731 }
1732 if (saved_isolate_ != new_isolate_) {
1733 ASSERT(saved_isolate_ ==
nullptr);
1734
1735 ASSERT(Thread::Current()->top_exit_frame_info() == 0);
1736 Thread::ExitIsolate();
1737 }
1738 }
1739
1740 private:
1741 Isolate* new_isolate_;
1742 Isolate* saved_isolate_;
1743
1745};
1746
1747class EnterIsolateGroupScope {
1748 public:
1749 explicit EnterIsolateGroupScope(IsolateGroup* isolate_group)
1750 : isolate_group_(isolate_group) {
1751 ASSERT(IsolateGroup::Current() ==
nullptr);
1752 const bool result = Thread::EnterIsolateGroupAsHelper(
1753 isolate_group_, Thread::kUnknownTask, false);
1755 }
1756
1757 ~EnterIsolateGroupScope() {
1758 Thread::ExitIsolateGroupAsHelper(false);
1759 }
1760
1761 private:
1762 IsolateGroup* isolate_group_;
1763
1765};
1766
1767
1768
1769
1770
1771class NoActiveIsolateScope : public StackResource {
1772 public:
1773 NoActiveIsolateScope() : NoActiveIsolateScope(Thread::Current()) {}
1774 explicit NoActiveIsolateScope(Thread* thread)
1775 : StackResource(thread), thread_(thread) {
1776 outer_ = thread_->no_active_isolate_scope_;
1777 saved_isolate_ = thread_->isolate_;
1778
1779 thread_->no_active_isolate_scope_ = this;
1780 thread_->isolate_ = nullptr;
1781 }
1782 ~NoActiveIsolateScope() {
1783 ASSERT(thread_->isolate_ ==
nullptr);
1784 thread_->isolate_ = saved_isolate_;
1785 thread_->no_active_isolate_scope_ = outer_;
1786 }
1787
1788 private:
1789 friend class ActiveIsolateScope;
1790
1791 Thread* thread_;
1792 Isolate* saved_isolate_;
1793 NoActiveIsolateScope* outer_;
1794};
1795
1796class ActiveIsolateScope : public StackResource {
1797 public:
1798 explicit ActiveIsolateScope(Thread* thread)
1799 : ActiveIsolateScope(thread,
1800 thread->no_active_isolate_scope_->saved_isolate_) {}
1801
1802 ActiveIsolateScope(Thread* thread, Isolate* isolate)
1803 : StackResource(thread), thread_(thread) {
1805 thread_->isolate_ = isolate;
1806 }
1807 ~ActiveIsolateScope() {
1808 ASSERT(thread_->isolate_ !=
nullptr);
1809 thread_->isolate_ = nullptr;
1810 }
1811
1812 private:
1813 Thread* thread_;
1814};
1815
1816}
1817
1818#endif
static void encode(uint8_t output[16], const uint32_t input[4])
#define RELEASE_ASSERT(cond)
#define COMPILE_ASSERT(expr)
void(* Dart_UnregisterKernelBlobCallback)(const char *kernel_blob_uri)
Dart_Handle(* Dart_EnvironmentCallback)(Dart_Handle name)
Dart_Isolate(* Dart_IsolateGroupCreateCallback)(const char *script_uri, const char *main, const char *package_root, const char *package_config, Dart_IsolateFlags *flags, void *isolate_data, char **error)
void(* Dart_IsolateCleanupCallback)(void *isolate_group_data, void *isolate_data)
void(* Dart_MessageNotifyCallback)(Dart_Isolate destination_isolate)
const char *(* Dart_RegisterKernelBlobCallback)(const uint8_t *kernel_buffer, intptr_t kernel_buffer_size)
#define DART_WARN_UNUSED_RESULT
struct _Dart_Isolate * Dart_Isolate
bool(* Dart_InitializeIsolateCallback)(void **child_isolate_data, char **error)
void(* Dart_IsolateGroupCleanupCallback)(void *isolate_group_data)
void(* Dart_IsolateShutdownCallback)(void *isolate_group_data, void *isolate_data)
Dart_Handle(* Dart_LibraryTagHandler)(Dart_LibraryTag tag, Dart_Handle library_or_package_map_url, Dart_Handle url)
Dart_Handle(* Dart_DeferredLoadHandler)(intptr_t loading_unit_id)
FlutterSemanticsFlag flags
FlKeyEvent uint64_t FlKeyResponderAsyncCallback callback
constexpr intptr_t kDefaultOptimizationCounterThreshold
#define ISOLATE_METRIC_VARIABLE(type, variable, name, unit)
#define DECLARE_BIT(Name)
#define BOOL_ISOLATE_FLAG_LIST(V)
#define ISOLATE_METRIC_ACCESSOR(type, variable, name, unit)
#define BOOL_ISOLATE_GROUP_FLAG_LIST(V)
#define ISOLATE_GROUP_FLAG_BITS(V)
#define DECLARE_GETTER(when, name, bitname, isolate_flag_name, flag_name)
#define DECLARE_BITFIELD(Name)
#define REUSABLE_FRIEND_DECLARATION(name)
#define ISOLATE_FLAG_BITS(V)
#define ISOLATE_METRIC_LIST(V)
#define ISOLATE_GROUP_METRIC_LIST(V)
FixedCache< intptr_t, ExceptionHandlerInfo, 16 > HandlerInfoCache
FixedCache< intptr_t, CatchEntryMovesRefPtr, 16 > CatchEntryMovesCache
Isolate * CreateWithinExistingIsolateGroup(IsolateGroup *group, const char *name, char **error)
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified vm service port
std::function< void(const T &message, const MessageReply< T > &reply)> MessageHandler
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
SI auto map(std::index_sequence< I... >, Fn &&fn, const Args &... args) -> skvx::Vec< sizeof...(I), decltype(fn(args[0]...))>
#define REUSABLE_HANDLE_LIST(V)
#define NOT_IN_PRECOMPILED(code)
#define NOT_IN_PRODUCT(code)
#define OFFSET_OF(type, field)