270 {
273};
274
276
277
279
281};
282
283
284
285
286
287
289
291
293
295
297
298
300};
301
302
// State used to interoperate with ThreadSanitizer: when unwinding past
// generated frames, the runtime longjmp()s via this saved setjmp state so
// TSAN's shadow stack stays consistent.
// NOTE(review): field order matters — the *_offset() accessors below are
// OFFSET_OF-based and presumably consumed by generated code; do not reorder.
struct TsanUtils {
  // Pointer to the C library setjmp, captured only under TSAN builds;
  // nullptr otherwise so the slot still exists with a stable offset.
#if defined(USING_THREAD_SANITIZER)
  void* setjmp_function = reinterpret_cast<void*>(&setjmp);
#else
  void* setjmp_function = nullptr;
#endif
  // Buffer most recently passed to setjmp; target of the unwinding longjmp.
  jmp_buf* setjmp_buffer = nullptr;
  // Program counter / stack pointer / frame pointer captured at the point
  // an exception started propagating.
  uword exception_pc = 0;
  uword exception_sp = 0;
  uword exception_fp = 0;

  // Field offsets for use by code that addresses this struct indirectly.
  static intptr_t setjmp_function_offset() {
    return OFFSET_OF(TsanUtils, setjmp_function);
  }
  static intptr_t setjmp_buffer_offset() {
    return OFFSET_OF(TsanUtils, setjmp_buffer);
  }
  static intptr_t exception_pc_offset() {
    return OFFSET_OF(TsanUtils, exception_pc);
  }
  static intptr_t exception_sp_offset() {
    return OFFSET_OF(TsanUtils, exception_sp);
  }
  static intptr_t exception_fp_offset() {
    return OFFSET_OF(TsanUtils, exception_fp);
  }
};
335
336
337
338
339
340
// A Thread is the VM-internal representation of an OS thread executing VM
// code. It carries per-thread state: the scheduled isolate/isolate-group,
// safepoint state, stack limits, cached entry points, and many OFFSET_OF
// accessors consumed by generated code.
// NOTE(review): this chunk was recovered from a mangled extraction. Several
// statements and macro-list invocations are visibly missing (flagged inline
// with NOTE(review)); they are preserved as-is rather than guessed at.
class Thread : public ThreadState {
 public:
  // The kind of task a helper thread is performing; values are bit flags.
  enum TaskKind {
    kUnknownTask = 0x0,
    kMutatorTask = 0x1,
    kCompilerTask = 0x2,
    kMarkerTask = 0x4,
    kSweeperTask = 0x8,
    kCompactorTask = 0x10,
    kScavengerTask = 0x20,
    kSampleBlockTask = 0x40,
  };

  static const char* TaskKindToCString(TaskKind kind);

  ~Thread();

  // The currently executing thread, from OS thread-local storage.
  static Thread* Current() {
    return static_cast<Thread*>(OSThread::CurrentVMThread());
  }

  // Invariant checks over the thread's state.
  bool HasActiveState();
  void AssertNonMutatorInvariants();
  void AssertNonDartMutatorInvariants();
  void AssertEmptyStackInvariants();
  void AssertEmptyThreadInvariants();

  // Makes the current thread enter/exit 'isolate' as the mutator.
  static void EnterIsolate(Isolate* isolate);
  static void ExitIsolate(bool isolate_shutdown = false);

  // Makes the current thread enter/exit 'isolate_group' as a helper task.
  static bool EnterIsolateGroupAsHelper(IsolateGroup* isolate_group,
                                        TaskKind kind,
                                        bool bypass_safepoint);
  static void ExitIsolateGroupAsHelper(bool bypass_safepoint);

  static bool EnterIsolateGroupAsNonMutator(IsolateGroup* isolate_group,
                                            TaskKind kind);
  static void ExitIsolateGroupAsNonMutator();

  // Store-buffer / marking-stack handover with the heap.
  void ReleaseStoreBuffer();
  void AcquireMarkingStack();
  void ReleaseMarkingStack();

  void SetStackLimit(uword value);
  void ClearStackLimit();

  // Address of the stack_limit_ slot (polled by generated code).
  uword stack_limit_address() const {
    return reinterpret_cast<uword>(&stack_limit_);
  }
  static intptr_t stack_limit_offset() {
    // NOTE(review): return statement truncated in extraction
    // (presumably OFFSET_OF(Thread, stack_limit_)).
  }

  static intptr_t saved_stack_limit_offset() {
    return OFFSET_OF(Thread, saved_stack_limit_);
  }
  uword saved_stack_limit() const { return saved_stack_limit_; }

#if defined(USING_SAFE_STACK)
  uword saved_safestack_limit() const { return saved_safestack_limit_; }
  void set_saved_safestack_limit(uword limit) {
    saved_safestack_limit_ = limit;
  }
#endif
  uword saved_shadow_call_stack() const { return saved_shadow_call_stack_; }
  static uword saved_shadow_call_stack_offset() {
    return OFFSET_OF(Thread, saved_shadow_call_stack_);
  }

  // Stack-overflow flag bits (see stack_overflow_flags_).
  enum {
    kOsrRequest = 0x1,
  };

  uword write_barrier_mask() const { return write_barrier_mask_; }
  // Base of the compressed-pointer heap; 0 when compression is disabled.
  uword heap_base() const {
#if defined(DART_COMPRESSED_POINTERS)
    return heap_base_;
#else
    return 0;
#endif
  }

  static intptr_t write_barrier_mask_offset() {
    return OFFSET_OF(Thread, write_barrier_mask_);
  }
#if defined(DART_COMPRESSED_POINTERS)
  static intptr_t heap_base_offset() { return OFFSET_OF(Thread, heap_base_); }
#endif
  static intptr_t stack_overflow_flags_offset() {
    return OFFSET_OF(Thread, stack_overflow_flags_);
  }

  int32_t IncrementAndGetStackOverflowCount() {
    return ++stack_overflow_count_;
  }

  uint32_t IncrementAndGetRuntimeCallCount() { return ++runtime_call_count_; }

  // Selects the FPU-saving or non-FPU-saving overflow stub entry point.
  static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs) {
    return fpu_regs
               ? stack_overflow_shared_with_fpu_regs_entry_point_offset()
               : stack_overflow_shared_without_fpu_regs_entry_point_offset();
  }

  static intptr_t safepoint_state_offset() {
    return OFFSET_OF(Thread, safepoint_state_);
  }

  // How the thread last exited Dart code (stored in exit_through_ffi_).
  enum {
    kDidNotExit = 0,
    kExitThroughFfi = 1,
    kExitThroughRuntimeCall = 2,
  };

  static intptr_t exit_through_ffi_offset() {
    return OFFSET_OF(Thread, exit_through_ffi_);
  }

  TaskKind task_kind() const { return task_kind_; }

  uword GetAndClearStackOverflowFlags();

  // Interrupt bits OR-ed into stack_limit_.
  enum {
    kVMInterrupt = 0x1,
    kMessageInterrupt = 0x2,
    kInterruptsMask = (kVMInterrupt | kMessageInterrupt),
  };

  void ScheduleInterrupts(uword interrupt_bits);
  uword GetAndClearInterrupts();
  bool HasScheduledInterrupts() const {
    return (stack_limit_.load() & kInterruptsMask) != 0;
  }

  Monitor* thread_lock() const { return &thread_lock_; }

  // Single cached API scope; setter asserts it is only installed/cleared,
  // never overwritten.
  ApiLocalScope* api_reusable_scope() const { return api_reusable_scope_; }
  void set_api_reusable_scope(ApiLocalScope* value) {
    ASSERT(value == nullptr || api_reusable_scope_ == nullptr);
    api_reusable_scope_ = value;
  }

  ApiLocalScope* api_top_scope() const { return api_top_scope_; }
  void set_api_top_scope(ApiLocalScope* value) { api_top_scope_ = value; }
  static intptr_t api_top_scope_offset() {
    return OFFSET_OF(Thread, api_top_scope_);
  }

  void EnterApiScope();
  void ExitApiScope();

  static intptr_t double_truncate_round_supported_offset() {
    return OFFSET_OF(Thread, double_truncate_round_supported_);
  }

  static intptr_t tsan_utils_offset() { return OFFSET_OF(Thread, tsan_utils_); }

#if defined(USING_THREAD_SANITIZER)
  uword exit_through_ffi() const { return exit_through_ffi_; }
  TsanUtils* tsan_utils() const { return tsan_utils_; }
#endif

  Isolate* isolate() const { return isolate_; }
  static intptr_t isolate_offset() { return OFFSET_OF(Thread, isolate_); }
  static intptr_t isolate_group_offset() {
    return OFFSET_OF(Thread, isolate_group_);
  }

  IsolateGroup* isolate_group() const { return isolate_group_; }

  static intptr_t field_table_values_offset() {
    return OFFSET_OF(Thread, field_table_values_);
  }

  bool IsDartMutatorThread() const {
    return scheduled_dart_mutator_isolate_ != nullptr;
  }

  // The isolate this thread is (or will be) running Dart code for.
  Isolate* scheduled_dart_mutator_isolate() const {
    return scheduled_dart_mutator_isolate_;
  }

#if defined(DEBUG)
  bool IsInsideCompiler() const { return inside_compiler_; }
#endif

  static intptr_t dart_stream_offset() {
    // NOTE(review): return statement truncated in extraction
    // (presumably OFFSET_OF(Thread, dart_stream_)).
  }

  static intptr_t service_extension_stream_offset() {
    return OFFSET_OF(Thread, service_extension_stream_);
  }

  bool IsExecutingDartCode() const;
  bool HasExitedDartCode() const;

  bool HasCompilerState() const { return compiler_state_ != nullptr; }

  CompilerState& compiler_state() {
    ASSERT(HasCompilerState());
    return *compiler_state_;
  }

  // hierarchy_info_/type_usage_info_ follow an install-then-clear protocol:
  // the setter asserts a null<->non-null transition only.
  HierarchyInfo* hierarchy_info() const {
    ASSERT(isolate_group_ != nullptr);
    return hierarchy_info_;
  }

  void set_hierarchy_info(HierarchyInfo* value) {
    ASSERT(isolate_group_ != nullptr);
    ASSERT((hierarchy_info_ == nullptr && value != nullptr) ||
           (hierarchy_info_ != nullptr && value == nullptr));
    hierarchy_info_ = value;
  }

  TypeUsageInfo* type_usage_info() const {
    ASSERT(isolate_group_ != nullptr);
    return type_usage_info_;
  }

  void set_type_usage_info(TypeUsageInfo* value) {
    ASSERT(isolate_group_ != nullptr);
    ASSERT((type_usage_info_ == nullptr && value != nullptr) ||
           (type_usage_info_ != nullptr && value == nullptr));
    type_usage_info_ = value;
  }

  CompilerTimings* compiler_timings() const { return compiler_timings_; }

  void set_compiler_timings(CompilerTimings* stats) {
    compiler_timings_ = stats;
  }

  // Depth counters guarded by asserts against under/overflow.
  int32_t no_callback_scope_depth() const { return no_callback_scope_depth_; }
  void IncrementNoCallbackScopeDepth() {
    ASSERT(no_callback_scope_depth_ < INT_MAX);
    no_callback_scope_depth_ += 1;
  }
  void DecrementNoCallbackScopeDepth() {
    ASSERT(no_callback_scope_depth_ > 0);
    no_callback_scope_depth_ -= 1;
  }

  bool force_growth() const { return force_growth_scope_depth_ != 0; }
  void IncrementForceGrowthScopeDepth() {
    ASSERT(force_growth_scope_depth_ < INT_MAX);
    force_growth_scope_depth_ += 1;
  }
  void DecrementForceGrowthScopeDepth() {
    ASSERT(force_growth_scope_depth_ > 0);
    force_growth_scope_depth_ -= 1;
  }

  bool is_unwind_in_progress() const { return is_unwind_in_progress_; }

  void StartUnwindError() {
    is_unwind_in_progress_ = true;
    SetUnwindErrorInProgress(true);
  }

#if defined(DEBUG)
  void EnterCompiler() {
    ASSERT(!IsInsideCompiler());
    inside_compiler_ = true;
  }

  void LeaveCompiler() {
    ASSERT(IsInsideCompiler());
    inside_compiler_ = false;
  }
#endif

  // Write-barrier support: record 'obj' in the thread's store buffer block.
  void StoreBufferAddObject(ObjectPtr obj);
  void StoreBufferAddObjectGC(ObjectPtr obj);
#if defined(TESTING)
  bool StoreBufferContains(ObjectPtr obj) const {
    return store_buffer_block_->Contains(obj);
  }
#endif
  void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy);
  static intptr_t store_buffer_block_offset() {
    return OFFSET_OF(Thread, store_buffer_block_);
  }

  bool is_marking() const { return marking_stack_block_ != nullptr; }
  void MarkingStackAddObject(ObjectPtr obj);
  void DeferredMarkingStackAddObject(ObjectPtr obj);
  void MarkingStackBlockProcess();
  void DeferredMarkingStackBlockProcess();
  static intptr_t marking_stack_block_offset() {
    return OFFSET_OF(Thread, marking_stack_block_);
  }

  uword top_exit_frame_info() const { return top_exit_frame_info_; }
  void set_top_exit_frame_info(uword top_exit_frame_info) {
    top_exit_frame_info_ = top_exit_frame_info;
  }
  static intptr_t top_exit_frame_info_offset() {
    return OFFSET_OF(Thread, top_exit_frame_info_);
  }

  Heap* heap() const;

  // Per-thread allocation area (TLAB-style) accessors.
  uword top() const { return top_; }
  uword true_end() const { return true_end_; }
  void set_top(uword top) { top_ = top; }
  void set_end(uword end) { end_ = end; }
  void set_true_end(uword true_end) { true_end_ = true_end; }
  static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
  static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }

  // Safepoint-scope depth is only tracked in DEBUG builds.
  int32_t no_safepoint_scope_depth() const {
#if defined(DEBUG)
    return no_safepoint_scope_depth_;
#else
    return 0;
#endif
  }

  void IncrementNoSafepointScopeDepth() {
#if defined(DEBUG)
    ASSERT(no_safepoint_scope_depth_ < INT_MAX);
    no_safepoint_scope_depth_ += 1;
#endif
  }

  void DecrementNoSafepointScopeDepth() {
#if defined(DEBUG)
    ASSERT(no_safepoint_scope_depth_ > 0);
    no_safepoint_scope_depth_ -= 1;
#endif
  }

  bool IsInNoReloadScope() const { return no_reload_scope_depth_ > 0; }

  bool IsInStoppedMutatorsScope() const {
    return stopped_mutators_scope_depth_ > 0;
  }

  // Offset accessors for cached members (macro expanded over a member list).
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
  static intptr_t member_name##offset() {                                      \
    return OFFSET_OF(Thread, member_name);                                     \
  }
  // NOTE(review): the list-macro invocation that expands the above
  // (e.g. a CACHED_*_LIST(DEFINE_OFFSET_METHOD)) was truncated in extraction.
#undef DEFINE_OFFSET_METHOD

  static intptr_t write_barrier_wrappers_thread_offset(Register reg) {
    ASSERT((kDartAvailableCpuRegs & (1 << reg)) != 0);
    intptr_t index = 0;
    // NOTE(review): the for-loop header iterating register numbers was
    // truncated in extraction; the loop body below is preserved as-is.
      if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
      if (i == reg) break;
      ++index;
    }
    return OFFSET_OF(Thread, write_barrier_wrappers_entry_points_) +
           index * sizeof(uword);
  }

  static intptr_t WriteBarrierWrappersOffsetForRegister(Register reg) {
    intptr_t index = 0;
    // NOTE(review): for-loop header truncated in extraction.
      if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
      if (i == reg) {
        // NOTE(review): return statement truncated in extraction.
      }
      ++index;
    }
    // NOTE(review): an UNREACHABLE() before this return appears truncated.
    return 0;
  }

#define DEFINE_OFFSET_METHOD(name)                                             \
  static intptr_t name##_entry_point_offset() {                                \
    return OFFSET_OF(Thread, name##_entry_point_);                             \
  }
  // NOTE(review): list-macro invocation truncated in extraction.
#undef DEFINE_OFFSET_METHOD

#define DEFINE_OFFSET_METHOD(returntype, name, ...)                            \
  static intptr_t name##_entry_point_offset() {                                \
    return OFFSET_OF(Thread, name##_entry_point_);                             \
  }
  // NOTE(review): list-macro invocation truncated in extraction.
#undef DEFINE_OFFSET_METHOD

  ObjectPoolPtr global_object_pool() const { return global_object_pool_; }
  void set_global_object_pool(ObjectPoolPtr raw_value) {
    global_object_pool_ = raw_value;
  }

  const uword* dispatch_table_array() const { return dispatch_table_array_; }
  void set_dispatch_table_array(const uword* array) {
    dispatch_table_array_ = array;
  }

  static intptr_t OffsetFromThread(const Object& object);
  static bool ObjectAtOffset(intptr_t offset, Object* object);
  static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);

#define DEFINE_OFFSET_METHOD(name)                                             \
  static intptr_t name##_entry_point_offset() {                                \
    return OFFSET_OF(Thread, name##_entry_point_);                             \
  }
  // NOTE(review): list-macro invocation truncated in extraction.
#undef DEFINE_OFFSET_METHOD

#if defined(DEBUG)
  bool TopErrorHandlerIsSetJump() const;
  bool TopErrorHandlerIsExitFrame() const;
#endif

  uword vm_tag() const { return vm_tag_; }
  void set_vm_tag(uword tag) { vm_tag_ = tag; }
  static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }

  // Unboxed runtime-call argument slots, viewed as int64 pair / double /
  // full simd128 value.
  int64_t unboxed_int64_runtime_arg() const {
    return unboxed_runtime_arg_.int64_storage[0];
  }
  void set_unboxed_int64_runtime_arg(int64_t value) {
    unboxed_runtime_arg_.int64_storage[0] = value;
  }
  int64_t unboxed_int64_runtime_second_arg() const {
    return unboxed_runtime_arg_.int64_storage[1];
  }
  void set_unboxed_int64_runtime_second_arg(int64_t value) {
    unboxed_runtime_arg_.int64_storage[1] = value;
  }
  double unboxed_double_runtime_arg() const {
    return unboxed_runtime_arg_.double_storage[0];
  }
  void set_unboxed_double_runtime_arg(double value) {
    unboxed_runtime_arg_.double_storage[0] = value;
  }
  simd128_value_t unboxed_simd128_runtime_arg() const {
    return unboxed_runtime_arg_;
  }
  void set_unboxed_simd128_runtime_arg(simd128_value_t value) {
    unboxed_runtime_arg_ = value;
  }
  static intptr_t unboxed_runtime_arg_offset() {
    return OFFSET_OF(Thread, unboxed_runtime_arg_);
  }

  static intptr_t global_object_pool_offset() {
    return OFFSET_OF(Thread, global_object_pool_);
  }

  static intptr_t dispatch_table_array_offset() {
    return OFFSET_OF(Thread, dispatch_table_array_);
  }

  // Exception currently being propagated (if any).
  ObjectPtr active_exception() const { return active_exception_; }
  void set_active_exception(const Object& value);
  static intptr_t active_exception_offset() {
    return OFFSET_OF(Thread, active_exception_);
  }

  ObjectPtr active_stacktrace() const { return active_stacktrace_; }
  void set_active_stacktrace(const Object& value);
  static intptr_t active_stacktrace_offset() {
    return OFFSET_OF(Thread, active_stacktrace_);
  }

  uword resume_pc() const { return resume_pc_; }
  void set_resume_pc(uword value) { resume_pc_ = value; }
  static uword resume_pc_offset() { return OFFSET_OF(Thread, resume_pc_); }

  ErrorPtr sticky_error() const;
  void set_sticky_error(const Error& value);
  void ClearStickyError();

#if defined(DEBUG)
  // Accessors for per-type reusable-handle-scope flags (DEBUG only).
#define REUSABLE_HANDLE_SCOPE_ACCESSORS(object)                                \
  void set_reusable_##object##_handle_scope_active(bool value) {               \
    reusable_##object##_handle_scope_active_ = value;                          \
  }                                                                            \
  bool reusable_##object##_handle_scope_active() const {                       \
    return reusable_##object##_handle_scope_active_;                           \
  }
  // NOTE(review): list-macro invocation (REUSABLE_HANDLE_LIST?) truncated.
#undef REUSABLE_HANDLE_SCOPE_ACCESSORS

  bool IsAnyReusableHandleScopeActive() const {
#define IS_REUSABLE_HANDLE_SCOPE_ACTIVE(object)                                \
  if (reusable_##object##_handle_scope_active_) {                              \
    return true;                                                               \
  }
    // NOTE(review): list-macro invocation truncated before this return.
    return false;
#undef IS_REUSABLE_HANDLE_SCOPE_ACTIVE
  }
#endif

  void ClearReusableHandles();

#define REUSABLE_HANDLE(object)                                                \
  object& object##Handle() const { return *object##_handle_; }
  // NOTE(review): list-macro invocation truncated in extraction.
#undef REUSABLE_HANDLE

  // True iff all at-safepoint bits for 'level' are set in 'state'.
  static bool IsAtSafepoint(SafepointLevel level, uword state) {
    const uword mask = AtSafepointBits(level);
    return (state & mask) == mask;
  }

  bool IsAtSafepoint() const {
    return IsAtSafepoint(SafepointLevel::kGC);
  }
  bool IsAtSafepoint(SafepointLevel level) const {
    return IsAtSafepoint(level, safepoint_state_.load());
  }
  // Requires the thread lock; toggles the at-safepoint bits for 'level'.
  void SetAtSafepoint(bool value, SafepointLevel level) {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    ASSERT(level <= current_safepoint_level());
    if (value) {
      safepoint_state_ |= AtSafepointBits(level);
    } else {
      safepoint_state_ &= ~AtSafepointBits(level);
    }
  }
  bool IsSafepointRequestedLocked(SafepointLevel level) const {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    return IsSafepointRequested(level);
  }
  bool IsSafepointRequested() const {
    return IsSafepointRequested(current_safepoint_level());
  }
  bool IsSafepointRequested(SafepointLevel level) const {
    // NOTE(review): the state load and per-level condition inside this loop
    // were truncated in extraction; body preserved as-is.
    for (intptr_t i = level; i >= 0; --i) {
      return true;
    }
    return false;
  }
  bool IsSafepointLevelRequestedLocked(SafepointLevel level) const {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    if (level > current_safepoint_level()) return false;
    // NOTE(review): the local 'state' load was truncated in extraction.
    return IsSafepointLevelRequested(state, level);
  }

  static bool IsSafepointLevelRequested(uword state, SafepointLevel level) {
    switch (level) {
      case SafepointLevel::kGC:
        return (state & SafepointRequestedField::mask_in_place()) != 0;
      case SafepointLevel::kGCAndDeopt:
        return (state & DeoptSafepointRequestedField::mask_in_place()) != 0;
      case SafepointLevel::kGCAndDeoptAndReload:
        return (state & ReloadSafepointRequestedField::mask_in_place()) != 0;
      default:
        // NOTE(review): UNREACHABLE() appears truncated here.
    }
  }

  void BlockForSafepoint();

  // Sets/clears the requested bit for 'level'; returns the previous state.
  uword SetSafepointRequested(SafepointLevel level, bool value) {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());

    // NOTE(review): declaration of local 'mask' truncated in extraction.
    switch (level) {
      case SafepointLevel::kGC:
        mask = SafepointRequestedField::mask_in_place();
        break;
      case SafepointLevel::kGCAndDeopt:
        mask = DeoptSafepointRequestedField::mask_in_place();
        break;
      case SafepointLevel::kGCAndDeoptAndReload:
        mask = ReloadSafepointRequestedField::mask_in_place();
        break;
      default:
        // NOTE(review): UNREACHABLE() appears truncated here.
    }

    if (value) {
      // Acquire pairs with the release in the clearing path below.
      return safepoint_state_.fetch_or(mask, std::memory_order_acquire);
    } else {
      return safepoint_state_.fetch_and(~mask, std::memory_order_release);
    }
  }
  static bool IsBlockedForSafepoint(uword state) {
    return BlockedForSafepointField::decode(state);
  }
  bool IsBlockedForSafepoint() const {
    return BlockedForSafepointField::decode(safepoint_state_);
  }
  void SetBlockedForSafepoint(bool value) {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    safepoint_state_ =
        BlockedForSafepointField::update(value, safepoint_state_);
  }
  bool BypassSafepoints() const {
    return BypassSafepointsField::decode(safepoint_state_);
  }
  static uword SetBypassSafepoints(bool value, uword state) {
    return BypassSafepointsField::update(value, state);
  }
  bool UnwindErrorInProgress() const {
    return UnwindErrorInProgressField::decode(safepoint_state_);
  }
  void SetUnwindErrorInProgress(bool value) {
    const uword mask = UnwindErrorInProgressField::mask_in_place();
    if (value) {
      safepoint_state_.fetch_or(mask);
    } else {
      safepoint_state_.fetch_and(~mask);
    }
  }

  bool OwnsGCSafepoint() const;
  bool OwnsReloadSafepoint() const;
  bool OwnsDeoptSafepoint() const;
  bool OwnsSafepoint() const;
  bool CanAcquireSafepointLocks() const;

  uword safepoint_state() { return safepoint_state_; }

  // Coarse execution state, mirrored in execution_state_ for generated code.
  enum ExecutionState {
    kThreadInVM = 0,
    kThreadInGenerated,
    kThreadInNative,
    kThreadInBlockedState
  };

  ExecutionState execution_state() const {
    return static_cast<ExecutionState>(execution_state_);
  }

  ExecutionState execution_state_cross_thread_for_testing() const {
    return static_cast<ExecutionState>(execution_state_);
  }
  void set_execution_state(ExecutionState state) {
    execution_state_ = static_cast<uword>(state);
  }
  static intptr_t execution_state_offset() {
    return OFFSET_OF(Thread, execution_state_);
  }

  virtual bool MayAllocateHandles() {
    return (execution_state() == kThreadInVM) ||
           (execution_state() == kThreadInGenerated);
  }

  static uword full_safepoint_state_unacquired() {
    return (0 << AtSafepointField::shift()) |
           (0 << AtDeoptSafepointField::shift());
  }
  static uword full_safepoint_state_acquired() {
    return (1 << AtSafepointField::shift()) |
           (1 << AtDeoptSafepointField::shift());
  }

  // Fast path: CAS from "no safepoint bits" to the bits for the current
  // level; falls back to the lock-based path on contention.
  bool TryEnterSafepoint() {
    uword old_state = 0;
    uword new_state = AtSafepointBits(current_safepoint_level());
    return safepoint_state_.compare_exchange_strong(old_state, new_state,
                                                    std::memory_order_release);
  }

  void EnterSafepoint() {
    ASSERT(no_safepoint_scope_depth() == 0);
    if (!TryEnterSafepoint()) {
      EnterSafepointUsingLock();
    }
  }

  bool TryExitSafepoint() {
    uword old_state = AtSafepointBits(current_safepoint_level());
    uword new_state = 0;
    return safepoint_state_.compare_exchange_strong(old_state, new_state,
                                                    std::memory_order_acquire);
  }

  void ExitSafepoint() {
    if (!TryExitSafepoint()) {
      ExitSafepointUsingLock();
    }
  }

  void CheckForSafepoint() {
    ASSERT(no_safepoint_scope_depth() == 0);
    if (IsSafepointRequested()) {
      BlockForSafepoint();
    }
  }

  Thread* next() const { return next_; }

  void VisitObjectPointers(ObjectPointerVisitor* visitor,
                           ValidationPolicy validate_frames);
  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

  bool IsValidLocalHandle(Dart_Handle object) const;
  intptr_t CountLocalHandles() const;
  int ZoneSizeInBytes() const;
  void UnwindScopes(uword stack_marker);

  void InitVMConstants();

  int64_t GetNextTaskId() { return next_task_id_++; }
  static intptr_t next_task_id_offset() {
    return OFFSET_OF(Thread, next_task_id_);
  }
  Random* random() { return &thread_random_; }
  static intptr_t random_offset() { return OFFSET_OF(Thread, thread_random_); }

#ifndef PRODUCT
  void PrintJSON(JSONStream* stream) const;
#endif

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  HeapProfileSampler& heap_sampler() { return heap_sampler_; }
#endif

  PendingDeopts& pending_deopts() { return pending_deopts_; }

  // NOTE(review): the function header was truncated in extraction; the body
  // matches a SafepointLevel current_safepoint_level() const accessor.
    if (runtime_call_deopt_ability_ ==
        RuntimeCallDeoptAbility::kCannotLazyDeopt) {
      return SafepointLevel::kGC;
    }
    if (no_reload_scope_depth_ > 0 || allow_reload_scope_depth_ <= 0) {
      return SafepointLevel::kGCAndDeopt;
    }
    return SafepointLevel::kGCAndDeoptAndReload;
  }

 private:
  template <class T>
  T* AllocateReusableHandle();

  enum class RestoreWriteBarrierInvariantOp {
    kAddToRememberedSet,
    kAddToDeferredMarkingStack
  };
  friend class RestoreWriteBarrierInvariantVisitor;
  void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);

  // Swaps in a new compiler state, returning the previous one.
  CompilerState* SetCompilerState(CompilerState* state) {
    CompilerState* previous = compiler_state_;
    compiler_state_ = state;
    return previous;
  }

  // Member layout below is read by generated code via the OFFSET_OF
  // accessors above — do not reorder.
  volatile RelaxedAtomic<uword> stack_limit_ = 0;
  uword write_barrier_mask_;
#if defined(DART_COMPRESSED_POINTERS)
  uword heap_base_ = 0;
#endif
  const uword* dispatch_table_array_ = nullptr;
  ObjectPtr* field_table_values_ = nullptr;

#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value)      \
  type_name member_name;
  // NOTE(review): list-macro invocation truncated in extraction.
#undef DECLARE_MEMBERS

#define DECLARE_MEMBERS(name) uword name##_entry_point_;
  // NOTE(review): list-macro invocation truncated in extraction.
#undef DECLARE_MEMBERS

#define DECLARE_MEMBERS(returntype, name, ...) uword name##_entry_point_;
  // NOTE(review): list-macro invocation truncated in extraction.
#undef DECLARE_MEMBERS

#define DECLARE_MEMBERS(name) uword name##_entry_point_ = 0;
  // NOTE(review): list-macro invocation truncated in extraction.
#undef DECLARE_MEMBERS

  Isolate* isolate_ = nullptr;
  IsolateGroup* isolate_group_ = nullptr;

  uword saved_stack_limit_ = OSThread::kInvalidStackLimit;

  uword stack_overflow_flags_ = 0;
  uword volatile top_exit_frame_info_ = 0;
  // NOTE(review): declarations of store_buffer_block_ / marking_stack_block_
  // (referenced by methods above) appear truncated around here.
  uword volatile vm_tag_ = 0;

  ALIGN8 simd128_value_t unboxed_runtime_arg_;

  ObjectPtr active_exception_;
  ObjectPtr active_stacktrace_;

  ObjectPoolPtr global_object_pool_;
  // NOTE(review): resume_pc_ (referenced by accessors above) appears
  // truncated around here.
  uword saved_shadow_call_stack_ = 0;

  uword execution_state_;

  std::atomic<uword> safepoint_state_;
  uword exit_through_ffi_ = 0;
  ApiLocalScope* api_top_scope_;
  uint8_t double_truncate_round_supported_;
  ALIGN8 int64_t next_task_id_;
  ALIGN8 Random thread_random_;

  TsanUtils* tsan_utils_ = nullptr;

  // NOTE(review): top_ / end_ allocation-area members (referenced by the
  // accessors above) appear truncated before true_end_.
  uword true_end_ = 0;
  TaskKind task_kind_;
  TimelineStream* const dart_stream_;
  StreamInfo* const service_extension_stream_;
  mutable Monitor thread_lock_;
  ApiLocalScope* api_reusable_scope_;
  int32_t no_callback_scope_depth_;
  int32_t force_growth_scope_depth_ = 0;
  intptr_t no_reload_scope_depth_ = 0;
  intptr_t allow_reload_scope_depth_ = 0;
  intptr_t stopped_mutators_scope_depth_ = 0;
#if defined(DEBUG)
  int32_t no_safepoint_scope_depth_;
#endif
  VMHandles reusable_handles_;
  int32_t stack_overflow_count_;
  uint32_t runtime_call_count_ = 0;

  // NOTE(review): the declaration line of runtime_call_deopt_ability_ was
  // truncated; only its initializer survives below.
      RuntimeCallDeoptAbility::kCanLazyDeopt;
  PendingDeopts pending_deopts_;

  CompilerState* compiler_state_ = nullptr;
  HierarchyInfo* hierarchy_info_;
  TypeUsageInfo* type_usage_info_;
  NoActiveIsolateScope* no_active_isolate_scope_ = nullptr;

  CompilerTimings* compiler_timings_ = nullptr;

  ErrorPtr sticky_error_;

  ObjectPtr* field_table_values() const { return field_table_values_; }

#define REUSABLE_HANDLE_FIELDS(object) object* object##_handle_;
  // NOTE(review): list-macro invocation truncated in extraction.
#undef REUSABLE_HANDLE_FIELDS

#if defined(DEBUG)
#define REUSABLE_HANDLE_SCOPE_VARIABLE(object)                                 \
  bool reusable_##object##_handle_scope_active_;
  // NOTE(review): list-macro invocation truncated in extraction.
#undef REUSABLE_HANDLE_SCOPE_VARIABLE
#endif

  // Bit fields packed into safepoint_state_, one pair (at / requested) per
  // safepoint level, plus blocked/bypass/unwind flags.
  class AtSafepointField : public BitField<uword, bool, 0, 1> {};
  class SafepointRequestedField
      : public BitField<uword, bool, AtSafepointField::kNextBit, 1> {};

  class AtDeoptSafepointField
      : public BitField<uword, bool, SafepointRequestedField::kNextBit, 1> {};
  class DeoptSafepointRequestedField
      : public BitField<uword, bool, AtDeoptSafepointField::kNextBit, 1> {};

  class AtReloadSafepointField
      : public BitField<uword,
                        bool,
                        DeoptSafepointRequestedField::kNextBit,
                        1> {};
  class ReloadSafepointRequestedField
      : public BitField<uword, bool, AtReloadSafepointField::kNextBit, 1> {};

  class BlockedForSafepointField
      : public BitField<uword,
                        bool,
                        ReloadSafepointRequestedField::kNextBit,
                        1> {};
  class BypassSafepointsField
      : public BitField<uword, bool, BlockedForSafepointField::kNextBit, 1> {};
  class UnwindErrorInProgressField
      : public BitField<uword, bool, BypassSafepointsField::kNextBit, 1> {};

  // Mask of at-safepoint bits implied by reaching the given level; higher
  // levels include the bits of all lower levels.
  static uword AtSafepointBits(SafepointLevel level) {
    switch (level) {
      case SafepointLevel::kGC:
        return AtSafepointField::mask_in_place();
      case SafepointLevel::kGCAndDeopt:
        return AtSafepointField::mask_in_place() |
               AtDeoptSafepointField::mask_in_place();
      case SafepointLevel::kGCAndDeoptAndReload:
        return AtSafepointField::mask_in_place() |
               AtDeoptSafepointField::mask_in_place() |
               AtReloadSafepointField::mask_in_place();
      default:
        // NOTE(review): UNREACHABLE() appears truncated here.
    }
  }

#if defined(USING_SAFE_STACK)
  uword saved_safestack_limit_;
#endif

  Thread* next_;
  Isolate* scheduled_dart_mutator_isolate_ = nullptr;

  bool is_unwind_in_progress_ = false;

#if defined(DEBUG)
  bool inside_compiler_ = false;
#endif

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  HeapProfileSampler heap_sampler_;
#endif

  explicit Thread(bool is_vm_isolate);

  void StoreBufferRelease(
      StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
  void StoreBufferAcquire();

  void MarkingStackRelease();
  void MarkingStackAcquire();
  void MarkingStackFlush();
  void DeferredMarkingStackRelease();
  void DeferredMarkingStackAcquire();
  void DeferredMarkingStackFlush();

  void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
  void EnterSafepointUsingLock();
  void ExitSafepointUsingLock();

  void SetupState(TaskKind kind);
  void ResetState();

  void SetupMutatorState(TaskKind kind);
  void ResetMutatorState();

  void SetupDartMutatorState(Isolate* isolate);
  void SetupDartMutatorStateDependingOnSnapshot(IsolateGroup* group);
  void ResetDartMutatorState(Isolate* isolate);

  static void SuspendDartMutatorThreadInternal(Thread* thread,
                                               VMTag::VMTagId tag);
  static void ResumeDartMutatorThreadInternal(Thread* thread);

  static void SuspendThreadInternal(Thread* thread, VMTag::VMTagId tag);
  static void ResumeThreadInternal(Thread* thread);

  static Thread* AddActiveThread(IsolateGroup* group,
                                 Isolate* isolate,
                                 bool is_dart_mutator,
                                 bool bypass_safepoint);

  static void FreeActiveThread(Thread* thread, bool bypass_safepoint);

  static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }

#define REUSABLE_FRIEND_DECLARATION(name)                                      \
  friend class Reusable##name##HandleScope;
  // NOTE(review): list-macro invocation truncated in extraction.
#undef REUSABLE_FRIEND_DECLARATION

  friend class ApiZone;
  friend class ActiveIsolateScope;
  friend class InterruptChecker;
  friend class Isolate;
  friend class IsolateGroup;
  friend class NoActiveIsolateScope;
  friend class NoReloadScope;
  friend class RawReloadParticipationScope;
  friend class Simulator;
  friend class StackZone;
  friend class StoppedMutatorsScope;
  friend class ThreadRegistry;
  friend class CompilerState;
  friend class compiler::target::Thread;
  friend class FieldTable;
  friend class RuntimeCallDeoptScope;
  friend class Dart;
  friend class TransitionGeneratedToVM;
  friend class TransitionVMToGenerated;
  friend class MonitorLocker;
  // NOTE(review): the head of a friend function declaration was truncated;
  // only its trailing parameters survive below.
      const char*,
      char**);
};
1477
// RAII scope that temporarily changes the current thread's lazy-deopt
// ability around a runtime call; the previous state is asserted to be
// kCanLazyDeopt and restored on destruction.
class RuntimeCallDeoptScope : public StackResource {
 public:
  RuntimeCallDeoptScope(Thread* thread, RuntimeCallDeoptAbility kind)
      : StackResource(thread) {
    // Scopes must not nest with a non-default ability already installed.
    ASSERT(thread->runtime_call_deopt_ability_ ==
           RuntimeCallDeoptAbility::kCanLazyDeopt);
    thread->runtime_call_deopt_ability_ = kind;
  }
  virtual ~RuntimeCallDeoptScope() {
    thread()->runtime_call_deopt_ability_ =
        RuntimeCallDeoptAbility::kCanLazyDeopt;
  }

 private:
  // StackResource stores a ThreadState*; downcast back to the VM Thread.
  Thread* thread() {
    return reinterpret_cast<Thread*>(StackResource::thread());
  }
};
1497
#if defined(DART_HOST_OS_WINDOWS)
// Clean-up hook invoked when an OS thread exits on Windows.
// NOTE(review): implementation not visible in this chunk — confirm what
// per-thread state it releases against the .cc file.
void WindowsThreadCleanUp();
#endif
1502
#if !defined(PRODUCT)
// Disables thread interrupts for the lifetime of the scope.
// NOTE(review): constructor/destructor bodies live elsewhere — presumably
// this guards against profiler sample interrupts; confirm in the .cc file.
class DisableThreadInterruptsScope : public StackResource {
 public:
  explicit DisableThreadInterruptsScope(Thread* thread);
  ~DisableThreadInterruptsScope();
};
#else
// PRODUCT build: the scope is a no-op that only links itself into the
// thread's StackResource chain.
class DisableThreadInterruptsScope : public StackResource {
 public:
  explicit DisableThreadInterruptsScope(Thread* thread)
      : StackResource(thread) {}
  ~DisableThreadInterruptsScope() {}
};
#endif
1518
1519
1520
1521#if defined(DEBUG)
1522class NoSafepointScope : public ThreadStackResource {
1523 public:
1524 explicit NoSafepointScope(Thread* thread = nullptr)
1525 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1526 this->thread()->IncrementNoSafepointScopeDepth();
1527 }
1528 ~NoSafepointScope() { thread()->DecrementNoSafepointScopeDepth(); }
1529
1530 private:
1532};
1533#else
1534class NoSafepointScope : public ValueObject {
1535 public:
1536 explicit NoSafepointScope(Thread* thread = nullptr) {}
1537
1538 private:
1540};
1541#endif
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
// While a NoReloadScope is alive, reload is not permitted on this thread.
// NOTE(review): constructor/destructor bodies live elsewhere — confirm the
// exact blocking mechanism against the .cc file.
class NoReloadScope : public ThreadStackResource {
 public:
  explicit NoReloadScope(Thread* thread);
  ~NoReloadScope();

 private:
};
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582class RawReloadParticipationScope {
1583 public:
1584 explicit RawReloadParticipationScope(Thread* thread) : thread_(thread) {
1585#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1586 if (thread->allow_reload_scope_depth_ == 0) {
1587 ASSERT(thread->current_safepoint_level() == SafepointLevel::kGCAndDeopt);
1588 }
1589 thread->allow_reload_scope_depth_++;
1590 ASSERT(thread->allow_reload_scope_depth_ >= 0);
1591#endif
1592 }
1593
1594 ~RawReloadParticipationScope() {
1595#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1596 thread_->allow_reload_scope_depth_ -= 1;
1597 ASSERT(thread_->allow_reload_scope_depth_ >= 0);
1598 if (thread_->allow_reload_scope_depth_ == 0) {
1599 ASSERT(thread_->current_safepoint_level() == SafepointLevel::kGCAndDeopt);
1600 }
1601#endif
1602 }
1603
1604 private:
1605 Thread* thread_;
1606
1608};
1609
1611 AsThreadStackResource<RawReloadParticipationScope>;
1612
1613class StoppedMutatorsScope : public ThreadStackResource {
1614 public:
1615 explicit StoppedMutatorsScope(Thread* thread) : ThreadStackResource(thread) {
1616#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1617 thread->stopped_mutators_scope_depth_++;
1618 ASSERT(thread->stopped_mutators_scope_depth_ >= 0);
1619#endif
1620 }
1621
1622 ~StoppedMutatorsScope() {
1623#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1624 thread()->stopped_mutators_scope_depth_ -= 1;
1625 ASSERT(thread()->stopped_mutators_scope_depth_ >= 0);
1626#endif
1627 }
1628
1629 private:
1631};
1632
1633
1634#if defined(DEBUG)
1635class EnterCompilerScope : public ThreadStackResource {
1636 public:
1637 explicit EnterCompilerScope(Thread* thread = nullptr)
1638 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1639 previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
1640 if (!previously_is_inside_compiler_) {
1641 this->thread()->EnterCompiler();
1642 }
1643 }
1644 ~EnterCompilerScope() {
1645 if (!previously_is_inside_compiler_) {
1646 thread()->LeaveCompiler();
1647 }
1648 }
1649
1650 private:
1651 bool previously_is_inside_compiler_;
1653};
1654#else
1655class EnterCompilerScope : public ValueObject {
1656 public:
1657 explicit EnterCompilerScope(Thread* thread = nullptr) {}
1658
1659 private:
1661};
1662#endif
1663
1664
1665#if defined(DEBUG)
1666class LeaveCompilerScope : public ThreadStackResource {
1667 public:
1668 explicit LeaveCompilerScope(Thread* thread = nullptr)
1669 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1670 previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
1671 if (previously_is_inside_compiler_) {
1672 this->thread()->LeaveCompiler();
1673 }
1674 }
1675 ~LeaveCompilerScope() {
1676 if (previously_is_inside_compiler_) {
1677 thread()->EnterCompiler();
1678 }
1679 }
1680
1681 private:
1682 bool previously_is_inside_compiler_;
1684};
1685#else
1686class LeaveCompilerScope : public ValueObject {
1687 public:
1688 explicit LeaveCompilerScope(Thread* thread = nullptr) {}
1689
1690 private:
1692};
1693#endif
1694
1695}
1696
1697#endif
static float next(float f)
struct _Dart_Handle * Dart_Handle
#define DART_WARN_UNUSED_RESULT
#define REUSABLE_FRIEND_DECLARATION(name)
bool CanLoadFromThread(const dart::Object &object, intptr_t *offset)
StoreBuffer::Block StoreBufferBlock
MarkingStack::Block MarkingStackBlock
AsThreadStackResource< RawReloadParticipationScope > ReloadParticipationScope
const intptr_t kStoreBufferWrapperSize
void HandleInterrupts(Thread *thread)
Isolate * CreateWithinExistingIsolateGroup(IsolateGroup *group, const char *name, char **error)
constexpr int kNumberOfDartAvailableCpuRegs
#define RUNTIME_ENTRY_LIST(V)
#define LEAF_RUNTIME_ENTRY_LIST(V)
#define CACHED_FUNCTION_ENTRY_POINTS_LIST(V)
#define REUSABLE_HANDLE_LIST(V)
#define REUSABLE_HANDLE_FIELDS(object)
#define CACHED_CONSTANTS_LIST(V)
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value)
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value)
#define REUSABLE_HANDLE(object)
#define NO_SANITIZE_THREAD
#define OFFSET_OF(type, field)