Flutter Engine
The Flutter Engine
Classes | Namespaces | Macros | Typedefs | Enumerations
thread.h File Reference
#include <setjmp.h>
#include "include/dart_api.h"
#include "platform/assert.h"
#include "platform/atomic.h"
#include "platform/safe_stack.h"
#include "vm/bitfield.h"
#include "vm/compiler/runtime_api.h"
#include "vm/constants.h"
#include "vm/globals.h"
#include "vm/handles.h"
#include "vm/heap/pointer_block.h"
#include "vm/heap/sampler.h"
#include "vm/os_thread.h"
#include "vm/pending_deopts.h"
#include "vm/random.h"
#include "vm/runtime_entry_list.h"
#include "vm/tags.h"
#include "vm/thread_stack_resource.h"
#include "vm/thread_state.h"

Go to the source code of this file.

Classes

struct  dart::TsanUtils
 
class  dart::Thread
 
class  dart::RuntimeCallDeoptScope
 
class  dart::DisableThreadInterruptsScope
 
class  dart::NoSafepointScope
 
class  dart::NoReloadScope
 
class  dart::RawReloadParticipationScope
 
class  dart::StoppedMutatorsScope
 
class  dart::EnterCompilerScope
 
class  dart::LeaveCompilerScope
 

Namespaces

namespace  dart
 
namespace  dart::compiler
 
namespace  dart::compiler::target
 

Macros

#define REUSABLE_HANDLE_LIST(V)
 
#define CACHED_VM_STUBS_LIST(V)
 
#define CACHED_NON_VM_STUB_LIST(V)
 
#define CACHED_VM_OBJECTS_LIST(V)
 
#define CACHED_FUNCTION_ENTRY_POINTS_LIST(V)
 
#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE()
 
#define CACHED_VM_STUBS_ADDRESSES_LIST(V)
 
#define CACHED_ADDRESSES_LIST(V)
 
#define CACHED_CONSTANTS_LIST(V)
 
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value)
 
#define DEFINE_OFFSET_METHOD(name)
 
#define DEFINE_OFFSET_METHOD(returntype, name, ...)
 
#define DEFINE_OFFSET_METHOD(name)
 
#define REUSABLE_HANDLE(object)    object& object##Handle() const { return *object##_handle_; }
 
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value)    type_name member_name;
 
#define DECLARE_MEMBERS(name)   uword name##_entry_point_;
 
#define DECLARE_MEMBERS(returntype, name, ...)   uword name##_entry_point_;
 
#define DECLARE_MEMBERS(name)   uword name##_entry_point_ = 0;
 
#define REUSABLE_HANDLE_FIELDS(object)   object* object##_handle_;
 
#define REUSABLE_FRIEND_DECLARATION(name)    friend class Reusable##name##HandleScope;
 

Typedefs

using dart::ReloadParticipationScope = AsThreadStackResource< RawReloadParticipationScope >
 

Enumerations

enum class  dart::ValidationPolicy { dart::kValidateFrames = 0 , dart::kDontValidateFrames = 1 }
 
enum class  dart::RuntimeCallDeoptAbility { dart::kCanLazyDeopt , dart::kCannotLazyDeopt }
 
enum  dart::SafepointLevel {
  dart::kGC , dart::kGCAndDeopt , dart::kGCAndDeoptAndReload , dart::kNumLevels ,
  dart::kNoSafepoint
}
 

Macro Definition Documentation

◆ ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE

#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE ( )
Value:
ASSERT((Thread::bool_true_offset() + kWordSize) == \
Thread::bool_false_offset());

Definition at line 204 of file thread.h.
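The assertion relies on the cached bool_false_ slot being laid out exactly one word after bool_true_ (see CACHED_NON_VM_STUB_LIST below). A minimal, self-contained sketch of why that layout is useful, using hypothetical names rather than the real Thread layout: generated code can materialize either cached boolean from a 0/1 condition with a single indexed load instead of a branch.

#include <cstddef>
#include <cstdint>

namespace sketch {

// Hypothetical stand-in for two adjacent word-sized cached object slots.
struct CachedBools {
  uintptr_t bool_true_;   // word at some offset N
  uintptr_t bool_false_;  // word at offset N + sizeof(uintptr_t)
};

// Select a cached boolean from a condition with one indexed load, mirroring
// what generated code can do once the adjacency assertion holds.
uintptr_t SelectBool(const CachedBools* cache, bool condition) {
  static_assert(offsetof(CachedBools, bool_true_) + sizeof(uintptr_t) ==
                    offsetof(CachedBools, bool_false_),
                "bool_false_ must follow bool_true_");
  const uintptr_t* base = &cache->bool_true_;
  return base[condition ? 0 : 1];  // true -> bool_true_, false -> bool_false_
}

}  // namespace sketch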

270enum class ValidationPolicy {
271 kValidateFrames = 0,
272 kDontValidateFrames = 1,
273};
274
275enum class RuntimeCallDeoptAbility {
276 // There was no leaf call or a leaf call that can cause deoptimization
277 // after-call.
278 kCanLazyDeopt,
279 // There was a leaf call and the VM cannot cause deoptimize after-call.
280 kCannotLazyDeopt,
281};
282
283// The safepoint level a thread is on or a safepoint operation is requested for
284//
285// The higher the number the stronger the guarantees:
286// * the time-to-safepoint latency increases with level
287// * the frequency of hitting possible safe points decreases with level
288enum SafepointLevel {
289 // Safe to GC
290 kGC,
291 // Safe to GC as well as Deopt.
292 kGCAndDeopt,
293 // Safe to GC, Deopt as well as Reload.
294 kGCAndDeoptAndReload,
295 // Number of levels.
296 kNumLevels,
297
298 // No safepoint.
299 kNoSafepoint,
300};
301
302// Accessed from generated code.
303struct TsanUtils {
304 // Used to allow unwinding runtime C frames using longjmp() when throwing
305 // exceptions. This allows triggering the normal TSAN shadow stack unwinding
306 // implementation.
307 // -> See https://dartbug.com/47472#issuecomment-948235479 for details.
308#if defined(USING_THREAD_SANITIZER)
309 void* setjmp_function = reinterpret_cast<void*>(&setjmp);
310#else
311 // MSVC (on Windows) is not happy with getting address of purely intrinsic.
312 void* setjmp_function = nullptr;
313#endif
314 jmp_buf* setjmp_buffer = nullptr;
315 uword exception_pc = 0;
316 uword exception_sp = 0;
317 uword exception_fp = 0;
318
319 static intptr_t setjmp_function_offset() {
320 return OFFSET_OF(TsanUtils, setjmp_function);
321 }
322 static intptr_t setjmp_buffer_offset() {
323 return OFFSET_OF(TsanUtils, setjmp_buffer);
324 }
325 static intptr_t exception_pc_offset() {
326 return OFFSET_OF(TsanUtils, exception_pc);
327 }
328 static intptr_t exception_sp_offset() {
329 return OFFSET_OF(TsanUtils, exception_sp);
330 }
331 static intptr_t exception_fp_offset() {
332 return OFFSET_OF(TsanUtils, exception_fp);
333 }
334};
335
336// A VM thread; may be executing Dart code or performing helper tasks like
337// garbage collection or compilation. The Thread structure associated with
338// a thread is allocated by EnsureInit before entering an isolate, and destroyed
339// automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp
340// must currently be called manually (issue 23474).
341class Thread : public ThreadState {
342 public:
343 // The kind of task this thread is performing. Sampled by the profiler.
344 enum TaskKind {
345 kUnknownTask = 0x0,
346 kMutatorTask = 0x1,
347 kCompilerTask = 0x2,
348 kMarkerTask = 0x4,
349 kSweeperTask = 0x8,
350 kCompactorTask = 0x10,
351 kScavengerTask = 0x20,
352 kSampleBlockTask = 0x40,
353 };
354 // Converts a TaskKind to its corresponding C-String name.
355 static const char* TaskKindToCString(TaskKind kind);
356
357 ~Thread();
358
359 // The currently executing thread, or nullptr if not yet initialized.
360 static Thread* Current() {
361 return static_cast<Thread*>(OSThread::CurrentVMThread());
362 }
363
364 // Whether there's any active state on the [thread] that needs to be preserved
365 // across `Thread::ExitIsolate()` and `Thread::EnterIsolate()`.
366 bool HasActiveState();
367 void AssertNonMutatorInvariants();
368 void AssertNonDartMutatorInvariants();
369 void AssertEmptyStackInvariants();
370 void AssertEmptyThreadInvariants();
371
372 // Makes the current thread enter 'isolate'.
373 static void EnterIsolate(Isolate* isolate);
374 // Makes the current thread exit its isolate.
375 static void ExitIsolate(bool isolate_shutdown = false);
376
377 static bool EnterIsolateGroupAsHelper(IsolateGroup* isolate_group,
378 TaskKind kind,
379 bool bypass_safepoint);
380 static void ExitIsolateGroupAsHelper(bool bypass_safepoint);
381
382 static bool EnterIsolateGroupAsNonMutator(IsolateGroup* isolate_group,
383 TaskKind kind);
384 static void ExitIsolateGroupAsNonMutator();
385
386 // Empties the store buffer block into the isolate.
387 void ReleaseStoreBuffer();
388 void AcquireMarkingStack();
389 void ReleaseMarkingStack();
390
391 void SetStackLimit(uword value);
392 void ClearStackLimit();
393
394 // Access to the current stack limit for generated code. Either the true OS
395 // thread's stack limit minus some headroom, or a special value to trigger
396 // interrupts.
397 uword stack_limit_address() const {
398 return reinterpret_cast<uword>(&stack_limit_);
399 }
400 static intptr_t stack_limit_offset() {
401 return OFFSET_OF(Thread, stack_limit_);
402 }
403
404 // The true stack limit for this OS thread.
405 static intptr_t saved_stack_limit_offset() {
406 return OFFSET_OF(Thread, saved_stack_limit_);
407 }
408 uword saved_stack_limit() const { return saved_stack_limit_; }
409
410#if defined(USING_SAFE_STACK)
411 uword saved_safestack_limit() const { return saved_safestack_limit_; }
412 void set_saved_safestack_limit(uword limit) {
413 saved_safestack_limit_ = limit;
414 }
415#endif
416 uword saved_shadow_call_stack() const { return saved_shadow_call_stack_; }
417 static uword saved_shadow_call_stack_offset() {
418 return OFFSET_OF(Thread, saved_shadow_call_stack_);
419 }
420
421 // Stack overflow flags
422 enum {
423 kOsrRequest = 0x1, // Current stack overflow caused by OSR request.
424 };
425
426 uword write_barrier_mask() const { return write_barrier_mask_; }
427 uword heap_base() const {
428#if defined(DART_COMPRESSED_POINTERS)
429 return heap_base_;
430#else
431 return 0;
432#endif
433 }
434
435 static intptr_t write_barrier_mask_offset() {
436 return OFFSET_OF(Thread, write_barrier_mask_);
437 }
438#if defined(DART_COMPRESSED_POINTERS)
439 static intptr_t heap_base_offset() { return OFFSET_OF(Thread, heap_base_); }
440#endif
441 static intptr_t stack_overflow_flags_offset() {
442 return OFFSET_OF(Thread, stack_overflow_flags_);
443 }
444
445 int32_t IncrementAndGetStackOverflowCount() {
446 return ++stack_overflow_count_;
447 }
448
449 uint32_t IncrementAndGetRuntimeCallCount() { return ++runtime_call_count_; }
450
451 static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs) {
452 return fpu_regs
453 ? stack_overflow_shared_with_fpu_regs_entry_point_offset()
454 : stack_overflow_shared_without_fpu_regs_entry_point_offset();
455 }
456
457 static intptr_t safepoint_state_offset() {
458 return OFFSET_OF(Thread, safepoint_state_);
459 }
460
461 // Tag state is maintained on transitions.
462 enum {
463 // Always true in generated state.
464 kDidNotExit = 0,
465 // The VM exited the generated state through FFI.
466 // This can be true in both native and VM state.
467 kExitThroughFfi = 1,
468 // The VM exited the generated state through a runtime call.
469 // This can be true in both native and VM state.
470 kExitThroughRuntimeCall = 2,
471 };
472
473 static intptr_t exit_through_ffi_offset() {
474 return OFFSET_OF(Thread, exit_through_ffi_);
475 }
476
477 TaskKind task_kind() const { return task_kind_; }
478
479 // Retrieves and clears the stack overflow flags. These are set by
480 // the generated code before the slow path runtime routine for a
481 // stack overflow is called.
482 uword GetAndClearStackOverflowFlags();
483
484 // Interrupt bits.
485 enum {
486 kVMInterrupt = 0x1, // Internal VM checks: safepoints, store buffers, etc.
487 kMessageInterrupt = 0x2, // An interrupt to process an out of band message.
488
489 kInterruptsMask = (kVMInterrupt | kMessageInterrupt),
490 };
491
492 void ScheduleInterrupts(uword interrupt_bits);
493 ErrorPtr HandleInterrupts();
494 uword GetAndClearInterrupts();
495 bool HasScheduledInterrupts() const {
496 return (stack_limit_.load() & kInterruptsMask) != 0;
497 }
498
499 // Monitor corresponding to this thread.
500 Monitor* thread_lock() const { return &thread_lock_; }
501
502 // The reusable api local scope for this thread.
503 ApiLocalScope* api_reusable_scope() const { return api_reusable_scope_; }
504 void set_api_reusable_scope(ApiLocalScope* value) {
505 ASSERT(value == nullptr || api_reusable_scope_ == nullptr);
506 api_reusable_scope_ = value;
507 }
508
509 // The api local scope for this thread, this where all local handles
510 // are allocated.
511 ApiLocalScope* api_top_scope() const { return api_top_scope_; }
512 void set_api_top_scope(ApiLocalScope* value) { api_top_scope_ = value; }
513 static intptr_t api_top_scope_offset() {
514 return OFFSET_OF(Thread, api_top_scope_);
515 }
516
517 void EnterApiScope();
518 void ExitApiScope();
519
520 static intptr_t double_truncate_round_supported_offset() {
521 return OFFSET_OF(Thread, double_truncate_round_supported_);
522 }
523
524 static intptr_t tsan_utils_offset() { return OFFSET_OF(Thread, tsan_utils_); }
525
526#if defined(USING_THREAD_SANITIZER)
527 uword exit_through_ffi() const { return exit_through_ffi_; }
528 TsanUtils* tsan_utils() const { return tsan_utils_; }
529#endif // defined(USING_THREAD_SANITIZER)
530
531 // The isolate that this thread is operating on, or nullptr if none.
532 Isolate* isolate() const { return isolate_; }
533 static intptr_t isolate_offset() { return OFFSET_OF(Thread, isolate_); }
534 static intptr_t isolate_group_offset() {
535 return OFFSET_OF(Thread, isolate_group_);
536 }
537
538 // The isolate group that this thread is operating on, or nullptr if none.
539 IsolateGroup* isolate_group() const { return isolate_group_; }
540
541 static intptr_t field_table_values_offset() {
542 return OFFSET_OF(Thread, field_table_values_);
543 }
544
545 bool IsDartMutatorThread() const {
546 return scheduled_dart_mutator_isolate_ != nullptr;
547 }
548
549 // Returns the dart mutator [Isolate] this thread belongs to or nullptr.
550 //
551 // `isolate()` in comparison can return
552 // - `nullptr` for dart mutators (e.g. if the mutator runs under
553 // [NoActiveIsolateScope])
554 // - an incorrect isolate (e.g. if [ActiveIsolateScope] is used to seemingly
555 // enter another isolate)
556 Isolate* scheduled_dart_mutator_isolate() const {
557 return scheduled_dart_mutator_isolate_;
558 }
559
560#if defined(DEBUG)
561 bool IsInsideCompiler() const { return inside_compiler_; }
562#endif
563
564 // Offset of Dart TimelineStream object.
565 static intptr_t dart_stream_offset() {
566 return OFFSET_OF(Thread, dart_stream_);
567 }
568
569 // Offset of the Dart VM Service Extension StreamInfo object.
570 static intptr_t service_extension_stream_offset() {
571 return OFFSET_OF(Thread, service_extension_stream_);
572 }
573
574 // Is |this| executing Dart code?
575 bool IsExecutingDartCode() const;
576
577 // Has |this| exited Dart code?
578 bool HasExitedDartCode() const;
579
580 bool HasCompilerState() const { return compiler_state_ != nullptr; }
581
582 CompilerState& compiler_state() {
583 ASSERT(HasCompilerState());
584 return *compiler_state_;
585 }
586
587 HierarchyInfo* hierarchy_info() const {
588 ASSERT(isolate_group_ != nullptr);
589 return hierarchy_info_;
590 }
591
592 void set_hierarchy_info(HierarchyInfo* value) {
593 ASSERT(isolate_group_ != nullptr);
594 ASSERT((hierarchy_info_ == nullptr && value != nullptr) ||
595 (hierarchy_info_ != nullptr && value == nullptr));
596 hierarchy_info_ = value;
597 }
598
599 TypeUsageInfo* type_usage_info() const {
600 ASSERT(isolate_group_ != nullptr);
601 return type_usage_info_;
602 }
603
604 void set_type_usage_info(TypeUsageInfo* value) {
605 ASSERT(isolate_group_ != nullptr);
606 ASSERT((type_usage_info_ == nullptr && value != nullptr) ||
607 (type_usage_info_ != nullptr && value == nullptr));
608 type_usage_info_ = value;
609 }
610
611 CompilerTimings* compiler_timings() const { return compiler_timings_; }
612
613 void set_compiler_timings(CompilerTimings* stats) {
614 compiler_timings_ = stats;
615 }
616
617 int32_t no_callback_scope_depth() const { return no_callback_scope_depth_; }
618 void IncrementNoCallbackScopeDepth() {
619 ASSERT(no_callback_scope_depth_ < INT_MAX);
620 no_callback_scope_depth_ += 1;
621 }
622 void DecrementNoCallbackScopeDepth() {
623 ASSERT(no_callback_scope_depth_ > 0);
624 no_callback_scope_depth_ -= 1;
625 }
626
627 bool force_growth() const { return force_growth_scope_depth_ != 0; }
628 void IncrementForceGrowthScopeDepth() {
629 ASSERT(force_growth_scope_depth_ < INT_MAX);
630 force_growth_scope_depth_ += 1;
631 }
632 void DecrementForceGrowthScopeDepth() {
633 ASSERT(force_growth_scope_depth_ > 0);
634 force_growth_scope_depth_ -= 1;
635 }
636
637 bool is_unwind_in_progress() const { return is_unwind_in_progress_; }
638
639 void StartUnwindError() {
640 is_unwind_in_progress_ = true;
641 SetUnwindErrorInProgress(true);
642 }
643
644#if defined(DEBUG)
645 void EnterCompiler() {
646 ASSERT(!IsInsideCompiler());
647 inside_compiler_ = true;
648 }
649
650 void LeaveCompiler() {
651 ASSERT(IsInsideCompiler());
652 inside_compiler_ = false;
653 }
654#endif
655
656 void StoreBufferAddObject(ObjectPtr obj);
657 void StoreBufferAddObjectGC(ObjectPtr obj);
658#if defined(TESTING)
659 bool StoreBufferContains(ObjectPtr obj) const {
660 return store_buffer_block_->Contains(obj);
661 }
662#endif
663 void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy);
664 static intptr_t store_buffer_block_offset() {
665 return OFFSET_OF(Thread, store_buffer_block_);
666 }
667
668 bool is_marking() const { return marking_stack_block_ != nullptr; }
669 void MarkingStackAddObject(ObjectPtr obj);
670 void DeferredMarkingStackAddObject(ObjectPtr obj);
671 void MarkingStackBlockProcess();
672 void DeferredMarkingStackBlockProcess();
673 static intptr_t marking_stack_block_offset() {
674 return OFFSET_OF(Thread, marking_stack_block_);
675 }
676
677 uword top_exit_frame_info() const { return top_exit_frame_info_; }
678 void set_top_exit_frame_info(uword top_exit_frame_info) {
679 top_exit_frame_info_ = top_exit_frame_info;
680 }
681 static intptr_t top_exit_frame_info_offset() {
682 return OFFSET_OF(Thread, top_exit_frame_info_);
683 }
684
685 Heap* heap() const;
686
687 // The TLAB memory boundaries.
688 //
689 // When the heap sampling profiler is enabled, we use the TLAB boundary to
690 // trigger slow path allocations so we can take a sample. This means that
691 // true_end() >= end(), where true_end() is the actual end address of the
692 // TLAB and end() is the chosen sampling boundary for the thread.
693 //
694 // When the heap sampling profiler is disabled, true_end() == end().
695 uword top() const { return top_; }
696 uword end() const { return end_; }
697 uword true_end() const { return true_end_; }
698 void set_top(uword top) { top_ = top; }
699 void set_end(uword end) { end_ = end; }
700 void set_true_end(uword true_end) { true_end_ = true_end; }
701 static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
702 static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }
703
704 int32_t no_safepoint_scope_depth() const {
705#if defined(DEBUG)
706 return no_safepoint_scope_depth_;
707#else
708 return 0;
709#endif
710 }
711
712 void IncrementNoSafepointScopeDepth() {
713#if defined(DEBUG)
714 ASSERT(no_safepoint_scope_depth_ < INT_MAX);
715 no_safepoint_scope_depth_ += 1;
716#endif
717 }
718
719 void DecrementNoSafepointScopeDepth() {
720#if defined(DEBUG)
721 ASSERT(no_safepoint_scope_depth_ > 0);
722 no_safepoint_scope_depth_ -= 1;
723#endif
724 }
725
726 bool IsInNoReloadScope() const { return no_reload_scope_depth_ > 0; }
727
728 bool IsInStoppedMutatorsScope() const {
729 return stopped_mutators_scope_depth_ > 0;
730 }
731
732#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
733 static intptr_t member_name##offset() { \
734 return OFFSET_OF(Thread, member_name); \
735 }
736 CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
737#undef DEFINE_OFFSET_METHOD
738
739 static intptr_t write_barrier_wrappers_thread_offset(Register reg) {
740 ASSERT((kDartAvailableCpuRegs & (1 << reg)) != 0);
741 intptr_t index = 0;
742 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
743 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
744 if (i == reg) break;
745 ++index;
746 }
747 return OFFSET_OF(Thread, write_barrier_wrappers_entry_points_) +
748 index * sizeof(uword);
749 }
750
751 static intptr_t WriteBarrierWrappersOffsetForRegister(Register reg) {
752 intptr_t index = 0;
753 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
754 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
755 if (i == reg) {
756 return index * kStoreBufferWrapperSize;
757 }
758 ++index;
759 }
760 UNREACHABLE();
761 return 0;
762 }
763
764#define DEFINE_OFFSET_METHOD(name) \
765 static intptr_t name##_entry_point_offset() { \
766 return OFFSET_OF(Thread, name##_entry_point_); \
767 }
768 RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
769#undef DEFINE_OFFSET_METHOD
770
771#define DEFINE_OFFSET_METHOD(returntype, name, ...) \
772 static intptr_t name##_entry_point_offset() { \
773 return OFFSET_OF(Thread, name##_entry_point_); \
774 }
775 LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
776#undef DEFINE_OFFSET_METHOD
777
778 ObjectPoolPtr global_object_pool() const { return global_object_pool_; }
779 void set_global_object_pool(ObjectPoolPtr raw_value) {
780 global_object_pool_ = raw_value;
781 }
782
783 const uword* dispatch_table_array() const { return dispatch_table_array_; }
784 void set_dispatch_table_array(const uword* array) {
785 dispatch_table_array_ = array;
786 }
787
788 static bool CanLoadFromThread(const Object& object);
789 static intptr_t OffsetFromThread(const Object& object);
790 static bool ObjectAtOffset(intptr_t offset, Object* object);
791 static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);
792
793#define DEFINE_OFFSET_METHOD(name) \
794 static intptr_t name##_entry_point_offset() { \
795 return OFFSET_OF(Thread, name##_entry_point_); \
796 }
797 CACHED_FUNCTION_ENTRY_POINTS_LIST(DEFINE_OFFSET_METHOD)
798#undef DEFINE_OFFSET_METHOD
799
800#if defined(DEBUG)
801 // For asserts only. Has false positives when running with a simulator or
802 // SafeStack.
803 bool TopErrorHandlerIsSetJump() const;
804 bool TopErrorHandlerIsExitFrame() const;
805#endif
806
807 uword vm_tag() const { return vm_tag_; }
808 void set_vm_tag(uword tag) { vm_tag_ = tag; }
809 static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }
810
811 int64_t unboxed_int64_runtime_arg() const {
812 return unboxed_runtime_arg_.int64_storage[0];
813 }
814 void set_unboxed_int64_runtime_arg(int64_t value) {
815 unboxed_runtime_arg_.int64_storage[0] = value;
816 }
817 int64_t unboxed_int64_runtime_second_arg() const {
818 return unboxed_runtime_arg_.int64_storage[1];
819 }
820 void set_unboxed_int64_runtime_second_arg(int64_t value) {
821 unboxed_runtime_arg_.int64_storage[1] = value;
822 }
823 double unboxed_double_runtime_arg() const {
824 return unboxed_runtime_arg_.double_storage[0];
825 }
826 void set_unboxed_double_runtime_arg(double value) {
827 unboxed_runtime_arg_.double_storage[0] = value;
828 }
829 simd128_value_t unboxed_simd128_runtime_arg() const {
830 return unboxed_runtime_arg_;
831 }
832 void set_unboxed_simd128_runtime_arg(simd128_value_t value) {
833 unboxed_runtime_arg_ = value;
834 }
835 static intptr_t unboxed_runtime_arg_offset() {
836 return OFFSET_OF(Thread, unboxed_runtime_arg_);
837 }
838
839 static intptr_t global_object_pool_offset() {
840 return OFFSET_OF(Thread, global_object_pool_);
841 }
842
843 static intptr_t dispatch_table_array_offset() {
844 return OFFSET_OF(Thread, dispatch_table_array_);
845 }
846
847 ObjectPtr active_exception() const { return active_exception_; }
848 void set_active_exception(const Object& value);
849 static intptr_t active_exception_offset() {
850 return OFFSET_OF(Thread, active_exception_);
851 }
852
853 ObjectPtr active_stacktrace() const { return active_stacktrace_; }
854 void set_active_stacktrace(const Object& value);
855 static intptr_t active_stacktrace_offset() {
856 return OFFSET_OF(Thread, active_stacktrace_);
857 }
858
859 uword resume_pc() const { return resume_pc_; }
860 void set_resume_pc(uword value) { resume_pc_ = value; }
861 static uword resume_pc_offset() { return OFFSET_OF(Thread, resume_pc_); }
862
863 ErrorPtr sticky_error() const;
864 void set_sticky_error(const Error& value);
865 void ClearStickyError();
866 DART_WARN_UNUSED_RESULT ErrorPtr StealStickyError();
867
868#if defined(DEBUG)
869#define REUSABLE_HANDLE_SCOPE_ACCESSORS(object) \
870 void set_reusable_##object##_handle_scope_active(bool value) { \
871 reusable_##object##_handle_scope_active_ = value; \
872 } \
873 bool reusable_##object##_handle_scope_active() const { \
874 return reusable_##object##_handle_scope_active_; \
875 }
876 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_ACCESSORS)
877#undef REUSABLE_HANDLE_SCOPE_ACCESSORS
878
879 bool IsAnyReusableHandleScopeActive() const {
880#define IS_REUSABLE_HANDLE_SCOPE_ACTIVE(object) \
881 if (reusable_##object##_handle_scope_active_) { \
882 return true; \
883 }
884 REUSABLE_HANDLE_LIST(IS_REUSABLE_HANDLE_SCOPE_ACTIVE)
885 return false;
886#undef IS_REUSABLE_HANDLE_SCOPE_ACTIVE
887 }
888#endif // defined(DEBUG)
889
890 void ClearReusableHandles();
891
892#define REUSABLE_HANDLE(object) \
893 object& object##Handle() const { return *object##_handle_; }
894 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE)
895#undef REUSABLE_HANDLE
896
897 static bool IsAtSafepoint(SafepointLevel level, uword state) {
898 const uword mask = AtSafepointBits(level);
899 return (state & mask) == mask;
900 }
901
902 // Whether the current thread is owning any safepoint level.
903 bool IsAtSafepoint() const {
904 // Owning a higher level safepoint implies owning the lower levels as well.
905 return IsAtSafepoint(SafepointLevel::kGC);
906 }
907 bool IsAtSafepoint(SafepointLevel level) const {
908 return IsAtSafepoint(level, safepoint_state_.load());
909 }
910 void SetAtSafepoint(bool value, SafepointLevel level) {
911 ASSERT(thread_lock()->IsOwnedByCurrentThread());
912 ASSERT(level <= current_safepoint_level());
913 if (value) {
914 safepoint_state_ |= AtSafepointBits(level);
915 } else {
916 safepoint_state_ &= ~AtSafepointBits(level);
917 }
918 }
919 bool IsSafepointRequestedLocked(SafepointLevel level) const {
920 ASSERT(thread_lock()->IsOwnedByCurrentThread());
921 return IsSafepointRequested(level);
922 }
923 bool IsSafepointRequested() const {
924 return IsSafepointRequested(current_safepoint_level());
925 }
926 bool IsSafepointRequested(SafepointLevel level) const {
927 const uword state = safepoint_state_.load();
928 for (intptr_t i = level; i >= 0; --i) {
929 if (IsSafepointLevelRequested(state, static_cast<SafepointLevel>(i)))
930 return true;
931 }
932 return false;
933 }
934 bool IsSafepointLevelRequestedLocked(SafepointLevel level) const {
935 ASSERT(thread_lock()->IsOwnedByCurrentThread());
936 if (level > current_safepoint_level()) return false;
937 const uword state = safepoint_state_.load();
938 return IsSafepointLevelRequested(state, level);
939 }
940
941 static bool IsSafepointLevelRequested(uword state, SafepointLevel level) {
942 switch (level) {
943 case SafepointLevel::kGC:
944 return (state & SafepointRequestedField::mask_in_place()) != 0;
945 case SafepointLevel::kGCAndDeopt:
946 return (state & DeoptSafepointRequestedField::mask_in_place()) != 0;
947 case SafepointLevel::kGCAndDeoptAndReload:
948 return (state & ReloadSafepointRequestedField::mask_in_place()) != 0;
949 default:
950 UNREACHABLE();
951 }
952 }
953
954 void BlockForSafepoint();
955
956 uword SetSafepointRequested(SafepointLevel level, bool value) {
957 ASSERT(thread_lock()->IsOwnedByCurrentThread());
958
959 uword mask = 0;
960 switch (level) {
961 case SafepointLevel::kGC:
962 mask = SafepointRequestedField::mask_in_place();
963 break;
964 case SafepointLevel::kGCAndDeopt:
965 mask = DeoptSafepointRequestedField::mask_in_place();
966 break;
967 case SafepointLevel::kGCAndDeoptAndReload:
968 mask = ReloadSafepointRequestedField::mask_in_place();
969 break;
970 default:
971 UNREACHABLE();
972 }
973
974 if (value) {
975 // acquire pulls from the release in TryEnterSafepoint.
976 return safepoint_state_.fetch_or(mask, std::memory_order_acquire);
977 } else {
978 // release pushes to the acquire in TryExitSafepoint.
979 return safepoint_state_.fetch_and(~mask, std::memory_order_release);
980 }
981 }
982 static bool IsBlockedForSafepoint(uword state) {
983 return BlockedForSafepointField::decode(state);
984 }
985 bool IsBlockedForSafepoint() const {
986 return BlockedForSafepointField::decode(safepoint_state_);
987 }
988 void SetBlockedForSafepoint(bool value) {
989 ASSERT(thread_lock()->IsOwnedByCurrentThread());
990 safepoint_state_ =
991 BlockedForSafepointField::update(value, safepoint_state_);
992 }
993 bool BypassSafepoints() const {
994 return BypassSafepointsField::decode(safepoint_state_);
995 }
996 static uword SetBypassSafepoints(bool value, uword state) {
997 return BypassSafepointsField::update(value, state);
998 }
999 bool UnwindErrorInProgress() const {
1000 return UnwindErrorInProgressField::decode(safepoint_state_);
1001 }
1002 void SetUnwindErrorInProgress(bool value) {
1003 const uword mask = UnwindErrorInProgressField::mask_in_place();
1004 if (value) {
1005 safepoint_state_.fetch_or(mask);
1006 } else {
1007 safepoint_state_.fetch_and(~mask);
1008 }
1009 }
1010
1011 bool OwnsGCSafepoint() const;
1012 bool OwnsReloadSafepoint() const;
1013 bool OwnsDeoptSafepoint() const;
1014 bool OwnsSafepoint() const;
1015 bool CanAcquireSafepointLocks() const;
1016
1017 uword safepoint_state() { return safepoint_state_; }
1018
1019 enum ExecutionState {
1020 kThreadInVM = 0,
1021 kThreadInGenerated,
1022 kThreadInNative,
1023 kThreadInBlockedState
1024 };
1025
1026 ExecutionState execution_state() const {
1027 return static_cast<ExecutionState>(execution_state_);
1028 }
1029 // Normally execution state is only accessed for the current thread.
1030 NO_SANITIZE_THREAD
1031 ExecutionState execution_state_cross_thread_for_testing() const {
1032 return static_cast<ExecutionState>(execution_state_);
1033 }
1034 void set_execution_state(ExecutionState state) {
1035 execution_state_ = static_cast<uword>(state);
1036 }
1037 static intptr_t execution_state_offset() {
1038 return OFFSET_OF(Thread, execution_state_);
1039 }
1040
1041 virtual bool MayAllocateHandles() {
1042 return (execution_state() == kThreadInVM) ||
1043 (execution_state() == kThreadInGenerated);
1044 }
1045
1046 static uword full_safepoint_state_unacquired() {
1047 return (0 << AtSafepointField::shift()) |
1048 (0 << AtDeoptSafepointField::shift());
1049 }
1050 static uword full_safepoint_state_acquired() {
1051 return (1 << AtSafepointField::shift()) |
1052 (1 << AtDeoptSafepointField::shift());
1053 }
1054
1055 bool TryEnterSafepoint() {
1056 uword old_state = 0;
1057 uword new_state = AtSafepointBits(current_safepoint_level());
1058 return safepoint_state_.compare_exchange_strong(old_state, new_state,
1059 std::memory_order_release);
1060 }
1061
1062 void EnterSafepoint() {
1063 ASSERT(no_safepoint_scope_depth() == 0);
1064 // First try a fast update of the thread state to indicate it is at a
1065 // safepoint.
1066 if (!TryEnterSafepoint()) {
1067 // Fast update failed which means we could potentially be in the middle
1068 // of a safepoint operation.
1069 EnterSafepointUsingLock();
1070 }
1071 }
1072
1073 bool TryExitSafepoint() {
1074 uword old_state = AtSafepointBits(current_safepoint_level());
1075 uword new_state = 0;
1076 return safepoint_state_.compare_exchange_strong(old_state, new_state,
1077 std::memory_order_acquire);
1078 }
1079
1080 void ExitSafepoint() {
1081 // First try a fast update of the thread state to indicate it is not at a
1082 // safepoint anymore.
1083 if (!TryExitSafepoint()) {
1084 // Fast update failed which means we could potentially be in the middle
1085 // of a safepoint operation.
1086 ExitSafepointUsingLock();
1087 }
1088 }
1089
1090 void CheckForSafepoint() {
1091 // If we are in a runtime call that doesn't support lazy deopt, we will only
1092 // respond to gc safepointing requests.
1093 ASSERT(no_safepoint_scope_depth() == 0);
1094 if (IsSafepointRequested()) {
1095 BlockForSafepoint();
1096 }
1097 }
1098
1099 Thread* next() const { return next_; }
1100
1101 // Visit all object pointers.
1102 void VisitObjectPointers(ObjectPointerVisitor* visitor,
1103 ValidationPolicy validate_frames);
1104 void RememberLiveTemporaries();
1105 void DeferredMarkLiveTemporaries();
1106
1107 bool IsValidHandle(Dart_Handle object) const;
1108 bool IsValidLocalHandle(Dart_Handle object) const;
1109 intptr_t CountLocalHandles() const;
1110 int ZoneSizeInBytes() const;
1111 void UnwindScopes(uword stack_marker);
1112
1113 void InitVMConstants();
1114
1115 int64_t GetNextTaskId() { return next_task_id_++; }
1116 static intptr_t next_task_id_offset() {
1117 return OFFSET_OF(Thread, next_task_id_);
1118 }
1119 Random* random() { return &thread_random_; }
1120 static intptr_t random_offset() { return OFFSET_OF(Thread, thread_random_); }
1121
1122#ifndef PRODUCT
1123 void PrintJSON(JSONStream* stream) const;
1124#endif
1125
1126#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1127 HeapProfileSampler& heap_sampler() { return heap_sampler_; }
1128#endif
1129
1130 PendingDeopts& pending_deopts() { return pending_deopts_; }
1131
1132 SafepointLevel current_safepoint_level() const {
1133 if (runtime_call_deopt_ability_ ==
1134 RuntimeCallDeoptAbility::kCannotLazyDeopt) {
1135 return SafepointLevel::kGC;
1136 }
1137 if (no_reload_scope_depth_ > 0 || allow_reload_scope_depth_ <= 0) {
1138 return SafepointLevel::kGCAndDeopt;
1139 }
1140 return SafepointLevel::kGCAndDeoptAndReload;
1141 }
1142
1143 private:
1144 template <class T>
1145 T* AllocateReusableHandle();
1146
1147 enum class RestoreWriteBarrierInvariantOp {
1148 kAddToRememberedSet,
1149 kAddToDeferredMarkingStack
1150 };
1151 friend class RestoreWriteBarrierInvariantVisitor;
1152 void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);
1153
1154 // Set the current compiler state and return the previous compiler state.
1155 CompilerState* SetCompilerState(CompilerState* state) {
1156 CompilerState* previous = compiler_state_;
1157 compiler_state_ = state;
1158 return previous;
1159 }
1160
1161 // Accessed from generated code.
1162 // ** This block of fields must come first! **
1163 // For AOT cross-compilation, we rely on these members having the same offsets
1164 // in SIMARM(IA32) and ARM, and the same offsets in SIMARM64(X64) and ARM64.
1165 // We use only word-sized fields to avoid differences in struct packing on the
1166 // different architectures. See also CheckOffsets in dart.cc.
1167 volatile RelaxedAtomic<uword> stack_limit_ = 0;
1168 uword write_barrier_mask_;
1169#if defined(DART_COMPRESSED_POINTERS)
1170 uword heap_base_ = 0;
1171#endif
1172 uword top_ = 0;
1173 uword end_ = 0;
1174 const uword* dispatch_table_array_ = nullptr;
1175 ObjectPtr* field_table_values_ = nullptr;
1176
1177 // Offsets up to this point can all fit in a byte on X64. All of the above
1178 // fields are very abundantly accessed from code. Thus, keeping them first
1179 // is important for code size (although code size on X64 is not a priority).
1180
1181// State that is cached in the TLS for fast access in generated code.
1182#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
1183 type_name member_name;
1184 CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
1185#undef DECLARE_MEMBERS
1186
1187#define DECLARE_MEMBERS(name) uword name##_entry_point_;
1188 RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
1189#undef DECLARE_MEMBERS
1190
1191#define DECLARE_MEMBERS(returntype, name, ...) uword name##_entry_point_;
1192 LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
1193#undef DECLARE_MEMBERS
1194
1195 uword write_barrier_wrappers_entry_points_[kNumberOfDartAvailableCpuRegs];
1196
1197#define DECLARE_MEMBERS(name) uword name##_entry_point_ = 0;
1198 CACHED_FUNCTION_ENTRY_POINTS_LIST(DECLARE_MEMBERS)
1199#undef DECLARE_MEMBERS
1200
1201 Isolate* isolate_ = nullptr;
1202 IsolateGroup* isolate_group_ = nullptr;
1203
1204 uword saved_stack_limit_ = OSThread::kInvalidStackLimit;
1205 // The mutator uses this to indicate it wants to OSR (by
1206 // setting [Thread::kOsrRequest]) before going to runtime which will see this
1207 // bit.
1208 uword stack_overflow_flags_ = 0;
1209 uword volatile top_exit_frame_info_ = 0;
1210 StoreBufferBlock* store_buffer_block_ = nullptr;
1211 MarkingStackBlock* marking_stack_block_ = nullptr;
1212 MarkingStackBlock* deferred_marking_stack_block_ = nullptr;
1213 uword volatile vm_tag_ = 0;
1214 // Memory locations dedicated for passing unboxed int64 and double
1215 // values from generated code to runtime.
1216 // TODO(dartbug.com/33549): Clean this up when unboxed values
1217 // could be passed as arguments.
1218 ALIGN8 simd128_value_t unboxed_runtime_arg_;
1219
1220 // JumpToExceptionHandler state:
1221 ObjectPtr active_exception_;
1222 ObjectPtr active_stacktrace_;
1223
1224 ObjectPoolPtr global_object_pool_;
1225 uword resume_pc_;
1226 uword saved_shadow_call_stack_ = 0;
1227
1228 /*
1229 * The execution state for a thread.
1230 *
1231 * Potential execution states a thread could be in:
1232 * kThreadInGenerated - The thread is running jitted dart/stub code.
1233 * kThreadInVM - The thread is running VM code.
1234 * kThreadInNative - The thread is running native code.
1235 * kThreadInBlockedState - The thread is blocked waiting for a resource.
1236 *
1237 * Warning: Execution state doesn't imply the safepoint state. It's possible
1238 * to be in [kThreadInNative] and still not be at-safepoint (e.g. due to a
1239 * pending Dart_TypedDataAcquire() that increases no-callback-scope)
1240 */
1241 uword execution_state_;
1242
1243 /*
1244 * Stores
1245 *
1246 * - whether the thread is at a safepoint (current thread sets these)
1247 * [AtSafepointField]
1248 * [AtDeoptSafepointField]
1249 * [AtReloadSafepointField]
1250 *
1251 * - whether the thread is requested to safepoint (other thread sets these)
1252 * [SafepointRequestedField]
1253 * [DeoptSafepointRequestedField]
1254 * [ReloadSafepointRequestedField]
1255 *
1256 * - whether the thread is blocked due to safepoint request and needs to
1257 * be resumed after safepoint is done (current thread sets this)
1258 * [BlockedForSafepointField]
1259 *
1260 * - whether the thread should be ignored for safepointing purposes
1261 * [BypassSafepointsField]
1262 *
1263 * - whether the isolate running this thread has triggered an unwind error,
1264 * which requires enforced exit on a transition from native back to
1265 * generated.
1266 * [UnwindErrorInProgressField]
1267 */
1268 std::atomic<uword> safepoint_state_;
1269 uword exit_through_ffi_ = 0;
1270 ApiLocalScope* api_top_scope_;
1271 uint8_t double_truncate_round_supported_;
1272 ALIGN8 int64_t next_task_id_;
1273 ALIGN8 Random thread_random_;
1274
1275 TsanUtils* tsan_utils_ = nullptr;
1276
1277 // ---- End accessed from generated code. ----
1278
1279 // The layout of Thread object up to this point should not depend
1280 // on DART_PRECOMPILED_RUNTIME, as it is accessed from generated code.
1281 // The code is generated without DART_PRECOMPILED_RUNTIME, but used with
1282 // DART_PRECOMPILED_RUNTIME.
1283
1284 uword true_end_ = 0;
1285 TaskKind task_kind_;
1286 TimelineStream* const dart_stream_;
1287 StreamInfo* const service_extension_stream_;
1288 mutable Monitor thread_lock_;
1289 ApiLocalScope* api_reusable_scope_;
1290 int32_t no_callback_scope_depth_;
1291 int32_t force_growth_scope_depth_ = 0;
1292 intptr_t no_reload_scope_depth_ = 0;
1293 intptr_t allow_reload_scope_depth_ = 0;
1294 intptr_t stopped_mutators_scope_depth_ = 0;
1295#if defined(DEBUG)
1296 int32_t no_safepoint_scope_depth_;
1297#endif
1298 VMHandles reusable_handles_;
1299 int32_t stack_overflow_count_;
1300 uint32_t runtime_call_count_ = 0;
1301
1302 // Deoptimization of stack frames.
1303 RuntimeCallDeoptAbility runtime_call_deopt_ability_ =
1304 RuntimeCallDeoptAbility::kCanLazyDeopt;
1305 PendingDeopts pending_deopts_;
1306
1307 // Compiler state:
1308 CompilerState* compiler_state_ = nullptr;
1309 HierarchyInfo* hierarchy_info_;
1310 TypeUsageInfo* type_usage_info_;
1311 NoActiveIsolateScope* no_active_isolate_scope_ = nullptr;
1312
1313 CompilerTimings* compiler_timings_ = nullptr;
1314
1315 ErrorPtr sticky_error_;
1316
1317 ObjectPtr* field_table_values() const { return field_table_values_; }
1318
1319// Reusable handles support.
1320#define REUSABLE_HANDLE_FIELDS(object) object* object##_handle_;
1321 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_FIELDS)
1322#undef REUSABLE_HANDLE_FIELDS
1323
1324#if defined(DEBUG)
1325#define REUSABLE_HANDLE_SCOPE_VARIABLE(object) \
1326 bool reusable_##object##_handle_scope_active_;
1327 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_VARIABLE);
1328#undef REUSABLE_HANDLE_SCOPE_VARIABLE
1329#endif // defined(DEBUG)
1330
1331 class AtSafepointField : public BitField<uword, bool, 0, 1> {};
1332 class SafepointRequestedField
1333 : public BitField<uword, bool, AtSafepointField::kNextBit, 1> {};
1334
1335 class AtDeoptSafepointField
1336 : public BitField<uword, bool, SafepointRequestedField::kNextBit, 1> {};
1337 class DeoptSafepointRequestedField
1338 : public BitField<uword, bool, AtDeoptSafepointField::kNextBit, 1> {};
1339
1340 class AtReloadSafepointField
1341 : public BitField<uword,
1342 bool,
1343 DeoptSafepointRequestedField::kNextBit,
1344 1> {};
1345 class ReloadSafepointRequestedField
1346 : public BitField<uword, bool, AtReloadSafepointField::kNextBit, 1> {};
1347
1348 class BlockedForSafepointField
1349 : public BitField<uword,
1350 bool,
1351 ReloadSafepointRequestedField::kNextBit,
1352 1> {};
1353 class BypassSafepointsField
1354 : public BitField<uword, bool, BlockedForSafepointField::kNextBit, 1> {};
1355 class UnwindErrorInProgressField
1356 : public BitField<uword, bool, BypassSafepointsField::kNextBit, 1> {};
1357
1358 static uword AtSafepointBits(SafepointLevel level) {
1359 switch (level) {
1360 case SafepointLevel::kGC:
1361 return AtSafepointField::mask_in_place();
1362 case SafepointLevel::kGCAndDeopt:
1363 return AtSafepointField::mask_in_place() |
1364 AtDeoptSafepointField::mask_in_place();
1365 case SafepointLevel::kGCAndDeoptAndReload:
1366 return AtSafepointField::mask_in_place() |
1367 AtDeoptSafepointField::mask_in_place() |
1368 AtReloadSafepointField::mask_in_place();
1369 default:
1370 UNREACHABLE();
1371 }
1372 }
1373
1374#if defined(USING_SAFE_STACK)
1375 uword saved_safestack_limit_;
1376#endif
1377
1378 Thread* next_; // Used to chain the thread structures in an isolate.
1379 Isolate* scheduled_dart_mutator_isolate_ = nullptr;
1380
1381 bool is_unwind_in_progress_ = false;
1382
1383#if defined(DEBUG)
1384 bool inside_compiler_ = false;
1385#endif
1386
1387#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1388 HeapProfileSampler heap_sampler_;
1389#endif
1390
1391 explicit Thread(bool is_vm_isolate);
1392
1393 void StoreBufferRelease(
1394 StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
1395 void StoreBufferAcquire();
1396
1397 void MarkingStackRelease();
1398 void MarkingStackAcquire();
1399 void MarkingStackFlush();
1400 void DeferredMarkingStackRelease();
1401 void DeferredMarkingStackAcquire();
1402 void DeferredMarkingStackFlush();
1403
1404 void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
1405 void EnterSafepointUsingLock();
1406 void ExitSafepointUsingLock();
1407
1408 void SetupState(TaskKind kind);
1409 void ResetState();
1410
1411 void SetupMutatorState(TaskKind kind);
1412 void ResetMutatorState();
1413
1414 void SetupDartMutatorState(Isolate* isolate);
1415 void SetupDartMutatorStateDependingOnSnapshot(IsolateGroup* group);
1416 void ResetDartMutatorState(Isolate* isolate);
1417
1418 static void SuspendDartMutatorThreadInternal(Thread* thread,
1419 VMTag::VMTagId tag);
1420 static void ResumeDartMutatorThreadInternal(Thread* thread);
1421
1422 static void SuspendThreadInternal(Thread* thread, VMTag::VMTagId tag);
1423 static void ResumeThreadInternal(Thread* thread);
1424
1425 // Adds a new active mutator thread to thread registry while associating it
1426 // with the given isolate (group).
1427 //
1428 // All existing safepoint operations are waited for before adding the thread
1429 // to the thread registry.
1430 //
1431 // => Anyone who iterates the active threads will first have to get us to
1432 // safepoint (but can access `Thread::isolate()`).
1433 static Thread* AddActiveThread(IsolateGroup* group,
1434 Isolate* isolate,
1435 bool is_dart_mutator,
1436 bool bypass_safepoint);
1437
1438 // Releases a active mutator threads from the thread registry.
1439 //
1440 // Thread needs to be at-safepoint.
1441 static void FreeActiveThread(Thread* thread, bool bypass_safepoint);
1442
1443 static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }
1444
1445#define REUSABLE_FRIEND_DECLARATION(name) \
1446 friend class Reusable##name##HandleScope;
1447 REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
1448#undef REUSABLE_FRIEND_DECLARATION
1449
1450 friend class ApiZone;
1451 friend class ActiveIsolateScope;
1452 friend class InterruptChecker;
1453 friend class Isolate;
1454 friend class IsolateGroup;
1455 friend class NoActiveIsolateScope;
1456 friend class NoReloadScope;
1457 friend class RawReloadParticipationScope;
1458 friend class Simulator;
1459 friend class StackZone;
1460 friend class StoppedMutatorsScope;
1461 friend class ThreadRegistry;
1462 friend class CompilerState;
1463 friend class compiler::target::Thread;
1464 friend class FieldTable;
1465 friend class RuntimeCallDeoptScope;
1466 friend class Dart; // Calls SetupCachedEntryPoints after snapshot reading
1467 friend class
1468 TransitionGeneratedToVM; // IsSafepointRequested/BlockForSafepoint
1469 friend class
1470 TransitionVMToGenerated; // IsSafepointRequested/BlockForSafepoint
1471 friend class MonitorLocker; // ExitSafepointUsingLock
1472 friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup*,
1473 const char*,
1474 char**);
1476};
1477
1478class RuntimeCallDeoptScope : public StackResource {
1479 public:
1480 RuntimeCallDeoptScope(Thread* thread, RuntimeCallDeoptAbility kind)
1481 : StackResource(thread) {
1482 // We cannot have nested calls into the VM without deopt support.
1483 ASSERT(thread->runtime_call_deopt_ability_ ==
1484 RuntimeCallDeoptAbility::kCanLazyDeopt);
1485 thread->runtime_call_deopt_ability_ = kind;
1486 }
1487 virtual ~RuntimeCallDeoptScope() {
1488 thread()->runtime_call_deopt_ability_ =
1489 RuntimeCallDeoptAbility::kCanLazyDeopt;
1490 }
1491
1492 private:
1493 Thread* thread() {
1494 return reinterpret_cast<Thread*>(StackResource::thread());
1495 }
1496};
1497
1498#if defined(DART_HOST_OS_WINDOWS)
1499// Clears the state of the current thread and frees the allocation.
1500void WindowsThreadCleanUp();
1501#endif
1502
1503#if !defined(PRODUCT)
1504// Disable thread interrupts.
1505class DisableThreadInterruptsScope : public StackResource {
1506 public:
1507 explicit DisableThreadInterruptsScope(Thread* thread);
1508 ~DisableThreadInterruptsScope();
1509};
1510#else
1511class DisableThreadInterruptsScope : public StackResource {
1512 public:
1513 explicit DisableThreadInterruptsScope(Thread* thread)
1514 : StackResource(thread) {}
1515 ~DisableThreadInterruptsScope() {}
1516};
1517#endif // !defined(PRODUCT)
1518
1519// Within a NoSafepointScope, the thread must not reach any safepoint. Used
1520// around code that manipulates raw object pointers directly without handles.
1521#if defined(DEBUG)
1522class NoSafepointScope : public ThreadStackResource {
1523 public:
1524 explicit NoSafepointScope(Thread* thread = nullptr)
1525 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1526 this->thread()->IncrementNoSafepointScopeDepth();
1527 }
1528 ~NoSafepointScope() { thread()->DecrementNoSafepointScopeDepth(); }
1529
1530 private:
1531 DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
1532};
1533#else // defined(DEBUG)
1534class NoSafepointScope : public ValueObject {
1535 public:
1536 explicit NoSafepointScope(Thread* thread = nullptr) {}
1537
1538 private:
1539 DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
1540};
1541#endif // defined(DEBUG)
1542
1543// Disables initiating a reload operation as well as participating in another
1544// threads reload operation.
1545//
1546// Reload triggered by a mutator thread happens by sending all other mutator
1547// threads (that are running) OOB messages to check into a safepoint. The thread
1548// initiating the reload operation will block until all mutators are at a reload
1549// safepoint.
1550//
1551// When running under this scope, the processing of those OOB messages will
1552// ignore reload safepoint checkin requests. Yet we'll have to ensure that the
1553// dropped message is still acted upon.
1554//
1555// => To solve this we make the [~NoReloadScope] destructor resend a new reload
1556// OOB request to itself (the [~NoReloadScope] destructor is not necessarily at
1557// well-defined place where reload can happen - those places will explicitly
1558// opt-in via [ReloadParticipationScope]).
1559//
1560class NoReloadScope : public ThreadStackResource {
1561 public:
1562 explicit NoReloadScope(Thread* thread);
1563 ~NoReloadScope();
1564
1565 private:
1566 DISALLOW_COPY_AND_ASSIGN(NoReloadScope);
1567};
1568
1569// Allows triggering reload safepoint operations as well as participating in
1570// reload operations (at safepoint checks).
1571//
1572// By-default safepoint checkins will not participate in reload operations, as
1573// reload has to happen at very well-defined places. This scope is intended
1574// for those places where we explicitly want to allow safepoint checkins to
1575// participate in reload operations (triggered by other threads).
1576//
1577// If there is any [NoReloadScope] active we will still disable the safepoint
1578// checkins to participate in reload.
1579//
1580// We also require the thread inititating a reload operation to explicitly
1581// opt-in via this scope.
1582class RawReloadParticipationScope {
1583 public:
1584 explicit RawReloadParticipationScope(Thread* thread) : thread_(thread) {
1585#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1586 if (thread->allow_reload_scope_depth_ == 0) {
1587 ASSERT(thread->current_safepoint_level() == SafepointLevel::kGCAndDeopt);
1588 }
1589 thread->allow_reload_scope_depth_++;
1590 ASSERT(thread->allow_reload_scope_depth_ >= 0);
1591#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1592 }
1593
1594 ~RawReloadParticipationScope() {
1595#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1596 thread_->allow_reload_scope_depth_ -= 1;
1597 ASSERT(thread_->allow_reload_scope_depth_ >= 0);
1598 if (thread_->allow_reload_scope_depth_ == 0) {
1599 ASSERT(thread_->current_safepoint_level() == SafepointLevel::kGCAndDeopt);
1600 }
1601#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1602 }
1603
1604 private:
1605 Thread* thread_;
1606
1607 DISALLOW_COPY_AND_ASSIGN(RawReloadParticipationScope);
1608};
1609
1610using ReloadParticipationScope =
1611    AsThreadStackResource<RawReloadParticipationScope>;
1612
1613class StoppedMutatorsScope : public ThreadStackResource {
1614 public:
1615 explicit StoppedMutatorsScope(Thread* thread) : ThreadStackResource(thread) {
1616#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1617 thread->stopped_mutators_scope_depth_++;
1618 ASSERT(thread->stopped_mutators_scope_depth_ >= 0);
1619#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1620 }
1621
1622 ~StoppedMutatorsScope() {
1623#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1624 thread()->stopped_mutators_scope_depth_ -= 1;
1625 ASSERT(thread()->stopped_mutators_scope_depth_ >= 0);
1626#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1627 }
1628
1629 private:
1630 DISALLOW_COPY_AND_ASSIGN(StoppedMutatorsScope);
1631};
1632
1633// Within a EnterCompilerScope, the thread must operate on cloned fields.
1634#if defined(DEBUG)
1635class EnterCompilerScope : public ThreadStackResource {
1636 public:
1637 explicit EnterCompilerScope(Thread* thread = nullptr)
1638 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1639 previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
1640 if (!previously_is_inside_compiler_) {
1641 this->thread()->EnterCompiler();
1642 }
1643 }
1644 ~EnterCompilerScope() {
1645 if (!previously_is_inside_compiler_) {
1646 thread()->LeaveCompiler();
1647 }
1648 }
1649
1650 private:
1651 bool previously_is_inside_compiler_;
1652 DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
1653};
1654#else // defined(DEBUG)
1655class EnterCompilerScope : public ValueObject {
1656 public:
1657 explicit EnterCompilerScope(Thread* thread = nullptr) {}
1658
1659 private:
1660 DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
1661};
1662#endif // defined(DEBUG)
1663
1664// Within a LeaveCompilerScope, the thread must operate on cloned fields.
1665#if defined(DEBUG)
1666class LeaveCompilerScope : public ThreadStackResource {
1667 public:
1668 explicit LeaveCompilerScope(Thread* thread = nullptr)
1669 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1670 previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
1671 if (previously_is_inside_compiler_) {
1672 this->thread()->LeaveCompiler();
1673 }
1674 }
1675 ~LeaveCompilerScope() {
1676 if (previously_is_inside_compiler_) {
1677 thread()->EnterCompiler();
1678 }
1679 }
1680
1681 private:
1682 bool previously_is_inside_compiler_;
1683 DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
1684};
1685#else // defined(DEBUG)
1686class LeaveCompilerScope : public ValueObject {
1687 public:
1688 explicit LeaveCompilerScope(Thread* thread = nullptr) {}
1689
1690 private:
1691 DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
1692};
1693#endif // defined(DEBUG)
1694
1695} // namespace dart
1696
1697#endif // RUNTIME_VM_THREAD_H_
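The EnterSafepoint()/ExitSafepoint() pair above follows a fast-path/slow-path pattern: a single compare-and-swap on the safepoint word covers the common case, and only contention (for example a pending safepoint request) falls through to the lock-based EnterSafepointUsingLock()/ExitSafepointUsingLock() slow paths. A minimal, self-contained sketch of that pattern, with a simplified one-bit state word and placeholder slow paths (not the VM's actual bit layout):

#include <atomic>
#include <cstdint>

class SafepointWordSketch {
 public:
  void EnterSafepoint() {
    uintptr_t expected = 0;  // no safepoint flags set, nothing requested
    if (!state_.compare_exchange_strong(expected, kAtSafepointBit,
                                        std::memory_order_release)) {
      EnterSafepointSlowPath();  // contended: take the thread lock instead
    }
  }

  void ExitSafepoint() {
    uintptr_t expected = kAtSafepointBit;
    if (!state_.compare_exchange_strong(expected, 0,
                                        std::memory_order_acquire)) {
      ExitSafepointSlowPath();  // a safepoint operation may be in progress
    }
  }

 private:
  static constexpr uintptr_t kAtSafepointBit = 1;
  std::atomic<uintptr_t> state_{0};

  // Placeholders for the lock-protected slow paths.
  void EnterSafepointSlowPath() {}
  void ExitSafepointSlowPath() {}
};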

◆ CACHED_ADDRESSES_LIST

#define CACHED_ADDRESSES_LIST (   V)
Value:
CACHED_VM_STUBS_ADDRESSES_LIST(V) \
V(uword, bootstrap_native_wrapper_entry_point_, \
NativeEntry::BootstrapNativeCallWrapperEntry(), 0) \
V(uword, no_scope_native_wrapper_entry_point_, \
NativeEntry::NoScopeNativeCallWrapperEntry(), 0) \
V(uword, auto_scope_native_wrapper_entry_point_, \
NativeEntry::AutoScopeNativeCallWrapperEntry(), 0) \
V(StringPtr*, predefined_symbols_address_, Symbols::PredefinedAddress(), \
nullptr) \
V(uword, double_nan_address_, reinterpret_cast<uword>(&double_nan_constant), \
0) \
V(uword, double_negate_address_, \
reinterpret_cast<uword>(&double_negate_constant), 0) \
V(uword, double_abs_address_, reinterpret_cast<uword>(&double_abs_constant), \
0) \
V(uword, float_not_address_, reinterpret_cast<uword>(&float_not_constant), \
0) \
V(uword, float_negate_address_, \
reinterpret_cast<uword>(&float_negate_constant), 0) \
V(uword, float_absolute_address_, \
reinterpret_cast<uword>(&float_absolute_constant), 0) \
V(uword, float_zerow_address_, \
reinterpret_cast<uword>(&float_zerow_constant), 0)

Definition at line 242 of file thread.h.

◆ CACHED_CONSTANTS_LIST

#define CACHED_CONSTANTS_LIST (   V)
Value:
CACHED_VM_OBJECTS_LIST(V) \
CACHED_ADDRESSES_LIST(V)

Definition at line 267 of file thread.h.
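Lists with this four-argument entry shape are consumed twice through the X-macro pattern documented on this page: once with the four-argument DECLARE_MEMBERS form to lay out the cached fields inside Thread, and once with the four-argument DEFINE_OFFSET_METHOD form to generate the static ..._offset() accessors that the compiler embeds into generated code. A compilable, self-contained illustration using a made-up two-entry list and a hypothetical SketchThread class (not the real cached constants):

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for a CACHED_*_LIST(V) with two entries.
#define SKETCH_CACHED_LIST(V)                              \
  V(uintptr_t, object_null_, /*expr*/ 0, /*default*/ 0)    \
  V(uintptr_t, bool_true_, /*expr*/ 0, /*default*/ 0)

class SketchThread {
 public:
  // Same shape as DEFINE_OFFSET_METHOD: one static offset getter per entry.
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
  static intptr_t member_name##offset() {                                      \
    return offsetof(SketchThread, member_name);                                \
  }
  SKETCH_CACHED_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD

  // Same shape as DECLARE_MEMBERS: one field per entry (kept public here so
  // that offsetof stays well-defined for this sketch).
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
  type_name member_name;
  SKETCH_CACHED_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS
};

// After expansion: SketchThread has object_null_ and bool_true_ fields plus
// SketchThread::object_null_offset() and SketchThread::bool_true_offset().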

◆ CACHED_FUNCTION_ENTRY_POINTS_LIST

#define CACHED_FUNCTION_ENTRY_POINTS_LIST (   V)
Value:
V(suspend_state_init_async) \
V(suspend_state_await) \
V(suspend_state_await_with_type_check) \
V(suspend_state_return_async) \
V(suspend_state_return_async_not_future) \
V(suspend_state_init_async_star) \
V(suspend_state_yield_async_star) \
V(suspend_state_return_async_star) \
V(suspend_state_init_sync_star) \
V(suspend_state_suspend_sync_star_at_start) \
V(suspend_state_handle_exception)

Definition at line 189 of file thread.h.
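Each single-name entry in this list is expanded with the one-argument DECLARE_MEMBERS and DEFINE_OFFSET_METHOD forms shown among the macros above, producing a zero-initialized uword name##_entry_point_ slot on the Thread plus a matching name##_entry_point_offset() accessor. A small self-contained sketch of that expansion, reusing one entry name from the list inside a hypothetical SketchThread struct:

#include <cstddef>
#include <cstdint>

using uword = uintptr_t;

// One-entry stand-in for CACHED_FUNCTION_ENTRY_POINTS_LIST(V).
#define SKETCH_ENTRY_POINTS_LIST(V) V(suspend_state_await)

struct SketchThread {
  // DECLARE_MEMBERS form: a zero-initialized entry-point slot per entry.
#define DECLARE_MEMBERS(name) uword name##_entry_point_ = 0;
  SKETCH_ENTRY_POINTS_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS

  // DEFINE_OFFSET_METHOD form: the offset used to load the entry point from
  // the current thread in generated code.
#define DEFINE_OFFSET_METHOD(name)                        \
  static intptr_t name##_entry_point_offset() {           \
    return offsetof(SketchThread, name##_entry_point_);   \
  }
  SKETCH_ENTRY_POINTS_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
};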

◆ CACHED_NON_VM_STUB_LIST

#define CACHED_NON_VM_STUB_LIST (   V)
Value:
V(ObjectPtr, object_null_, Object::null(), nullptr) \
V(BoolPtr, bool_true_, Object::bool_true().ptr(), nullptr) \
V(BoolPtr, bool_false_, Object::bool_false().ptr(), nullptr) \
V(ArrayPtr, empty_array_, Object::empty_array().ptr(), nullptr) \
V(TypeArgumentsPtr, empty_type_arguments_, \
Object::empty_type_arguments().ptr(), nullptr) \
V(TypePtr, dynamic_type_, Type::dynamic_type().ptr(), nullptr)

Definition at line 174 of file thread.h.

◆ CACHED_VM_OBJECTS_LIST

#define CACHED_VM_OBJECTS_LIST (   V)
Value:
CACHED_NON_VM_STUB_LIST(V) \
CACHED_VM_STUBS_LIST(V)

Definition at line 185 of file thread.h.

◆ CACHED_VM_STUBS_ADDRESSES_LIST

#define CACHED_VM_STUBS_ADDRESSES_LIST (   V)

Definition at line 208 of file thread.h.

◆ CACHED_VM_STUBS_LIST

#define CACHED_VM_STUBS_LIST (   V)

Definition at line 100 of file thread.h.

◆ DECLARE_MEMBERS [1/4]

#define DECLARE_MEMBERS (   name)    uword name##_entry_point_;

Definition at line 1183 of file thread.h.

◆ DECLARE_MEMBERS [2/4]

#define DECLARE_MEMBERS (   name)    uword name##_entry_point_ = 0;

Definition at line 1183 of file thread.h.

◆ DECLARE_MEMBERS [3/4]

#define DECLARE_MEMBERS(returntype, name, ...)   uword name##_entry_point_;

Definition at line 1183 of file thread.h.

◆ DECLARE_MEMBERS [4/4]

#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value)    type_name member_name;

Definition at line 1183 of file thread.h.

◆ DEFINE_OFFSET_METHOD [1/4]

#define DEFINE_OFFSET_METHOD (   name)
Value:
static intptr_t name##_entry_point_offset() { \
return OFFSET_OF(Thread, name##_entry_point_); \
}

Definition at line 733 of file thread.h.


◆ DEFINE_OFFSET_METHOD [2/4]

#define DEFINE_OFFSET_METHOD (   name)
Value:
static intptr_t name##_entry_point_offset() { \
return OFFSET_OF(Thread, name##_entry_point_); \
}

Definition at line 733 of file thread.h.


◆ DEFINE_OFFSET_METHOD [3/4]

#define DEFINE_OFFSET_METHOD(returntype, name, ...)
Value:
static intptr_t name##_entry_point_offset() { \
return OFFSET_OF(Thread, name##_entry_point_); \
}

Definition at line 733 of file thread.h.


◆ DEFINE_OFFSET_METHOD [4/4]

#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value)
Value:
static intptr_t member_name##offset() { \
return OFFSET_OF(Thread, member_name); \
}

Definition at line 733 of file thread.h.


◆ REUSABLE_FRIEND_DECLARATION

#define REUSABLE_FRIEND_DECLARATION (   name)     friend class Reusable##name##HandleScope;

Definition at line 1446 of file thread.h.

◆ REUSABLE_HANDLE

#define REUSABLE_HANDLE (   object)     object& object##Handle() const { return *object##_handle_; }

Definition at line 893 of file thread.h.


◆ REUSABLE_HANDLE_FIELDS

#define REUSABLE_HANDLE_FIELDS (   object)    object* object##_handle_;

Definition at line 1321 of file thread.h.

◆ REUSABLE_HANDLE_LIST

#define REUSABLE_HANDLE_LIST (   V)
Value:
V(AbstractType) \
V(Array) \
V(Class) \
V(Code) \
V(Error) \
V(ExceptionHandlers) \
V(Field) \
V(Function) \
V(GrowableObjectArray) \
V(Instance) \
V(Library) \
V(LoadingUnit) \
V(Object) \
V(PcDescriptors) \
V(Smi) \
V(String) \
V(TypeParameters) \
V(TypeArguments) \
V(TypeParameter) \
V(WeakArray)

Definition at line 78 of file thread.h.
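REUSABLE_HANDLE_LIST is the same X-macro idea applied to preallocated handles: the list is expanded with REUSABLE_HANDLE_FIELDS to add one handle pointer per listed type, and with REUSABLE_HANDLE to add an accessor that hands that handle out by reference, so hot VM code can reuse a handle instead of allocating a fresh one per call. A minimal, self-contained sketch with hypothetical placeholder types (not the real VM object classes):

#include <string>

// Two-entry stand-in for REUSABLE_HANDLE_LIST(V).
#define SKETCH_REUSABLE_HANDLE_LIST(V) \
  V(String)                            \
  V(Array)

struct String { std::string value; };
struct Array  { int length = 0; };

class SketchThread {
 public:
#define REUSABLE_HANDLE(object) \
  object& object##Handle() const { return *object##_handle_; }
  SKETCH_REUSABLE_HANDLE_LIST(REUSABLE_HANDLE)
#undef REUSABLE_HANDLE

 private:
#define REUSABLE_HANDLE_FIELDS(object) object* object##_handle_ = nullptr;
  SKETCH_REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_FIELDS)
#undef REUSABLE_HANDLE_FIELDS
};

// Usage (once the pointers have been initialized, as Thread does for its
// reusable handles): String& s = thread->StringHandle();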