Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
thread.h
Go to the documentation of this file.
1// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_THREAD_H_
6#define RUNTIME_VM_THREAD_H_
7
8#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
9#error "Should not include runtime"
10#endif
11
12#include <setjmp.h>
13
14#include "include/dart_api.h"
15#include "platform/assert.h"
16#include "platform/atomic.h"
17#include "platform/safe_stack.h"
18#include "vm/bitfield.h"
20#include "vm/constants.h"
21#include "vm/globals.h"
22#include "vm/handles.h"
24#include "vm/heap/sampler.h"
25#include "vm/os_thread.h"
26#include "vm/pending_deopts.h"
27#include "vm/random.h"
29#include "vm/tags.h"
31#include "vm/thread_state.h"
32
33namespace dart {
34
35class AbstractType;
36class ApiLocalScope;
37class Array;
38class CompilerState;
39class CompilerTimings;
40class Class;
41class Code;
42class Error;
43class ExceptionHandlers;
44class Field;
45class FieldTable;
46class Function;
47class GrowableObjectArray;
48class HandleScope;
49class Heap;
50class HierarchyInfo;
51class Instance;
52class Isolate;
53class IsolateGroup;
54class Library;
55class Object;
56class OSThread;
57class JSONObject;
58class NoActiveIsolateScope;
59class PcDescriptors;
60class RuntimeEntry;
61class Smi;
62class StackResource;
63class StackTrace;
64class StreamInfo;
65class String;
66class TimelineStream;
67class TypeArguments;
68class TypeParameter;
69class TypeUsageInfo;
70class Zone;
71
72namespace compiler {
73namespace target {
74class Thread;
75} // namespace target
76} // namespace compiler
77
78#define REUSABLE_HANDLE_LIST(V) \
79 V(AbstractType) \
80 V(Array) \
81 V(Class) \
82 V(Code) \
83 V(Error) \
84 V(ExceptionHandlers) \
85 V(Field) \
86 V(Function) \
87 V(GrowableObjectArray) \
88 V(Instance) \
89 V(Library) \
90 V(LoadingUnit) \
91 V(Object) \
92 V(PcDescriptors) \
93 V(Smi) \
94 V(String) \
95 V(TypeParameters) \
96 V(TypeArguments) \
97 V(TypeParameter) \
98 V(WeakArray)
99
100#define CACHED_VM_STUBS_LIST(V) \
101 V(CodePtr, fix_callers_target_code_, StubCode::FixCallersTarget().ptr(), \
102 nullptr) \
103 V(CodePtr, fix_allocation_stub_code_, \
104 StubCode::FixAllocationStubTarget().ptr(), nullptr) \
105 V(CodePtr, invoke_dart_code_stub_, StubCode::InvokeDartCode().ptr(), \
106 nullptr) \
107 V(CodePtr, call_to_runtime_stub_, StubCode::CallToRuntime().ptr(), nullptr) \
108 V(CodePtr, late_initialization_error_shared_without_fpu_regs_stub_, \
109 StubCode::LateInitializationErrorSharedWithoutFPURegs().ptr(), nullptr) \
110 V(CodePtr, late_initialization_error_shared_with_fpu_regs_stub_, \
111 StubCode::LateInitializationErrorSharedWithFPURegs().ptr(), nullptr) \
112 V(CodePtr, null_error_shared_without_fpu_regs_stub_, \
113 StubCode::NullErrorSharedWithoutFPURegs().ptr(), nullptr) \
114 V(CodePtr, null_error_shared_with_fpu_regs_stub_, \
115 StubCode::NullErrorSharedWithFPURegs().ptr(), nullptr) \
116 V(CodePtr, null_arg_error_shared_without_fpu_regs_stub_, \
117 StubCode::NullArgErrorSharedWithoutFPURegs().ptr(), nullptr) \
118 V(CodePtr, null_arg_error_shared_with_fpu_regs_stub_, \
119 StubCode::NullArgErrorSharedWithFPURegs().ptr(), nullptr) \
120 V(CodePtr, null_cast_error_shared_without_fpu_regs_stub_, \
121 StubCode::NullCastErrorSharedWithoutFPURegs().ptr(), nullptr) \
122 V(CodePtr, null_cast_error_shared_with_fpu_regs_stub_, \
123 StubCode::NullCastErrorSharedWithFPURegs().ptr(), nullptr) \
124 V(CodePtr, range_error_shared_without_fpu_regs_stub_, \
125 StubCode::RangeErrorSharedWithoutFPURegs().ptr(), nullptr) \
126 V(CodePtr, range_error_shared_with_fpu_regs_stub_, \
127 StubCode::RangeErrorSharedWithFPURegs().ptr(), nullptr) \
128 V(CodePtr, write_error_shared_without_fpu_regs_stub_, \
129 StubCode::WriteErrorSharedWithoutFPURegs().ptr(), nullptr) \
130 V(CodePtr, write_error_shared_with_fpu_regs_stub_, \
131 StubCode::WriteErrorSharedWithFPURegs().ptr(), nullptr) \
132 V(CodePtr, allocate_mint_with_fpu_regs_stub_, \
133 StubCode::AllocateMintSharedWithFPURegs().ptr(), nullptr) \
134 V(CodePtr, allocate_mint_without_fpu_regs_stub_, \
135 StubCode::AllocateMintSharedWithoutFPURegs().ptr(), nullptr) \
136 V(CodePtr, allocate_object_stub_, StubCode::AllocateObject().ptr(), nullptr) \
137 V(CodePtr, allocate_object_parameterized_stub_, \
138 StubCode::AllocateObjectParameterized().ptr(), nullptr) \
139 V(CodePtr, allocate_object_slow_stub_, StubCode::AllocateObjectSlow().ptr(), \
140 nullptr) \
141 V(CodePtr, async_exception_handler_stub_, \
142 StubCode::AsyncExceptionHandler().ptr(), nullptr) \
143 V(CodePtr, resume_stub_, StubCode::Resume().ptr(), nullptr) \
144 V(CodePtr, return_async_stub_, StubCode::ReturnAsync().ptr(), nullptr) \
145 V(CodePtr, return_async_not_future_stub_, \
146 StubCode::ReturnAsyncNotFuture().ptr(), nullptr) \
147 V(CodePtr, return_async_star_stub_, StubCode::ReturnAsyncStar().ptr(), \
148 nullptr) \
149 V(CodePtr, stack_overflow_shared_without_fpu_regs_stub_, \
150 StubCode::StackOverflowSharedWithoutFPURegs().ptr(), nullptr) \
151 V(CodePtr, stack_overflow_shared_with_fpu_regs_stub_, \
152 StubCode::StackOverflowSharedWithFPURegs().ptr(), nullptr) \
153 V(CodePtr, switchable_call_miss_stub_, StubCode::SwitchableCallMiss().ptr(), \
154 nullptr) \
155 V(CodePtr, throw_stub_, StubCode::Throw().ptr(), nullptr) \
156 V(CodePtr, re_throw_stub_, StubCode::Throw().ptr(), nullptr) \
157 V(CodePtr, assert_boolean_stub_, StubCode::AssertBoolean().ptr(), nullptr) \
158 V(CodePtr, optimize_stub_, StubCode::OptimizeFunction().ptr(), nullptr) \
159 V(CodePtr, deoptimize_stub_, StubCode::Deoptimize().ptr(), nullptr) \
160 V(CodePtr, lazy_deopt_from_return_stub_, \
161 StubCode::DeoptimizeLazyFromReturn().ptr(), nullptr) \
162 V(CodePtr, lazy_deopt_from_throw_stub_, \
163 StubCode::DeoptimizeLazyFromThrow().ptr(), nullptr) \
164 V(CodePtr, slow_type_test_stub_, StubCode::SlowTypeTest().ptr(), nullptr) \
165 V(CodePtr, lazy_specialize_type_test_stub_, \
166 StubCode::LazySpecializeTypeTest().ptr(), nullptr) \
167 V(CodePtr, enter_safepoint_stub_, StubCode::EnterSafepoint().ptr(), nullptr) \
168 V(CodePtr, exit_safepoint_stub_, StubCode::ExitSafepoint().ptr(), nullptr) \
169 V(CodePtr, exit_safepoint_ignore_unwind_in_progress_stub_, \
170 StubCode::ExitSafepointIgnoreUnwindInProgress().ptr(), nullptr) \
171 V(CodePtr, call_native_through_safepoint_stub_, \
172 StubCode::CallNativeThroughSafepoint().ptr(), nullptr)
173
174#define CACHED_NON_VM_STUB_LIST(V) \
175 V(ObjectPtr, object_null_, Object::null(), nullptr) \
176 V(BoolPtr, bool_true_, Object::bool_true().ptr(), nullptr) \
177 V(BoolPtr, bool_false_, Object::bool_false().ptr(), nullptr) \
178 V(ArrayPtr, empty_array_, Object::empty_array().ptr(), nullptr) \
179 V(TypeArgumentsPtr, empty_type_arguments_, \
180 Object::empty_type_arguments().ptr(), nullptr) \
181 V(TypePtr, dynamic_type_, Type::dynamic_type().ptr(), nullptr)
182
183// List of VM-global objects/addresses cached in each Thread object.
184// Important: constant false must immediately follow constant true.
185#define CACHED_VM_OBJECTS_LIST(V) \
186 CACHED_NON_VM_STUB_LIST(V) \
187 CACHED_VM_STUBS_LIST(V)
188
189#define CACHED_FUNCTION_ENTRY_POINTS_LIST(V) \
190 V(suspend_state_init_async) \
191 V(suspend_state_await) \
192 V(suspend_state_await_with_type_check) \
193 V(suspend_state_return_async) \
194 V(suspend_state_return_async_not_future) \
195 V(suspend_state_init_async_star) \
196 V(suspend_state_yield_async_star) \
197 V(suspend_state_return_async_star) \
198 V(suspend_state_init_sync_star) \
199 V(suspend_state_suspend_sync_star_at_start) \
200 V(suspend_state_handle_exception)
201
202// This assertion marks places which assume that boolean false immediate
203// follows bool true in the CACHED_VM_OBJECTS_LIST
204#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE() \
205 ASSERT((Thread::bool_true_offset() + kWordSize) == \
206 Thread::bool_false_offset());
207
208#define CACHED_VM_STUBS_ADDRESSES_LIST(V) \
209 V(uword, write_barrier_entry_point_, StubCode::WriteBarrier().EntryPoint(), \
210 0) \
211 V(uword, array_write_barrier_entry_point_, \
212 StubCode::ArrayWriteBarrier().EntryPoint(), 0) \
213 V(uword, call_to_runtime_entry_point_, \
214 StubCode::CallToRuntime().EntryPoint(), 0) \
215 V(uword, allocate_mint_with_fpu_regs_entry_point_, \
216 StubCode::AllocateMintSharedWithFPURegs().EntryPoint(), 0) \
217 V(uword, allocate_mint_without_fpu_regs_entry_point_, \
218 StubCode::AllocateMintSharedWithoutFPURegs().EntryPoint(), 0) \
219 V(uword, allocate_object_entry_point_, \
220 StubCode::AllocateObject().EntryPoint(), 0) \
221 V(uword, allocate_object_parameterized_entry_point_, \
222 StubCode::AllocateObjectParameterized().EntryPoint(), 0) \
223 V(uword, allocate_object_slow_entry_point_, \
224 StubCode::AllocateObjectSlow().EntryPoint(), 0) \
225 V(uword, stack_overflow_shared_without_fpu_regs_entry_point_, \
226 StubCode::StackOverflowSharedWithoutFPURegs().EntryPoint(), 0) \
227 V(uword, stack_overflow_shared_with_fpu_regs_entry_point_, \
228 StubCode::StackOverflowSharedWithFPURegs().EntryPoint(), 0) \
229 V(uword, megamorphic_call_checked_entry_, \
230 StubCode::MegamorphicCall().EntryPoint(), 0) \
231 V(uword, switchable_call_miss_entry_, \
232 StubCode::SwitchableCallMiss().EntryPoint(), 0) \
233 V(uword, optimize_entry_, StubCode::OptimizeFunction().EntryPoint(), 0) \
234 V(uword, deoptimize_entry_, StubCode::Deoptimize().EntryPoint(), 0) \
235 V(uword, call_native_through_safepoint_entry_point_, \
236 StubCode::CallNativeThroughSafepoint().EntryPoint(), 0) \
237 V(uword, jump_to_frame_entry_point_, StubCode::JumpToFrame().EntryPoint(), \
238 0) \
239 V(uword, slow_type_test_entry_point_, StubCode::SlowTypeTest().EntryPoint(), \
240 0)
241
242#define CACHED_ADDRESSES_LIST(V) \
243 CACHED_VM_STUBS_ADDRESSES_LIST(V) \
244 V(uword, bootstrap_native_wrapper_entry_point_, \
245 NativeEntry::BootstrapNativeCallWrapperEntry(), 0) \
246 V(uword, no_scope_native_wrapper_entry_point_, \
247 NativeEntry::NoScopeNativeCallWrapperEntry(), 0) \
248 V(uword, auto_scope_native_wrapper_entry_point_, \
249 NativeEntry::AutoScopeNativeCallWrapperEntry(), 0) \
250 V(StringPtr*, predefined_symbols_address_, Symbols::PredefinedAddress(), \
251 nullptr) \
252 V(uword, double_nan_address_, reinterpret_cast<uword>(&double_nan_constant), \
253 0) \
254 V(uword, double_negate_address_, \
255 reinterpret_cast<uword>(&double_negate_constant), 0) \
256 V(uword, double_abs_address_, reinterpret_cast<uword>(&double_abs_constant), \
257 0) \
258 V(uword, float_not_address_, reinterpret_cast<uword>(&float_not_constant), \
259 0) \
260 V(uword, float_negate_address_, \
261 reinterpret_cast<uword>(&float_negate_constant), 0) \
262 V(uword, float_absolute_address_, \
263 reinterpret_cast<uword>(&float_absolute_constant), 0) \
264 V(uword, float_zerow_address_, \
265 reinterpret_cast<uword>(&float_zerow_constant), 0)
266
267#define CACHED_CONSTANTS_LIST(V) \
268 CACHED_VM_OBJECTS_LIST(V) \
269 CACHED_ADDRESSES_LIST(V)
270
272 kValidateFrames = 0,
274};
275
277 // There was no leaf call or a leaf call that can cause deoptimization
278 // after-call.
280 // There was a leaf call and the VM cannot cause deoptimize after-call.
282};
283
284// The safepoint level a thread is on or a safepoint operation is requested for
285//
286// The higher the number the stronger the guarantees:
287// * the time-to-safepoint latency increases with level
288// * the frequency of hitting possible safe points decreases with level
290 // Safe to GC
292 // Safe to GC as well as Deopt.
294 // Safe to GC, Deopt as well as Reload.
296 // Number of levels.
298
299 // No safepoint.
301};
302
303// Accessed from generated code.
304struct TsanUtils {
305 // Used to allow unwinding runtime C frames using longjmp() when throwing
306 // exceptions. This allows triggering the normal TSAN shadow stack unwinding
307 // implementation.
308 // -> See https://dartbug.com/47472#issuecomment-948235479 for details.
309#if defined(USING_THREAD_SANITIZER)
310 void* setjmp_function = reinterpret_cast<void*>(&setjmp);
311#else
312 // MSVC (on Windows) is not happy with getting address of purely intrinsic.
313 void* setjmp_function = nullptr;
314#endif
315 jmp_buf* setjmp_buffer = nullptr;
319
320 static intptr_t setjmp_function_offset() {
322 }
323 static intptr_t setjmp_buffer_offset() {
325 }
326 static intptr_t exception_pc_offset() {
328 }
329 static intptr_t exception_sp_offset() {
331 }
332 static intptr_t exception_fp_offset() {
334 }
335};
336
337// A VM thread; may be executing Dart code or performing helper tasks like
338// garbage collection or compilation. The Thread structure associated with
339// a thread is allocated by EnsureInit before entering an isolate, and destroyed
340// automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp
341// must currently be called manually (issue 23474).
342class Thread : public ThreadState {
343 public:
344 // The kind of task this thread is performing. Sampled by the profiler.
355 // Converts a TaskKind to its corresponding C-String name.
356 static const char* TaskKindToCString(TaskKind kind);
357
358 ~Thread();
359
360 // The currently executing thread, or nullptr if not yet initialized.
361 static Thread* Current() {
362 return static_cast<Thread*>(OSThread::CurrentVMThread());
363 }
364
365 // Whether there's any active state on the [thread] that needs to be preserved
366 // across `Thread::ExitIsolate()` and `Thread::EnterIsolate()`.
367 bool HasActiveState();
372
373 // Makes the current thread enter 'isolate'.
374 static void EnterIsolate(Isolate* isolate);
375 // Makes the current thread exit its isolate.
376 static void ExitIsolate(bool isolate_shutdown = false);
377
379 TaskKind kind,
380 bool bypass_safepoint);
381 static void ExitIsolateGroupAsHelper(bool bypass_safepoint);
382
384 TaskKind kind);
385 static void ExitIsolateGroupAsNonMutator();
386
387 // Empties the store buffer block into the isolate.
388 void ReleaseStoreBuffer();
391
392 void SetStackLimit(uword value);
393 void ClearStackLimit();
394
395 // Access to the current stack limit for generated code. Either the true OS
396 // thread's stack limit minus some headroom, or a special value to trigger
397 // interrupts.
399 return reinterpret_cast<uword>(&stack_limit_);
400 }
401 static intptr_t stack_limit_offset() {
402 return OFFSET_OF(Thread, stack_limit_);
403 }
404
405 // The true stack limit for this OS thread.
406 static intptr_t saved_stack_limit_offset() {
407 return OFFSET_OF(Thread, saved_stack_limit_);
408 }
409 uword saved_stack_limit() const { return saved_stack_limit_; }
410
411#if defined(USING_SAFE_STACK)
412 uword saved_safestack_limit() const { return saved_safestack_limit_; }
413 void set_saved_safestack_limit(uword limit) {
414 saved_safestack_limit_ = limit;
415 }
416#endif
417 uword saved_shadow_call_stack() const { return saved_shadow_call_stack_; }
419 return OFFSET_OF(Thread, saved_shadow_call_stack_);
420 }
421
422 // Stack overflow flags
423 enum {
424 kOsrRequest = 0x1, // Current stack overflow caused by OSR request.
425 };
426
427 uword write_barrier_mask() const { return write_barrier_mask_; }
428 uword heap_base() const {
429#if defined(DART_COMPRESSED_POINTERS)
430 return heap_base_;
431#else
432 return 0;
433#endif
434 }
435
436 static intptr_t write_barrier_mask_offset() {
437 return OFFSET_OF(Thread, write_barrier_mask_);
438 }
439#if defined(DART_COMPRESSED_POINTERS)
440 static intptr_t heap_base_offset() { return OFFSET_OF(Thread, heap_base_); }
441#endif
442 static intptr_t stack_overflow_flags_offset() {
443 return OFFSET_OF(Thread, stack_overflow_flags_);
444 }
445
447 return ++stack_overflow_count_;
448 }
449
450 uint32_t IncrementAndGetRuntimeCallCount() { return ++runtime_call_count_; }
451
453 return fpu_regs
454 ? stack_overflow_shared_with_fpu_regs_entry_point_offset()
455 : stack_overflow_shared_without_fpu_regs_entry_point_offset();
456 }
457
458 static intptr_t safepoint_state_offset() {
459 return OFFSET_OF(Thread, safepoint_state_);
460 }
461
462 // Tag state is maintained on transitions.
463 enum {
464 // Always true in generated state.
466 // The VM exited the generated state through FFI.
467 // This can be true in both native and VM state.
469 // The VM exited the generated state through a runtime call.
470 // This can be true in both native and VM state.
472 };
473
474 static intptr_t exit_through_ffi_offset() {
475 return OFFSET_OF(Thread, exit_through_ffi_);
476 }
477
478 TaskKind task_kind() const { return task_kind_; }
479
480 // Retrieves and clears the stack overflow flags. These are set by
481 // the generated code before the slow path runtime routine for a
482 // stack overflow is called.
484
485 // Interrupt bits.
486 enum {
487 kVMInterrupt = 0x1, // Internal VM checks: safepoints, store buffers, etc.
488 kMessageInterrupt = 0x2, // An interrupt to process an out of band message.
489
491 };
492
493 void ScheduleInterrupts(uword interrupt_bits);
494 ErrorPtr HandleInterrupts();
497 return (stack_limit_.load() & kInterruptsMask) != 0;
498 }
499
500 // Monitor corresponding to this thread.
501 Monitor* thread_lock() const { return &thread_lock_; }
502
503 // The reusable api local scope for this thread.
504 ApiLocalScope* api_reusable_scope() const { return api_reusable_scope_; }
506 ASSERT(value == nullptr || api_reusable_scope_ == nullptr);
507 api_reusable_scope_ = value;
508 }
509
510 // The api local scope for this thread, this where all local handles
511 // are allocated.
512 ApiLocalScope* api_top_scope() const { return api_top_scope_; }
513 void set_api_top_scope(ApiLocalScope* value) { api_top_scope_ = value; }
514 static intptr_t api_top_scope_offset() {
515 return OFFSET_OF(Thread, api_top_scope_);
516 }
517
518 void EnterApiScope();
519 void ExitApiScope();
520
522 return OFFSET_OF(Thread, double_truncate_round_supported_);
523 }
524
525 static intptr_t tsan_utils_offset() { return OFFSET_OF(Thread, tsan_utils_); }
526
527#if defined(USING_THREAD_SANITIZER)
528 uword exit_through_ffi() const { return exit_through_ffi_; }
529 TsanUtils* tsan_utils() const { return tsan_utils_; }
530#endif // defined(USING_THREAD_SANITIZER)
531
532 // The isolate that this thread is operating on, or nullptr if none.
533 Isolate* isolate() const { return isolate_; }
534 static intptr_t isolate_offset() { return OFFSET_OF(Thread, isolate_); }
535 static intptr_t isolate_group_offset() {
536 return OFFSET_OF(Thread, isolate_group_);
537 }
538
539 // The isolate group that this thread is operating on, or nullptr if none.
540 IsolateGroup* isolate_group() const { return isolate_group_; }
541
542 static intptr_t field_table_values_offset() {
543 return OFFSET_OF(Thread, field_table_values_);
544 }
545
546 bool IsDartMutatorThread() const {
547 return scheduled_dart_mutator_isolate_ != nullptr;
548 }
549
550 // Returns the dart mutator [Isolate] this thread belongs to or nullptr.
551 //
552 // `isolate()` in comparison can return
553 // - `nullptr` for dart mutators (e.g. if the mutator runs under
554 // [NoActiveIsolateScope])
555 // - an incorrect isolate (e.g. if [ActiveIsolateScope] is used to seemingly
556 // enter another isolate)
558 return scheduled_dart_mutator_isolate_;
559 }
560
561#if defined(DEBUG)
562 bool IsInsideCompiler() const { return inside_compiler_; }
563#endif
564
565 // Offset of Dart TimelineStream object.
566 static intptr_t dart_stream_offset() {
567 return OFFSET_OF(Thread, dart_stream_);
568 }
569
570 // Offset of the Dart VM Service Extension StreamInfo object.
572 return OFFSET_OF(Thread, service_extension_stream_);
573 }
574
575 // Is |this| executing Dart code?
576 bool IsExecutingDartCode() const;
577
578 // Has |this| exited Dart code?
579 bool HasExitedDartCode() const;
580
581 bool HasCompilerState() const { return compiler_state_ != nullptr; }
582
585 return *compiler_state_;
586 }
587
589 ASSERT(isolate_group_ != nullptr);
590 return hierarchy_info_;
591 }
592
594 ASSERT(isolate_group_ != nullptr);
595 ASSERT((hierarchy_info_ == nullptr && value != nullptr) ||
596 (hierarchy_info_ != nullptr && value == nullptr));
597 hierarchy_info_ = value;
598 }
599
601 ASSERT(isolate_group_ != nullptr);
602 return type_usage_info_;
603 }
604
606 ASSERT(isolate_group_ != nullptr);
607 ASSERT((type_usage_info_ == nullptr && value != nullptr) ||
608 (type_usage_info_ != nullptr && value == nullptr));
609 type_usage_info_ = value;
610 }
611
612 CompilerTimings* compiler_timings() const { return compiler_timings_; }
613
615 compiler_timings_ = stats;
616 }
617
618 int32_t no_callback_scope_depth() const { return no_callback_scope_depth_; }
620 ASSERT(no_callback_scope_depth_ < INT_MAX);
621 no_callback_scope_depth_ += 1;
622 }
624 ASSERT(no_callback_scope_depth_ > 0);
625 no_callback_scope_depth_ -= 1;
626 }
627
628 bool force_growth() const { return force_growth_scope_depth_ != 0; }
630 ASSERT(force_growth_scope_depth_ < INT_MAX);
631 force_growth_scope_depth_ += 1;
632 }
634 ASSERT(force_growth_scope_depth_ > 0);
635 force_growth_scope_depth_ -= 1;
636 }
637
638 bool is_unwind_in_progress() const { return is_unwind_in_progress_; }
639
641 is_unwind_in_progress_ = true;
643 }
644
645#if defined(DEBUG)
646 void EnterCompiler() {
647 ASSERT(!IsInsideCompiler());
648 inside_compiler_ = true;
649 }
650
651 void LeaveCompiler() {
652 ASSERT(IsInsideCompiler());
653 inside_compiler_ = false;
654 }
655#endif
656
657 void StoreBufferAddObject(ObjectPtr obj);
658 void StoreBufferAddObjectGC(ObjectPtr obj);
659#if defined(TESTING)
660 bool StoreBufferContains(ObjectPtr obj) const {
661 return store_buffer_block_->Contains(obj);
662 }
663#endif
665 static intptr_t store_buffer_block_offset() {
666 return OFFSET_OF(Thread, store_buffer_block_);
667 }
668
669 bool is_marking() const { return marking_stack_block_ != nullptr; }
674 static intptr_t marking_stack_block_offset() {
675 return OFFSET_OF(Thread, marking_stack_block_);
676 }
677
678 uword top_exit_frame_info() const { return top_exit_frame_info_; }
680 top_exit_frame_info_ = top_exit_frame_info;
681 }
682 static intptr_t top_exit_frame_info_offset() {
683 return OFFSET_OF(Thread, top_exit_frame_info_);
684 }
685
686 Heap* heap() const;
687
688 // The TLAB memory boundaries.
689 //
690 // When the heap sampling profiler is enabled, we use the TLAB boundary to
691 // trigger slow path allocations so we can take a sample. This means that
692 // true_end() >= end(), where true_end() is the actual end address of the
693 // TLAB and end() is the chosen sampling boundary for the thread.
694 //
695 // When the heap sampling profiler is disabled, true_end() == end().
696 uword top() const { return top_; }
697 uword end() const { return end_; }
698 uword true_end() const { return true_end_; }
699 void set_top(uword top) { top_ = top; }
700 void set_end(uword end) { end_ = end; }
701 void set_true_end(uword true_end) { true_end_ = true_end; }
702 static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
703 static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }
704
705 int32_t no_safepoint_scope_depth() const {
706#if defined(DEBUG)
707 return no_safepoint_scope_depth_;
708#else
709 return 0;
710#endif
711 }
712
714#if defined(DEBUG)
715 ASSERT(no_safepoint_scope_depth_ < INT_MAX);
716 no_safepoint_scope_depth_ += 1;
717#endif
718 }
719
721#if defined(DEBUG)
722 ASSERT(no_safepoint_scope_depth_ > 0);
723 no_safepoint_scope_depth_ -= 1;
724#endif
725 }
726
727 bool IsInNoReloadScope() const { return no_reload_scope_depth_ > 0; }
728
730 return stopped_mutators_scope_depth_ > 0;
731 }
732
733#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
734 static intptr_t member_name##offset() { \
735 return OFFSET_OF(Thread, member_name); \
736 }
738#undef DEFINE_OFFSET_METHOD
739
741 ASSERT((kDartAvailableCpuRegs & (1 << reg)) != 0);
742 intptr_t index = 0;
743 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
744 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
745 if (i == reg) break;
746 ++index;
747 }
748 return OFFSET_OF(Thread, write_barrier_wrappers_entry_points_) +
749 index * sizeof(uword);
750 }
751
753 intptr_t index = 0;
754 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
755 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
756 if (i == reg) {
757 return index * kStoreBufferWrapperSize;
758 }
759 ++index;
760 }
761 UNREACHABLE();
762 return 0;
763 }
764
765#define DEFINE_OFFSET_METHOD(name) \
766 static intptr_t name##_entry_point_offset() { \
767 return OFFSET_OF(Thread, name##_entry_point_); \
768 }
770#undef DEFINE_OFFSET_METHOD
771
772#define DEFINE_OFFSET_METHOD(returntype, name, ...) \
773 static intptr_t name##_entry_point_offset() { \
774 return OFFSET_OF(Thread, name##_entry_point_); \
775 }
777#undef DEFINE_OFFSET_METHOD
778
779 ObjectPoolPtr global_object_pool() const { return global_object_pool_; }
780 void set_global_object_pool(ObjectPoolPtr raw_value) {
781 global_object_pool_ = raw_value;
782 }
783
784 const uword* dispatch_table_array() const { return dispatch_table_array_; }
785 void set_dispatch_table_array(const uword* array) {
786 dispatch_table_array_ = array;
787 }
788
789 static bool CanLoadFromThread(const Object& object);
790 static intptr_t OffsetFromThread(const Object& object);
791 static bool ObjectAtOffset(intptr_t offset, Object* object);
792 static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);
793
794#define DEFINE_OFFSET_METHOD(name) \
795 static intptr_t name##_entry_point_offset() { \
796 return OFFSET_OF(Thread, name##_entry_point_); \
797 }
799#undef DEFINE_OFFSET_METHOD
800
801#if defined(DEBUG)
802 // For asserts only. Has false positives when running with a simulator or
803 // SafeStack.
804 bool TopErrorHandlerIsSetJump() const;
805 bool TopErrorHandlerIsExitFrame() const;
806#endif
807
808 uword vm_tag() const { return vm_tag_; }
809 void set_vm_tag(uword tag) { vm_tag_ = tag; }
810 static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }
811
813 return unboxed_runtime_arg_.int64_storage[0];
814 }
815 void set_unboxed_int64_runtime_arg(int64_t value) {
816 unboxed_runtime_arg_.int64_storage[0] = value;
817 }
819 return unboxed_runtime_arg_.int64_storage[1];
820 }
822 unboxed_runtime_arg_.int64_storage[1] = value;
823 }
825 return unboxed_runtime_arg_.double_storage[0];
826 }
828 unboxed_runtime_arg_.double_storage[0] = value;
829 }
831 return unboxed_runtime_arg_;
832 }
834 unboxed_runtime_arg_ = value;
835 }
836 static intptr_t unboxed_runtime_arg_offset() {
837 return OFFSET_OF(Thread, unboxed_runtime_arg_);
838 }
839
840 static intptr_t global_object_pool_offset() {
841 return OFFSET_OF(Thread, global_object_pool_);
842 }
843
844 static intptr_t dispatch_table_array_offset() {
845 return OFFSET_OF(Thread, dispatch_table_array_);
846 }
847
848 ObjectPtr active_exception() const { return active_exception_; }
849 void set_active_exception(const Object& value);
850 static intptr_t active_exception_offset() {
851 return OFFSET_OF(Thread, active_exception_);
852 }
853
854 ObjectPtr active_stacktrace() const { return active_stacktrace_; }
855 void set_active_stacktrace(const Object& value);
856 static intptr_t active_stacktrace_offset() {
857 return OFFSET_OF(Thread, active_stacktrace_);
858 }
859
860 uword resume_pc() const { return resume_pc_; }
861 void set_resume_pc(uword value) { resume_pc_ = value; }
862 static uword resume_pc_offset() { return OFFSET_OF(Thread, resume_pc_); }
863
864 ErrorPtr sticky_error() const;
865 void set_sticky_error(const Error& value);
866 void ClearStickyError();
868
869#if defined(DEBUG)
870#define REUSABLE_HANDLE_SCOPE_ACCESSORS(object) \
871 void set_reusable_##object##_handle_scope_active(bool value) { \
872 reusable_##object##_handle_scope_active_ = value; \
873 } \
874 bool reusable_##object##_handle_scope_active() const { \
875 return reusable_##object##_handle_scope_active_; \
876 }
877 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_ACCESSORS)
878#undef REUSABLE_HANDLE_SCOPE_ACCESSORS
879
880 bool IsAnyReusableHandleScopeActive() const {
881#define IS_REUSABLE_HANDLE_SCOPE_ACTIVE(object) \
882 if (reusable_##object##_handle_scope_active_) { \
883 return true; \
884 }
885 REUSABLE_HANDLE_LIST(IS_REUSABLE_HANDLE_SCOPE_ACTIVE)
886 return false;
887#undef IS_REUSABLE_HANDLE_SCOPE_ACTIVE
888 }
889#endif // defined(DEBUG)
890
892
893#define REUSABLE_HANDLE(object) \
894 object& object##Handle() const { return *object##_handle_; }
896#undef REUSABLE_HANDLE
897
899 const uword mask = AtSafepointBits(level);
900 return (state & mask) == mask;
901 }
902
903 // Whether the current thread is owning any safepoint level.
904 bool IsAtSafepoint() const {
905 // Owning a higher level safepoint implies owning the lower levels as well.
907 }
908 bool IsAtSafepoint(SafepointLevel level) const {
909 return IsAtSafepoint(level, safepoint_state_.load());
910 }
911 void SetAtSafepoint(bool value, SafepointLevel level) {
912 ASSERT(thread_lock()->IsOwnedByCurrentThread());
914 if (value) {
915 safepoint_state_ |= AtSafepointBits(level);
916 } else {
917 safepoint_state_ &= ~AtSafepointBits(level);
918 }
919 }
921 ASSERT(thread_lock()->IsOwnedByCurrentThread());
922 return IsSafepointRequested(level);
923 }
928 const uword state = safepoint_state_.load();
929 for (intptr_t i = level; i >= 0; --i) {
930 if (IsSafepointLevelRequested(state, static_cast<SafepointLevel>(i)))
931 return true;
932 }
933 return false;
934 }
936 ASSERT(thread_lock()->IsOwnedByCurrentThread());
937 if (level > current_safepoint_level()) return false;
938 const uword state = safepoint_state_.load();
939 return IsSafepointLevelRequested(state, level);
940 }
941
954
955 void BlockForSafepoint();
956
958 ASSERT(thread_lock()->IsOwnedByCurrentThread());
959
960 uword mask = 0;
961 switch (level) {
964 break;
967 break;
970 break;
971 default:
972 UNREACHABLE();
973 }
974
975 if (value) {
976 // acquire pulls from the release in TryEnterSafepoint.
977 return safepoint_state_.fetch_or(mask, std::memory_order_acquire);
978 } else {
979 // release pushes to the acquire in TryExitSafepoint.
980 return safepoint_state_.fetch_and(~mask, std::memory_order_release);
981 }
982 }
987 return BlockedForSafepointField::decode(safepoint_state_);
988 }
989 void SetBlockedForSafepoint(bool value) {
990 ASSERT(thread_lock()->IsOwnedByCurrentThread());
991 safepoint_state_ =
992 BlockedForSafepointField::update(value, safepoint_state_);
993 }
994 bool BypassSafepoints() const {
995 return BypassSafepointsField::decode(safepoint_state_);
996 }
1001 return UnwindErrorInProgressField::decode(safepoint_state_);
1002 }
1003 void SetUnwindErrorInProgress(bool value) {
1005 if (value) {
1006 safepoint_state_.fetch_or(mask);
1007 } else {
1008 safepoint_state_.fetch_and(~mask);
1009 }
1010 }
1011
1012 bool OwnsGCSafepoint() const;
1013 bool OwnsReloadSafepoint() const;
1014 bool OwnsDeoptSafepoint() const;
1015 bool OwnsSafepoint() const;
1016 bool CanAcquireSafepointLocks() const;
1017
1018 uword safepoint_state() { return safepoint_state_; }
1019
1026
1028 return static_cast<ExecutionState>(execution_state_);
1029 }
1030 // Normally execution state is only accessed for the current thread.
1033 return static_cast<ExecutionState>(execution_state_);
1034 }
1036 execution_state_ = static_cast<uword>(state);
1037 }
1038 static intptr_t execution_state_offset() {
1039 return OFFSET_OF(Thread, execution_state_);
1040 }
1041
1042 virtual bool MayAllocateHandles() {
1043 return (execution_state() == kThreadInVM) ||
1045 }
1046
1052 return (1 << AtSafepointField::shift()) |
1054 }
1055
1057 uword old_state = 0;
1058 uword new_state = AtSafepointBits(current_safepoint_level());
1059 return safepoint_state_.compare_exchange_strong(old_state, new_state,
1060 std::memory_order_release);
1061 }
1062
1065 // First try a fast update of the thread state to indicate it is at a
1066 // safepoint.
1067 if (!TryEnterSafepoint()) {
1068 // Fast update failed which means we could potentially be in the middle
1069 // of a safepoint operation.
1070 EnterSafepointUsingLock();
1071 }
1072 }
1073
1075 uword old_state = AtSafepointBits(current_safepoint_level());
1076 uword new_state = 0;
1077 return safepoint_state_.compare_exchange_strong(old_state, new_state,
1078 std::memory_order_acquire);
1079 }
1080
1082 // First try a fast update of the thread state to indicate it is not at a
1083 // safepoint anymore.
1084 if (!TryExitSafepoint()) {
1085 // Fast update failed which means we could potentially be in the middle
1086 // of a safepoint operation.
1087 ExitSafepointUsingLock();
1088 }
1089 }
1090
1092 // If we are in a runtime call that doesn't support lazy deopt, we will only
1093 // respond to gc safepointing requests.
1095 if (IsSafepointRequested()) {
1097 }
1098 }
1099
1100 Thread* next() const { return next_; }
1101
1102 // Visit all object pointers.
1104 ValidationPolicy validate_frames);
1107
1108 bool IsValidHandle(Dart_Handle object) const;
1109 bool IsValidLocalHandle(Dart_Handle object) const;
1110 intptr_t CountLocalHandles() const;
1111 int ZoneSizeInBytes() const;
1112 void UnwindScopes(uword stack_marker);
1113
1114 void InitVMConstants();
1115
1116 int64_t GetNextTaskId() { return next_task_id_++; }
1117 static intptr_t next_task_id_offset() {
1118 return OFFSET_OF(Thread, next_task_id_);
1119 }
1120 Random* random() { return &thread_random_; }
1121 static intptr_t random_offset() { return OFFSET_OF(Thread, thread_random_); }
1122
1123#ifndef PRODUCT
1124 void PrintJSON(JSONStream* stream) const;
1125#endif
1126
1127#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1128 HeapProfileSampler& heap_sampler() { return heap_sampler_; }
1129#endif
1130
1131 PendingDeopts& pending_deopts() { return pending_deopts_; }
1132
1134 if (runtime_call_deopt_ability_ ==
1136 return SafepointLevel::kGC;
1137 }
1138 if (no_reload_scope_depth_ > 0 || allow_reload_scope_depth_ <= 0) {
1140 }
1142 }
1143
1144 private:
1145 template <class T>
1146 T* AllocateReusableHandle();
1147
1148 enum class RestoreWriteBarrierInvariantOp {
1149 kAddToRememberedSet,
1150 kAddToDeferredMarkingStack
1151 };
1153 void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);
1154
1155 // Set the current compiler state and return the previous compiler state.
1156 CompilerState* SetCompilerState(CompilerState* state) {
1157 CompilerState* previous = compiler_state_;
1158 compiler_state_ = state;
1159 return previous;
1160 }
1161
1162 // Accessed from generated code.
1163 // ** This block of fields must come first! **
1164 // For AOT cross-compilation, we rely on these members having the same offsets
1165 // in SIMARM(IA32) and ARM, and the same offsets in SIMARM64(X64) and ARM64.
1166 // We use only word-sized fields to avoid differences in struct packing on the
1167 // different architectures. See also CheckOffsets in dart.cc.
1168 volatile RelaxedAtomic<uword> stack_limit_ = 0;
1169 uword write_barrier_mask_;
1170#if defined(DART_COMPRESSED_POINTERS)
1171 uword heap_base_ = 0;
1172#endif
1173 uword top_ = 0;
1174 uword end_ = 0;
1175 const uword* dispatch_table_array_ = nullptr;
1176 ObjectPtr* field_table_values_ = nullptr;
1177
1178 // Offsets up to this point can all fit in a byte on X64. All of the above
1179 // fields are very abundantly accessed from code. Thus, keeping them first
1180 // is important for code size (although code size on X64 is not a priority).
1181
1182// State that is cached in the TLS for fast access in generated code.
1183#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
1184 type_name member_name;
1186#undef DECLARE_MEMBERS
1187
1188#define DECLARE_MEMBERS(name) uword name##_entry_point_;
1190#undef DECLARE_MEMBERS
1191
1192#define DECLARE_MEMBERS(returntype, name, ...) uword name##_entry_point_;
1194#undef DECLARE_MEMBERS
1195
1196 uword write_barrier_wrappers_entry_points_[kNumberOfDartAvailableCpuRegs];
1197
1198#define DECLARE_MEMBERS(name) uword name##_entry_point_ = 0;
1200#undef DECLARE_MEMBERS
1201
1202 Isolate* isolate_ = nullptr;
1203 IsolateGroup* isolate_group_ = nullptr;
1204
1205 uword saved_stack_limit_ = OSThread::kInvalidStackLimit;
1206 // The mutator uses this to indicate it wants to OSR (by
1207 // setting [Thread::kOsrRequest]) before going to runtime which will see this
1208 // bit.
1209 uword stack_overflow_flags_ = 0;
1210 uword volatile top_exit_frame_info_ = 0;
1211 StoreBufferBlock* store_buffer_block_ = nullptr;
1212 MarkingStackBlock* marking_stack_block_ = nullptr;
1213 MarkingStackBlock* deferred_marking_stack_block_ = nullptr;
1214 uword volatile vm_tag_ = 0;
1215 // Memory locations dedicated for passing unboxed int64 and double
1216 // values from generated code to runtime.
1217 // TODO(dartbug.com/33549): Clean this up when unboxed values
1218 // could be passed as arguments.
1219 ALIGN8 simd128_value_t unboxed_runtime_arg_;
1220
1221 // JumpToExceptionHandler state:
1222 ObjectPtr active_exception_;
1223 ObjectPtr active_stacktrace_;
1224
1225 ObjectPoolPtr global_object_pool_;
1226 uword resume_pc_;
1227 uword saved_shadow_call_stack_ = 0;
1228
1229 /*
1230 * The execution state for a thread.
1231 *
1232 * Potential execution states a thread could be in:
1233 * kThreadInGenerated - The thread is running jitted dart/stub code.
1234 * kThreadInVM - The thread is running VM code.
1235 * kThreadInNative - The thread is running native code.
1236 * kThreadInBlockedState - The thread is blocked waiting for a resource.
1237 *
1238 * Warning: Execution state doesn't imply the safepoint state. It's possible
1239 * to be in [kThreadInNative] and still not be at-safepoint (e.g. due to a
1240 * pending Dart_TypedDataAcquire() that increases no-callback-scope)
1241 */
1242 uword execution_state_;
1243
1244 /*
1245 * Stores
1246 *
1247 * - whether the thread is at a safepoint (current thread sets these)
1248 * [AtSafepointField]
1249 * [AtDeoptSafepointField]
1250 * [AtReloadSafepointField]
1251 *
1252 * - whether the thread is requested to safepoint (other thread sets these)
1253 * [SafepointRequestedField]
1254 * [DeoptSafepointRequestedField]
1255 * [ReloadSafepointRequestedField]
1256 *
1257 * - whether the thread is blocked due to safepoint request and needs to
1258 * be resumed after safepoint is done (current thread sets this)
1259 * [BlockedForSafepointField]
1260 *
1261 * - whether the thread should be ignored for safepointing purposes
1262 * [BypassSafepointsField]
1263 *
1264 * - whether the isolate running this thread has triggered an unwind error,
1265 * which requires enforced exit on a transition from native back to
1266 * generated.
1267 * [UnwindErrorInProgressField]
1268 */
1269 std::atomic<uword> safepoint_state_;
1270 uword exit_through_ffi_ = 0;
1271 ApiLocalScope* api_top_scope_;
1272 uint8_t double_truncate_round_supported_;
1273 ALIGN8 int64_t next_task_id_;
1274 ALIGN8 Random thread_random_;
1275
1276 TsanUtils* tsan_utils_ = nullptr;
1277
1278 // ---- End accessed from generated code. ----
1279
1280 // The layout of Thread object up to this point should not depend
1281 // on DART_PRECOMPILED_RUNTIME, as it is accessed from generated code.
1282 // The code is generated without DART_PRECOMPILED_RUNTIME, but used with
1283 // DART_PRECOMPILED_RUNTIME.
1284
1285 uword true_end_ = 0;
1286 TaskKind task_kind_;
1287 TimelineStream* const dart_stream_;
1288 StreamInfo* const service_extension_stream_;
1289 mutable Monitor thread_lock_;
1290 ApiLocalScope* api_reusable_scope_;
1291 int32_t no_callback_scope_depth_;
1292 int32_t force_growth_scope_depth_ = 0;
1293 intptr_t no_reload_scope_depth_ = 0;
1294 intptr_t allow_reload_scope_depth_ = 0;
1295 intptr_t stopped_mutators_scope_depth_ = 0;
1296#if defined(DEBUG)
1297 int32_t no_safepoint_scope_depth_;
1298#endif
1299 VMHandles reusable_handles_;
1300 int32_t stack_overflow_count_;
1301 uint32_t runtime_call_count_ = 0;
1302
1303 // Deoptimization of stack frames.
1304 RuntimeCallDeoptAbility runtime_call_deopt_ability_ =
1306 PendingDeopts pending_deopts_;
1307
1308 // Compiler state:
1309 CompilerState* compiler_state_ = nullptr;
1310 HierarchyInfo* hierarchy_info_;
1311 TypeUsageInfo* type_usage_info_;
1312 NoActiveIsolateScope* no_active_isolate_scope_ = nullptr;
1313
1314 CompilerTimings* compiler_timings_ = nullptr;
1315
1316 ErrorPtr sticky_error_;
1317
1318 ObjectPtr* field_table_values() const { return field_table_values_; }
1319
1320// Reusable handles support.
1321#define REUSABLE_HANDLE_FIELDS(object) object* object##_handle_;
1323#undef REUSABLE_HANDLE_FIELDS
1324
1325#if defined(DEBUG)
1326#define REUSABLE_HANDLE_SCOPE_VARIABLE(object) \
1327 bool reusable_##object##_handle_scope_active_;
1328 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_VARIABLE);
1329#undef REUSABLE_HANDLE_SCOPE_VARIABLE
1330#endif // defined(DEBUG)
1331
1332 class AtSafepointField : public BitField<uword, bool, 0, 1> {};
1333 class SafepointRequestedField
1334 : public BitField<uword, bool, AtSafepointField::kNextBit, 1> {};
1335
1336 class AtDeoptSafepointField
1337 : public BitField<uword, bool, SafepointRequestedField::kNextBit, 1> {};
1338 class DeoptSafepointRequestedField
1339 : public BitField<uword, bool, AtDeoptSafepointField::kNextBit, 1> {};
1340
1341 class AtReloadSafepointField
1342 : public BitField<uword,
1343 bool,
1344 DeoptSafepointRequestedField::kNextBit,
1345 1> {};
1346 class ReloadSafepointRequestedField
1347 : public BitField<uword, bool, AtReloadSafepointField::kNextBit, 1> {};
1348
1349 class BlockedForSafepointField
1350 : public BitField<uword,
1351 bool,
1352 ReloadSafepointRequestedField::kNextBit,
1353 1> {};
1354 class BypassSafepointsField
1355 : public BitField<uword, bool, BlockedForSafepointField::kNextBit, 1> {};
1356 class UnwindErrorInProgressField
1357 : public BitField<uword, bool, BypassSafepointsField::kNextBit, 1> {};
1358
1359 static uword AtSafepointBits(SafepointLevel level) {
1360 switch (level) {
1370 default:
1371 UNREACHABLE();
1372 }
1373 }
1374
1375#if defined(USING_SAFE_STACK)
1376 uword saved_safestack_limit_;
1377#endif
1378
1379 Thread* next_; // Used to chain the thread structures in an isolate.
1380 Isolate* scheduled_dart_mutator_isolate_ = nullptr;
1381
1382 bool is_unwind_in_progress_ = false;
1383
1384#if defined(DEBUG)
1385 bool inside_compiler_ = false;
1386#endif
1387
1388#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1389 HeapProfileSampler heap_sampler_;
1390#endif
1391
1392 explicit Thread(bool is_vm_isolate);
1393
1394 void StoreBufferRelease(
1396 void StoreBufferAcquire();
1397
1398 void MarkingStackRelease();
1399 void MarkingStackAcquire();
1400 void MarkingStackFlush();
1401 void DeferredMarkingStackRelease();
1402 void DeferredMarkingStackAcquire();
1403 void DeferredMarkingStackFlush();
1404
1405 void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
1406 void EnterSafepointUsingLock();
1407 void ExitSafepointUsingLock();
1408
1409 void SetupState(TaskKind kind);
1410 void ResetState();
1411
1412 void SetupMutatorState(TaskKind kind);
1413 void ResetMutatorState();
1414
1415 void SetupDartMutatorState(Isolate* isolate);
1416 void SetupDartMutatorStateDependingOnSnapshot(IsolateGroup* group);
1417 void ResetDartMutatorState(Isolate* isolate);
1418
1419 static void SuspendDartMutatorThreadInternal(Thread* thread,
1420 VMTag::VMTagId tag);
1421 static void ResumeDartMutatorThreadInternal(Thread* thread);
1422
1423 static void SuspendThreadInternal(Thread* thread, VMTag::VMTagId tag);
1424 static void ResumeThreadInternal(Thread* thread);
1425
1426 // Adds a new active mutator thread to thread registry while associating it
1427 // with the given isolate (group).
1428 //
1429 // All existing safepoint operations are waited for before adding the thread
1430 // to the thread registry.
1431 //
1432 // => Anyone who iterates the active threads will first have to get us to
1433 // safepoint (but can access `Thread::isolate()`).
1434 static Thread* AddActiveThread(IsolateGroup* group,
1436 bool is_dart_mutator,
1437 bool bypass_safepoint);
1438
1439 // Releases a active mutator threads from the thread registry.
1440 //
1441 // Thread needs to be at-safepoint.
1442 static void FreeActiveThread(Thread* thread, bool bypass_safepoint);
1443
1444 static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }
1445
1446#define REUSABLE_FRIEND_DECLARATION(name) \
1447 friend class Reusable##name##HandleScope;
1449#undef REUSABLE_FRIEND_DECLARATION
1450
1451 friend class ApiZone;
1453 friend class InterruptChecker;
1454 friend class Isolate;
1455 friend class IsolateGroup;
1457 friend class NoReloadScope;
1459 friend class Simulator;
1460 friend class StackZone;
1462 friend class ThreadRegistry;
1463 friend class CompilerState;
1465 friend class FieldTable;
1467 friend class Dart; // Calls SetupCachedEntryPoints after snapshot reading
1468 friend class
1469 TransitionGeneratedToVM; // IsSafepointRequested/BlockForSafepoint
1470 friend class
1471 TransitionVMToGenerated; // IsSafepointRequested/BlockForSafepoint
1472 friend class MonitorLocker; // ExitSafepointUsingLock
1474 const char*,
1475 char**);
1477};
1478
1480 public:
1482 : StackResource(thread) {
1483 // We cannot have nested calls into the VM without deopt support.
1484 ASSERT(thread->runtime_call_deopt_ability_ ==
1486 thread->runtime_call_deopt_ability_ = kind;
1487 }
1489 thread()->runtime_call_deopt_ability_ =
1491 }
1492
1493 private:
1494 Thread* thread() {
1495 return reinterpret_cast<Thread*>(StackResource::thread());
1496 }
1497};
1498
1499#if defined(DART_HOST_OS_WINDOWS)
1500// Clears the state of the current thread and frees the allocation.
1501void WindowsThreadCleanUp();
1502#endif
1503
1504#if !defined(PRODUCT)
1505// Disable thread interrupts.
1511#else
1513 public:
1515 : StackResource(thread) {}
1517};
1518#endif // !defined(PRODUCT)
1519
1520// Within a NoSafepointScope, the thread must not reach any safepoint. Used
1521// around code that manipulates raw object pointers directly without handles.
1522#if defined(DEBUG)
1523class NoSafepointScope : public ThreadStackResource {
1524 public:
1525 explicit NoSafepointScope(Thread* thread = nullptr)
1526 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1527 this->thread()->IncrementNoSafepointScopeDepth();
1528 }
1529 ~NoSafepointScope() { thread()->DecrementNoSafepointScopeDepth(); }
1530
1531 private:
1532 DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
1533};
1534#else // defined(DEBUG)
1536 public:
1537 explicit NoSafepointScope(Thread* thread = nullptr) {}
1538
1539 private:
1541};
1542#endif // defined(DEBUG)
1543
1544// Disables initiating a reload operation as well as participating in another
1545// threads reload operation.
1546//
1547// Reload triggered by a mutator thread happens by sending all other mutator
1548// threads (that are running) OOB messages to check into a safepoint. The thread
1549// initiating the reload operation will block until all mutators are at a reload
1550// safepoint.
1551//
1552// When running under this scope, the processing of those OOB messages will
1553// ignore reload safepoint checkin requests. Yet we'll have to ensure that the
1554// dropped message is still acted upon.
1555//
1556// => To solve this we make the [~NoReloadScope] destructor resend a new reload
1557// OOB request to itself (the [~NoReloadScope] destructor is not necessarily at
1558// well-defined place where reload can happen - those places will explicitly
1559// opt-in via [ReloadParticipationScope]).
1560//
1562 public:
1563 explicit NoReloadScope(Thread* thread);
1565
1566 private:
1568};
1569
1570// Allows triggering reload safepoint operations as well as participating in
1571// reload operations (at safepoint checks).
1572//
1573// By-default safepoint checkins will not participate in reload operations, as
1574// reload has to happen at very well-defined places. This scope is intended
1575// for those places where we explicitly want to allow safepoint checkins to
1576// participate in reload operations (triggered by other threads).
1577//
1578// If there is any [NoReloadScope] active we will still disable the safepoint
1579// checkins to participate in reload.
1580//
1581// We also require the thread inititating a reload operation to explicitly
1582// opt-in via this scope.
1584 public:
1585 explicit RawReloadParticipationScope(Thread* thread) : thread_(thread) {
1586#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1587 if (thread->allow_reload_scope_depth_ == 0) {
1589 }
1590 thread->allow_reload_scope_depth_++;
1591 ASSERT(thread->allow_reload_scope_depth_ >= 0);
1592#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1593 }
1594
1596#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1597 thread_->allow_reload_scope_depth_ -= 1;
1598 ASSERT(thread_->allow_reload_scope_depth_ >= 0);
1599 if (thread_->allow_reload_scope_depth_ == 0) {
1601 }
1602#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1603 }
1604
1605 private:
1606 Thread* thread_;
1607
1609};
1610
1613
1615 public:
1617#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1618 thread->stopped_mutators_scope_depth_++;
1619 ASSERT(thread->stopped_mutators_scope_depth_ >= 0);
1620#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1621 }
1622
1624#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1625 thread()->stopped_mutators_scope_depth_ -= 1;
1626 ASSERT(thread()->stopped_mutators_scope_depth_ >= 0);
1627#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1628 }
1629
1630 private:
1632};
1633
1634// Within a EnterCompilerScope, the thread must operate on cloned fields.
1635#if defined(DEBUG)
1636class EnterCompilerScope : public ThreadStackResource {
1637 public:
1638 explicit EnterCompilerScope(Thread* thread = nullptr)
1639 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1640 previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
1641 if (!previously_is_inside_compiler_) {
1642 this->thread()->EnterCompiler();
1643 }
1644 }
1645 ~EnterCompilerScope() {
1646 if (!previously_is_inside_compiler_) {
1647 thread()->LeaveCompiler();
1648 }
1649 }
1650
1651 private:
1652 bool previously_is_inside_compiler_;
1653 DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
1654};
1655#else // defined(DEBUG)
1657 public:
1658 explicit EnterCompilerScope(Thread* thread = nullptr) {}
1659
1660 private:
1662};
1663#endif // defined(DEBUG)
1664
1665// Within a LeaveCompilerScope, the thread must operate on cloned fields.
1666#if defined(DEBUG)
1667class LeaveCompilerScope : public ThreadStackResource {
1668 public:
1669 explicit LeaveCompilerScope(Thread* thread = nullptr)
1670 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1671 previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
1672 if (previously_is_inside_compiler_) {
1673 this->thread()->LeaveCompiler();
1674 }
1675 }
1676 ~LeaveCompilerScope() {
1677 if (previously_is_inside_compiler_) {
1678 thread()->EnterCompiler();
1679 }
1680 }
1681
1682 private:
1683 bool previously_is_inside_compiler_;
1684 DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
1685};
1686#else // defined(DEBUG)
1688 public:
1689 explicit LeaveCompilerScope(Thread* thread = nullptr) {}
1690
1691 private:
1693};
1694#endif // defined(DEBUG)
1695
1696} // namespace dart
1697
1698#endif // RUNTIME_VM_THREAD_H_
#define UNREACHABLE()
Definition assert.h:248
static constexpr uword update(bool value, uword original)
Definition bitfield.h:190
static constexpr int shift()
Definition bitfield.h:161
EnterCompilerScope(Thread *thread=nullptr)
Definition thread.h:1658
LeaveCompilerScope(Thread *thread=nullptr)
Definition thread.h:1689
NoSafepointScope(Thread *thread=nullptr)
Definition thread.h:1537
static const uword kInvalidStackLimit
Definition os_thread.h:88
static void SetCurrentTLS(BaseThread *value)
Definition os_thread.cc:318
static ThreadState * CurrentVMThread()
Definition os_thread.h:184
RawReloadParticipationScope(Thread *thread)
Definition thread.h:1585
T load(std::memory_order order=std::memory_order_relaxed) const
Definition atomic.h:21
RuntimeCallDeoptScope(Thread *thread, RuntimeCallDeoptAbility kind)
Definition thread.h:1481
virtual ~RuntimeCallDeoptScope()
Definition thread.h:1488
ThreadState * thread() const
Definition allocation.h:33
StoppedMutatorsScope(Thread *thread)
Definition thread.h:1616
void UnwindScopes(uword stack_marker)
Definition thread.cc:1262
bool IsBlockedForSafepoint() const
Definition thread.h:986
bool HasCompilerState() const
Definition thread.h:581
HierarchyInfo * hierarchy_info() const
Definition thread.h:588
void set_execution_state(ExecutionState state)
Definition thread.h:1035
void set_compiler_timings(CompilerTimings *stats)
Definition thread.h:614
void ScheduleInterrupts(uword interrupt_bits)
Definition thread.cc:705
bool CanAcquireSafepointLocks() const
Definition thread.cc:1306
static uword full_safepoint_state_unacquired()
Definition thread.h:1047
void set_type_usage_info(TypeUsageInfo *value)
Definition thread.h:605
void set_hierarchy_info(HierarchyInfo *value)
Definition thread.h:593
static intptr_t execution_state_offset()
Definition thread.h:1038
void StartUnwindError()
Definition thread.h:640
void set_vm_tag(uword tag)
Definition thread.h:809
static intptr_t stack_limit_offset()
Definition thread.h:401
void set_unboxed_simd128_runtime_arg(simd128_value_t value)
Definition thread.h:833
friend Isolate * CreateWithinExistingIsolateGroup(IsolateGroup *, const char *, char **)
const uword * dispatch_table_array() const
Definition thread.h:784
void MarkingStackAddObject(ObjectPtr obj)
Definition thread.cc:826
uword resume_pc() const
Definition thread.h:860
static intptr_t write_barrier_mask_offset()
Definition thread.h:436
bool IsSafepointRequested(SafepointLevel level) const
Definition thread.h:927
uword safepoint_state()
Definition thread.h:1018
NO_SANITIZE_THREAD ExecutionState execution_state_cross_thread_for_testing() const
Definition thread.h:1032
ApiLocalScope * api_top_scope() const
Definition thread.h:512
void set_active_stacktrace(const Object &value)
Definition thread.cc:226
uword vm_tag() const
Definition thread.h:808
void DecrementNoSafepointScopeDepth()
Definition thread.h:720
void RememberLiveTemporaries()
Definition thread.cc:1083
bool OwnsSafepoint() const
Definition thread.cc:1301
bool force_growth() const
Definition thread.h:628
@ kMessageInterrupt
Definition thread.h:488
@ kInterruptsMask
Definition thread.h:490
uword GetAndClearInterrupts()
Definition thread.cc:719
void DeferredMarkingStackAddObject(ObjectPtr obj)
Definition thread.cc:833
void set_top_exit_frame_info(uword top_exit_frame_info)
Definition thread.h:679
static bool IsBlockedForSafepoint(uword state)
Definition thread.h:983
ObjectPoolPtr global_object_pool() const
Definition thread.h:779
bool HasScheduledInterrupts() const
Definition thread.h:496
int32_t no_callback_scope_depth() const
Definition thread.h:618
void PrintJSON(JSONStream *stream) const
uword saved_shadow_call_stack() const
Definition thread.h:417
static intptr_t OffsetFromThread(const Object &object)
Definition thread.cc:1112
static intptr_t safepoint_state_offset()
Definition thread.h:458
static intptr_t write_barrier_wrappers_thread_offset(Register reg)
Definition thread.h:740
void MarkingStackBlockProcess()
Definition thread.cc:816
static intptr_t vm_tag_offset()
Definition thread.h:810
@ kCompilerTask
Definition thread.h:348
@ kScavengerTask
Definition thread.h:352
@ kSampleBlockTask
Definition thread.h:353
@ kCompactorTask
Definition thread.h:351
static Thread * Current()
Definition thread.h:361
PendingDeopts & pending_deopts()
Definition thread.h:1131
bool OwnsGCSafepoint() const
Definition thread.cc:1286
void set_unboxed_int64_runtime_arg(int64_t value)
Definition thread.h:815
static bool IsAtSafepoint(SafepointLevel level, uword state)
Definition thread.h:898
bool IsValidLocalHandle(Dart_Handle object) const
Definition thread.cc:1204
bool IsValidHandle(Dart_Handle object) const
Definition thread.cc:1199
static intptr_t store_buffer_block_offset()
Definition thread.h:665
int64_t GetNextTaskId()
Definition thread.h:1116
void AssertEmptyThreadInvariants()
Definition thread.cc:315
bool IsSafepointRequested() const
Definition thread.h:924
ObjectPtr active_stacktrace() const
Definition thread.h:854
static intptr_t saved_stack_limit_offset()
Definition thread.h:406
void set_resume_pc(uword value)
Definition thread.h:861
int32_t no_safepoint_scope_depth() const
Definition thread.h:705
static intptr_t top_offset()
Definition thread.h:702
static bool ObjectAtOffset(intptr_t offset, Object *object)
Definition thread.cc:1139
void AssertNonMutatorInvariants()
Definition thread.cc:268
DART_WARN_UNUSED_RESULT ErrorPtr StealStickyError()
Definition thread.cc:243
static intptr_t service_extension_stream_offset()
Definition thread.h:571
uword end() const
Definition thread.h:697
void SetUnwindErrorInProgress(bool value)
Definition thread.h:1003
friend class CompilerState
Definition thread.h:1463
bool is_marking() const
Definition thread.h:669
bool TryEnterSafepoint()
Definition thread.h:1056
ErrorPtr HandleInterrupts()
Definition thread.cc:734
void SetStackLimit(uword value)
Definition thread.cc:685
@ kExitThroughRuntimeCall
Definition thread.h:471
@ kExitThroughFfi
Definition thread.h:468
void VisitObjectPointers(ObjectPointerVisitor *visitor, ValidationPolicy validate_frames)
Definition thread.cc:901
TypeUsageInfo * type_usage_info() const
Definition thread.h:600
void IncrementNoCallbackScopeDepth()
Definition thread.h:619
static intptr_t random_offset()
Definition thread.h:1121
static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs)
Definition thread.h:452
static intptr_t isolate_offset()
Definition thread.h:534
Monitor * thread_lock() const
Definition thread.h:501
static intptr_t active_exception_offset()
Definition thread.h:850
void set_end(uword end)
Definition thread.h:700
static void ExitIsolateGroupAsNonMutator()
Definition thread.cc:521
ApiLocalScope * api_reusable_scope() const
Definition thread.h:504
Heap * heap() const
Definition thread.cc:876
static intptr_t WriteBarrierWrappersOffsetForRegister(Register reg)
Definition thread.h:752
static const char * TaskKindToCString(TaskKind kind)
Definition thread.cc:250
void set_api_reusable_scope(ApiLocalScope *value)
Definition thread.h:505
CompilerState & compiler_state()
Definition thread.h:583
static intptr_t top_exit_frame_info_offset()
Definition thread.h:682
int ZoneSizeInBytes() const
Definition thread.cc:1225
static bool IsSafepointLevelRequested(uword state, SafepointLevel level)
Definition thread.h:942
void ReleaseStoreBuffer()
Definition thread.cc:672
friend class compiler::target::Thread
Definition thread.h:1464
double unboxed_double_runtime_arg() const
Definition thread.h:824
intptr_t CountLocalHandles() const
Definition thread.cc:1215
void AssertNonDartMutatorInvariants()
Definition thread.cc:276
void InitVMConstants()
Definition thread.cc:181
void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy)
Definition thread.cc:786
void DeferredMarkingStackBlockProcess()
Definition thread.cc:821
void AcquireMarkingStack()
bool HasExitedDartCode() const
Definition thread.cc:884
Thread * next() const
Definition thread.h:1100
bool HasActiveState()
Definition thread.cc:349
uword GetAndClearStackOverflowFlags()
Definition thread.cc:780
void ExitSafepoint()
Definition thread.h:1081
void set_sticky_error(const Error &value)
Definition thread.cc:234
void StoreBufferAddObject(ObjectPtr obj)
Definition thread.cc:791
simd128_value_t unboxed_simd128_runtime_arg() const
Definition thread.h:830
void ClearStackLimit()
Definition thread.cc:696
void CheckForSafepoint()
Definition thread.h:1091
void set_global_object_pool(ObjectPoolPtr raw_value)
Definition thread.h:780
uword top() const
Definition thread.h:696
bool IsExecutingDartCode() const
Definition thread.cc:880
bool IsSafepointRequestedLocked(SafepointLevel level) const
Definition thread.h:920
void ClearStickyError()
Definition thread.cc:239
static intptr_t end_offset()
Definition thread.h:703
void DecrementForceGrowthScopeDepth()
Definition thread.h:633
void EnterApiScope()
Definition thread.cc:1235
static intptr_t marking_stack_block_offset()
Definition thread.h:674
static uword saved_shadow_call_stack_offset()
Definition thread.h:418
static intptr_t global_object_pool_offset()
Definition thread.h:840
friend class TransitionVMToGenerated
Definition thread.h:1471
static intptr_t field_table_values_offset()
Definition thread.h:542
bool UnwindErrorInProgress() const
Definition thread.h:1000
void ExitApiScope()
Definition thread.cc:1248
static bool CanLoadFromThread(const Object &object)
Definition thread.cc:1088
void ReleaseMarkingStack()
SafepointLevel current_safepoint_level() const
Definition thread.h:1133
int64_t unboxed_int64_runtime_arg() const
Definition thread.h:812
static void ExitIsolateGroupAsHelper(bool bypass_safepoint)
Definition thread.cc:494
bool is_unwind_in_progress() const
Definition thread.h:638
bool IsSafepointLevelRequestedLocked(SafepointLevel level) const
Definition thread.h:935
static intptr_t isolate_group_offset()
Definition thread.h:535
uword top_exit_frame_info() const
Definition thread.h:678
bool OwnsReloadSafepoint() const
Definition thread.cc:1296
uword stack_limit_address() const
Definition thread.h:398
bool OwnsDeoptSafepoint() const
Definition thread.cc:1291
int64_t unboxed_int64_runtime_second_arg() const
Definition thread.h:818
static intptr_t next_task_id_offset()
Definition thread.h:1117
void ClearReusableHandles()
Definition thread.cc:895
bool IsDartMutatorThread() const
Definition thread.h:546
static intptr_t exit_through_ffi_offset()
Definition thread.h:474
void IncrementForceGrowthScopeDepth()
Definition thread.h:629
static bool EnterIsolateGroupAsNonMutator(IsolateGroup *isolate_group, TaskKind kind)
Definition thread.cc:506
friend class Isolate
Definition thread.h:1454
void EnterSafepoint()
Definition thread.h:1063
void SetAtSafepoint(bool value, SafepointLevel level)
Definition thread.h:911
Random * random()
Definition thread.h:1120
ExecutionState execution_state() const
Definition thread.h:1027
Isolate * isolate() const
Definition thread.h:533
int32_t IncrementAndGetStackOverflowCount()
Definition thread.h:446
static uword resume_pc_offset()
Definition thread.h:862
bool BypassSafepoints() const
Definition thread.h:994
uword saved_stack_limit() const
Definition thread.h:409
friend class TransitionGeneratedToVM
Definition thread.h:1469
void set_true_end(uword true_end)
Definition thread.h:701
CompilerTimings * compiler_timings() const
Definition thread.h:612
uint32_t IncrementAndGetRuntimeCallCount()
Definition thread.h:450
uword heap_base() const
Definition thread.h:428
TaskKind task_kind() const
Definition thread.h:478
bool IsAtSafepoint() const
Definition thread.h:904
bool IsInNoReloadScope() const
Definition thread.h:727
static uword full_safepoint_state_acquired()
Definition thread.h:1051
@ kThreadInNative
Definition thread.h:1023
@ kThreadInBlockedState
Definition thread.h:1024
@ kThreadInGenerated
Definition thread.h:1022
void set_unboxed_double_runtime_arg(double value)
Definition thread.h:827
IsolateGroup * isolate_group() const
Definition thread.h:540
ObjectPtr active_exception() const
Definition thread.h:848
static intptr_t dart_stream_offset()
Definition thread.h:566
void StoreBufferAddObjectGC(ObjectPtr obj)
Definition thread.cc:799
static intptr_t api_top_scope_offset()
Definition thread.h:514
void SetBlockedForSafepoint(bool value)
Definition thread.h:989
void DecrementNoCallbackScopeDepth()
Definition thread.h:623
void set_api_top_scope(ApiLocalScope *value)
Definition thread.h:513
bool TryExitSafepoint()
Definition thread.h:1074
void AssertEmptyStackInvariants()
Definition thread.cc:284
static intptr_t double_truncate_round_supported_offset()
Definition thread.h:521
uword write_barrier_mask() const
Definition thread.h:427
static intptr_t stack_overflow_flags_offset()
Definition thread.h:442
bool IsAtSafepoint(SafepointLevel level) const
Definition thread.h:908
friend class IsolateGroup
Definition thread.h:1455
static uword SetBypassSafepoints(bool value, uword state)
Definition thread.h:997
static void EnterIsolate(Isolate *isolate)
Definition thread.cc:366
static intptr_t active_stacktrace_offset()
Definition thread.h:856
bool IsInStoppedMutatorsScope() const
Definition thread.h:729
void IncrementNoSafepointScopeDepth()
Definition thread.h:713
static intptr_t tsan_utils_offset()
Definition thread.h:525
void BlockForSafepoint()
Definition thread.cc:1282
void set_top(uword top)
Definition thread.h:699
static void ExitIsolate(bool isolate_shutdown=false)
Definition thread.cc:423
virtual bool MayAllocateHandles()
Definition thread.h:1042
ErrorPtr sticky_error() const
Definition thread.cc:230
void set_dispatch_table_array(const uword *array)
Definition thread.h:785
void set_active_exception(const Object &value)
Definition thread.cc:222
HeapProfileSampler & heap_sampler()
Definition thread.h:1128
uword SetSafepointRequested(SafepointLevel level, bool value)
Definition thread.h:957
uword true_end() const
Definition thread.h:698
void set_unboxed_int64_runtime_second_arg(int64_t value)
Definition thread.h:821
static bool EnterIsolateGroupAsHelper(IsolateGroup *isolate_group, TaskKind kind, bool bypass_safepoint)
Definition thread.cc:476
static intptr_t unboxed_runtime_arg_offset()
Definition thread.h:836
Isolate * scheduled_dart_mutator_isolate() const
Definition thread.h:557
friend class NoActiveIsolateScope
Definition thread.h:1456
static intptr_t dispatch_table_array_offset()
Definition thread.h:844
void DeferredMarkLiveTemporaries()
Definition thread.cc:1078
struct _Dart_Handle * Dart_Handle
Definition dart_api.h:258
#define DART_WARN_UNUSED_RESULT
Definition dart_api.h:66
#define ASSERT(E)
AtkStateType state
uint8_t value
uint32_t * target
#define REUSABLE_FRIEND_DECLARATION(name)
Definition isolate.h:1687
StoreBuffer::Block StoreBufferBlock
SafepointLevel
Definition thread.h:289
@ kGC
Definition thread.h:291
@ kNumLevels
Definition thread.h:297
@ kNoSafepoint
Definition thread.h:300
@ kGCAndDeoptAndReload
Definition thread.h:295
@ kGCAndDeopt
Definition thread.h:293
MarkingStack::Block MarkingStackBlock
uintptr_t uword
Definition globals.h:501
@ kNumberOfCpuRegisters
constexpr RegList kDartAvailableCpuRegs
const intptr_t kStoreBufferWrapperSize
ValidationPolicy
Definition thread.h:271
RuntimeCallDeoptAbility
Definition thread.h:276
constexpr int kNumberOfDartAvailableCpuRegs
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition globals.h:581
#define T
#define RUNTIME_ENTRY_LIST(V)
#define LEAF_RUNTIME_ENTRY_LIST(V)
Point offset
static intptr_t setjmp_buffer_offset()
Definition thread.h:323
jmp_buf * setjmp_buffer
Definition thread.h:315
uword exception_fp
Definition thread.h:318
static intptr_t exception_fp_offset()
Definition thread.h:332
static intptr_t exception_sp_offset()
Definition thread.h:329
static intptr_t exception_pc_offset()
Definition thread.h:326
static intptr_t setjmp_function_offset()
Definition thread.h:320
void * setjmp_function
Definition thread.h:313
uword exception_pc
Definition thread.h:316
uword exception_sp
Definition thread.h:317
double double_storage[2]
Definition globals.h:151
int64_t int64_storage[2]
Definition globals.h:149
#define CACHED_FUNCTION_ENTRY_POINTS_LIST(V)
Definition thread.h:189
#define REUSABLE_HANDLE_LIST(V)
Definition thread.h:78
#define REUSABLE_HANDLE_FIELDS(object)
Definition thread.h:1321
#define CACHED_CONSTANTS_LIST(V)
Definition thread.h:267
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value)
Definition thread.h:733
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value)
Definition thread.h:1183
#define REUSABLE_HANDLE(object)
Definition thread.h:893
#define NO_SANITIZE_THREAD
#define ALIGN8
Definition globals.h:171
#define OFFSET_OF(type, field)
Definition globals.h:138