thread.h
1// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_THREAD_H_
6#define RUNTIME_VM_THREAD_H_
7
8#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
9#error "Should not include runtime"
10#endif
11
12#include <setjmp.h>
13
14#include "include/dart_api.h"
15#include "platform/assert.h"
16#include "platform/atomic.h"
17#include "platform/safe_stack.h"
18#include "vm/bitfield.h"
19#include "vm/compiler/api/type_check_mode.h"
20#include "vm/constants.h"
21#include "vm/globals.h"
22#include "vm/handles.h"
23#include "vm/heap/pointer_block.h"
24#include "vm/heap/sampler.h"
25#include "vm/os_thread.h"
26#include "vm/pending_deopts.h"
27#include "vm/random.h"
28#include "vm/runtime_entry_list.h"
29#include "vm/tags.h"
30#include "vm/thread_stack_resource.h"
31#include "vm/thread_state.h"
32
33namespace dart {
34
35class AbstractType;
36class ApiLocalScope;
37class Array;
38class CompilerState;
39class CompilerTimings;
40class Class;
41class Code;
42class Error;
43class ExceptionHandlers;
44class Field;
45class FieldTable;
46class Function;
47class GrowableObjectArray;
48class HandleScope;
49class Heap;
50class HierarchyInfo;
51class Instance;
52class Isolate;
53class IsolateGroup;
54class Library;
55class Object;
56class OSThread;
57class JSONObject;
58class NoActiveIsolateScope;
59class PcDescriptors;
60class RuntimeEntry;
61class Smi;
62class StackResource;
63class StackTrace;
64class StreamInfo;
65class String;
66class TimelineStream;
67class TypeArguments;
68class TypeParameter;
69class TypeUsageInfo;
70class Zone;
71
72namespace compiler {
73namespace target {
74class Thread;
75} // namespace target
76} // namespace compiler
77
78#define REUSABLE_HANDLE_LIST(V) \
79 V(AbstractType) \
80 V(Array) \
81 V(Class) \
82 V(Code) \
83 V(Error) \
84 V(ExceptionHandlers) \
85 V(Field) \
86 V(Function) \
87 V(GrowableObjectArray) \
88 V(Instance) \
89 V(Library) \
90 V(LoadingUnit) \
91 V(Object) \
92 V(PcDescriptors) \
93 V(Smi) \
94 V(String) \
95 V(TypeParameters) \
96 V(TypeArguments) \
97 V(TypeParameter) \
98 V(WeakArray)
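// Editor's note: REUSABLE_HANDLE_LIST is an X-macro. Each user passes a
// one-argument generator macro V and gets one expansion per class above.
// For example, the REUSABLE_HANDLE_FIELDS generator defined later in this
// file expands to one cached-handle field per listed class:
//
//   AbstractType* AbstractType_handle_;
//   Array* Array_handle_;
//   ...and so on for every class in the list.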
99
100#define CACHED_VM_STUBS_LIST(V) \
101 V(CodePtr, fix_callers_target_code_, StubCode::FixCallersTarget().ptr(), \
102 nullptr) \
103 V(CodePtr, fix_allocation_stub_code_, \
104 StubCode::FixAllocationStubTarget().ptr(), nullptr) \
105 V(CodePtr, invoke_dart_code_stub_, StubCode::InvokeDartCode().ptr(), \
106 nullptr) \
107 V(CodePtr, call_to_runtime_stub_, StubCode::CallToRuntime().ptr(), nullptr) \
108 V(CodePtr, late_initialization_error_shared_without_fpu_regs_stub_, \
109 StubCode::LateInitializationErrorSharedWithoutFPURegs().ptr(), nullptr) \
110 V(CodePtr, late_initialization_error_shared_with_fpu_regs_stub_, \
111 StubCode::LateInitializationErrorSharedWithFPURegs().ptr(), nullptr) \
112 V(CodePtr, null_error_shared_without_fpu_regs_stub_, \
113 StubCode::NullErrorSharedWithoutFPURegs().ptr(), nullptr) \
114 V(CodePtr, null_error_shared_with_fpu_regs_stub_, \
115 StubCode::NullErrorSharedWithFPURegs().ptr(), nullptr) \
116 V(CodePtr, null_arg_error_shared_without_fpu_regs_stub_, \
117 StubCode::NullArgErrorSharedWithoutFPURegs().ptr(), nullptr) \
118 V(CodePtr, null_arg_error_shared_with_fpu_regs_stub_, \
119 StubCode::NullArgErrorSharedWithFPURegs().ptr(), nullptr) \
120 V(CodePtr, null_cast_error_shared_without_fpu_regs_stub_, \
121 StubCode::NullCastErrorSharedWithoutFPURegs().ptr(), nullptr) \
122 V(CodePtr, null_cast_error_shared_with_fpu_regs_stub_, \
123 StubCode::NullCastErrorSharedWithFPURegs().ptr(), nullptr) \
124 V(CodePtr, range_error_shared_without_fpu_regs_stub_, \
125 StubCode::RangeErrorSharedWithoutFPURegs().ptr(), nullptr) \
126 V(CodePtr, range_error_shared_with_fpu_regs_stub_, \
127 StubCode::RangeErrorSharedWithFPURegs().ptr(), nullptr) \
128 V(CodePtr, write_error_shared_without_fpu_regs_stub_, \
129 StubCode::WriteErrorSharedWithoutFPURegs().ptr(), nullptr) \
130 V(CodePtr, write_error_shared_with_fpu_regs_stub_, \
131 StubCode::WriteErrorSharedWithFPURegs().ptr(), nullptr) \
132 V(CodePtr, allocate_mint_with_fpu_regs_stub_, \
133 StubCode::AllocateMintSharedWithFPURegs().ptr(), nullptr) \
134 V(CodePtr, allocate_mint_without_fpu_regs_stub_, \
135 StubCode::AllocateMintSharedWithoutFPURegs().ptr(), nullptr) \
136 V(CodePtr, allocate_object_stub_, StubCode::AllocateObject().ptr(), nullptr) \
137 V(CodePtr, allocate_object_parameterized_stub_, \
138 StubCode::AllocateObjectParameterized().ptr(), nullptr) \
139 V(CodePtr, allocate_object_slow_stub_, StubCode::AllocateObjectSlow().ptr(), \
140 nullptr) \
141 V(CodePtr, async_exception_handler_stub_, \
142 StubCode::AsyncExceptionHandler().ptr(), nullptr) \
143 V(CodePtr, resume_stub_, StubCode::Resume().ptr(), nullptr) \
144 V(CodePtr, return_async_stub_, StubCode::ReturnAsync().ptr(), nullptr) \
145 V(CodePtr, return_async_not_future_stub_, \
146 StubCode::ReturnAsyncNotFuture().ptr(), nullptr) \
147 V(CodePtr, return_async_star_stub_, StubCode::ReturnAsyncStar().ptr(), \
148 nullptr) \
149 V(CodePtr, stack_overflow_shared_without_fpu_regs_stub_, \
150 StubCode::StackOverflowSharedWithoutFPURegs().ptr(), nullptr) \
151 V(CodePtr, stack_overflow_shared_with_fpu_regs_stub_, \
152 StubCode::StackOverflowSharedWithFPURegs().ptr(), nullptr) \
153 V(CodePtr, switchable_call_miss_stub_, StubCode::SwitchableCallMiss().ptr(), \
154 nullptr) \
155 V(CodePtr, throw_stub_, StubCode::Throw().ptr(), nullptr) \
156 V(CodePtr, re_throw_stub_, StubCode::Throw().ptr(), nullptr) \
157 V(CodePtr, assert_boolean_stub_, StubCode::AssertBoolean().ptr(), nullptr) \
158 V(CodePtr, optimize_stub_, StubCode::OptimizeFunction().ptr(), nullptr) \
159 V(CodePtr, deoptimize_stub_, StubCode::Deoptimize().ptr(), nullptr) \
160 V(CodePtr, lazy_deopt_from_return_stub_, \
161 StubCode::DeoptimizeLazyFromReturn().ptr(), nullptr) \
162 V(CodePtr, lazy_deopt_from_throw_stub_, \
163 StubCode::DeoptimizeLazyFromThrow().ptr(), nullptr) \
164 V(CodePtr, slow_type_test_stub_, StubCode::SlowTypeTest().ptr(), nullptr) \
165 V(CodePtr, lazy_specialize_type_test_stub_, \
166 StubCode::LazySpecializeTypeTest().ptr(), nullptr) \
167 V(CodePtr, enter_safepoint_stub_, StubCode::EnterSafepoint().ptr(), nullptr) \
168 V(CodePtr, exit_safepoint_stub_, StubCode::ExitSafepoint().ptr(), nullptr) \
169 V(CodePtr, exit_safepoint_ignore_unwind_in_progress_stub_, \
170 StubCode::ExitSafepointIgnoreUnwindInProgress().ptr(), nullptr) \
171 V(CodePtr, call_native_through_safepoint_stub_, \
172 StubCode::CallNativeThroughSafepoint().ptr(), nullptr)
173
174#define CACHED_NON_VM_STUB_LIST(V) \
175 V(ObjectPtr, object_null_, Object::null(), nullptr) \
176 V(BoolPtr, bool_true_, Object::bool_true().ptr(), nullptr) \
177 V(BoolPtr, bool_false_, Object::bool_false().ptr(), nullptr) \
178 V(ArrayPtr, empty_array_, Object::empty_array().ptr(), nullptr) \
179 V(TypeArgumentsPtr, empty_type_arguments_, \
180 Object::empty_type_arguments().ptr(), nullptr) \
181 V(TypePtr, dynamic_type_, Type::dynamic_type().ptr(), nullptr)
182
183// List of VM-global objects/addresses cached in each Thread object.
184// Important: constant false must immediately follow constant true.
185#define CACHED_VM_OBJECTS_LIST(V) \
186 CACHED_NON_VM_STUB_LIST(V) \
187 CACHED_VM_STUBS_LIST(V)
188
189#define CACHED_FUNCTION_ENTRY_POINTS_LIST(V) \
190 V(suspend_state_init_async) \
191 V(suspend_state_await) \
192 V(suspend_state_await_with_type_check) \
193 V(suspend_state_return_async) \
194 V(suspend_state_return_async_not_future) \
195 V(suspend_state_init_async_star) \
196 V(suspend_state_yield_async_star) \
197 V(suspend_state_return_async_star) \
198 V(suspend_state_init_sync_star) \
199 V(suspend_state_suspend_sync_star_at_start) \
200 V(suspend_state_handle_exception)
201
202// This assertion marks places which assume that boolean false immediately
203// follows bool true in the CACHED_VM_OBJECTS_LIST.
204#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE() \
205 ASSERT((Thread::bool_true_offset() + kWordSize) == \
206 Thread::bool_false_offset());
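// Editor's sketch of why the adjacency is worth asserting: with bool_false_
// exactly one word after bool_true_, generated code can materialize a Bool
// from a 0/1 condition with a single indexed load instead of a branch,
// conceptually:
//
//   // offset = Thread::bool_true_offset() + (is_false ? kWordSize : 0)
//   BoolPtr result = *reinterpret_cast<BoolPtr*>(thread_base + offset);
//
// This illustrates the idea; it is not the literal code the compiler emits.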
207
208#define CACHED_VM_STUBS_ADDRESSES_LIST(V) \
209 V(uword, write_barrier_entry_point_, StubCode::WriteBarrier().EntryPoint(), \
210 0) \
211 V(uword, array_write_barrier_entry_point_, \
212 StubCode::ArrayWriteBarrier().EntryPoint(), 0) \
213 V(uword, call_to_runtime_entry_point_, \
214 StubCode::CallToRuntime().EntryPoint(), 0) \
215 V(uword, allocate_mint_with_fpu_regs_entry_point_, \
216 StubCode::AllocateMintSharedWithFPURegs().EntryPoint(), 0) \
217 V(uword, allocate_mint_without_fpu_regs_entry_point_, \
218 StubCode::AllocateMintSharedWithoutFPURegs().EntryPoint(), 0) \
219 V(uword, allocate_object_entry_point_, \
220 StubCode::AllocateObject().EntryPoint(), 0) \
221 V(uword, allocate_object_parameterized_entry_point_, \
222 StubCode::AllocateObjectParameterized().EntryPoint(), 0) \
223 V(uword, allocate_object_slow_entry_point_, \
224 StubCode::AllocateObjectSlow().EntryPoint(), 0) \
225 V(uword, stack_overflow_shared_without_fpu_regs_entry_point_, \
226 StubCode::StackOverflowSharedWithoutFPURegs().EntryPoint(), 0) \
227 V(uword, stack_overflow_shared_with_fpu_regs_entry_point_, \
228 StubCode::StackOverflowSharedWithFPURegs().EntryPoint(), 0) \
229 V(uword, megamorphic_call_checked_entry_, \
230 StubCode::MegamorphicCall().EntryPoint(), 0) \
231 V(uword, switchable_call_miss_entry_, \
232 StubCode::SwitchableCallMiss().EntryPoint(), 0) \
233 V(uword, optimize_entry_, StubCode::OptimizeFunction().EntryPoint(), 0) \
234 V(uword, deoptimize_entry_, StubCode::Deoptimize().EntryPoint(), 0) \
235 V(uword, call_native_through_safepoint_entry_point_, \
236 StubCode::CallNativeThroughSafepoint().EntryPoint(), 0) \
237 V(uword, jump_to_frame_entry_point_, StubCode::JumpToFrame().EntryPoint(), \
238 0) \
239 V(uword, slow_type_test_entry_point_, StubCode::SlowTypeTest().EntryPoint(), \
240 0)
241
242#define CACHED_ADDRESSES_LIST(V) \
243 CACHED_VM_STUBS_ADDRESSES_LIST(V) \
244 V(uword, bootstrap_native_wrapper_entry_point_, \
245 NativeEntry::BootstrapNativeCallWrapperEntry(), 0) \
246 V(uword, no_scope_native_wrapper_entry_point_, \
247 NativeEntry::NoScopeNativeCallWrapperEntry(), 0) \
248 V(uword, auto_scope_native_wrapper_entry_point_, \
249 NativeEntry::AutoScopeNativeCallWrapperEntry(), 0) \
250 V(StringPtr*, predefined_symbols_address_, Symbols::PredefinedAddress(), \
251 nullptr) \
252 V(uword, double_nan_address_, reinterpret_cast<uword>(&double_nan_constant), \
253 0) \
254 V(uword, double_negate_address_, \
255 reinterpret_cast<uword>(&double_negate_constant), 0) \
256 V(uword, double_abs_address_, reinterpret_cast<uword>(&double_abs_constant), \
257 0) \
258 V(uword, float_not_address_, reinterpret_cast<uword>(&float_not_constant), \
259 0) \
260 V(uword, float_negate_address_, \
261 reinterpret_cast<uword>(&float_negate_constant), 0) \
262 V(uword, float_absolute_address_, \
263 reinterpret_cast<uword>(&float_absolute_constant), 0) \
264 V(uword, float_zerow_address_, \
265 reinterpret_cast<uword>(&float_zerow_constant), 0)
266
267#define CACHED_CONSTANTS_LIST(V) \
268 CACHED_VM_OBJECTS_LIST(V) \
269 CACHED_ADDRESSES_LIST(V)
270
271enum class ValidationPolicy {
272 kValidateFrames = 0,
273 kDontValidateFrames = 1,
274};
275
276enum class RuntimeCallDeoptAbility {
277 // There was no leaf call, or there was a leaf call that can cause
278 // deoptimization after the call.
279 kCanLazyDeopt,
280 // There was a leaf call and the VM cannot deoptimize after the call.
281 kCannotLazyDeopt,
282};
283
284// The safepoint level a thread is on or a safepoint operation is requested for.
285//
286// The higher the number the stronger the guarantees:
287// * the time-to-safepoint latency increases with level
288// * the frequency of hitting possible safe points decreases with level
289enum SafepointLevel {
290 // Safe to GC
291 kGC,
292 // Safe to GC as well as Deopt.
293 kGCAndDeopt,
294 // Safe to GC, Deopt as well as Reload.
295 kGCAndDeoptAndReload,
296 // Number of levels.
297 kNumLevels,
298
299 // No safepoint.
300 kNoSafepoint,
301};
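// Editor's note: a plain GC safepoint operation only needs participating
// threads to reach SafepointLevel::kGC, while e.g. a hot-reload operation
// must wait for every mutator to check in at kGCAndDeoptAndReload. A thread
// inside a [NoReloadScope] therefore still participates in GC safepoints
// even though it refuses reload safepoints (see current_safepoint_level()
// further down).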
302
303// Accessed from generated code.
304struct TsanUtils {
305 // Used to allow unwinding runtime C frames using longjmp() when throwing
306 // exceptions. This allows triggering the normal TSAN shadow stack unwinding
307 // implementation.
308 // -> See https://dartbug.com/47472#issuecomment-948235479 for details.
309#if defined(USING_THREAD_SANITIZER)
310 void* setjmp_function = reinterpret_cast<void*>(&setjmp);
311#else
312 // MSVC (on Windows) is not happy with getting address of purely intrinsic.
313 void* setjmp_function = nullptr;
314#endif
315 jmp_buf* setjmp_buffer = nullptr;
316 uword exception_pc = 0;
317 uword exception_sp = 0;
318 uword exception_fp = 0;
319
320 static intptr_t setjmp_function_offset() {
321 return OFFSET_OF(TsanUtils, setjmp_function);
322 }
323 static intptr_t setjmp_buffer_offset() {
324 return OFFSET_OF(TsanUtils, setjmp_buffer);
325 }
326 static intptr_t exception_pc_offset() {
327 return OFFSET_OF(TsanUtils, exception_pc);
328 }
329 static intptr_t exception_sp_offset() {
330 return OFFSET_OF(TsanUtils, exception_sp);
331 }
332 static intptr_t exception_fp_offset() {
333 return OFFSET_OF(TsanUtils, exception_fp);
334 }
335};
336
337// A VM thread; may be executing Dart code or performing helper tasks like
338// garbage collection or compilation. The Thread structure associated with
339// a thread is allocated by EnsureInit before entering an isolate, and destroyed
340// automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp
341// must currently be called manually (issue 23474).
342class Thread : public ThreadState {
343 public:
344 // The kind of task this thread is performing. Sampled by the profiler.
345 enum TaskKind {
346 kUnknownTask = 0x1,
347 kMutatorTask = 0x2,
348 kCompilerTask = 0x4,
349 kMarkerTask = 0x8,
350 kSweeperTask = 0x10,
351 kCompactorTask = 0x20,
352 kScavengerTask = 0x40,
353 kSampleBlockTask = 0x80,
354 kIncrementalCompactorTask = 0x100,
355 };
356 // Converts a TaskKind to its corresponding C-String name.
357 static const char* TaskKindToCString(TaskKind kind);
358
359 ~Thread();
360
361 // The currently executing thread, or nullptr if not yet initialized.
362 static Thread* Current() {
363 return static_cast<Thread*>(OSThread::CurrentVMThread());
364 }
365
366 // Whether there's any active state on the [thread] that needs to be preserved
367 // across `Thread::ExitIsolate()` and `Thread::EnterIsolate()`.
368 bool HasActiveState();
369 void AssertNonMutatorInvariants();
370 void AssertNonDartMutatorInvariants();
371 void AssertEmptyThreadInvariants();
373
374 // Makes the current thread enter 'isolate'.
375 static void EnterIsolate(Isolate* isolate);
376 // Makes the current thread exit its isolate.
377 static void ExitIsolate(bool isolate_shutdown = false);
378
379 static bool EnterIsolateGroupAsHelper(IsolateGroup* isolate_group,
380 TaskKind kind,
381 bool bypass_safepoint);
382 static void ExitIsolateGroupAsHelper(bool bypass_safepoint);
383
384 static void EnterIsolateGroupAsNonMutator(IsolateGroup* isolate_group,
385 TaskKind kind);
386 static void ExitIsolateGroupAsNonMutator();
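// Editor's sketch of the helper-task protocol (hypothetical call site,
// assuming a valid |isolate_group|):
//
//   if (Thread::EnterIsolateGroupAsHelper(isolate_group, Thread::kMarkerTask,
//                                         /*bypass_safepoint=*/false)) {
//     // ... do GC/compiler work on Thread::Current() ...
//     Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/false);
//   }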
387
388 // Empties the store buffer block into the isolate.
389 void ReleaseStoreBuffer();
392
393 void SetStackLimit(uword value);
394 void ClearStackLimit();
395
396 // Access to the current stack limit for generated code. Either the true OS
397 // thread's stack limit minus some headroom, or a special value to trigger
398 // interrupts.
399 uword stack_limit_address() const {
400 return reinterpret_cast<uword>(&stack_limit_);
401 }
402 static intptr_t stack_limit_offset() {
403 return OFFSET_OF(Thread, stack_limit_);
404 }
405
406 // The true stack limit for this OS thread.
407 static intptr_t saved_stack_limit_offset() {
408 return OFFSET_OF(Thread, saved_stack_limit_);
409 }
410 uword saved_stack_limit() const { return saved_stack_limit_; }
411
412#if defined(USING_SAFE_STACK)
413 uword saved_safestack_limit() const { return saved_safestack_limit_; }
414 void set_saved_safestack_limit(uword limit) {
415 saved_safestack_limit_ = limit;
416 }
417#endif
418 uword saved_shadow_call_stack() const { return saved_shadow_call_stack_; }
419 static intptr_t saved_shadow_call_stack_offset() {
420 return OFFSET_OF(Thread, saved_shadow_call_stack_);
421 }
422
423 // Stack overflow flags
424 enum {
425 kOsrRequest = 0x1, // Current stack overflow caused by OSR request.
426 };
427
428 uword write_barrier_mask() const { return write_barrier_mask_; }
429 uword heap_base() const {
430#if defined(DART_COMPRESSED_POINTERS)
431 return heap_base_;
432#else
433 return 0;
434#endif
435 }
436
437 static intptr_t write_barrier_mask_offset() {
438 return OFFSET_OF(Thread, write_barrier_mask_);
439 }
440#if defined(DART_COMPRESSED_POINTERS)
441 static intptr_t heap_base_offset() { return OFFSET_OF(Thread, heap_base_); }
442#endif
443 static intptr_t stack_overflow_flags_offset() {
444 return OFFSET_OF(Thread, stack_overflow_flags_);
445 }
446
447 uint32_t IncrementAndGetStackOverflowCount() {
448 return ++stack_overflow_count_;
449 }
450
451 uint32_t IncrementAndGetRuntimeCallCount() { return ++runtime_call_count_; }
452
453 static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs) {
454 return fpu_regs
455 ? stack_overflow_shared_with_fpu_regs_entry_point_offset()
456 : stack_overflow_shared_without_fpu_regs_entry_point_offset();
457 }
458
459 static intptr_t safepoint_state_offset() {
460 return OFFSET_OF(Thread, safepoint_state_);
461 }
462
463 // Tag state is maintained on transitions.
464 enum {
465 // Always true in generated state.
466 kDidNotExit = 0,
467 // The VM exited the generated state through FFI.
468 // This can be true in both native and VM state.
469 kExitThroughFfi = 1,
470 // The VM exited the generated state through a runtime call.
471 // This can be true in both native and VM state.
472 kExitThroughRuntimeCall = 2,
473 };
474
475 static intptr_t exit_through_ffi_offset() {
476 return OFFSET_OF(Thread, exit_through_ffi_);
477 }
478
479 TaskKind task_kind() const { return task_kind_; }
480
481 // Retrieves and clears the stack overflow flags. These are set by
482 // the generated code before the slow path runtime routine for a
483 // stack overflow is called.
484 uword GetAndClearStackOverflowFlags();
485
486 // Interrupt bits.
487 enum {
488 kVMInterrupt = 0x1, // Internal VM checks: safepoints, store buffers, etc.
489 kMessageInterrupt = 0x2, // An interrupt to process an out of band message.
490
491 kInterruptsMask = (kVMInterrupt | kMessageInterrupt),
492 };
493
494 void ScheduleInterrupts(uword interrupt_bits);
495 ErrorPtr HandleInterrupts();
496 uword GetAndClearInterrupts();
497 bool HasScheduledInterrupts() const {
498 return (stack_limit_.load() & kInterruptsMask) != 0;
499 }
500
501 // Monitor corresponding to this thread.
502 Monitor* thread_lock() const { return &thread_lock_; }
503
504 // The reusable api local scope for this thread.
505 ApiLocalScope* api_reusable_scope() const { return api_reusable_scope_; }
506 void set_api_reusable_scope(ApiLocalScope* value) {
507 ASSERT(value == nullptr || api_reusable_scope_ == nullptr);
508 api_reusable_scope_ = value;
509 }
510
511 // The api local scope for this thread; this is where all local handles
512 // are allocated.
513 ApiLocalScope* api_top_scope() const { return api_top_scope_; }
514 void set_api_top_scope(ApiLocalScope* value) { api_top_scope_ = value; }
515 static intptr_t api_top_scope_offset() {
516 return OFFSET_OF(Thread, api_top_scope_);
517 }
518
519 void EnterApiScope();
520 void ExitApiScope();
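// Editor's note: EnterApiScope()/ExitApiScope() are the VM-internal halves
// of the embedder's Dart_EnterScope()/Dart_ExitScope(); local handles
// allocated in between belong to api_top_scope() and die with the scope.
// A sketch:
//
//   thread->EnterApiScope();
//   // ... allocate Dart_Handle values owned by api_top_scope() ...
//   thread->ExitApiScope();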
521
522 static intptr_t double_truncate_round_supported_offset() {
523 return OFFSET_OF(Thread, double_truncate_round_supported_);
524 }
525
526 static intptr_t tsan_utils_offset() { return OFFSET_OF(Thread, tsan_utils_); }
527
528#if defined(USING_THREAD_SANITIZER)
529 uword exit_through_ffi() const { return exit_through_ffi_; }
530 TsanUtils* tsan_utils() const { return tsan_utils_; }
531#endif // defined(USING_THREAD_SANITIZER)
532
533 // The isolate that this thread is operating on, or nullptr if none.
534 Isolate* isolate() const { return isolate_; }
535 static intptr_t isolate_offset() { return OFFSET_OF(Thread, isolate_); }
536 static intptr_t isolate_group_offset() {
537 return OFFSET_OF(Thread, isolate_group_);
538 }
539
540 // The isolate group that this thread is operating on, or nullptr if none.
541 IsolateGroup* isolate_group() const { return isolate_group_; }
542
543 static intptr_t field_table_values_offset() {
544 return OFFSET_OF(Thread, field_table_values_);
545 }
546
547 static intptr_t shared_field_table_values_offset() {
548 return OFFSET_OF(Thread, shared_field_table_values_);
549 }
550
551 bool IsDartMutatorThread() const {
552 return scheduled_dart_mutator_isolate_ != nullptr;
553 }
554
555 // Returns the dart mutator [Isolate] this thread belongs to or nullptr.
556 //
557 // `isolate()` in comparison can return
558 // - `nullptr` for dart mutators (e.g. if the mutator runs under
559 // [NoActiveIsolateScope])
560 // - an incorrect isolate (e.g. if [ActiveIsolateScope] is used to seemingly
561 // enter another isolate)
562 Isolate* scheduled_dart_mutator_isolate() const {
563 return scheduled_dart_mutator_isolate_;
564 }
565
566#if defined(DEBUG)
567 bool IsInsideCompiler() const { return inside_compiler_; }
568#endif
569
570 // Offset of Dart TimelineStream object.
571 static intptr_t dart_stream_offset() {
572 return OFFSET_OF(Thread, dart_stream_);
573 }
574
575 // Offset of the Dart VM Service Extension StreamInfo object.
576 static intptr_t service_extension_stream_offset() {
577 return OFFSET_OF(Thread, service_extension_stream_);
578 }
579
580 // Is |this| executing Dart code?
581 bool IsExecutingDartCode() const;
582
583 // Has |this| exited Dart code?
584 bool HasExitedDartCode() const;
585
586 bool HasCompilerState() const { return compiler_state_ != nullptr; }
587
587
588 CompilerState& compiler_state() {
589 ASSERT(compiler_state_ != nullptr);
590 return *compiler_state_;
591 }
592
593 HierarchyInfo* hierarchy_info() const {
594 ASSERT(isolate_group_ != nullptr);
595 return hierarchy_info_;
596 }
597
598 void set_hierarchy_info(HierarchyInfo* value) {
599 ASSERT(isolate_group_ != nullptr);
600 ASSERT((hierarchy_info_ == nullptr && value != nullptr) ||
601 (hierarchy_info_ != nullptr && value == nullptr));
602 hierarchy_info_ = value;
603 }
604
605 TypeUsageInfo* type_usage_info() const {
606 ASSERT(isolate_group_ != nullptr);
607 return type_usage_info_;
608 }
609
610 void set_type_usage_info(TypeUsageInfo* value) {
611 ASSERT(isolate_group_ != nullptr);
612 ASSERT((type_usage_info_ == nullptr && value != nullptr) ||
613 (type_usage_info_ != nullptr && value == nullptr));
614 type_usage_info_ = value;
615 }
616
617 CompilerTimings* compiler_timings() const { return compiler_timings_; }
618
619 void set_compiler_timings(CompilerTimings* stats) {
620 compiler_timings_ = stats;
621 }
622
623 int32_t no_callback_scope_depth() const { return no_callback_scope_depth_; }
624 void IncrementNoCallbackScopeDepth() {
625 ASSERT(no_callback_scope_depth_ < INT_MAX);
626 no_callback_scope_depth_ += 1;
627 }
628 void DecrementNoCallbackScopeDepth() {
629 ASSERT(no_callback_scope_depth_ > 0);
630 no_callback_scope_depth_ -= 1;
631 }
632
633 bool force_growth() const { return force_growth_scope_depth_ != 0; }
634 void IncrementForceGrowthScopeDepth() {
635 ASSERT(force_growth_scope_depth_ < INT_MAX);
636 force_growth_scope_depth_ += 1;
637 }
638 void DecrementForceGrowthScopeDepth() {
639 ASSERT(force_growth_scope_depth_ > 0);
640 force_growth_scope_depth_ -= 1;
641 }
642
643 bool is_unwind_in_progress() const { return is_unwind_in_progress_; }
644
645 void StartUnwindError() {
646 is_unwind_in_progress_ = true;
647 SetUnwindErrorInProgress(true);
648 }
649
650#if defined(DEBUG)
651 void EnterCompiler() {
652 ASSERT(!IsInsideCompiler());
653 inside_compiler_ = true;
654 }
655
656 void LeaveCompiler() {
657 ASSERT(IsInsideCompiler());
658 inside_compiler_ = false;
659 }
660#endif
661
662 void StoreBufferAddObject(ObjectPtr obj);
663 void StoreBufferAddObjectGC(ObjectPtr obj);
664#if defined(TESTING)
665 bool StoreBufferContains(ObjectPtr obj) const {
666 return store_buffer_block_->Contains(obj);
667 }
668#endif
672 static intptr_t store_buffer_block_offset() {
673 return OFFSET_OF(Thread, store_buffer_block_);
674 }
675
676 bool is_marking() const { return old_marking_stack_block_ != nullptr; }
684 static intptr_t old_marking_stack_block_offset() {
685 return OFFSET_OF(Thread, old_marking_stack_block_);
686 }
687 static intptr_t new_marking_stack_block_offset() {
688 return OFFSET_OF(Thread, new_marking_stack_block_);
689 }
690
691 uword top_exit_frame_info() const { return top_exit_frame_info_; }
692 void set_top_exit_frame_info(uword top_exit_frame_info) {
693 top_exit_frame_info_ = top_exit_frame_info;
694 }
695 static intptr_t top_exit_frame_info_offset() {
696 return OFFSET_OF(Thread, top_exit_frame_info_);
697 }
698
699 Heap* heap() const;
700
701 // The TLAB memory boundaries.
702 //
703 // When the heap sampling profiler is enabled, we use the TLAB boundary to
704 // trigger slow path allocations so we can take a sample. This means that
705 // true_end() >= end(), where true_end() is the actual end address of the
706 // TLAB and end() is the chosen sampling boundary for the thread.
707 //
708 // When the heap sampling profiler is disabled, true_end() == end().
709 uword top() const { return top_; }
710 uword end() const { return end_; }
711 uword true_end() const { return true_end_; }
712 void set_top(uword top) { top_ = top; }
713 void set_end(uword end) { end_ = end; }
714 void set_true_end(uword true_end) { true_end_ = true_end; }
715 static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
716 static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }
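// Editor's sketch of the bump-pointer fast path these fields enable (the
// real sequence is emitted by the compiler; the function name here is
// illustrative):
//
//   uword TryAllocateFromTLAB(Thread* thread, intptr_t size) {
//     if (thread->top() + size <= thread->end()) {
//       uword result = thread->top();
//       thread->set_top(result + size);
//       return result;  // success: untagged address of the new object
//     }
//     return 0;  // fall back to the runtime slow path (may sample or GC)
//   }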
717
718 int32_t no_safepoint_scope_depth() const {
719#if defined(DEBUG)
720 return no_safepoint_scope_depth_;
721#else
722 return 0;
723#endif
724 }
725
726 void IncrementNoSafepointScopeDepth() {
727#if defined(DEBUG)
728 ASSERT(no_safepoint_scope_depth_ < INT_MAX);
729 no_safepoint_scope_depth_ += 1;
730#endif
731 }
732
733 void DecrementNoSafepointScopeDepth() {
734#if defined(DEBUG)
735 ASSERT(no_safepoint_scope_depth_ > 0);
736 no_safepoint_scope_depth_ -= 1;
737#endif
738 }
739
740 bool IsInNoReloadScope() const { return no_reload_scope_depth_ > 0; }
741
742 bool IsInStoppedMutatorsScope() const {
743 return stopped_mutators_scope_depth_ > 0;
744 }
745
746#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
747 static intptr_t member_name##offset() { \
748 return OFFSET_OF(Thread, member_name); \
749 }
750 CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
751#undef DEFINE_OFFSET_METHOD
752
753 static intptr_t write_barrier_wrappers_thread_offset(Register reg) {
754 ASSERT((kDartAvailableCpuRegs & (1 << reg)) != 0);
755 intptr_t index = 0;
756 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
757 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
758 if (i == reg) break;
759 ++index;
760 }
761 return OFFSET_OF(Thread, write_barrier_wrappers_entry_points_) +
762 index * sizeof(uword);
763 }
764
765 static intptr_t WriteBarrierWrappersOffsetForRegister(Register reg) {
766 intptr_t index = 0;
767 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
768 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
769 if (i == reg) {
770 return index * kStoreBufferWrapperSize;
771 }
772 ++index;
773 }
774 UNREACHABLE();
775 return 0;
776 }
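// Editor's worked example (hypothetical register set): if
// kDartAvailableCpuRegs had only the bits for R0, R2 and R5 set, the loops
// above would map R0 -> 0, R2 -> 1, R5 -> 2, so R5's wrapper is found at
// 2 * kStoreBufferWrapperSize within the write-barrier wrapper code.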
777
778#define DEFINE_OFFSET_METHOD(name) \
779 static intptr_t name##_entry_point_offset() { \
780 return OFFSET_OF(Thread, name##_entry_point_); \
781 }
782 RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
783#undef DEFINE_OFFSET_METHOD
784
785#define DEFINE_OFFSET_METHOD(returntype, name, ...) \
786 static intptr_t name##_entry_point_offset() { \
787 return OFFSET_OF(Thread, name##_entry_point_); \
788 }
789 LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
790#undef DEFINE_OFFSET_METHOD
791
792 ObjectPoolPtr global_object_pool() const { return global_object_pool_; }
793 void set_global_object_pool(ObjectPoolPtr raw_value) {
794 global_object_pool_ = raw_value;
795 }
796
797 const uword* dispatch_table_array() const { return dispatch_table_array_; }
798 void set_dispatch_table_array(const uword* array) {
799 dispatch_table_array_ = array;
800 }
801
802 static bool CanLoadFromThread(const Object& object);
803 static intptr_t OffsetFromThread(const Object& object);
804 static bool ObjectAtOffset(intptr_t offset, Object* object);
805 static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);
806
807#define DEFINE_OFFSET_METHOD(name) \
808 static intptr_t name##_entry_point_offset() { \
809 return OFFSET_OF(Thread, name##_entry_point_); \
810 }
811 CACHED_FUNCTION_ENTRY_POINTS_LIST(DEFINE_OFFSET_METHOD)
812#undef DEFINE_OFFSET_METHOD
813
814#if defined(DEBUG)
815 // For asserts only. Has false positives when running with a simulator or
816 // SafeStack.
817 bool TopErrorHandlerIsSetJump() const;
818 bool TopErrorHandlerIsExitFrame() const;
819#endif
820
821 uword vm_tag() const { return vm_tag_; }
822 void set_vm_tag(uword tag) { vm_tag_ = tag; }
823 static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }
824
825 int64_t unboxed_int64_runtime_arg() const {
826 return unboxed_runtime_arg_.int64_storage[0];
827 }
828 void set_unboxed_int64_runtime_arg(int64_t value) {
829 unboxed_runtime_arg_.int64_storage[0] = value;
830 }
831 int64_t unboxed_int64_runtime_second_arg() const {
832 return unboxed_runtime_arg_.int64_storage[1];
833 }
834 void set_unboxed_int64_runtime_second_arg(int64_t value) {
835 unboxed_runtime_arg_.int64_storage[1] = value;
836 }
837 double unboxed_double_runtime_arg() const {
838 return unboxed_runtime_arg_.double_storage[0];
839 }
840 void set_unboxed_double_runtime_arg(double value) {
841 unboxed_runtime_arg_.double_storage[0] = value;
842 }
843 simd128_value_t unboxed_simd128_runtime_arg() const {
844 return unboxed_runtime_arg_;
845 }
846 void set_unboxed_simd128_runtime_arg(simd128_value_t value) {
847 unboxed_runtime_arg_ = value;
848 }
849 static intptr_t unboxed_runtime_arg_offset() {
850 return OFFSET_OF(Thread, unboxed_runtime_arg_);
851 }
852
853 static intptr_t global_object_pool_offset() {
854 return OFFSET_OF(Thread, global_object_pool_);
855 }
856
857 static intptr_t dispatch_table_array_offset() {
858 return OFFSET_OF(Thread, dispatch_table_array_);
859 }
860
861 ObjectPtr active_exception() const { return active_exception_; }
862 void set_active_exception(const Object& value);
863 static intptr_t active_exception_offset() {
864 return OFFSET_OF(Thread, active_exception_);
865 }
866
867 ObjectPtr active_stacktrace() const { return active_stacktrace_; }
868 void set_active_stacktrace(const Object& value);
869 static intptr_t active_stacktrace_offset() {
870 return OFFSET_OF(Thread, active_stacktrace_);
871 }
872
873 uword resume_pc() const { return resume_pc_; }
874 void set_resume_pc(uword value) { resume_pc_ = value; }
875 static uword resume_pc_offset() { return OFFSET_OF(Thread, resume_pc_); }
876
877 ErrorPtr sticky_error() const;
878 void set_sticky_error(const Error& value);
879 void ClearStickyError();
880 DART_WARN_UNUSED_RESULT ErrorPtr StealStickyError();
881
882#if defined(DEBUG)
883#define REUSABLE_HANDLE_SCOPE_ACCESSORS(object) \
884 void set_reusable_##object##_handle_scope_active(bool value) { \
885 reusable_##object##_handle_scope_active_ = value; \
886 } \
887 bool reusable_##object##_handle_scope_active() const { \
888 return reusable_##object##_handle_scope_active_; \
889 }
890 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_ACCESSORS)
891#undef REUSABLE_HANDLE_SCOPE_ACCESSORS
892
893 bool IsAnyReusableHandleScopeActive() const {
894#define IS_REUSABLE_HANDLE_SCOPE_ACTIVE(object) \
895 if (reusable_##object##_handle_scope_active_) { \
896 return true; \
897 }
898 REUSABLE_HANDLE_LIST(IS_REUSABLE_HANDLE_SCOPE_ACTIVE)
899 return false;
900#undef IS_REUSABLE_HANDLE_SCOPE_ACTIVE
901 }
902#endif // defined(DEBUG)
903
904 VMHandles* reusable_handles() { return &reusable_handles_; }
905
906#define REUSABLE_HANDLE(object) \
907 object& object##Handle() const { return *object##_handle_; }
908 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE)
909#undef REUSABLE_HANDLE
910
911 static bool IsAtSafepoint(SafepointLevel level, uword state) {
912 const uword mask = AtSafepointBits(level);
913 return (state & mask) == mask;
914 }
915
916 // Whether the current thread is owning any safepoint level.
917 bool IsAtSafepoint() const {
918 // Owning a higher level safepoint implies owning the lower levels as well.
919 return IsAtSafepoint(SafepointLevel::kGC);
920 }
921 bool IsAtSafepoint(SafepointLevel level) const {
922 return IsAtSafepoint(level, safepoint_state_.load());
923 }
924 void SetAtSafepoint(bool value, SafepointLevel level) {
925 ASSERT(thread_lock()->IsOwnedByCurrentThread());
926 ASSERT(level <= current_safepoint_level());
927 if (value) {
928 safepoint_state_ |= AtSafepointBits(level);
929 } else {
930 safepoint_state_ &= ~AtSafepointBits(level);
931 }
932 }
933 void SetAtSafepoint(bool value) {
934 ASSERT(thread_lock()->IsOwnedByCurrentThread());
935 SetAtSafepoint(value, current_safepoint_level());
936 }
937 bool IsSafepointRequested() const {
938 return IsSafepointRequested(current_safepoint_level());
939 }
940 bool IsSafepointRequested(SafepointLevel level) const {
941 const uword state = safepoint_state_.load();
942 for (intptr_t i = level; i >= 0; --i) {
943 if (IsSafepointLevelRequested(state, static_cast<SafepointLevel>(i)))
944 return true;
945 }
946 return false;
947 }
948 bool IsSafepointLevelRequestedLocked(SafepointLevel level) const {
949 ASSERT(thread_lock()->IsOwnedByCurrentThread());
950 if (level > current_safepoint_level()) return false;
951 const uword state = safepoint_state_.load();
952 return IsSafepointLevelRequested(state, level);
953 }
954
955 static bool IsSafepointLevelRequested(uword state, SafepointLevel level) {
956 switch (level) {
957 case SafepointLevel::kGC:
958 return (state & SafepointRequestedField::mask_in_place()) != 0;
959 case SafepointLevel::kGCAndDeopt:
960 return (state & DeoptSafepointRequestedField::mask_in_place()) != 0;
961 case SafepointLevel::kGCAndDeoptAndReload:
962 return (state & ReloadSafepointRequestedField::mask_in_place()) != 0;
963 default:
964 UNREACHABLE();
965 }
966 }
967
968 void BlockForSafepoint();
969
970 uword SetSafepointRequested(bool value, SafepointLevel level) {
971 ASSERT(thread_lock()->IsOwnedByCurrentThread());
972
973 uword mask = 0;
974 switch (level) {
975 case SafepointLevel::kGC:
976 mask = SafepointRequestedField::mask_in_place();
977 break;
978 case SafepointLevel::kGCAndDeopt:
979 mask = DeoptSafepointRequestedField::mask_in_place();
980 break;
981 case SafepointLevel::kGCAndDeoptAndReload:
982 mask = ReloadSafepointRequestedField::mask_in_place();
983 break;
984 default:
985 UNREACHABLE();
986 }
987
988 if (value) {
989 // acquire pulls from the release in TryEnterSafepoint.
990 return safepoint_state_.fetch_or(mask, std::memory_order_acquire);
991 } else {
992 // release pushes to the acquire in TryExitSafepoint.
993 return safepoint_state_.fetch_and(~mask, std::memory_order_release);
994 }
995 }
996 static bool IsBlockedForSafepoint(uword state) {
997 return BlockedForSafepointField::decode(state);
998 }
999 bool IsBlockedForSafepoint() const {
1000 return BlockedForSafepointField::decode(safepoint_state_);
1001 }
1002 void SetBlockedForSafepoint(bool value) {
1003 ASSERT(thread_lock()->IsOwnedByCurrentThread());
1004 safepoint_state_ =
1005 BlockedForSafepointField::update(value, safepoint_state_);
1006 }
1007 bool BypassSafepoints() const {
1008 return BypassSafepointsField::decode(safepoint_state_);
1009 }
1010 static uword SetBypassSafepoints(bool value, uword state) {
1011 return BypassSafepointsField::update(value, state);
1012 }
1013 bool UnwindErrorInProgress() const {
1014 return UnwindErrorInProgressField::decode(safepoint_state_);
1015 }
1016 void SetUnwindErrorInProgress(bool value) {
1017 const uword mask = UnwindErrorInProgressField::mask_in_place();
1018 if (value) {
1019 safepoint_state_.fetch_or(mask);
1020 } else {
1021 safepoint_state_.fetch_and(~mask);
1022 }
1023 }
1024
1025 bool OwnsGCSafepoint() const;
1026 bool OwnsReloadSafepoint() const;
1027 bool OwnsDeoptSafepoint() const;
1028 bool OwnsSafepoint() const;
1029 bool CanAcquireSafepointLocks() const;
1030
1031 uword safepoint_state() { return safepoint_state_; }
1032
1032
1033 enum ExecutionState {
1034 kThreadInVM = 0,
1035 kThreadInGenerated,
1036 kThreadInNative,
1037 kThreadInBlockedState,
1038 };
1039
1040 ExecutionState execution_state() const {
1041 return static_cast<ExecutionState>(execution_state_);
1042 }
1043 // Normally execution state is only accessed for the current thread.
1044 NO_SANITIZE_THREAD
1045 ExecutionState execution_state_cross_thread_for_testing() const {
1046 return static_cast<ExecutionState>(execution_state_);
1047 }
1048 void set_execution_state(ExecutionState state) {
1049 execution_state_ = static_cast<uword>(state);
1050 }
1051 static intptr_t execution_state_offset() {
1052 return OFFSET_OF(Thread, execution_state_);
1053 }
1054
1055 virtual bool MayAllocateHandles() {
1056 return (execution_state() == kThreadInVM) ||
1057 (execution_state() == kThreadInBlockedState);
1058 }
1059
1060 static uword full_safepoint_state_unacquired() {
1061 return (0 << AtSafepointField::shift()) |
1062 (0 << AtDeoptSafepointField::shift());
1063 }
1064 static uword full_safepoint_state_acquired() {
1065 return (1 << AtSafepointField::shift()) |
1066 (1 << AtDeoptSafepointField::shift());
1067 }
1068
1069 bool TryEnterSafepoint() {
1070 uword old_state = 0;
1071 uword new_state = AtSafepointBits(current_safepoint_level());
1072 return safepoint_state_.compare_exchange_strong(old_state, new_state,
1073 std::memory_order_release);
1074 }
1075
1076 void EnterSafepoint() {
1077 ASSERT(no_safepoint_scope_depth() == 0);
1078 // First try a fast update of the thread state to indicate it is at a
1079 // safepoint.
1080 if (!TryEnterSafepoint()) {
1081 // Fast update failed which means we could potentially be in the middle
1082 // of a safepoint operation.
1083 EnterSafepointUsingLock();
1084 }
1085 }
1086
1087 bool TryExitSafepoint() {
1088 uword old_state = AtSafepointBits(current_safepoint_level());
1089 uword new_state = 0;
1090 return safepoint_state_.compare_exchange_strong(old_state, new_state,
1091 std::memory_order_acquire);
1092 }
1093
1094 void ExitSafepoint() {
1095 // First try a fast update of the thread state to indicate it is not at a
1096 // safepoint anymore.
1097 if (!TryExitSafepoint()) {
1098 // Fast update failed which means we could potentially be in the middle
1099 // of a safepoint operation.
1100 ExitSafepointUsingLock();
1101 }
1102 }
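// Editor's note on the fast/slow safepoint protocol above: TryEnterSafepoint
// publishes "at safepoint" with one compare-and-swap of safepoint_state_ from
// 0 to AtSafepointBits(...); the CAS fails exactly when some other bit (e.g.
// a *SafepointRequestedField bit) is already set, in which case the thread
// falls back to EnterSafepointUsingLock() and coordinates via thread_lock().
// Typical use, sketched:
//
//   thread->EnterSafepoint();   // e.g. before blocking in native code
//   // ... GC/deopt/reload may run while this thread is parked ...
//   thread->ExitSafepoint();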
1103
1104 void CheckForSafepoint() {
1105 // If we are in a runtime call that doesn't support lazy deopt, we will only
1106 // respond to gc safepointing requests.
1107 ASSERT(no_safepoint_scope_depth() == 0);
1108 if (IsSafepointRequested()) {
1109 BlockForSafepoint();
1110 }
1111 }
1112
1113 Thread* next() const { return next_; }
1114
1115 // Visit all object pointers.
1116 void VisitObjectPointers(ObjectPointerVisitor* visitor,
1117 ValidationPolicy validate_frames);
1118 void RememberLiveTemporaries();
1119 void DeferredMarkLiveTemporaries();
1120
1121 bool IsValidHandle(Dart_Handle object) const;
1122 bool IsValidLocalHandle(Dart_Handle object) const;
1123 intptr_t CountLocalHandles() const;
1124 int ZoneSizeInBytes() const;
1125 void UnwindScopes(uword stack_marker);
1126
1127 void InitVMConstants();
1128
1129 int64_t GetNextTaskId() { return next_task_id_++; }
1130 static intptr_t next_task_id_offset() {
1131 return OFFSET_OF(Thread, next_task_id_);
1132 }
1133 Random* random() { return &thread_random_; }
1134 static intptr_t random_offset() { return OFFSET_OF(Thread, thread_random_); }
1135
1136#ifndef PRODUCT
1137 void PrintJSON(JSONStream* stream) const;
1138#endif
1139
1140#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1141 HeapProfileSampler& heap_sampler() { return heap_sampler_; }
1142#endif
1143
1144 PendingDeopts& pending_deopts() { return pending_deopts_; }
1145
1146 SafepointLevel current_safepoint_level() const {
1147 if (runtime_call_deopt_ability_ ==
1148 RuntimeCallDeoptAbility::kCannotLazyDeopt) {
1149 return SafepointLevel::kGC;
1150 }
1151 if (no_reload_scope_depth_ > 0 || allow_reload_scope_depth_ <= 0) {
1152 return SafepointLevel::kGCAndDeopt;
1153 }
1154 return SafepointLevel::kGCAndDeoptAndReload;
1155 }
1156
1157 private:
1158 template <class T>
1159 T* AllocateReusableHandle();
1160
1161 enum class RestoreWriteBarrierInvariantOp {
1162 kAddToRememberedSet,
1163 kAddToDeferredMarkingStack
1164 };
1166 void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);
1167
1168 // Set the current compiler state and return the previous compiler state.
1169 CompilerState* SetCompilerState(CompilerState* state) {
1170 CompilerState* previous = compiler_state_;
1171 compiler_state_ = state;
1172 return previous;
1173 }
1174
1175 // Accessed from generated code.
1176 // ** This block of fields must come first! **
1177 // For AOT cross-compilation, we rely on these members having the same offsets
1178 // in SIMARM(IA32) and ARM, and the same offsets in SIMARM64(X64) and ARM64.
1179 // We use only word-sized fields to avoid differences in struct packing on the
1180 // different architectures. See also CheckOffsets in dart.cc.
1181 volatile RelaxedAtomic<uword> stack_limit_ = 0;
1182 uword write_barrier_mask_;
1183#if defined(DART_COMPRESSED_POINTERS)
1184 uword heap_base_ = 0;
1185#endif
1186 uword top_ = 0;
1187 uword end_ = 0;
1188 const uword* dispatch_table_array_ = nullptr;
1189 ObjectPtr* field_table_values_ = nullptr;
1190 ObjectPtr* shared_field_table_values_ = nullptr;
1191
1192 // Offsets up to this point can all fit in a byte on X64. All of the above
1193 // fields are very abundantly accessed from code. Thus, keeping them first
1194 // is important for code size (although code size on X64 is not a priority).
1195
1196// State that is cached in the TLS for fast access in generated code.
1197#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
1198 type_name member_name;
1199 CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
1200#undef DECLARE_MEMBERS
1201
1202#define DECLARE_MEMBERS(name) uword name##_entry_point_;
1203 RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
1204#undef DECLARE_MEMBERS
1205
1206#define DECLARE_MEMBERS(returntype, name, ...) uword name##_entry_point_;
1207 LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
1208#undef DECLARE_MEMBERS
1209
1210 uword write_barrier_wrappers_entry_points_[kNumberOfDartAvailableCpuRegs];
1211
1212#define DECLARE_MEMBERS(name) uword name##_entry_point_ = 0;
1213 CACHED_FUNCTION_ENTRY_POINTS_LIST(DECLARE_MEMBERS)
1214#undef DECLARE_MEMBERS
1215
1216 Isolate* isolate_ = nullptr;
1217 IsolateGroup* isolate_group_ = nullptr;
1218
1219 uword saved_stack_limit_ = OSThread::kInvalidStackLimit;
1220 // The mutator uses this to indicate it wants to OSR (by
1221 // setting [Thread::kOsrRequest]) before going to the runtime, which will
1222 // see this bit.
1223 uword stack_overflow_flags_ = 0;
1224 uword volatile top_exit_frame_info_ = 0;
1225 StoreBufferBlock* store_buffer_block_ = nullptr;
1226 MarkingStackBlock* old_marking_stack_block_ = nullptr;
1227 MarkingStackBlock* new_marking_stack_block_ = nullptr;
1228 MarkingStackBlock* deferred_marking_stack_block_ = nullptr;
1229 uword volatile vm_tag_ = 0;
1230 // Memory locations dedicated for passing unboxed int64 and double
1231 // values from generated code to runtime.
1232 // TODO(dartbug.com/33549): Clean this up when unboxed values
1233 // could be passed as arguments.
1234 ALIGN8 simd128_value_t unboxed_runtime_arg_;
1235
1236 // JumpToExceptionHandler state:
1237 ObjectPtr active_exception_;
1238 ObjectPtr active_stacktrace_;
1239
1240 ObjectPoolPtr global_object_pool_;
1241 uword resume_pc_;
1242 uword saved_shadow_call_stack_ = 0;
1243
1244 /*
1245 * The execution state for a thread.
1246 *
1247 * Potential execution states a thread could be in:
1248 * kThreadInGenerated - The thread is running jitted dart/stub code.
1249 * kThreadInVM - The thread is running VM code.
1250 * kThreadInNative - The thread is running native code.
1251 * kThreadInBlockedState - The thread is blocked waiting for a resource.
1252 *
1253 * Warning: Execution state doesn't imply the safepoint state. It's possible
1254 * to be in [kThreadInNative] and still not be at-safepoint (e.g. due to a
1255 * pending Dart_TypedDataAcquire() that increases no-callback-scope)
1256 */
1257 uword execution_state_;
1258
1259 /*
1260 * Stores
1261 *
1262 * - whether the thread is at a safepoint (current thread sets these)
1263 * [AtSafepointField]
1264 * [AtDeoptSafepointField]
1265 * [AtReloadSafepointField]
1266 *
1267 * - whether the thread is requested to safepoint (other thread sets these)
1268 * [SafepointRequestedField]
1269 * [DeoptSafepointRequestedField]
1270 * [ReloadSafepointRequestedField]
1271 *
1272 * - whether the thread is blocked due to safepoint request and needs to
1273 * be resumed after safepoint is done (current thread sets this)
1274 * [BlockedForSafepointField]
1275 *
1276 * - whether the thread should be ignored for safepointing purposes
1277 * [BypassSafepointsField]
1278 *
1279 * - whether the isolate running this thread has triggered an unwind error,
1280 * which requires enforced exit on a transition from native back to
1281 * generated.
1282 * [UnwindErrorInProgressField]
1283 */
1284 std::atomic<uword> safepoint_state_;
1285 uword exit_through_ffi_ = 0;
1286 ApiLocalScope* api_top_scope_;
1287 uint8_t double_truncate_round_supported_;
1288 ALIGN8 int64_t next_task_id_;
1289 ALIGN8 Random thread_random_;
1290
1291 TsanUtils* tsan_utils_ = nullptr;
1292
1293 // ---- End accessed from generated code. ----
1294
1295 // The layout of Thread object up to this point should not depend
1296 // on DART_PRECOMPILED_RUNTIME, as it is accessed from generated code.
1297 // The code is generated without DART_PRECOMPILED_RUNTIME, but used with
1298 // DART_PRECOMPILED_RUNTIME.
1299
1300 uword true_end_ = 0;
1301 TaskKind task_kind_;
1302 TimelineStream* const dart_stream_;
1303 StreamInfo* const service_extension_stream_;
1304 mutable Monitor thread_lock_;
1305 ApiLocalScope* api_reusable_scope_;
1306 int32_t no_callback_scope_depth_;
1307 int32_t force_growth_scope_depth_ = 0;
1308 intptr_t no_reload_scope_depth_ = 0;
1309 intptr_t allow_reload_scope_depth_ = 0;
1310 intptr_t stopped_mutators_scope_depth_ = 0;
1311#if defined(DEBUG)
1312 int32_t no_safepoint_scope_depth_;
1313#endif
1314 VMHandles reusable_handles_;
1315 int32_t stack_overflow_count_;
1316 uint32_t runtime_call_count_ = 0;
1317
1318 // Deoptimization of stack frames.
1319 RuntimeCallDeoptAbility runtime_call_deopt_ability_ =
1320 RuntimeCallDeoptAbility::kCanLazyDeopt;
1321 PendingDeopts pending_deopts_;
1322
1323 // Compiler state:
1324 CompilerState* compiler_state_ = nullptr;
1325 HierarchyInfo* hierarchy_info_;
1326 TypeUsageInfo* type_usage_info_;
1327 NoActiveIsolateScope* no_active_isolate_scope_ = nullptr;
1328
1329 CompilerTimings* compiler_timings_ = nullptr;
1330
1331 ErrorPtr sticky_error_;
1332
1333 ObjectPtr* field_table_values() const { return field_table_values_; }
1334 ObjectPtr* shared_field_table_values() const {
1335 return shared_field_table_values_;
1336 }
1337
1338// Reusable handles support.
1339#define REUSABLE_HANDLE_FIELDS(object) object* object##_handle_;
1340 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_FIELDS)
1341#undef REUSABLE_HANDLE_FIELDS
1342
1343#if defined(DEBUG)
1344#define REUSABLE_HANDLE_SCOPE_VARIABLE(object) \
1345 bool reusable_##object##_handle_scope_active_;
1346 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_VARIABLE);
1347#undef REUSABLE_HANDLE_SCOPE_VARIABLE
1348#endif // defined(DEBUG)
1349
1350 class AtSafepointField : public BitField<uword, bool, 0, 1> {};
1351 class SafepointRequestedField
1352 : public BitField<uword, bool, AtSafepointField::kNextBit, 1> {};
1353
1354 class AtDeoptSafepointField
1355 : public BitField<uword, bool, SafepointRequestedField::kNextBit, 1> {};
1356 class DeoptSafepointRequestedField
1357 : public BitField<uword, bool, AtDeoptSafepointField::kNextBit, 1> {};
1358
1359 class AtReloadSafepointField
1360 : public BitField<uword,
1361 bool,
1362 DeoptSafepointRequestedField::kNextBit,
1363 1> {};
1364 class ReloadSafepointRequestedField
1365 : public BitField<uword, bool, AtReloadSafepointField::kNextBit, 1> {};
1366
1367 class BlockedForSafepointField
1368 : public BitField<uword,
1369 bool,
1370 ReloadSafepointRequestedField::kNextBit,
1371 1> {};
1372 class BypassSafepointsField
1373 : public BitField<uword, bool, BlockedForSafepointField::kNextBit, 1> {};
1374 class UnwindErrorInProgressField
1375 : public BitField<uword, bool, BypassSafepointsField::kNextBit, 1> {};
1376
1377 static uword AtSafepointBits(SafepointLevel level) {
1378 switch (level) {
1379 case SafepointLevel::kGC:
1380 return AtSafepointField::mask_in_place();
1381 case SafepointLevel::kGCAndDeopt:
1382 return AtSafepointField::mask_in_place() |
1383 AtDeoptSafepointField::mask_in_place();
1384 case SafepointLevel::kGCAndDeoptAndReload:
1385 return AtSafepointField::mask_in_place() |
1386 AtDeoptSafepointField::mask_in_place() |
1387 AtReloadSafepointField::mask_in_place();
1388 default:
1389 UNREACHABLE();
1390 }
1391 }
1392
1393#if defined(USING_SAFE_STACK)
1394 uword saved_safestack_limit_;
1395#endif
1396
1397 Thread* next_; // Used to chain the thread structures in an isolate.
1398 Isolate* scheduled_dart_mutator_isolate_ = nullptr;
1399
1400 bool is_unwind_in_progress_ = false;
1401
1402#if defined(DEBUG)
1403 bool inside_compiler_ = false;
1404#endif
1405
1406#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1407 HeapProfileSampler heap_sampler_;
1408#endif
1409
1410 explicit Thread(bool is_vm_isolate);
1411
1412 void StoreBufferRelease(
1413 StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
1414 void StoreBufferAcquire();
1415
1416 void OldMarkingStackRelease();
1417 void OldMarkingStackAcquire();
1418 void NewMarkingStackRelease();
1419 void NewMarkingStackAcquire();
1420 void DeferredMarkingStackRelease();
1421 void DeferredMarkingStackAcquire();
1422
1423 void AcquireMarkingStacks();
1424 void ReleaseMarkingStacks();
1425 void FlushMarkingStacks();
1426
1427 void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
1428 void EnterSafepointUsingLock();
1429 void ExitSafepointUsingLock();
1430
1431 void SetupState(TaskKind kind);
1432 void ResetState();
1433
1434 void SetupMutatorState(TaskKind kind);
1435 void ResetMutatorState();
1436
1437 void SetupDartMutatorState(Isolate* isolate);
1438 void SetupDartMutatorStateDependingOnSnapshot(IsolateGroup* group);
1439 void ResetDartMutatorState(Isolate* isolate);
1440
1441 static void SuspendDartMutatorThreadInternal(Thread* thread,
1442 VMTag::VMTagId tag);
1443 static void ResumeDartMutatorThreadInternal(Thread* thread);
1444
1445 static void SuspendThreadInternal(Thread* thread, VMTag::VMTagId tag);
1446 static void ResumeThreadInternal(Thread* thread);
1447
1448 // Adds a new active mutator thread to thread registry while associating it
1449 // with the given isolate (group).
1450 //
1451 // All existing safepoint operations are waited for before adding the thread
1452 // to the thread registry.
1453 //
1454 // => Anyone who iterates the active threads will first have to get us to
1455 // safepoint (but can access `Thread::isolate()`).
1456 static Thread* AddActiveThread(IsolateGroup* group,
1457 Isolate* isolate,
1458 bool is_dart_mutator,
1459 bool bypass_safepoint);
1460
1461 // Releases an active mutator thread from the thread registry.
1462 //
1463 // Thread needs to be at-safepoint.
1464 static void FreeActiveThread(Thread* thread, bool bypass_safepoint);
1465
1466 static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }
1467
1468#define REUSABLE_FRIEND_DECLARATION(name) \
1469 friend class Reusable##name##HandleScope;
1471#undef REUSABLE_FRIEND_DECLARATION
1472
1473 friend class ApiZone;
1475 friend class InterruptChecker;
1476 friend class Isolate;
1477 friend class IsolateGroup;
1479 friend class NoReloadScope;
1481 friend class Simulator;
1482 friend class StackZone;
1484 friend class ThreadRegistry;
1485 friend class CompilerState;
1487 friend class FieldTable;
1489 friend class Dart; // Calls SetupCachedEntryPoints after snapshot reading
1490 friend class
1491 TransitionGeneratedToVM; // IsSafepointRequested/BlockForSafepoint
1492 friend class
1493 TransitionVMToGenerated; // IsSafepointRequested/BlockForSafepoint
1494 friend class MonitorLocker; // ExitSafepointUsingLock
1495 friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup*,
1496 const char*,
1497 char**);
1498 DISALLOW_COPY_AND_ASSIGN(Thread);
1499};
1500
1501class RuntimeCallDeoptScope : public StackResource {
1502 public:
1503 RuntimeCallDeoptScope(Thread* thread, RuntimeCallDeoptAbility kind)
1504 : StackResource(thread) {
1505 // We cannot have nested calls into the VM without deopt support.
1506 ASSERT(thread->runtime_call_deopt_ability_ ==
1507 RuntimeCallDeoptAbility::kCanLazyDeopt);
1508 thread->runtime_call_deopt_ability_ = kind;
1509 }
1510 virtual ~RuntimeCallDeoptScope() {
1511 thread()->runtime_call_deopt_ability_ =
1512 RuntimeCallDeoptAbility::kCanLazyDeopt;
1513 }
1514
1515 private:
1516 Thread* thread() {
1517 return reinterpret_cast<Thread*>(StackResource::thread());
1518 }
1519};
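// Editor's sketch: a runtime entry that performs a leaf call after which the
// VM must not lazily deoptimize the caller frame could guard itself with:
//
//   RuntimeCallDeoptScope scope(Thread::Current(),
//                               RuntimeCallDeoptAbility::kCannotLazyDeopt);
//
// While the scope is active, current_safepoint_level() drops to
// SafepointLevel::kGC, so only GC safepoints are honored.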
1520
1521#if defined(DART_HOST_OS_WINDOWS)
1522// Clears the state of the current thread and frees the allocation.
1523void WindowsThreadCleanUp();
1524#endif
1525
1526#if !defined(PRODUCT)
1527// Disable thread interrupts.
1528class DisableThreadInterruptsScope : public StackResource {
1529 public:
1530 explicit DisableThreadInterruptsScope(Thread* thread);
1531 ~DisableThreadInterruptsScope();
1532};
1533#else
1534class DisableThreadInterruptsScope : public StackResource {
1535 public:
1536 explicit DisableThreadInterruptsScope(Thread* thread)
1537 : StackResource(thread) {}
1538 ~DisableThreadInterruptsScope() {}
1539};
1540#endif // !defined(PRODUCT)
1541
1542// Within a NoSafepointScope, the thread must not reach any safepoint. Used
1543// around code that manipulates raw object pointers directly without handles.
1544#if defined(DEBUG)
1545class NoSafepointScope : public ThreadStackResource {
1546 public:
1547 explicit NoSafepointScope(Thread* thread = nullptr)
1548 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1549 this->thread()->IncrementNoSafepointScopeDepth();
1550 }
1551 ~NoSafepointScope() { thread()->DecrementNoSafepointScopeDepth(); }
1552
1553 private:
1554 DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
1555};
1556#else // defined(DEBUG)
1557class NoSafepointScope : public ValueObject {
1558 public:
1559 explicit NoSafepointScope(Thread* thread = nullptr) {}
1560
1561 private:
1562 DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
1563};
1564#endif // defined(DEBUG)
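// Editor's sketch of typical NoSafepointScope usage:
//
//   {
//     NoSafepointScope no_safepoint;        // DEBUG: asserts no safepoint
//     ObjectPtr raw = some_handle.ptr();    // raw pointer, no handle
//     // ... use |raw| without allocating or otherwise safepointing ...
//   }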
1565
1566// Disables initiating a reload operation as well as participating in another
1567// thread's reload operation.
1568//
1569// Reload triggered by a mutator thread happens by sending all other mutator
1570// threads (that are running) OOB messages to check into a safepoint. The thread
1571// initiating the reload operation will block until all mutators are at a reload
1572// safepoint.
1573//
1574// When running under this scope, the processing of those OOB messages will
1575// ignore reload safepoint checkin requests. Yet we'll have to ensure that the
1576// dropped message is still acted upon.
1577//
1578// => To solve this we make the [~NoReloadScope] destructor resend a new reload
1579// OOB request to itself (the [~NoReloadScope] destructor is not necessarily at
1580// a well-defined place where reload can happen - those places will explicitly
1581// opt-in via [ReloadParticipationScope]).
1582//
1583class NoReloadScope : public ThreadStackResource {
1584 public:
1585 explicit NoReloadScope(Thread* thread);
1586 ~NoReloadScope();
1587
1588 private:
1589 DISALLOW_COPY_AND_ASSIGN(NoReloadScope);
1590};
1591
1592// Allows triggering reload safepoint operations as well as participating in
1593// reload operations (at safepoint checks).
1594//
1595// By default, safepoint check-ins will not participate in reload operations, as
1596// reload has to happen at very well-defined places. This scope is intended
1597// for those places where we explicitly want to allow safepoint checkins to
1598// participate in reload operations (triggered by other threads).
1599//
1600// If there is any [NoReloadScope] active we will still disable the safepoint
1601// checkins to participate in reload.
1602//
1603// We also require the thread initiating a reload operation to explicitly
1604// opt-in via this scope.
1605class RawReloadParticipationScope {
1606 public:
1607 explicit RawReloadParticipationScope(Thread* thread) : thread_(thread) {
1608#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1609 if (thread->allow_reload_scope_depth_ == 0) {
1610 ASSERT(thread->current_safepoint_level() == SafepointLevel::kGCAndDeopt);
1611 }
1612 thread->allow_reload_scope_depth_++;
1613 ASSERT(thread->allow_reload_scope_depth_ >= 0);
1614#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1615 }
1616
1617 ~RawReloadParticipationScope() {
1618#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1619 thread_->allow_reload_scope_depth_ -= 1;
1620 ASSERT(thread_->allow_reload_scope_depth_ >= 0);
1621 if (thread_->allow_reload_scope_depth_ == 0) {
1622 ASSERT(thread_->current_safepoint_level() == SafepointLevel::kGCAndDeopt);
1623 }
1624#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1625 }
1626
1627 private:
1628 Thread* thread_;
1629
1630 DISALLOW_COPY_AND_ASSIGN(RawReloadParticipationScope);
1631};
1632
1635
1636class StoppedMutatorsScope : public ThreadStackResource {
1637 public:
1638 explicit StoppedMutatorsScope(Thread* thread) : ThreadStackResource(thread) {
1639#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1640 thread->stopped_mutators_scope_depth_++;
1641 ASSERT(thread->stopped_mutators_scope_depth_ >= 0);
1642#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1643 }
1644
1645 ~StoppedMutatorsScope() {
1646#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1647 thread()->stopped_mutators_scope_depth_ -= 1;
1648 ASSERT(thread()->stopped_mutators_scope_depth_ >= 0);
1649#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1650 }
1651
1652 private:
1653 DISALLOW_COPY_AND_ASSIGN(StoppedMutatorsScope);
1654};
1655
1656// Within an EnterCompilerScope, the thread must operate on cloned fields.
1657#if defined(DEBUG)
1658class EnterCompilerScope : public ThreadStackResource {
1659 public:
1660 explicit EnterCompilerScope(Thread* thread = nullptr)
1661 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1662 previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
1663 if (!previously_is_inside_compiler_) {
1664 this->thread()->EnterCompiler();
1665 }
1666 }
1667 ~EnterCompilerScope() {
1668 if (!previously_is_inside_compiler_) {
1669 thread()->LeaveCompiler();
1670 }
1671 }
1672
1673 private:
1674 bool previously_is_inside_compiler_;
1675 DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
1676};
1677#else // defined(DEBUG)
1678class EnterCompilerScope : public ValueObject {
1679 public:
1680 explicit EnterCompilerScope(Thread* thread = nullptr) {}
1681
1682 private:
1683 DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
1684};
1685#endif // defined(DEBUG)
1686
1687// Within a LeaveCompilerScope, the thread must operate on cloned fields.
1688#if defined(DEBUG)
1689class LeaveCompilerScope : public ThreadStackResource {
1690 public:
1691 explicit LeaveCompilerScope(Thread* thread = nullptr)
1692 : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
1693 previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
1694 if (previously_is_inside_compiler_) {
1695 this->thread()->LeaveCompiler();
1696 }
1697 }
1698 ~LeaveCompilerScope() {
1699 if (previously_is_inside_compiler_) {
1700 thread()->EnterCompiler();
1701 }
1702 }
1703
1704 private:
1705 bool previously_is_inside_compiler_;
1706 DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
1707};
1708#else // defined(DEBUG)
1709class LeaveCompilerScope : public ValueObject {
1710 public:
1711 explicit LeaveCompilerScope(Thread* thread = nullptr) {}
1712
1713 private:
1714 DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
1715};
1716#endif // defined(DEBUG)
1717
1718} // namespace dart
1719
1720#endif // RUNTIME_VM_THREAD_H_
#define UNREACHABLE()
Definition: assert.h:248
DisableThreadInterruptsScope(Thread *thread)
Definition: thread.cc:1481
EnterCompilerScope(Thread *thread=nullptr)
Definition: thread.h:1680
LeaveCompilerScope(Thread *thread=nullptr)
Definition: thread.h:1711
NoReloadScope(Thread *thread)
Definition: thread.cc:1499
NoSafepointScope(Thread *thread=nullptr)
Definition: thread.h:1559
static const uword kInvalidStackLimit
Definition: os_thread.h:88
static void SetCurrentTLS(BaseThread *value)
Definition: os_thread.cc:318
static ThreadState * CurrentVMThread()
Definition: os_thread.h:188
RawReloadParticipationScope(Thread *thread)
Definition: thread.h:1607
T load(std::memory_order order=std::memory_order_relaxed) const
Definition: atomic.h:21
RuntimeCallDeoptScope(Thread *thread, RuntimeCallDeoptAbility kind)
Definition: thread.h:1503
virtual ~RuntimeCallDeoptScope()
Definition: thread.h:1510
ThreadState * thread() const
Definition: allocation.h:33
StoppedMutatorsScope(Thread *thread)
Definition: thread.h:1638
void UnwindScopes(uword stack_marker)
Definition: thread.cc:1328
bool IsBlockedForSafepoint() const
Definition: thread.h:999
bool HasCompilerState() const
Definition: thread.h:586
HierarchyInfo * hierarchy_info() const
Definition: thread.h:593
void set_execution_state(ExecutionState state)
Definition: thread.h:1048
void set_compiler_timings(CompilerTimings *stats)
Definition: thread.h:619
void ScheduleInterrupts(uword interrupt_bits)
Definition: thread.cc:710
bool CanAcquireSafepointLocks() const
Definition: thread.cc:1372
static uword full_safepoint_state_unacquired()
Definition: thread.h:1060
void set_type_usage_info(TypeUsageInfo *value)
Definition: thread.h:610
void set_hierarchy_info(HierarchyInfo *value)
Definition: thread.h:598
static intptr_t execution_state_offset()
Definition: thread.h:1051
void StartUnwindError()
Definition: thread.h:645
void set_vm_tag(uword tag)
Definition: thread.h:822
static intptr_t stack_limit_offset()
Definition: thread.h:402
void set_unboxed_simd128_runtime_arg(simd128_value_t value)
Definition: thread.h:846
friend Isolate * CreateWithinExistingIsolateGroup(IsolateGroup *, const char *, char **)
const uword * dispatch_table_array() const
Definition: thread.h:797
void MarkingStackAddObject(ObjectPtr obj)
Definition: thread.cc:847
uword resume_pc() const
Definition: thread.h:873
static intptr_t write_barrier_mask_offset()
Definition: thread.h:437
@ kOsrRequest
Definition: thread.h:425
bool IsSafepointRequested(SafepointLevel level) const
Definition: thread.h:940
uword safepoint_state()
Definition: thread.h:1031
NO_SANITIZE_THREAD ExecutionState execution_state_cross_thread_for_testing() const
Definition: thread.h:1045
ApiLocalScope * api_top_scope() const
Definition: thread.h:513
void set_active_stacktrace(const Object &value)
Definition: thread.cc:228
uword vm_tag() const
Definition: thread.h:821
void DecrementNoSafepointScopeDepth()
Definition: thread.h:733
void RememberLiveTemporaries()
Definition: thread.cc:1149
bool OwnsSafepoint() const
Definition: thread.cc:1367
bool force_growth() const
Definition: thread.h:633
@ kMessageInterrupt
Definition: thread.h:489
@ kVMInterrupt
Definition: thread.h:488
@ kInterruptsMask
Definition: thread.h:491
uword GetAndClearInterrupts()
Definition: thread.cc:724
void DeferredMarkingStackAddObject(ObjectPtr obj)
Definition: thread.cc:871
void set_top_exit_frame_info(uword top_exit_frame_info)
Definition: thread.h:692
static bool IsBlockedForSafepoint(uword state)
Definition: thread.h:996
ObjectPoolPtr global_object_pool() const
Definition: thread.h:792
bool HasScheduledInterrupts() const
Definition: thread.h:497
int32_t no_callback_scope_depth() const
Definition: thread.h:623
void PrintJSON(JSONStream *stream) const
uword saved_shadow_call_stack() const
Definition: thread.h:418
static intptr_t OffsetFromThread(const Object &object)
Definition: thread.cc:1178
static intptr_t safepoint_state_offset()
Definition: thread.h:459
static intptr_t write_barrier_wrappers_thread_offset(Register reg)
Definition: thread.h:753
static intptr_t vm_tag_offset()
Definition: thread.h:823
@ kCompilerTask
Definition: thread.h:348
@ kScavengerTask
Definition: thread.h:352
@ kMutatorTask
Definition: thread.h:347
@ kMarkerTask
Definition: thread.h:349
@ kSampleBlockTask
Definition: thread.h:353
@ kIncrementalCompactorTask
Definition: thread.h:354
@ kSweeperTask
Definition: thread.h:350
@ kUnknownTask
Definition: thread.h:346
@ kCompactorTask
Definition: thread.h:351
static Thread * Current()
Definition: thread.h:362
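Thread::Current() is the entry point for most VM-internal code: it returns the Thread registered in thread-local storage for the calling OS thread, or nullptr if no thread is entered. The usual prologue looks like this (sketch; assumes the caller has already entered an isolate or isolate group):

// Sketch: typical prologue of a VM runtime function.
Thread* thread = Thread::Current();
ASSERT(thread != nullptr);
Isolate* isolate = thread->isolate();           // nullptr for helper threads.
IsolateGroup* group = thread->isolate_group();  // owns the shared heap.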
PendingDeopts & pending_deopts()
Definition: thread.h:1144
bool OwnsGCSafepoint() const
Definition: thread.cc:1352
void set_unboxed_int64_runtime_arg(int64_t value)
Definition: thread.h:828
static bool IsAtSafepoint(SafepointLevel level, uword state)
Definition: thread.h:911
bool IsValidLocalHandle(Dart_Handle object) const
Definition: thread.cc:1270
bool IsValidHandle(Dart_Handle object) const
Definition: thread.cc:1265
static intptr_t store_buffer_block_offset()
Definition: thread.h:672
int64_t GetNextTaskId()
Definition: thread.h:1129
void AssertEmptyThreadInvariants()
Definition: thread.cc:318
bool IsSafepointRequested() const
Definition: thread.h:937
ObjectPtr active_stacktrace() const
Definition: thread.h:867
static intptr_t saved_stack_limit_offset()
Definition: thread.h:407
void set_resume_pc(uword value)
Definition: thread.h:874
int32_t no_safepoint_scope_depth() const
Definition: thread.h:718
static intptr_t top_offset()
Definition: thread.h:715
static bool ObjectAtOffset(intptr_t offset, Object *object)
Definition: thread.cc:1205
void AssertNonMutatorInvariants()
Definition: thread.cc:270
DART_WARN_UNUSED_RESULT ErrorPtr StealStickyError()
Definition: thread.cc:245
static intptr_t service_extension_stream_offset()
Definition: thread.h:576
uword end() const
Definition: thread.h:710
void SetUnwindErrorInProgress(bool value)
Definition: thread.h:1016
friend class CompilerState
Definition: thread.h:1485
bool is_marking() const
Definition: thread.h:676
bool TryEnterSafepoint()
Definition: thread.h:1069
ErrorPtr HandleInterrupts()
Definition: thread.cc:739
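ScheduleInterrupts, GetAndClearInterrupts, and HandleInterrupts form a small cross-thread protocol: the requester sets interrupt bits (and poisons the stack limit so the mutator trips its next stack-overflow check), and the mutator services the bits when it gets there. A sketch of the requesting side, using the kVMInterrupt bit listed above:

// Sketch: ask a mutator thread to service a VM-level interrupt.
void RequestVMInterrupt(Thread* mutator) {
  mutator->ScheduleInterrupts(Thread::kVMInterrupt);
  // The mutator later fails its stack-overflow check, calls
  // HandleInterrupts(), and propagates any resulting ErrorPtr.
}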
void SetStackLimit(uword value)
Definition: thread.cc:690
@ kExitThroughRuntimeCall
Definition: thread.h:472
@ kDidNotExit
Definition: thread.h:466
@ kExitThroughFfi
Definition: thread.h:469
void VisitObjectPointers(ObjectPointerVisitor *visitor, ValidationPolicy validate_frames)
Definition: thread.cc:968
TypeUsageInfo * type_usage_info() const
Definition: thread.h:605
void IncrementNoCallbackScopeDepth()
Definition: thread.h:624
static intptr_t random_offset()
Definition: thread.h:1134
static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs)
Definition: thread.h:453
static intptr_t isolate_offset()
Definition: thread.h:535
static intptr_t new_marking_stack_block_offset()
Definition: thread.h:687
Monitor * thread_lock() const
Definition: thread.h:502
static intptr_t active_exception_offset()
Definition: thread.h:863
void set_end(uword end)
Definition: thread.h:713
static void ExitIsolateGroupAsNonMutator()
Definition: thread.cc:526
ApiLocalScope * api_reusable_scope() const
Definition: thread.h:505
Heap * heap() const
Definition: thread.cc:943
static intptr_t WriteBarrierWrappersOffsetForRegister(Register reg)
Definition: thread.h:765
static const char * TaskKindToCString(TaskKind kind)
Definition: thread.cc:252
void set_api_reusable_scope(ApiLocalScope *value)
Definition: thread.h:506
CompilerState & compiler_state()
Definition: thread.h:588
static intptr_t top_exit_frame_info_offset()
Definition: thread.h:695
int ZoneSizeInBytes() const
Definition: thread.cc:1291
static bool IsSafepointLevelRequested(uword state, SafepointLevel level)
Definition: thread.h:955
void ReleaseStoreBuffer()
Definition: thread.cc:677
double unboxed_double_runtime_arg() const
Definition: thread.h:837
intptr_t CountLocalHandles() const
Definition: thread.cc:1281
void AssertNonDartMutatorInvariants()
Definition: thread.cc:279
void InitVMConstants()
Definition: thread.cc:183
void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy)
Definition: thread.cc:791
void DeferredMarkingStackBlockProcess()
Definition: thread.cc:842
void AcquireMarkingStack()
bool HasExitedDartCode() const
Definition: thread.cc:951
Thread * next() const
Definition: thread.h:1113
bool HasActiveState()
Definition: thread.cc:354
uword GetAndClearStackOverflowFlags()
Definition: thread.cc:785
void ExitSafepoint()
Definition: thread.h:1094
void OldMarkingStackBlockProcess()
Definition: thread.cc:832
void set_sticky_error(const Error &value)
Definition: thread.cc:236
void StoreBufferAddObject(ObjectPtr obj)
Definition: thread.cc:796
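StoreBufferAddObject is the out-of-line half of the generational write barrier: when a store creates an old-to-new pointer, the source object is recorded in the thread-local store buffer block so the next scavenge treats it as a root. Schematically (the fast-path filter below is illustrative, not the exact bit test compiled code uses):

// Schematic barrier sketch: record old->new stores for the scavenger.
void RecordStore(Thread* thread, ObjectPtr source, ObjectPtr new_value) {
  if (source->IsOldObject() && new_value->IsNewObject()) {
    thread->StoreBufferAddObject(source);  // declared slow path above
  }
}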
simd128_value_t unboxed_simd128_runtime_arg() const
Definition: thread.h:843
void ClearStackLimit()
Definition: thread.cc:701
void CheckForSafepoint()
Definition: thread.h:1104
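CheckForSafepoint is the cooperative half of the safepoint protocol: long-running VM code polls it so that GC, deoptimization, or reload requests are serviced promptly instead of waiting for the loop to finish. A minimal sketch:

// Sketch: poll for safepoint requests inside a long-running VM loop.
void ProcessItems(Thread* thread, intptr_t count) {
  for (intptr_t i = 0; i < count; i++) {
    // ... one unit of work ...
    thread->CheckForSafepoint();  // parks the thread here if requested
  }
}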
void set_global_object_pool(ObjectPoolPtr raw_value)
Definition: thread.h:793
uword top() const
Definition: thread.h:709
bool IsExecutingDartCode() const
Definition: thread.cc:947
bool IsSafepointRequestedLocked(SafepointLevel level) const
Definition: thread.h:933
void NewMarkingStackAddObject(ObjectPtr obj)
Definition: thread.cc:863
void ClearStickyError()
Definition: thread.cc:241
static intptr_t end_offset()
Definition: thread.h:716
void DecrementForceGrowthScopeDepth()
Definition: thread.h:638
void EnterApiScope()
Definition: thread.cc:1301
static uword saved_shadow_call_stack_offset()
Definition: thread.h:419
void StoreBufferReleaseGC()
Definition: thread.cc:821
static intptr_t global_object_pool_offset()
Definition: thread.h:853
void StoreBufferAcquireGC()
Definition: thread.cc:828
friend class TransitionVMToGenerated
Definition: thread.h:1493
static intptr_t field_table_values_offset()
Definition: thread.h:543
bool UnwindErrorInProgress() const
Definition: thread.h:1013
static intptr_t shared_field_table_values_offset()
Definition: thread.h:547
void ExitApiScope()
Definition: thread.cc:1314
static bool CanLoadFromThread(const Object &object)
Definition: thread.cc:1154
void ReleaseMarkingStack()
SafepointLevel current_safepoint_level() const
Definition: thread.h:1146
int64_t unboxed_int64_runtime_arg() const
Definition: thread.h:825
static void ExitIsolateGroupAsHelper(bool bypass_safepoint)
Definition: thread.cc:499
bool is_unwind_in_progress() const
Definition: thread.h:643
bool IsSafepointLevelRequestedLocked(SafepointLevel level) const
Definition: thread.h:948
static intptr_t isolate_group_offset()
Definition: thread.h:536
uword top_exit_frame_info() const
Definition: thread.h:691
bool OwnsReloadSafepoint() const
Definition: thread.cc:1362
uword stack_limit_address() const
Definition: thread.h:399
bool OwnsDeoptSafepoint() const
Definition: thread.cc:1357
int64_t unboxed_int64_runtime_second_arg() const
Definition: thread.h:831
static intptr_t next_task_id_offset()
Definition: thread.h:1130
void ClearReusableHandles()
Definition: thread.cc:962
void OldMarkingStackAddObject(ObjectPtr obj)
Definition: thread.cc:855
bool IsDartMutatorThread() const
Definition: thread.h:551
static intptr_t exit_through_ffi_offset()
Definition: thread.h:475
void IncrementForceGrowthScopeDepth()
Definition: thread.h:634
static bool EnterIsolateGroupAsNonMutator(IsolateGroup *isolate_group, TaskKind kind)
Definition: thread.cc:511
friend class Isolate
Definition: thread.h:1476
void EnterSafepoint()
Definition: thread.h:1076
void SetAtSafepoint(bool value, SafepointLevel level)
Definition: thread.h:924
Random * random()
Definition: thread.h:1133
ExecutionState execution_state() const
Definition: thread.h:1040
Isolate * isolate() const
Definition: thread.h:534
int32_t IncrementAndGetStackOverflowCount()
Definition: thread.h:447
static uword resume_pc_offset()
Definition: thread.h:875
bool BypassSafepoints() const
Definition: thread.h:1007
uword saved_stack_limit() const
Definition: thread.h:410
friend class TransitionGeneratedToVM
Definition: thread.h:1491
void set_true_end(uword true_end)
Definition: thread.h:714
CompilerTimings * compiler_timings() const
Definition: thread.h:617
uint32_t IncrementAndGetRuntimeCallCount()
Definition: thread.h:451
uword heap_base() const
Definition: thread.h:429
TaskKind task_kind() const
Definition: thread.h:479
bool IsAtSafepoint() const
Definition: thread.h:917
bool IsInNoReloadScope() const
Definition: thread.h:740
static uword full_safepoint_state_acquired()
Definition: thread.h:1064
@ kThreadInNative
Definition: thread.h:1036
@ kThreadInBlockedState
Definition: thread.h:1037
@ kThreadInGenerated
Definition: thread.h:1035
void set_unboxed_double_runtime_arg(double value)
Definition: thread.h:840
IsolateGroup * isolate_group() const
Definition: thread.h:541
ObjectPtr active_exception() const
Definition: thread.h:861
static intptr_t dart_stream_offset()
Definition: thread.h:571
void StoreBufferAddObjectGC(ObjectPtr obj)
Definition: thread.cc:804
static intptr_t api_top_scope_offset()
Definition: thread.h:515
void SetBlockedForSafepoint(bool value)
Definition: thread.h:1002
void DecrementNoCallbackScopeDepth()
Definition: thread.h:628
void set_api_top_scope(ApiLocalScope *value)
Definition: thread.h:514
void NewMarkingStackBlockProcess()
Definition: thread.cc:837
bool TryExitSafepoint()
Definition: thread.h:1087
void AssertEmptyStackInvariants()
Definition: thread.cc:287
static intptr_t double_truncate_round_supported_offset()
Definition: thread.h:522
uword write_barrier_mask() const
Definition: thread.h:428
static intptr_t stack_overflow_flags_offset()
Definition: thread.h:443
bool IsAtSafepoint(SafepointLevel level) const
Definition: thread.h:921
friend class IsolateGroup
Definition: thread.h:1477
static uword SetBypassSafepoints(bool value, uword state)
Definition: thread.h:1010
static void EnterIsolate(Isolate *isolate)
Definition: thread.cc:371
static intptr_t active_stacktrace_offset()
Definition: thread.h:869
bool IsInStoppedMutatorsScope() const
Definition: thread.h:742
void IncrementNoSafepointScopeDepth()
Definition: thread.h:726
static intptr_t tsan_utils_offset()
Definition: thread.h:526
void BlockForSafepoint()
Definition: thread.cc:1348
void set_top(uword top)
Definition: thread.h:712
static void ExitIsolate(bool isolate_shutdown=false)
Definition: thread.cc:428
virtual bool MayAllocateHandles()
Definition: thread.h:1055
ErrorPtr sticky_error() const
Definition: thread.cc:232
void set_dispatch_table_array(const uword *array)
Definition: thread.h:798
void set_active_exception(const Object &value)
Definition: thread.cc:224
HeapProfileSampler & heap_sampler()
Definition: thread.h:1141
uword SetSafepointRequested(SafepointLevel level, bool value)
Definition: thread.h:970
uword true_end() const
Definition: thread.h:711
void set_unboxed_int64_runtime_second_arg(int64_t value)
Definition: thread.h:834
static bool EnterIsolateGroupAsHelper(IsolateGroup *isolate_group, TaskKind kind, bool bypass_safepoint)
Definition: thread.cc:481
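EnterIsolateGroupAsHelper and ExitIsolateGroupAsHelper bracket work done by non-mutator threads (markers, sweepers, compiler tasks) that still need a Thread registered with the group. A sketch of a sweeper task, with the body elided:

// Sketch: a background helper attaching to an isolate group.
void RunSweeperTask(IsolateGroup* group) {
  const bool kBypassSafepoint = false;
  if (Thread::EnterIsolateGroupAsHelper(group, Thread::kSweeperTask,
                                        kBypassSafepoint)) {
    // ... sweep using Thread::Current() ...
    Thread::ExitIsolateGroupAsHelper(kBypassSafepoint);
  }
}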
static intptr_t unboxed_runtime_arg_offset()
Definition: thread.h:849
Isolate * scheduled_dart_mutator_isolate() const
Definition: thread.h:562
friend class NoActiveIsolateScope
Definition: thread.h:1478
static intptr_t old_marking_stack_block_offset()
Definition: thread.h:684
static intptr_t dispatch_table_array_offset()
Definition: thread.h:857
void DeferredMarkLiveTemporaries()
Definition: thread.cc:1144
struct _Dart_Handle * Dart_Handle
Definition: dart_api.h:258
#define DART_WARN_UNUSED_RESULT
Definition: dart_api.h:66
#define ASSERT(E)
StoreBuffer::Block StoreBufferBlock
SafepointLevel
Definition: thread.h:289
@ kGC
Definition: thread.h:291
@ kNumLevels
Definition: thread.h:297
@ kNoSafepoint
Definition: thread.h:300
@ kGCAndDeoptAndReload
Definition: thread.h:295
@ kGCAndDeopt
Definition: thread.h:293
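SafepointLevel orders safepoints by strength: kGC parks threads only for GC, kGCAndDeopt additionally permits lazy deoptimization, and kGCAndDeoptAndReload also permits hot reload; kNoSafepoint marks contexts that may not stop at all. A sketch of a level-sensitive check built from the predicates listed above:

// Sketch: yield only when a reload-capable safepoint is requested.
void MaybeYieldForReload(Thread* thread) {
  if (thread->IsSafepointRequested(SafepointLevel::kGCAndDeoptAndReload)) {
    thread->BlockForSafepoint();
  }
}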
MarkingStack::Block MarkingStackBlock
uintptr_t uword
Definition: globals.h:501
@ kNumberOfCpuRegisters
Definition: constants_arm.h:98
constexpr RegList kDartAvailableCpuRegs
const intptr_t kStoreBufferWrapperSize
ValidationPolicy
Definition: thread.h:271
RuntimeCallDeoptAbility
Definition: thread.h:276
constexpr int kNumberOfDartAvailableCpuRegs
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:581
#define RUNTIME_ENTRY_LIST(V)
#define LEAF_RUNTIME_ENTRY_LIST(V)
static intptr_t setjmp_buffer_offset()
Definition: thread.h:323
jmp_buf * setjmp_buffer
Definition: thread.h:315
uword exception_fp
Definition: thread.h:318
static intptr_t exception_fp_offset()
Definition: thread.h:332
static intptr_t exception_sp_offset()
Definition: thread.h:329
static intptr_t exception_pc_offset()
Definition: thread.h:326
static intptr_t setjmp_function_offset()
Definition: thread.h:320
void * setjmp_function
Definition: thread.h:313
uword exception_pc
Definition: thread.h:316
uword exception_sp
Definition: thread.h:317
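The setjmp_* and exception_* fields make up the TsanUtils block: under ThreadSanitizer the VM cannot unwind compiled Dart frames natively, so exception handling records the handler's pc/sp/fp and longjmps to it instead. The shape, in plain C++ (UnwindContext and Run are hypothetical stand-ins for the real machinery):

#include <setjmp.h>
#include <stdint.h>

// Hypothetical sketch mirroring the fields above.
struct UnwindContext {
  jmp_buf buffer;
  uintptr_t exception_pc, exception_sp, exception_fp;
};

void Run(UnwindContext* ctx) {
  if (setjmp(ctx->buffer) == 0) {
    // ... execute code that may throw; on a Dart exception the VM fills
    // exception_pc/sp/fp and calls longjmp(ctx->buffer, 1) ...
  } else {
    // Resumed here after longjmp; transfer control to the recorded handler.
  }
}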
double double_storage[2]
Definition: globals.h:151
int64_t int64_storage[2]
Definition: globals.h:149
#define CACHED_FUNCTION_ENTRY_POINTS_LIST(V)
Definition: thread.h:189
#define REUSABLE_HANDLE_LIST(V)
Definition: thread.h:78
#define REUSABLE_HANDLE_FIELDS(object)
Definition: thread.h:1339
#define CACHED_CONSTANTS_LIST(V)
Definition: thread.h:267
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value)
Definition: thread.h:807
#define REUSABLE_FRIEND_DECLARATION(name)
Definition: thread.h:1468
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value)
Definition: thread.h:1212
#define REUSABLE_HANDLE(object)
Definition: thread.h:906
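REUSABLE_HANDLE expands into an accessor that hands out a handle pre-allocated on the Thread, so hot paths reuse storage instead of allocating a fresh Zone handle. A sketch of the usage shape, assuming the matching REUSABLE_*_HANDLESCOPE guards from vm/reusable_handles.h:

// Sketch: borrow the thread's reusable Function handle.
// The scope asserts exclusive use in debug builds.
void LookupTarget(Thread* thread) {
  REUSABLE_FUNCTION_HANDLESCOPE(thread);
  Function& function = thread->FunctionHandle();
  // ... use |function| without creating a new handle ...
}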
#define NO_SANITIZE_THREAD
#define ALIGN8
Definition: globals.h:171
#define OFFSET_OF(type, field)
Definition: globals.h:138