thread.cc

// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/thread.h"

#include "vm/cpu.h"
#include "vm/dart_api_state.h"
#include "vm/growable_array.h"
#include "vm/heap/safepoint.h"
#include "vm/isolate.h"
#include "vm/json_stream.h"
#include "vm/lockers.h"
#include "vm/log.h"
#include "vm/message_handler.h"
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/os_thread.h"
#include "vm/profiler.h"
#include "vm/runtime_entry.h"
#include "vm/service.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/thread_interrupter.h"
#include "vm/thread_registry.h"
#include "vm/timeline.h"
#include "vm/zone.h"

namespace dart {

#if !defined(PRODUCT)
DECLARE_FLAG(bool, trace_service);
DECLARE_FLAG(bool, trace_service_verbose);
#endif  // !defined(PRODUCT)

Thread::~Thread() {
  // We should cleanly exit any isolate before destruction.
  ASSERT(isolate_ == nullptr);
  ASSERT(store_buffer_block_ == nullptr);
  ASSERT(old_marking_stack_block_ == nullptr);
  ASSERT(new_marking_stack_block_ == nullptr);
  ASSERT(deferred_marking_stack_block_ == nullptr);
  // There should be no top api scopes at this point.
  ASSERT(api_top_scope() == nullptr);
  // Delete the reusable api scope if there is one.
  if (api_reusable_scope_ != nullptr) {
    delete api_reusable_scope_;
    api_reusable_scope_ = nullptr;
  }

  DO_IF_TSAN(delete tsan_utils_);
}

#if defined(DEBUG)
#define REUSABLE_HANDLE_SCOPE_INIT(object)                                     \
  reusable_##object##_handle_scope_active_(false),
#else
#define REUSABLE_HANDLE_SCOPE_INIT(object)
#endif  // defined(DEBUG)

#define REUSABLE_HANDLE_INITIALIZERS(object) object##_handle_(nullptr),

Thread::Thread(bool is_vm_isolate)
    : ThreadState(false),
      write_barrier_mask_(UntaggedObject::kGenerationalBarrierMask),
      active_exception_(Object::null()),
      active_stacktrace_(Object::null()),
      global_object_pool_(ObjectPool::null()),
      resume_pc_(0),
      execution_state_(kThreadInNative),
      safepoint_state_(0),
      api_top_scope_(nullptr),
      double_truncate_round_supported_(
          TargetCPUFeatures::double_truncate_round_supported() ? 1 : 0),
      tsan_utils_(DO_IF_TSAN(new TsanUtils()) DO_IF_NOT_TSAN(nullptr)),
      task_kind_(kUnknownTask),
#if defined(SUPPORT_TIMELINE)
      dart_stream_(ASSERT_NOTNULL(Timeline::GetDartStream())),
#else
      dart_stream_(nullptr),
#endif
#if !defined(PRODUCT)
      service_extension_stream_(ASSERT_NOTNULL(&Service::extension_stream)),
#else
      service_extension_stream_(nullptr),
#endif
      thread_lock_(),
      api_reusable_scope_(nullptr),
      no_callback_scope_depth_(0),
#if defined(DEBUG)
      no_safepoint_scope_depth_(0),
#endif
      reusable_handles_(),
      stack_overflow_count_(0),
      hierarchy_info_(nullptr),
      type_usage_info_(nullptr),
      sticky_error_(Error::null()),
      REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_INITIALIZERS)
      REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_INIT)
#if defined(USING_SAFE_STACK)
      saved_safestack_limit_(0),
#endif
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
      next_(nullptr),
      heap_sampler_(this) {
#else
      next_(nullptr) {
#endif

#define DEFAULT_INIT(type_name, member_name, init_expr, default_init_value)   \
  member_name = default_init_value;
  CACHED_CONSTANTS_LIST(DEFAULT_INIT)
#undef DEFAULT_INIT

  for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
    write_barrier_wrappers_entry_points_[i] = 0;
  }

#define DEFAULT_INIT(name) name##_entry_point_ = 0;
  RUNTIME_ENTRY_LIST(DEFAULT_INIT)
#undef DEFAULT_INIT

#define DEFAULT_INIT(returntype, name, ...) name##_entry_point_ = 0;
  LEAF_RUNTIME_ENTRY_LIST(DEFAULT_INIT)
#undef DEFAULT_INIT

  // We cannot initialize the VM constants here for the vm isolate thread
  // due to bootstrapping issues.
  if (!is_vm_isolate) {
    InitVMConstants();
  }

#if defined(DART_HOST_OS_FUCHSIA)
  next_task_id_ = trace_generate_nonce();
#else
  next_task_id_ = Random::GlobalNextUInt64();
#endif

  memset(&unboxed_runtime_arg_, 0, sizeof(simd128_value_t));
}

static const double double_nan_constant = NAN;

static const struct ALIGN16 {
  uint64_t a;
  uint64_t b;
} double_negate_constant = {0x8000000000000000ULL, 0x8000000000000000ULL};

static const struct ALIGN16 {
  uint64_t a;
  uint64_t b;
} double_abs_constant = {0x7FFFFFFFFFFFFFFFULL, 0x7FFFFFFFFFFFFFFFULL};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_not_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};

static const struct ALIGN16 {
  uint32_t a;
  uint32_t b;
  uint32_t c;
  uint32_t d;
} float_zerow_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000};

void Thread::InitVMConstants() {
#if defined(DART_COMPRESSED_POINTERS)
  heap_base_ = Object::null()->heap_base();
#endif

#define ASSERT_VM_HEAP(type_name, member_name, init_expr, default_init_value) \
  ASSERT((init_expr)->IsOldObject());
  CACHED_VM_OBJECTS_LIST(ASSERT_VM_HEAP)
#undef ASSERT_VM_HEAP

#define INIT_VALUE(type_name, member_name, init_expr, default_init_value)     \
  ASSERT(member_name == default_init_value);                                  \
  member_name = (init_expr);
  CACHED_CONSTANTS_LIST(INIT_VALUE)
#undef INIT_VALUE

  for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
    write_barrier_wrappers_entry_points_[i] =
        StubCode::WriteBarrierWrappers().EntryPoint() +
        i * kStoreBufferWrapperSize;
  }

#define INIT_VALUE(name)                                                      \
  ASSERT(name##_entry_point_ == 0);                                           \
  name##_entry_point_ = k##name##RuntimeEntry.GetEntryPoint();
  RUNTIME_ENTRY_LIST(INIT_VALUE)
#undef INIT_VALUE

#define INIT_VALUE(returntype, name, ...)                                     \
  ASSERT(name##_entry_point_ == 0);                                           \
  name##_entry_point_ = k##name##RuntimeEntry.GetEntryPoint();
  LEAF_RUNTIME_ENTRY_LIST(INIT_VALUE)
#undef INIT_VALUE

  // Setup the thread specific reusable handles.
#define REUSABLE_HANDLE_ALLOCATION(object)                                     \
  this->object##_handle_ = this->AllocateReusableHandle<object>();
  REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_ALLOCATION)
#undef REUSABLE_HANDLE_ALLOCATION
}

void Thread::set_active_exception(const Object& value) {
  active_exception_ = value.ptr();
}

void Thread::set_active_stacktrace(const Object& value) {
  active_stacktrace_ = value.ptr();
}

ErrorPtr Thread::sticky_error() const {
  return sticky_error_;
}

void Thread::set_sticky_error(const Error& value) {
  ASSERT(!value.IsNull());
  sticky_error_ = value.ptr();
}

void Thread::ClearStickyError() {
  sticky_error_ = Error::null();
}

ErrorPtr Thread::StealStickyError() {
  NoSafepointScope no_safepoint;
  ErrorPtr return_value = sticky_error_;
  sticky_error_ = Error::null();
  return return_value;
}

const char* Thread::TaskKindToCString(TaskKind kind) {
  switch (kind) {
    case kUnknownTask:
      return "kUnknownTask";
    case kMutatorTask:
      return "kMutatorTask";
    case kCompilerTask:
      return "kCompilerTask";
    case kSweeperTask:
      return "kSweeperTask";
    case kMarkerTask:
      return "kMarkerTask";
    default:
      UNREACHABLE();
      return "";
  }
}

void Thread::AssertNonMutatorInvariants() {
  ASSERT(BypassSafepoints());
  ASSERT(store_buffer_block_ == nullptr);
  ASSERT(old_marking_stack_block_ == nullptr);
  ASSERT(new_marking_stack_block_ == nullptr);
  ASSERT(deferred_marking_stack_block_ == nullptr);
  AssertNonDartMutatorInvariants();
}

void Thread::AssertNonDartMutatorInvariants() {
  ASSERT(!IsDartMutatorThread());
  ASSERT(isolate() == nullptr);
  ASSERT(isolate_group() != nullptr);
  ASSERT(task_kind_ != kMutatorTask);
  DEBUG_ASSERT(!IsAnyReusableHandleScopeActive());
}

void Thread::AssertEmptyStackInvariants() {
  ASSERT(zone() == nullptr);
  ASSERT(top_handle_scope() == nullptr);
  ASSERT(long_jump_base() == nullptr);
  ASSERT(top_resource() == nullptr);
  ASSERT(top_exit_frame_info_ == 0);
  ASSERT(api_top_scope_ == nullptr);
  ASSERT(!pending_deopts_.HasPendingDeopts());
  ASSERT(compiler_state_ == nullptr);
  ASSERT(hierarchy_info_ == nullptr);
  ASSERT(type_usage_info_ == nullptr);
  ASSERT(no_active_isolate_scope_ == nullptr);
  ASSERT(compiler_timings_ == nullptr);
  ASSERT(!exit_through_ffi_);
  ASSERT(runtime_call_deopt_ability_ == RuntimeCallDeoptAbility::kCanLazyDeopt);
  ASSERT(no_callback_scope_depth_ == 0);
  ASSERT(force_growth_scope_depth_ == 0);
  ASSERT(no_reload_scope_depth_ == 0);
  ASSERT(stopped_mutators_scope_depth_ == 0);
  ASSERT(stack_overflow_flags_ == 0);
  DEBUG_ASSERT(!inside_compiler_);
  DEBUG_ASSERT(no_safepoint_scope_depth_ == 0);

  // Avoid running these asserts for `vm-isolate`.
  if (active_stacktrace_.untag() != 0) {
    ASSERT(sticky_error() == Error::null());
    ASSERT(active_exception_ == Object::null());
    ASSERT(active_stacktrace_ == Object::null());
  }
}

void Thread::AssertEmptyThreadInvariants() {
  AssertEmptyStackInvariants();

  ASSERT(top_ == 0);
  ASSERT(end_ == 0);
  ASSERT(true_end_ == 0);
  ASSERT(isolate_ == nullptr);
  ASSERT(isolate_group_ == nullptr);
  ASSERT(os_thread() == nullptr);
  ASSERT(vm_tag_ == VMTag::kInvalidTagId);
  ASSERT(task_kind_ == kUnknownTask);
  ASSERT(execution_state_ == Thread::kThreadInNative);
  ASSERT(scheduled_dart_mutator_isolate_ == nullptr);

  ASSERT(write_barrier_mask_ == UntaggedObject::kGenerationalBarrierMask);
  ASSERT(store_buffer_block_ == nullptr);
  ASSERT(old_marking_stack_block_ == nullptr);
  ASSERT(new_marking_stack_block_ == nullptr);
  ASSERT(deferred_marking_stack_block_ == nullptr);
  ASSERT(!is_unwind_in_progress_);

  ASSERT(saved_stack_limit_ == OSThread::kInvalidStackLimit);
  ASSERT(stack_limit_.load() == 0);
  ASSERT(safepoint_state_ == 0);

  // Avoid running these asserts for `vm-isolate`.
  if (active_stacktrace_.untag() != 0) {
    ASSERT(field_table_values_ == nullptr);
    ASSERT(shared_field_table_values_ == nullptr);
    ASSERT(global_object_pool_ == Object::null());
#define CHECK_REUSABLE_HANDLE(object) ASSERT(object##_handle_->IsNull());
    REUSABLE_HANDLE_LIST(CHECK_REUSABLE_HANDLE)
#undef CHECK_REUSABLE_HANDLE
  }
}

bool Thread::HasActiveState() {
  // Do we have active Dart frames?
  if (top_exit_frame_info() != 0) {
    return true;
  }
  // Do we have active embedder scopes?
  if (api_top_scope() != nullptr) {
    return true;
  }
  // Do we have an active VM zone?
  if (zone() != nullptr) {
    return true;
  }
  AssertEmptyStackInvariants();
  return false;
}

void Thread::EnterIsolate(Isolate* isolate) {
  const bool is_resumable = isolate->mutator_thread() != nullptr;

  // To let the VM's thread pool (if we run on it) know that this thread is
  // occupying a mutator again (decreases its max size).
  const bool is_nested_reenter =
      (is_resumable && isolate->mutator_thread()->top_exit_frame_info() != 0);

  auto group = isolate->group();
  if (!(is_nested_reenter && isolate->mutator_thread()->OwnsSafepoint())) {
    group->IncreaseMutatorCount(isolate, is_nested_reenter);
  }

  // Two threads cannot enter the same isolate at the same time.
  ASSERT(isolate->scheduled_mutator_thread_ == nullptr);

  // We lazily create a [Thread] structure for the mutator thread, but we'll
  // reuse it until the death of the isolate.
  Thread* thread = nullptr;
  if (is_resumable) {
    thread = isolate->mutator_thread();
    ASSERT(thread->scheduled_dart_mutator_isolate_ == isolate);
    ASSERT(thread->isolate() == isolate);
    ASSERT(thread->isolate_group() == isolate->group());
    {
      // Descheduled isolates are reloadable (if nothing else prevents it).
      RawReloadParticipationScope enable_reload(thread);
      thread->ExitSafepoint();
    }
  } else {
    thread = AddActiveThread(group, isolate, /*is_dart_mutator=*/true,
                             /*bypass_safepoint=*/false);
    thread->SetupState(kMutatorTask);
    thread->SetupMutatorState(kMutatorTask);
    thread->SetupDartMutatorState(isolate);
  }

  isolate->scheduled_mutator_thread_ = thread;
  ResumeDartMutatorThreadInternal(thread);
}
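
// A rough sketch of how this pairs with the embedder API (illustrative only;
// the public entry points live in dart_api_impl.cc, and the exact call chain
// stated here is an assumption):
//
//   Dart_EnterIsolate(isolate);  // ~> Thread::EnterIsolate(isolate)
//   /* run Dart code on this OS thread */
//   Dart_ExitIsolate();          // ~> Thread::ExitIsolate(
//                                //        /*isolate_shutdown=*/false)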

static bool ShouldSuspend(bool isolate_shutdown, Thread* thread) {
  // Must destroy thread.
  if (isolate_shutdown) return false;

  // Must retain thread.
  if (thread->HasActiveState() || thread->OwnsSafepoint()) return true;

  // Could do either. When there are few isolates, suspend to avoid the work
  // of entering and leaving. When there are many isolates, destroy the thread
  // to avoid the root set growing too big.
  const intptr_t kMaxSuspendedThreads = 20;
  auto group = thread->isolate_group();
  return group->thread_registry()->active_isolates_count() <
         kMaxSuspendedThreads;
}
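
// The policy above, restated as a decision table:
//   isolate shutdown                   -> destroy the Thread (never suspend)
//   active state / owns a safepoint op -> suspend (the Thread must survive)
//   otherwise                          -> suspend only while the group has
//                                         fewer than kMaxSuspendedThreads
//                                         active isolates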

void Thread::ExitIsolate(bool isolate_shutdown) {
  Thread* thread = Thread::Current();
  ASSERT(thread != nullptr);
  ASSERT(thread->IsDartMutatorThread());
  ASSERT(thread->isolate() != nullptr);
  ASSERT(thread->isolate_group() != nullptr);
  ASSERT(thread->isolate()->mutator_thread_ == thread);
  ASSERT(thread->isolate()->scheduled_mutator_thread_ == thread);
  DEBUG_ASSERT(!thread->IsAnyReusableHandleScopeActive());

  auto isolate = thread->isolate();
  auto group = thread->isolate_group();

  thread->set_vm_tag(isolate->is_runnable() ? VMTag::kIdleTagId
                                            : VMTag::kLoadWaitTagId);
  if (thread->sticky_error() != Error::null()) {
    ASSERT(isolate->sticky_error_ == Error::null());
    isolate->sticky_error_ = thread->StealStickyError();
  }

  isolate->scheduled_mutator_thread_ = nullptr;

  // Right now we keep the [Thread] object across the isolate's lifetime. This
  // makes entering/exiting quite fast as it mainly boils down to safepoint
  // transitions. Though any operation that walks over all active threads will
  // see this thread as well (e.g. safepoint operations).
  const bool is_nested_exit = thread->top_exit_frame_info() != 0;
  if (ShouldSuspend(isolate_shutdown, thread)) {
    const auto tag =
        isolate->is_runnable() ? VMTag::kIdleTagId : VMTag::kLoadWaitTagId;
    SuspendDartMutatorThreadInternal(thread, tag);
    {
      // Descheduled isolates are reloadable (if nothing else prevents it).
      RawReloadParticipationScope enable_reload(thread);
      thread->EnterSafepoint();
    }
    thread->set_execution_state(Thread::kThreadInNative);
  } else {
    thread->ResetDartMutatorState(isolate);
    thread->ResetMutatorState();
    thread->ResetState();
    SuspendDartMutatorThreadInternal(thread, VMTag::kInvalidTagId);
    FreeActiveThread(thread, /*bypass_safepoint=*/false);
  }

  // To let the VM's thread pool (if we run on it) know that this thread is no
  // longer occupying a mutator (increases its max size).
  ASSERT(!(isolate_shutdown && is_nested_exit));
  if (!(is_nested_exit && thread->OwnsSafepoint())) {
    group->DecreaseMutatorCount(isolate, is_nested_exit);
  }
}

bool Thread::EnterIsolateGroupAsHelper(IsolateGroup* isolate_group,
                                       TaskKind kind,
                                       bool bypass_safepoint) {
  Thread* thread = AddActiveThread(isolate_group, nullptr,
                                   /*is_dart_mutator=*/false, bypass_safepoint);
  if (thread != nullptr) {
    thread->SetupState(kind);
    // Even if [bypass_safepoint] is true, a thread may need mutator state
    // (e.g. parallel scavenger threads write to the [Thread]'s store buffer).
    thread->SetupMutatorState(kind);
    ResumeThreadInternal(thread);

    thread->AssertNonDartMutatorInvariants();
    return true;
  }
  return false;
}

void Thread::ExitIsolateGroupAsHelper(bool bypass_safepoint) {
  Thread* thread = Thread::Current();
  thread->AssertNonDartMutatorInvariants();

  // Even if [bypass_safepoint] is true, a thread may need mutator state
  // (e.g. parallel scavenger threads write to the [Thread]'s store buffer).
  thread->ResetMutatorState();
  thread->ResetState();
  SuspendThreadInternal(thread, VMTag::kInvalidTagId);
  FreeActiveThread(thread, bypass_safepoint);
}

bool Thread::EnterIsolateGroupAsNonMutator(IsolateGroup* isolate_group,
                                           TaskKind kind) {
  Thread* thread =
      AddActiveThread(isolate_group, nullptr,
                      /*is_dart_mutator=*/false, /*bypass_safepoint=*/true);
  if (thread != nullptr) {
    thread->SetupState(kind);
    ResumeThreadInternal(thread);

    thread->AssertNonMutatorInvariants();
    return true;
  }
  return false;
}

void Thread::ExitIsolateGroupAsNonMutator() {
  Thread* thread = Thread::Current();
  ASSERT(thread != nullptr);
  thread->AssertNonMutatorInvariants();

  thread->ResetState();
  SuspendThreadInternal(thread, VMTag::kInvalidTagId);
  FreeActiveThread(thread, /*bypass_safepoint=*/true);
}

void Thread::ResumeDartMutatorThreadInternal(Thread* thread) {
  ResumeThreadInternal(thread);
  if (Dart::vm_isolate() != nullptr &&
      thread->isolate() != Dart::vm_isolate()) {
#if defined(USING_SIMULATOR)
    thread->SetStackLimit(Simulator::Current()->overflow_stack_limit());
#else
    thread->SetStackLimit(OSThread::Current()->overflow_stack_limit());
#endif
  }
}

void Thread::SuspendDartMutatorThreadInternal(Thread* thread,
                                              VMTag::VMTagId tag) {
  thread->ClearStackLimit();
  SuspendThreadInternal(thread, tag);
}

void Thread::ResumeThreadInternal(Thread* thread) {
  ASSERT(!thread->IsAtSafepoint());
  ASSERT(thread->isolate_group() != nullptr);
  ASSERT(thread->execution_state() == Thread::kThreadInNative);
  ASSERT(thread->vm_tag() == VMTag::kInvalidTagId ||
         thread->vm_tag() == VMTag::kIdleTagId ||
         thread->vm_tag() == VMTag::kLoadWaitTagId);

  thread->set_vm_tag(VMTag::kVMTagId);
  thread->set_execution_state(Thread::kThreadInVM);

  OSThread* os_thread = OSThread::Current();
  thread->set_os_thread(os_thread);
  os_thread->set_thread(thread);
  Thread::SetCurrent(thread);
  NOT_IN_PRODUCT(os_thread->EnableThreadInterrupts());

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  thread->heap_sampler().Initialize();
#endif
}
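
// Note the ownership dance above: while a Thread is resumed, the OSThread's
// TLS slot points at the Thread (so Thread::Current() is non-null) and the
// Thread points back at the OSThread. SuspendThreadInternal below undoes
// both links, so a parked Thread has no OS thread and an idle OS thread has
// no Thread.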

void Thread::SuspendThreadInternal(Thread* thread, VMTag::VMTagId tag) {
  thread->heap()->new_space()->AbandonRemainingTLAB(thread);

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  thread->heap_sampler().Cleanup();
#endif

  OSThread* os_thread = thread->os_thread();
  ASSERT(os_thread != nullptr);
  NOT_IN_PRODUCT(os_thread->DisableThreadInterrupts());
  os_thread->set_thread(nullptr);
  OSThread::SetCurrent(os_thread);
  thread->set_os_thread(nullptr);

  thread->set_vm_tag(tag);
}

Thread* Thread::AddActiveThread(IsolateGroup* group,
                                Isolate* isolate,
                                bool is_dart_mutator,
                                bool bypass_safepoint) {
  // NOTE: We cannot just use `Dart::vm_isolate() == this` here, since during
  // VM startup it might not have been set at this point.
  const bool is_vm_isolate =
      Dart::vm_isolate() == nullptr || Dart::vm_isolate() == isolate;

  auto thread_registry = group->thread_registry();
  auto safepoint_handler = group->safepoint_handler();
  MonitorLocker ml(thread_registry->threads_lock());

  if (!bypass_safepoint) {
    while (safepoint_handler->AnySafepointInProgressLocked()) {
      ml.Wait();
    }
  }

  Thread* thread = thread_registry->GetFreeThreadLocked(is_vm_isolate);
  thread->AssertEmptyThreadInvariants();

  thread->isolate_ = isolate;  // May be nullptr.
  thread->isolate_group_ = group;
  thread->scheduled_dart_mutator_isolate_ = isolate;

  // We start out being at-safepoint (in case any safepoint operation is in
  // progress, we'll check into it once we leave the safepoint).
  thread->set_safepoint_state(Thread::SetBypassSafepoints(bypass_safepoint, 0));
  thread->runtime_call_deopt_ability_ = RuntimeCallDeoptAbility::kCanLazyDeopt;
  ASSERT(!thread->IsAtSafepoint());

  ASSERT(thread->saved_stack_limit_ == OSThread::kInvalidStackLimit);
  return thread;
}

void Thread::FreeActiveThread(Thread* thread, bool bypass_safepoint) {
  ASSERT(!thread->HasActiveState());
  ASSERT(!thread->IsAtSafepoint());

  if (!bypass_safepoint) {
    // GC helper threads don't have any handle state to clear, and the GC
    // might be currently visiting thread state. If this is not a GC helper,
    // the GC can't be visiting thread state because it's waiting for this
    // thread to check in.
    thread->ClearReusableHandles();
  }

  auto group = thread->isolate_group_;
  auto thread_registry = group->thread_registry();

  MonitorLocker ml(thread_registry->threads_lock());

  if (!bypass_safepoint) {
    // There may be a pending safepoint operation on another thread that is
    // waiting for us to check in.
    //
    // Though notice we're holding the thread registry's threads_lock, which
    // means if this other thread runs code as part of a safepoint operation
    // it will still wait for us to finish here before it tries to iterate
    // the active mutators (e.g. when GC starts/stops incremental marking).
    //
    // The thread is empty and the corresponding isolate (if any) is therefore
    // at an event-loop boundary (or shutting down). We participate in reload
    // in those scenarios.
    //
    // (It may be that an active [RELOAD_OPERATION_SCOPE] sent an OOB message
    // to this isolate but it didn't handle the OOB due to shutting down, so
    // we'll still have to update the reloading thread that it's ok to
    // continue.)
    RawReloadParticipationScope enable_reload(thread);
    thread->EnterSafepoint();
  }

  thread->isolate_ = nullptr;
  thread->isolate_group_ = nullptr;
  thread->scheduled_dart_mutator_isolate_ = nullptr;
  thread->set_execution_state(Thread::kThreadInNative);
  thread->stack_limit_.store(0);
  thread->safepoint_state_ = 0;

  thread->AssertEmptyThreadInvariants();
  thread_registry->ReturnThreadLocked(thread);
}

void Thread::ReleaseStoreBuffer() {
  ASSERT(IsAtSafepoint() || OwnsSafepoint() || task_kind_ == kMarkerTask);
  if (store_buffer_block_ == nullptr || store_buffer_block_->IsEmpty()) {
    return;  // Nothing to release.
  }
  // Prevent scheduling another GC by ignoring the threshold.
  StoreBufferRelease(StoreBuffer::kIgnoreThreshold);
  // Make sure to get an *empty* block; the isolate needs all entries
  // at GC time.
  // TODO(koda): Replace with an epilogue (PrepareAfterGC) that acquires.
  store_buffer_block_ = isolate_group()->store_buffer()->PopEmptyBlock();
}

void Thread::SetStackLimit(uword limit) {
  // The thread setting the stack limit is not necessarily the thread which
  // the stack limit is being set on.
  MonitorLocker ml(&thread_lock_);
  if (!HasScheduledInterrupts()) {
    // No interrupt pending, set stack_limit_ too.
    stack_limit_.store(limit);
  }
  saved_stack_limit_ = limit;
}

void Thread::ClearStackLimit() {
  SetStackLimit(OSThread::kInvalidStackLimit);
}

static bool IsInterruptLimit(uword limit) {
  return (limit & ~Thread::kInterruptsMask) ==
         (kInterruptStackLimit & ~Thread::kInterruptsMask);
}

void Thread::ScheduleInterrupts(uword interrupt_bits) {
  ASSERT((interrupt_bits & ~kInterruptsMask) == 0);  // Must fit in mask.

  uword old_limit = stack_limit_.load();
  uword new_limit;
  do {
    if (IsInterruptLimit(old_limit)) {
      new_limit = old_limit | interrupt_bits;
    } else {
      new_limit = (kInterruptStackLimit & ~kInterruptsMask) | interrupt_bits;
    }
  } while (!stack_limit_.compare_exchange_weak(old_limit, new_limit));
}

uword Thread::GetAndClearInterrupts() {
  uword interrupt_bits = 0;
  uword old_limit = stack_limit_.load();
  uword new_limit = saved_stack_limit_;
  do {
    if (IsInterruptLimit(old_limit)) {
      interrupt_bits = interrupt_bits | (old_limit & kInterruptsMask);
    } else {
      return interrupt_bits;
    }
  } while (!stack_limit_.compare_exchange_weak(old_limit, new_limit));

  return interrupt_bits;
}
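
// A sketch of the encoding used above: requesting an interrupt swaps
// stack_limit_ for kInterruptStackLimit (a limit value that every stack
// check trips on), with the low kInterruptsMask bits carrying the pending
// interrupt flags, e.g.
//
//   stack_limit_ = (kInterruptStackLimit & ~kInterruptsMask) | kVMInterrupt;
//
// The next stack-overflow check in generated code then calls into the
// runtime, which ends up in HandleInterrupts() below, and
// GetAndClearInterrupts() restores saved_stack_limit_.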

ErrorPtr Thread::HandleInterrupts() {
  uword interrupt_bits = GetAndClearInterrupts();
  if ((interrupt_bits & kVMInterrupt) != 0) {
    CheckForSafepoint();
    if (isolate_group()->store_buffer()->Overflowed()) {
      // Evacuate: If the popular store buffer targets are copied instead of
      // promoted, the store buffer won't shrink and a second scavenge will
      // occur that does promote them.
      heap()->CollectGarbage(this, GCType::kEvacuate, GCReason::kStoreBuffer);
    }
    heap()->CheckFinalizeMarking(this);

#if !defined(PRODUCT)
    if (isolate()->TakeHasCompletedBlocks()) {
      Profiler::ProcessCompletedBlocks(isolate());
    }
#endif  // !defined(PRODUCT)

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
    HeapProfileSampler& sampler = heap_sampler();
    if (sampler.ShouldSetThreadSamplingInterval()) {
      sampler.SetThreadSamplingInterval();
    }
    if (sampler.ShouldUpdateThreadEnable()) {
      sampler.UpdateThreadEnable();
    }
#endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  }
  if ((interrupt_bits & kMessageInterrupt) != 0) {
    MessageHandler::MessageStatus status =
        isolate()->message_handler()->HandleOOBMessages();
    if (status != MessageHandler::kOK) {
      // False result from HandleOOBMessages signals that the isolate should
      // be terminating.
      if (FLAG_trace_isolates) {
        OS::PrintErr(
            "[!] Terminating isolate due to OOB message:\n"
            "\tisolate: %s\n",
            isolate()->name());
      }
      return StealStickyError();
    }
  }
  return Error::null();
}

uword Thread::GetAndClearStackOverflowFlags() {
  uword stack_overflow_flags = stack_overflow_flags_;
  stack_overflow_flags_ = 0;
  return stack_overflow_flags;
}

void Thread::StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy) {
  StoreBufferRelease(policy);
  StoreBufferAcquire();
}

void Thread::StoreBufferAddObject(ObjectPtr obj) {
  ASSERT(this == Thread::Current());
  store_buffer_block_->Push(obj);
  if (store_buffer_block_->IsFull()) {
    StoreBufferBlockProcess(StoreBuffer::kCheckThreshold);
  }
}
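
// The pattern here: each mutator owns one StoreBufferBlock and pushes into it
// without locking; only when the block fills up is it handed to the shared
// StoreBuffer (which may then schedule a GC via the threshold check). This
// keeps the common-case write-barrier slow path a thread-local push.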

void Thread::StoreBufferAddObjectGC(ObjectPtr obj) {
  store_buffer_block_->Push(obj);
  if (store_buffer_block_->IsFull()) {
    StoreBufferBlockProcess(StoreBuffer::kIgnoreThreshold);
  }
}

void Thread::StoreBufferRelease(StoreBuffer::ThresholdPolicy policy) {
  StoreBufferBlock* block = store_buffer_block_;
  store_buffer_block_ = nullptr;
  isolate_group()->store_buffer()->PushBlock(block, policy);
}

void Thread::StoreBufferAcquire() {
  store_buffer_block_ = isolate_group()->store_buffer()->PopNonFullBlock();
}

void Thread::StoreBufferReleaseGC() {
  StoreBufferBlock* block = store_buffer_block_;
  store_buffer_block_ = nullptr;
  isolate_group()->store_buffer()->PushBlock(block,
                                             StoreBuffer::kIgnoreThreshold);
}

void Thread::StoreBufferAcquireGC() {
  store_buffer_block_ = isolate_group()->store_buffer()->PopNonFullBlock();
}

void Thread::OldMarkingStackBlockProcess() {
  OldMarkingStackRelease();
  OldMarkingStackAcquire();
}

void Thread::NewMarkingStackBlockProcess() {
  NewMarkingStackRelease();
  NewMarkingStackAcquire();
}

void Thread::DeferredMarkingStackBlockProcess() {
  DeferredMarkingStackRelease();
  DeferredMarkingStackAcquire();
}

void Thread::MarkingStackAddObject(ObjectPtr obj) {
  if (obj->IsNewObject()) {
    NewMarkingStackAddObject(obj);
  } else {
    OldMarkingStackAddObject(obj);
  }
}

void Thread::OldMarkingStackAddObject(ObjectPtr obj) {
  ASSERT(obj->IsOldObject());
  old_marking_stack_block_->Push(obj);
  if (old_marking_stack_block_->IsFull()) {
    OldMarkingStackBlockProcess();
  }
}

void Thread::NewMarkingStackAddObject(ObjectPtr obj) {
  ASSERT(obj->IsNewObject());
  new_marking_stack_block_->Push(obj);
  if (new_marking_stack_block_->IsFull()) {
    NewMarkingStackBlockProcess();
  }
}

void Thread::DeferredMarkingStackAddObject(ObjectPtr obj) {
  deferred_marking_stack_block_->Push(obj);
  if (deferred_marking_stack_block_->IsFull()) {
    DeferredMarkingStackBlockProcess();
  }
}

void Thread::OldMarkingStackRelease() {
  MarkingStackBlock* old_block = old_marking_stack_block_;
  old_marking_stack_block_ = nullptr;
  isolate_group()->old_marking_stack()->PushBlock(old_block);

  write_barrier_mask_ = UntaggedObject::kGenerationalBarrierMask;
}

void Thread::NewMarkingStackRelease() {
  MarkingStackBlock* new_block = new_marking_stack_block_;
  new_marking_stack_block_ = nullptr;
  isolate_group()->new_marking_stack()->PushBlock(new_block);
}

void Thread::OldMarkingStackAcquire() {
  old_marking_stack_block_ =
      isolate_group()->old_marking_stack()->PopEmptyBlock();

  write_barrier_mask_ = UntaggedObject::kGenerationalBarrierMask |
                        UntaggedObject::kIncrementalBarrierMask;
}
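
// Sketch of the mask toggling above: outside of marking, the compiled-code
// write barrier only filters on the generational bit; while old-space
// marking is running, the incremental bit is added as well, so stores that
// create edges to unmarked objects are also routed into the marking
// machinery.
//
//   not marking:  write_barrier_mask_ == kGenerationalBarrierMask
//   marking:      write_barrier_mask_ == kGenerationalBarrierMask |
//                                        kIncrementalBarrierMask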

void Thread::NewMarkingStackAcquire() {
  new_marking_stack_block_ =
      isolate_group()->new_marking_stack()->PopEmptyBlock();
}

void Thread::DeferredMarkingStackRelease() {
  MarkingStackBlock* block = deferred_marking_stack_block_;
  deferred_marking_stack_block_ = nullptr;
  isolate_group()->deferred_marking_stack()->PushBlock(block);
}

void Thread::DeferredMarkingStackAcquire() {
  deferred_marking_stack_block_ =
      isolate_group()->deferred_marking_stack()->PopEmptyBlock();
}

void Thread::AcquireMarkingStacks() {
  OldMarkingStackAcquire();
  NewMarkingStackAcquire();
  DeferredMarkingStackAcquire();
}

void Thread::ReleaseMarkingStacks() {
  OldMarkingStackRelease();
  NewMarkingStackRelease();
  DeferredMarkingStackRelease();
}

void Thread::FlushMarkingStacks() {
  isolate_group()->old_marking_stack()->PushBlock(old_marking_stack_block_);
  old_marking_stack_block_ =
      isolate_group()->old_marking_stack()->PopEmptyBlock();

  isolate_group()->new_marking_stack()->PushBlock(new_marking_stack_block_);
  new_marking_stack_block_ =
      isolate_group()->new_marking_stack()->PopEmptyBlock();

  isolate_group()->deferred_marking_stack()->PushBlock(
      deferred_marking_stack_block_);
  deferred_marking_stack_block_ =
      isolate_group()->deferred_marking_stack()->PopEmptyBlock();
}

Heap* Thread::heap() const {
  return isolate_group_->heap();
}

bool Thread::IsExecutingDartCode() const {
  return (top_exit_frame_info() == 0) && VMTag::IsDartTag(vm_tag());
}

bool Thread::HasExitedDartCode() const {
  return (top_exit_frame_info() != 0) && !VMTag::IsDartTag(vm_tag());
}

template <class C>
C* Thread::AllocateReusableHandle() {
  C* handle = reinterpret_cast<C*>(reusable_handles_.AllocateScopedHandle());
  C::initializeHandle(handle, C::null());
  return handle;
}

void Thread::ClearReusableHandles() {
#define CLEAR_REUSABLE_HANDLE(object) *object##_handle_ = object::null();
  REUSABLE_HANDLE_LIST(CLEAR_REUSABLE_HANDLE)
#undef CLEAR_REUSABLE_HANDLE
}
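
// For orientation (a sketch; the macros live in thread.h and
// reusable_handles.h): REUSABLE_HANDLE_LIST expands the per-type plumbing, so
// each listed type T gets a cached `T* T_handle_` allocated once in
// InitVMConstants() and, in DEBUG builds, an "is this scope active" bit that
// guards against nested use of the same reusable handle.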

void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor,
                                 ValidationPolicy validation_policy) {
  ASSERT(visitor != nullptr);

  if (zone() != nullptr) {
    zone()->VisitObjectPointers(visitor);
  }

  // Visit objects in thread specific handles area.
  reusable_handles_.VisitObjectPointers(visitor);

  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&global_object_pool_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&active_exception_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&active_stacktrace_));
  visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&sticky_error_));

  // Visit the api local scope as it has all the api local handles.
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr) {
    scope->local_handles()->VisitObjectPointers(visitor);
    scope = scope->previous();
  }

  // Only the mutator thread can run Dart code.
  if (IsDartMutatorThread()) {
    // The MarkTask, which calls this method, can run on a different thread.
    // We therefore assume the mutator is at a safepoint and we can iterate
    // its stack.
    // TODO(vm-team): It would be beneficial to be able to ask the mutator
    // thread whether it is in fact blocked at the moment (at a "safepoint")
    // so we can safely iterate its stack.
    //
    // Unfortunately we cannot use `this->IsAtSafepoint()` here because that
    // will return `false` even though the mutator thread is waiting for mark
    // tasks (which iterate its stack) to finish.
    const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
        StackFrameIterator::kAllowCrossThreadIteration;

    // Iterate over all the stack frames and visit objects on the stack.
    StackFrameIterator frames_iterator(top_exit_frame_info(),
                                       validation_policy, this,
                                       cross_thread_policy);
    StackFrame* frame = frames_iterator.NextFrame();
    visitor->set_gc_root_type("frame");
    while (frame != nullptr) {
      frame->VisitObjectPointers(visitor);
      frame = frames_iterator.NextFrame();
    }
    visitor->clear_gc_root_type();
  } else {
    // We are not on the mutator thread.
    RELEASE_ASSERT(top_exit_frame_info() == 0);
  }
}

class RestoreWriteBarrierInvariantVisitor : public ObjectPointerVisitor {
 public:
  RestoreWriteBarrierInvariantVisitor(IsolateGroup* group,
                                      Thread* thread,
                                      Thread::RestoreWriteBarrierInvariantOp op)
      : ObjectPointerVisitor(group),
        thread_(thread),
        current_(Thread::Current()),
        op_(op) {}

  void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
    for (; first != last + 1; first++) {
      ObjectPtr obj = *first;
      if (obj->IsImmediateObject()) continue;

      // To avoid adding too much work into the remembered set, skip large
      // arrays. Write barrier elimination will not remove the barrier
      // if we can trigger GC between array allocation and store.
      if (obj->GetClassId() == kArrayCid) {
        const auto length = Smi::Value(Array::RawCast(obj)->untag()->length());
        if (length > Array::kMaxLengthForWriteBarrierElimination) {
          continue;
        }
      }

      // Dart code won't store into VM-internal objects except Contexts and
      // UnhandledExceptions. This assumption is checked by an assertion in
      // WriteBarrierElimination::UpdateVectorForBlock.
      if (!obj->IsDartInstance() && !obj->IsContext() &&
          !obj->IsUnhandledException())
        continue;

      // Dart code won't store into canonical instances.
      if (obj->untag()->IsCanonical()) continue;

      // Objects in the VM isolate heap are immutable and won't be
      // stored into. Check this condition last because there's no bit
      // in the header for it.
      if (obj->untag()->InVMIsolateHeap()) continue;

      switch (op_) {
        case Thread::RestoreWriteBarrierInvariantOp::kAddToRememberedSet:
          if (obj->IsOldObject()) {
            obj->untag()->EnsureInRememberedSet(current_);
          }
          if (current_->is_marking()) {
            current_->DeferredMarkingStackAddObject(obj);
          }
          break;
        case Thread::RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack:
          // Re-scan obj when finalizing marking.
          ASSERT(current_->is_marking());
          current_->DeferredMarkingStackAddObject(obj);
          break;
      }
    }
  }

#if defined(DART_COMPRESSED_POINTERS)
  void VisitCompressedPointers(uword heap_base,
                               CompressedObjectPtr* first,
                               CompressedObjectPtr* last) override {
    UNREACHABLE();  // Stack slots are not compressed.
  }
#endif

 private:
  Thread* const thread_;
  Thread* const current_;
  Thread::RestoreWriteBarrierInvariantOp op_;
};

// Write barrier elimination assumes that all live temporaries will be
// in the remembered set after a scavenge triggered by a non-Dart-call
// instruction (see Instruction::CanCallDart()), and additionally they will be
// in the deferred marking stack if concurrent marking started. Specifically,
// this includes any instruction which will always create an exit frame
// below the current frame before any other Dart frames.
//
// Therefore, to support this assumption, we scan the stack after a scavenge
// or when concurrent marking begins and add all live temporaries in
// Dart frames preceding an exit frame to the store buffer or deferred
// marking stack.
void Thread::RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op) {
  ASSERT(IsAtSafepoint() || OwnsGCSafepoint() || this == Thread::Current());

  const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
      StackFrameIterator::kAllowCrossThreadIteration;
  StackFrameIterator frames_iterator(top_exit_frame_info(),
                                     ValidationPolicy::kDontValidateFrames,
                                     this, cross_thread_policy);
  RestoreWriteBarrierInvariantVisitor visitor(isolate_group(), this, op);
  ObjectStore* object_store = isolate_group()->object_store();
  bool scan_next_dart_frame = false;
  for (StackFrame* frame = frames_iterator.NextFrame(); frame != nullptr;
       frame = frames_iterator.NextFrame()) {
    if (frame->IsExitFrame()) {
      scan_next_dart_frame = true;
    } else if (frame->IsEntryFrame()) {
      /* Continue searching. */
    } else if (frame->IsStubFrame()) {
      const uword pc = frame->pc();
      if (Code::ContainsInstructionAt(
              object_store->init_late_static_field_stub(), pc) ||
          Code::ContainsInstructionAt(
              object_store->init_late_final_static_field_stub(), pc) ||
          Code::ContainsInstructionAt(
              object_store->init_late_instance_field_stub(), pc) ||
          Code::ContainsInstructionAt(
              object_store->init_late_final_instance_field_stub(), pc)) {
        scan_next_dart_frame = true;
      }
    } else {
      ASSERT(frame->IsDartFrame(/*validate=*/false));
      if (scan_next_dart_frame) {
        frame->VisitObjectPointers(&visitor);
      }
      scan_next_dart_frame = false;
    }
  }
}
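
// Illustrative example (not from this file) of the invariant being restored:
// the compiler may eliminate the barrier in a sequence like
//
//   t <- AllocateObject(...)   // fresh allocation, known not remembered
//   StoreField(t.f, x)         // write barrier eliminated
//
// If a scavenge (or the start of concurrent marking) happens between those
// two instructions, it can only be at a call that pushed an exit frame, so
// `t` is a live temporary in a Dart frame preceding that exit frame, and the
// walk above re-remembers or defer-marks it.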

void Thread::DeferredMarkLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack);
}

void Thread::RememberLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToRememberedSet);
}

bool Thread::CanLoadFromThread(const Object& object) {
  // In order to allow us to use assembler helper routines with non-[Code]
  // objects *before* stubs are initialized, we only loop over the stubs if
  // the [object] is in fact a [Code] object.
  if (object.IsCode()) {
#define CHECK_OBJECT(type_name, member_name, expr, default_init_value)        \
  if (object.ptr() == expr) {                                                 \
    return true;                                                              \
  }
    CACHED_VM_STUBS_LIST(CHECK_OBJECT)
#undef CHECK_OBJECT
  }

  // For non-[Code] objects we check if the object equals any of the cached
  // non-stub entries.
#define CHECK_OBJECT(type_name, member_name, expr, default_init_value)        \
  if (object.ptr() == expr) {                                                 \
    return true;                                                              \
  }
  CACHED_NON_VM_STUB_LIST(CHECK_OBJECT)
#undef CHECK_OBJECT
  return false;
}

intptr_t Thread::OffsetFromThread(const Object& object) {
  // In order to allow us to use assembler helper routines with non-[Code]
  // objects *before* stubs are initialized, we only loop over the stubs if
  // the [object] is in fact a [Code] object.
  if (object.IsCode()) {
#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value)      \
  ASSERT((expr)->untag()->InVMIsolateHeap());                                 \
  if (object.ptr() == expr) {                                                 \
    return Thread::member_name##offset();                                     \
  }
    CACHED_VM_STUBS_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET
  }

  // For non-[Code] objects we check if the object equals any of the cached
  // non-stub entries.
#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value)      \
  if (object.ptr() == expr) {                                                 \
    return Thread::member_name##offset();                                     \
  }
  CACHED_NON_VM_STUB_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET

  UNREACHABLE();
  return -1;
}

bool Thread::ObjectAtOffset(intptr_t offset, Object* object) {
  if (Isolate::Current() == Dart::vm_isolate()) {
    // --disassemble-stubs runs before all the references through
    // thread have targets.
    return false;
  }

#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value)      \
  if (Thread::member_name##offset() == offset) {                              \
    *object = expr;                                                           \
    return true;                                                              \
  }
  CACHED_VM_OBJECTS_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET
  return false;
}

intptr_t Thread::OffsetFromThread(const RuntimeEntry* runtime_entry) {
#define COMPUTE_OFFSET(name)                                                  \
  if (runtime_entry == &k##name##RuntimeEntry) {                              \
    return Thread::name##_entry_point_offset();                               \
  }
  RUNTIME_ENTRY_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET

#define COMPUTE_OFFSET(returntype, name, ...)                                 \
  if (runtime_entry == &k##name##RuntimeEntry) {                              \
    return Thread::name##_entry_point_offset();                               \
  }
  LEAF_RUNTIME_ENTRY_LIST(COMPUTE_OFFSET)
#undef COMPUTE_OFFSET

  UNREACHABLE();
  return -1;
}

#if defined(DEBUG)
bool Thread::TopErrorHandlerIsSetJump() const {
  if (long_jump_base() == nullptr) return false;
  if (top_exit_frame_info_ == 0) return true;
#if defined(USING_SIMULATOR) || defined(USING_SAFE_STACK)
  // False positives: simulator stack and native stack are unordered.
  return true;
#else
  return reinterpret_cast<uword>(long_jump_base()) < top_exit_frame_info_;
#endif
}

bool Thread::TopErrorHandlerIsExitFrame() const {
  if (top_exit_frame_info_ == 0) return false;
  if (long_jump_base() == nullptr) return true;
#if defined(USING_SIMULATOR) || defined(USING_SAFE_STACK)
  // False positives: simulator stack and native stack are unordered.
  return true;
#else
  return top_exit_frame_info_ < reinterpret_cast<uword>(long_jump_base());
#endif
}
#endif  // defined(DEBUG)

bool Thread::IsValidHandle(Dart_Handle object) const {
  return IsValidLocalHandle(object) || IsValidZoneHandle(object) ||
         IsValidScopedHandle(object);
}

bool Thread::IsValidLocalHandle(Dart_Handle object) const {
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr) {
    if (scope->local_handles()->IsValidHandle(object)) {
      return true;
    }
    scope = scope->previous();
  }
  return false;
}

intptr_t Thread::CountLocalHandles() const {
  intptr_t total = 0;
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr) {
    total += scope->local_handles()->CountHandles();
    scope = scope->previous();
  }
  return total;
}

int Thread::ZoneSizeInBytes() const {
  int total = 0;
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr) {
    total += scope->zone()->SizeInBytes();
    scope = scope->previous();
  }
  return total;
}

void Thread::EnterApiScope() {
  ASSERT(MayAllocateHandles());
  ApiLocalScope* new_scope = api_reusable_scope();
  if (new_scope == nullptr) {
    new_scope = new ApiLocalScope(api_top_scope(), top_exit_frame_info());
    ASSERT(new_scope != nullptr);
  } else {
    new_scope->Reinit(this, api_top_scope(), top_exit_frame_info());
    set_api_reusable_scope(nullptr);
  }
  set_api_top_scope(new_scope);  // New scope is now the top scope.
}

void Thread::ExitApiScope() {
  ASSERT(MayAllocateHandles());
  ApiLocalScope* scope = api_top_scope();
  ApiLocalScope* reusable_scope = api_reusable_scope();
  set_api_top_scope(scope->previous());  // Reset top scope to previous.
  if (reusable_scope == nullptr) {
    scope->Reset(this);  // Reset the old scope which we just exited.
    set_api_reusable_scope(scope);
  } else {
    ASSERT(reusable_scope != scope);
    delete scope;
  }
}

void Thread::UnwindScopes(uword stack_marker) {
  // Unwind all scopes using the same stack_marker, i.e. all scopes allocated
  // under the same top_exit_frame_info.
  ApiLocalScope* scope = api_top_scope_;
  while (scope != nullptr && scope->stack_marker() != 0 &&
         scope->stack_marker() == stack_marker) {
    api_top_scope_ = scope->previous();
    delete scope;
    scope = api_top_scope_;
  }
}

void Thread::EnterSafepointUsingLock() {
  isolate_group()->safepoint_handler()->EnterSafepointUsingLock(this);
}

void Thread::ExitSafepointUsingLock() {
  isolate_group()->safepoint_handler()->ExitSafepointUsingLock(this);
}

void Thread::BlockForSafepoint() {
  isolate_group()->safepoint_handler()->BlockForSafepoint(this);
}

bool Thread::OwnsGCSafepoint() const {
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) <= SafepointLevel::kGCAndDeoptAndReload;
}

bool Thread::OwnsDeoptSafepoint() const {
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) == SafepointLevel::kGCAndDeopt;
}

bool Thread::OwnsReloadSafepoint() const {
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) == SafepointLevel::kGCAndDeoptAndReload;
}

bool Thread::OwnsSafepoint() const {
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) != SafepointLevel::kNoSafepoint;
}

bool Thread::CanAcquireSafepointLocks() const {
  // A thread may acquire locks and then enter a safepoint operation (e.g.
  // holding the program lock, allocating objects which triggers GC).
  //
  // So if this code is called inside a safepoint operation, we generally have
  // to assume other threads may hold locks and are blocked on the safepoint,
  // meaning we cannot hold a safepoint and acquire locks (deadlock!).
  //
  // Though if we own a reload safepoint operation it means all other mutators
  // are blocked in very specific places, where we know no locks are held. As
  // such we allow the current thread to acquire locks.
  //
  // Example: We own a reload safepoint operation, load kernel, which
  // allocates symbols, where the symbol implementation acquires the symbol
  // lock (we know other mutators at a reload safepoint do not hold it).
  return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
             this) >= SafepointLevel::kGCAndDeoptAndReload;
}
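
// Illustrative deadlock this guards against: thread A takes the symbol lock,
// then allocates and blocks at a GC safepoint; thread B, owning that GC
// safepoint operation, now tries to take the symbol lock. B waits on A's
// lock while A waits for B's safepoint operation to end. Hence owning a
// (non-reload) safepoint operation implies "may not acquire such locks".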

void Thread::SetupState(TaskKind kind) {
  task_kind_ = kind;
}

void Thread::ResetState() {
  task_kind_ = kUnknownTask;
  vm_tag_ = VMTag::kInvalidTagId;
}

void Thread::SetupMutatorState(TaskKind kind) {
  ASSERT(store_buffer_block_ == nullptr);

  if (isolate_group()->old_marking_stack() != nullptr) {
    ASSERT(isolate_group()->new_marking_stack() != nullptr);
    ASSERT(isolate_group()->deferred_marking_stack() != nullptr);
    // Concurrent mark in progress. Enable barrier for this thread.
    OldMarkingStackAcquire();
    NewMarkingStackAcquire();
    DeferredMarkingStackAcquire();
  }

  // TODO(koda): Use StoreBufferAcquire once we properly flush
  // before Scavenge.
  if (kind == kMutatorTask) {
    StoreBufferAcquire();
  } else {
    store_buffer_block_ = isolate_group()->store_buffer()->PopEmptyBlock();
  }
}

void Thread::ResetMutatorState() {
  ASSERT(execution_state() == Thread::kThreadInVM);
  ASSERT(store_buffer_block_ != nullptr);

  if (is_marking()) {
    OldMarkingStackRelease();
    NewMarkingStackRelease();
    DeferredMarkingStackRelease();
  }
  StoreBufferRelease();
}

void Thread::SetupDartMutatorState(Isolate* isolate) {
  field_table_values_ = isolate->field_table_->table();
  isolate->mutator_thread_ = this;

  SetupDartMutatorStateDependingOnSnapshot(isolate->group());
}

void Thread::SetupDartMutatorStateDependingOnSnapshot(IsolateGroup* group) {
  // The snapshot may or may not have been read at this point (on isolate
  // group creation, the first isolate is entered for the first time before
  // the snapshot is read).
  //
  // So we call this code explicitly after snapshot reading time and whenever
  // we enter an isolate with a new thread object.
#if defined(DART_PRECOMPILED_RUNTIME)
  auto object_store = group->object_store();
  if (object_store != nullptr) {
    global_object_pool_ = object_store->global_object_pool();

    auto dispatch_table = group->dispatch_table();
    if (dispatch_table != nullptr) {
      dispatch_table_array_ = dispatch_table->ArrayOrigin();
    }
#define INIT_ENTRY_POINT(name)                                                \
  if (object_store->name() != Object::null()) {                               \
    name##_entry_point_ = Function::EntryPointOf(object_store->name());       \
  }
    CACHED_FUNCTION_ENTRY_POINTS_LIST(INIT_ENTRY_POINT)
#undef INIT_ENTRY_POINT
  }
#endif  // defined(DART_PRECOMPILED_RUNTIME)

  shared_field_table_values_ = group->shared_field_table()->table();
}

void Thread::ResetDartMutatorState(Isolate* isolate) {
  ASSERT(execution_state() == Thread::kThreadInVM);

  isolate->mutator_thread_ = nullptr;
  is_unwind_in_progress_ = false;

  field_table_values_ = nullptr;
  shared_field_table_values_ = nullptr;
  ONLY_IN_PRECOMPILED(global_object_pool_ = ObjectPool::null());
  ONLY_IN_PRECOMPILED(dispatch_table_array_ = nullptr);
}

#if !defined(PRODUCT)
DisableThreadInterruptsScope::DisableThreadInterruptsScope(Thread* thread)
    : StackResource(thread) {
  if (thread != nullptr) {
    OSThread* os_thread = thread->os_thread();
    ASSERT(os_thread != nullptr);
    os_thread->DisableThreadInterrupts();
  }
}

DisableThreadInterruptsScope::~DisableThreadInterruptsScope() {
  if (thread() != nullptr) {
    OSThread* os_thread = thread()->os_thread();
    ASSERT(os_thread != nullptr);
    os_thread->EnableThreadInterrupts();
  }
}
#endif

NoReloadScope::NoReloadScope(Thread* thread) : ThreadStackResource(thread) {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
  thread->no_reload_scope_depth_++;
  ASSERT(thread->no_reload_scope_depth_ >= 0);
#endif  // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
}

NoReloadScope::~NoReloadScope() {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
  thread()->no_reload_scope_depth_ -= 1;
  ASSERT(thread()->no_reload_scope_depth_ >= 0);
  auto isolate = thread()->isolate();
  const intptr_t state = thread()->safepoint_state();

  if (thread()->no_reload_scope_depth_ == 0) {
    // If we were asked to go to a reload safepoint & block for a reload
    // safepoint operation on another thread - *while* being inside
    // [NoReloadScope] - we may have handled & ignored the OOB message telling
    // us to reload.
    //
    // Since we're now exiting the [NoReloadScope], we'll send another OOB
    // reload request message to ourselves, which will be handled in a
    // well-defined place where we can perform the reload.
    if (isolate != nullptr &&
        Thread::IsSafepointLevelRequested(
            state, SafepointLevel::kGCAndDeoptAndReload)) {
      isolate->SendInternalLibMessage(Isolate::kCheckForReload,
                                      /*ignored=*/-1);
    }
  }
#endif  // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
}
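
// Sketch of the situation handled above (hypothetical timeline):
//   1. An isolate enters a NoReloadScope (reload must not happen here).
//   2. A reload safepoint is requested; the OOB "check for reload" message
//      arrives but is effectively ignored because reload is disallowed.
//   3. The scope exits; we re-send Isolate::kCheckForReload to ourselves so
//      the reload is retried at a well-defined point.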

}  // namespace dart