Flutter Engine
thread.cc
1// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/thread.h"
6
7#include "vm/cpu.h"
8#include "vm/dart_api_state.h"
9#include "vm/growable_array.h"
10#include "vm/heap/safepoint.h"
11#include "vm/isolate.h"
12#include "vm/json_stream.h"
13#include "vm/lockers.h"
14#include "vm/log.h"
15#include "vm/message_handler.h"
16#include "vm/native_entry.h"
17#include "vm/object.h"
18#include "vm/object_store.h"
19#include "vm/os_thread.h"
20#include "vm/profiler.h"
21#include "vm/runtime_entry.h"
22#include "vm/service.h"
23#include "vm/stub_code.h"
24#include "vm/symbols.h"
26#include "vm/thread_registry.h"
27#include "vm/timeline.h"
28#include "vm/zone.h"
29
30namespace dart {
31
32#if !defined(PRODUCT)
33DECLARE_FLAG(bool, trace_service);
34DECLARE_FLAG(bool, trace_service_verbose);
35#endif // !defined(PRODUCT)
36
37Thread::~Thread() {
38 // We should cleanly exit any isolate before destruction.
39 ASSERT(isolate_ == nullptr);
40 ASSERT(store_buffer_block_ == nullptr);
41 ASSERT(marking_stack_block_ == nullptr);
42 // There should be no top api scopes at this point.
43 ASSERT(api_top_scope() == nullptr);
44 // Delete the reusable api scope if there is one.
45 if (api_reusable_scope_ != nullptr) {
46 delete api_reusable_scope_;
47 api_reusable_scope_ = nullptr;
48 }
49
50 DO_IF_TSAN(delete tsan_utils_);
51}
52
53#if defined(DEBUG)
54#define REUSABLE_HANDLE_SCOPE_INIT(object) \
55 reusable_##object##_handle_scope_active_(false),
56#else
57#define REUSABLE_HANDLE_SCOPE_INIT(object)
58#endif // defined(DEBUG)
59
60#define REUSABLE_HANDLE_INITIALIZERS(object) object##_handle_(nullptr),
61
62Thread::Thread(bool is_vm_isolate)
63 : ThreadState(false),
64 write_barrier_mask_(UntaggedObject::kGenerationalBarrierMask),
65 active_exception_(Object::null()),
66 active_stacktrace_(Object::null()),
67 global_object_pool_(ObjectPool::null()),
68 resume_pc_(0),
69 execution_state_(kThreadInNative),
70 safepoint_state_(0),
71 api_top_scope_(nullptr),
72 double_truncate_round_supported_(
73 TargetCPUFeatures::double_truncate_round_supported() ? 1 : 0),
74 tsan_utils_(DO_IF_TSAN(new TsanUtils()) DO_IF_NOT_TSAN(nullptr)),
75 task_kind_(kUnknownTask),
76#if defined(SUPPORT_TIMELINE)
77 dart_stream_(ASSERT_NOTNULL(Timeline::GetDartStream())),
78#else
79 dart_stream_(nullptr),
80#endif
81#if !defined(PRODUCT)
82 service_extension_stream_(ASSERT_NOTNULL(&Service::extension_stream)),
83#else
84 service_extension_stream_(nullptr),
85#endif
86 thread_lock_(),
87 api_reusable_scope_(nullptr),
88 no_callback_scope_depth_(0),
89#if defined(DEBUG)
90 no_safepoint_scope_depth_(0),
91#endif
92 reusable_handles_(),
93 stack_overflow_count_(0),
94 hierarchy_info_(nullptr),
95 type_usage_info_(nullptr),
96 sticky_error_(Error::null()),
97 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_INITIALIZERS)
98 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_INIT)
99#if defined(USING_SAFE_STACK)
100 saved_safestack_limit_(0),
101#endif
102#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
103 next_(nullptr),
104 heap_sampler_(this) {
105#else
106 next_(nullptr) {
107#endif
108
109#define DEFAULT_INIT(type_name, member_name, init_expr, default_init_value) \
110 member_name = default_init_value;
111 CACHED_CONSTANTS_LIST(DEFAULT_INIT)
112#undef DEFAULT_INIT
113
114 for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
115 write_barrier_wrappers_entry_points_[i] = 0;
116 }
117
118#define DEFAULT_INIT(name) name##_entry_point_ = 0;
119 RUNTIME_ENTRY_LIST(DEFAULT_INIT)
120#undef DEFAULT_INIT
121
122#define DEFAULT_INIT(returntype, name, ...) name##_entry_point_ = 0;
123 LEAF_RUNTIME_ENTRY_LIST(DEFAULT_INIT)
124#undef DEFAULT_INIT
125
126 // We cannot initialize the VM constants here for the vm isolate thread
127 // due to bootstrapping issues.
128 if (!is_vm_isolate) {
129 InitVMConstants();
130 }
131
132#if defined(DART_HOST_OS_FUCHSIA)
133 next_task_id_ = trace_generate_nonce();
134#else
135 next_task_id_ = Random::GlobalNextUInt64();
136#endif
137
138 memset(&unboxed_runtime_arg_, 0, sizeof(simd128_value_t));
139}
140
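// The ALIGN16 constants below are bit-pattern masks used by generated code:
// sign-bit masks for double/float negation, sign-bit-cleared masks for
// absolute value, and a lane mask that zeroes the w component. The Thread
// exposes their addresses through its cached-constants tables.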
141static const double double_nan_constant = NAN;
142
143static const struct ALIGN16 {
144 uint64_t a;
145 uint64_t b;
146} double_negate_constant = {0x8000000000000000ULL, 0x8000000000000000ULL};
147
148static const struct ALIGN16 {
149 uint64_t a;
150 uint64_t b;
151} double_abs_constant = {0x7FFFFFFFFFFFFFFFULL, 0x7FFFFFFFFFFFFFFFULL};
152
153static const struct ALIGN16 {
154 uint32_t a;
155 uint32_t b;
156 uint32_t c;
157 uint32_t d;
158} float_not_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
159
160static const struct ALIGN16 {
161 uint32_t a;
162 uint32_t b;
163 uint32_t c;
164 uint32_t d;
165} float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000};
166
167static const struct ALIGN16 {
168 uint32_t a;
169 uint32_t b;
170 uint32_t c;
171 uint32_t d;
172} float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
173
174static const struct ALIGN16 {
175 uint32_t a;
176 uint32_t b;
177 uint32_t c;
178 uint32_t d;
179} float_zerow_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000};
180
181void Thread::InitVMConstants() {
182#if defined(DART_COMPRESSED_POINTERS)
183 heap_base_ = Object::null()->heap_base();
184#endif
185
186#define ASSERT_VM_HEAP(type_name, member_name, init_expr, default_init_value) \
187 ASSERT((init_expr)->IsOldObject());
188 CACHED_VM_OBJECTS_LIST(ASSERT_VM_HEAP)
189#undef ASSERT_VM_HEAP
190
191#define INIT_VALUE(type_name, member_name, init_expr, default_init_value) \
192 ASSERT(member_name == default_init_value); \
193 member_name = (init_expr);
194 CACHED_CONSTANTS_LIST(INIT_VALUE)
195#undef INIT_VALUE
196
197 for (intptr_t i = 0; i < kNumberOfDartAvailableCpuRegs; ++i) {
198 write_barrier_wrappers_entry_points_[i] =
199 StubCode::WriteBarrierWrappers().EntryPoint() +
200 i * kStoreBufferWrapperSize;
201 }
202
203#define INIT_VALUE(name) \
204 ASSERT(name##_entry_point_ == 0); \
205 name##_entry_point_ = k##name##RuntimeEntry.GetEntryPoint();
206 RUNTIME_ENTRY_LIST(INIT_VALUE)
207#undef INIT_VALUE
208
209#define INIT_VALUE(returntype, name, ...) \
210 ASSERT(name##_entry_point_ == 0); \
211 name##_entry_point_ = k##name##RuntimeEntry.GetEntryPoint();
212 LEAF_RUNTIME_ENTRY_LIST(INIT_VALUE)
213#undef INIT_VALUE
214
215// Set up the thread-specific reusable handles.
216#define REUSABLE_HANDLE_ALLOCATION(object) \
217 this->object##_handle_ = this->AllocateReusableHandle<object>();
218 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_ALLOCATION)
219#undef REUSABLE_HANDLE_ALLOCATION
220}
221
222void Thread::set_active_exception(const Object& value) {
223 active_exception_ = value.ptr();
224}
225
226void Thread::set_active_stacktrace(const Object& value) {
227 active_stacktrace_ = value.ptr();
228}
229
230ErrorPtr Thread::sticky_error() const {
231 return sticky_error_;
232}
233
234void Thread::set_sticky_error(const Error& value) {
235 ASSERT(!value.IsNull());
236 sticky_error_ = value.ptr();
237}
238
239void Thread::ClearStickyError() {
240 sticky_error_ = Error::null();
241}
242
243ErrorPtr Thread::StealStickyError() {
244 NoSafepointScope no_safepoint;
245 ErrorPtr return_value = sticky_error_;
246 sticky_error_ = Error::null();
247 return return_value;
248}
249
250const char* Thread::TaskKindToCString(TaskKind kind) {
251 switch (kind) {
252 case kUnknownTask:
253 return "kUnknownTask";
254 case kMutatorTask:
255 return "kMutatorTask";
256 case kCompilerTask:
257 return "kCompilerTask";
258 case kSweeperTask:
259 return "kSweeperTask";
260 case kMarkerTask:
261 return "kMarkerTask";
262 default:
263 UNREACHABLE();
264 return "";
265 }
266}
267
268void Thread::AssertNonMutatorInvariants() {
269 ASSERT(BypassSafepoints());
270 ASSERT(store_buffer_block_ == nullptr);
271 ASSERT(marking_stack_block_ == nullptr);
272 ASSERT(deferred_marking_stack_block_ == nullptr);
273 AssertNonDartMutatorInvariants();
274}
275
276void Thread::AssertNonDartMutatorInvariants() {
277 ASSERT(!IsDartMutatorThread());
278 ASSERT(isolate() == nullptr);
279 ASSERT(isolate_group() != nullptr);
280 ASSERT(task_kind_ != kMutatorTask);
281 DEBUG_ASSERT(!IsAnyReusableHandleScopeActive());
282}
283
284void Thread::AssertEmptyStackInvariants() {
285 ASSERT(zone() == nullptr);
286 ASSERT(top_handle_scope() == nullptr);
287 ASSERT(long_jump_base() == nullptr);
288 ASSERT(top_resource() == nullptr);
289 ASSERT(top_exit_frame_info_ == 0);
290 ASSERT(api_top_scope_ == nullptr);
291 ASSERT(!pending_deopts_.HasPendingDeopts());
292 ASSERT(compiler_state_ == nullptr);
293 ASSERT(hierarchy_info_ == nullptr);
294 ASSERT(type_usage_info_ == nullptr);
295 ASSERT(no_active_isolate_scope_ == nullptr);
296 ASSERT(compiler_timings_ == nullptr);
297 ASSERT(!exit_through_ffi_);
298 ASSERT(runtime_call_deopt_ability_ == RuntimeCallDeoptAbility::kCanLazyDeopt);
299 ASSERT(no_callback_scope_depth_ == 0);
300 ASSERT(force_growth_scope_depth_ == 0);
301 ASSERT(no_reload_scope_depth_ == 0);
302 ASSERT(stopped_mutators_scope_depth_ == 0);
303 ASSERT(stack_overflow_flags_ == 0);
304 DEBUG_ASSERT(!inside_compiler_);
305 DEBUG_ASSERT(no_safepoint_scope_depth_ == 0);
306
307 // Avoid running these asserts for `vm-isolate`.
308 if (active_stacktrace_.untag() != 0) {
309 ASSERT(sticky_error() == Error::null());
310 ASSERT(active_exception_ == Object::null());
311 ASSERT(active_stacktrace_ == Object::null());
312 }
313}
314
315void Thread::AssertEmptyThreadInvariants() {
316 AssertEmptyStackInvariants();
317
318 ASSERT(top_ == 0);
319 ASSERT(end_ == 0);
320 ASSERT(true_end_ == 0);
321 ASSERT(isolate_ == nullptr);
322 ASSERT(isolate_group_ == nullptr);
323 ASSERT(os_thread() == nullptr);
324 ASSERT(vm_tag_ == VMTag::kInvalidTagId);
325 ASSERT(task_kind_ == kUnknownTask);
326 ASSERT(execution_state_ == Thread::kThreadInNative);
327 ASSERT(scheduled_dart_mutator_isolate_ == nullptr);
328
329 ASSERT(write_barrier_mask_ == UntaggedObject::kGenerationalBarrierMask);
330 ASSERT(store_buffer_block_ == nullptr);
331 ASSERT(marking_stack_block_ == nullptr);
332 ASSERT(deferred_marking_stack_block_ == nullptr);
333 ASSERT(!is_unwind_in_progress_);
334
335 ASSERT(saved_stack_limit_ == OSThread::kInvalidStackLimit);
336 ASSERT(stack_limit_.load() == 0);
337 ASSERT(safepoint_state_ == 0);
338
339 // Avoid running these asserts for `vm-isolate`.
340 if (active_stacktrace_.untag() != 0) {
341 ASSERT(field_table_values_ == nullptr);
342 ASSERT(global_object_pool_ == Object::null());
343#define CHECK_REUSABLE_HANDLE(object) ASSERT(object##_handle_->IsNull());
344 REUSABLE_HANDLE_LIST(CHECK_REUSABLE_HANDLE)
345#undef CHECK_REUSABLE_HANDLE
346 }
347}
348
349bool Thread::HasActiveState() {
350 // Do we have active dart frames?
351 if (top_exit_frame_info() != 0) {
352 return true;
353 }
354 // Do we have active embedder scopes?
355 if (api_top_scope() != nullptr) {
356 return true;
357 }
358 // Do we have active vm zone?
359 if (zone() != nullptr) {
360 return true;
361 }
362 AssertEmptyStackInvariants();
363 return false;
364}
365
366void Thread::EnterIsolate(Isolate* isolate) {
367 const bool is_resumable = isolate->mutator_thread() != nullptr;
368
369 // To let VM's thread pool (if we run on it) know that this thread is
370 // occupying a mutator again (decreases its max size).
371 const bool is_nested_reenter =
372 (is_resumable && isolate->mutator_thread()->top_exit_frame_info() != 0);
373
374 auto group = isolate->group();
375 if (!(is_nested_reenter && isolate->mutator_thread()->OwnsSafepoint())) {
376 group->IncreaseMutatorCount(isolate, is_nested_reenter);
377 }
378
379 // Two threads cannot enter the isolate at the same time.
380 ASSERT(isolate->scheduled_mutator_thread_ == nullptr);
381
382 // We lazily create a [Thread] structure for the mutator thread, but we'll
383 // reuse it until the death of the isolate.
384 Thread* thread = nullptr;
385 if (is_resumable) {
386 thread = isolate->mutator_thread();
387 ASSERT(thread->scheduled_dart_mutator_isolate_ == isolate);
388 ASSERT(thread->isolate() == isolate);
389 ASSERT(thread->isolate_group() == isolate->group());
390 {
391 // Descheduled isolates are reloadable (if nothing else prevents it).
392 RawReloadParticipationScope enable_reload(thread);
393 thread->ExitSafepoint();
394 }
395 } else {
396 thread = AddActiveThread(group, isolate, /*is_dart_mutator*/ true,
397 /*bypass_safepoint=*/false);
398 thread->SetupState(kMutatorTask);
399 thread->SetupMutatorState(kMutatorTask);
400 thread->SetupDartMutatorState(isolate);
401 }
402
403 isolate->scheduled_mutator_thread_ = thread;
404 ResumeDartMutatorThreadInternal(thread);
405}
406
407static bool ShouldSuspend(bool isolate_shutdown, Thread* thread) {
408 // Must destroy thread.
409 if (isolate_shutdown) return false;
410
411 // Must retain thread.
412 if (thread->HasActiveState() || thread->OwnsSafepoint()) return true;
413
414 // Could do either. When there are few isolates, suspend to avoid the work
415 // of entering and leaving. When there are many isolates, destroy the thread
416 // to avoid the root set growing too big.
417 const intptr_t kMaxSuspendedThreads = 20;
418 auto group = thread->isolate_group();
419 return group->thread_registry()->active_isolates_count() <
420 kMaxSuspendedThreads;
421}
422
423void Thread::ExitIsolate(bool isolate_shutdown) {
424 Thread* thread = Thread::Current();
425 ASSERT(thread != nullptr);
426 ASSERT(thread->IsDartMutatorThread());
427 ASSERT(thread->isolate() != nullptr);
428 ASSERT(thread->isolate_group() != nullptr);
429 ASSERT(thread->isolate()->mutator_thread_ == thread);
430 ASSERT(thread->isolate()->scheduled_mutator_thread_ == thread);
431 DEBUG_ASSERT(!thread->IsAnyReusableHandleScopeActive());
432
433 auto isolate = thread->isolate();
434 auto group = thread->isolate_group();
435
436 thread->set_vm_tag(isolate->is_runnable() ? VMTag::kIdleTagId
437 : VMTag::kLoadWaitTagId);
438 if (thread->sticky_error() != Error::null()) {
439 ASSERT(isolate->sticky_error_ == Error::null());
440 isolate->sticky_error_ = thread->StealStickyError();
441 }
442
443 isolate->scheduled_mutator_thread_ = nullptr;
444
445 // Right now we keep the [Thread] object across the isolate's lifetime. This
446 // makes entering/exiting quite fast as it mainly boils down to safepoint
447 // transitions. Though any operation that walks over all active threads will
448 // see this thread as well (e.g. safepoint operations).
449 const bool is_nested_exit = thread->top_exit_frame_info() != 0;
450 if (ShouldSuspend(isolate_shutdown, thread)) {
451 const auto tag =
452 isolate->is_runnable() ? VMTag::kIdleTagId : VMTag::kLoadWaitTagId;
453 SuspendDartMutatorThreadInternal(thread, tag);
454 {
455 // Descheduled isolates are reloadable (if nothing else prevents it).
456 RawReloadParticipationScope enable_reload(thread);
457 thread->EnterSafepoint();
458 }
459 thread->set_execution_state(Thread::kThreadInNative);
460 } else {
461 thread->ResetDartMutatorState(isolate);
462 thread->ResetMutatorState();
463 thread->ResetState();
464 SuspendDartMutatorThreadInternal(thread, VMTag::kInvalidTagId);
465 FreeActiveThread(thread, /*bypass_safepoint=*/false);
466 }
467
468 // To let VM's thread pool (if we run on it) know that this thread is no
469 // longer occupying a mutator (increases its max size).
470 ASSERT(!(isolate_shutdown && is_nested_exit));
471 if (!(is_nested_exit && thread->OwnsSafepoint())) {
472 group->DecreaseMutatorCount(isolate, is_nested_exit);
473 }
474}
475
476bool Thread::EnterIsolateGroupAsHelper(IsolateGroup* isolate_group,
477 TaskKind kind,
478 bool bypass_safepoint) {
479 Thread* thread = AddActiveThread(isolate_group, nullptr,
480 /*is_dart_mutator=*/false, bypass_safepoint);
481 if (thread != nullptr) {
482 thread->SetupState(kind);
483 // Even if [bypass_safepoint] is true, a thread may need mutator state (e.g.
484 // parallel scavenger threads write to the [Thread]s storebuffer)
485 thread->SetupMutatorState(kind);
486 ResumeThreadInternal(thread);
487
489 return true;
490 }
491 return false;
492}
493
494void Thread::ExitIsolateGroupAsHelper(bool bypass_safepoint) {
495 Thread* thread = Thread::Current();
497
498 // Even if [bypass_safepoint] is true, a thread may need mutator state (e.g.
499 // parallel scavenger threads write to the [Thread]s storebuffer)
500 thread->ResetMutatorState();
501 thread->ResetState();
502 SuspendThreadInternal(thread, VMTag::kInvalidTagId);
503 FreeActiveThread(thread, bypass_safepoint);
504}
505
506bool Thread::EnterIsolateGroupAsNonMutator(IsolateGroup* isolate_group,
507 TaskKind kind) {
508 Thread* thread =
509 AddActiveThread(isolate_group, nullptr,
510 /*is_dart_mutator=*/false, /*bypass_safepoint=*/true);
511 if (thread != nullptr) {
512 thread->SetupState(kind);
513 ResumeThreadInternal(thread);
514
516 return true;
517 }
518 return false;
519}
520
521void Thread::ExitIsolateGroupAsNonMutator() {
522 Thread* thread = Thread::Current();
523 ASSERT(thread != nullptr);
525
526 thread->ResetState();
527 SuspendThreadInternal(thread, VMTag::kInvalidTagId);
528 FreeActiveThread(thread, /*bypass_safepoint=*/true);
529}
530
531void Thread::ResumeDartMutatorThreadInternal(Thread* thread) {
532 ResumeThreadInternal(thread);
533 if (Dart::vm_isolate() != nullptr &&
534 thread->isolate() != Dart::vm_isolate()) {
535#if defined(USING_SIMULATOR)
536 thread->SetStackLimit(Simulator::Current()->overflow_stack_limit());
537#else
538 thread->SetStackLimit(OSThread::Current()->overflow_stack_limit());
539#endif
540 }
541}
542
543void Thread::SuspendDartMutatorThreadInternal(Thread* thread,
544 VMTag::VMTagId tag) {
545 thread->ClearStackLimit();
546 SuspendThreadInternal(thread, tag);
547}
548
549void Thread::ResumeThreadInternal(Thread* thread) {
550 ASSERT(!thread->IsAtSafepoint());
551 ASSERT(thread->isolate_group() != nullptr);
552 ASSERT(thread->execution_state() == Thread::kThreadInNative);
553 ASSERT(thread->vm_tag() == VMTag::kInvalidTagId ||
554 thread->vm_tag() == VMTag::kIdleTagId ||
555 thread->vm_tag() == VMTag::kLoadWaitTagId);
556
557 thread->set_vm_tag(VMTag::kVMTagId);
558 thread->set_execution_state(Thread::kThreadInVM);
559
560 OSThread* os_thread = OSThread::Current();
561 thread->set_os_thread(os_thread);
562 os_thread->set_thread(thread);
563 Thread::SetCurrent(thread);
564 NOT_IN_PRODUCT(os_thread->EnableThreadInterrupts());
565
566#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
567 thread->heap_sampler().Initialize();
568#endif
569}
570
571void Thread::SuspendThreadInternal(Thread* thread, VMTag::VMTagId tag) {
572 thread->heap()->new_space()->AbandonRemainingTLAB(thread);
573
574#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
575 thread->heap_sampler().Cleanup();
576#endif
577
578 OSThread* os_thread = thread->os_thread();
579 ASSERT(os_thread != nullptr);
580 NOT_IN_PRODUCT(os_thread->DisableThreadInterrupts());
581 os_thread->set_thread(nullptr);
582 OSThread::SetCurrent(os_thread);
583 thread->set_os_thread(nullptr);
584
585 thread->set_vm_tag(tag);
586}
587
588Thread* Thread::AddActiveThread(IsolateGroup* group,
589 Isolate* isolate,
590 bool is_dart_mutator,
591 bool bypass_safepoint) {
592 // NOTE: We cannot just use `Dart::vm_isolate() == this` here, since during
593 // VM startup it might not have been set at this point.
594 const bool is_vm_isolate =
595 Dart::vm_isolate() == nullptr || Dart::vm_isolate() == isolate;
596
597 auto thread_registry = group->thread_registry();
598 auto safepoint_handler = group->safepoint_handler();
599 MonitorLocker ml(thread_registry->threads_lock());
600
601 if (!bypass_safepoint) {
602 while (safepoint_handler->AnySafepointInProgressLocked()) {
603 ml.Wait();
604 }
605 }
606
607 Thread* thread = thread_registry->GetFreeThreadLocked(is_vm_isolate);
608 thread->AssertEmptyThreadInvariants();
609
610 thread->isolate_ = isolate; // May be nullptr.
611 thread->isolate_group_ = group;
612 thread->scheduled_dart_mutator_isolate_ = isolate;
613
614 // We start at being at-safepoint (in case any safepoint operation is
615 // in-progress, we'll check into it once leaving the safepoint)
616 thread->set_safepoint_state(Thread::SetBypassSafepoints(bypass_safepoint, 0));
617 thread->runtime_call_deopt_ability_ = RuntimeCallDeoptAbility::kCanLazyDeopt;
618 ASSERT(!thread->IsAtSafepoint());
619
620 ASSERT(thread->saved_stack_limit_ == OSThread::kInvalidStackLimit);
621 return thread;
622}
623
624void Thread::FreeActiveThread(Thread* thread, bool bypass_safepoint) {
625 ASSERT(!thread->HasActiveState());
626 ASSERT(!thread->IsAtSafepoint());
627
628 if (!bypass_safepoint) {
629 // GC helper threads don't have any handle state to clear, and the GC might
630 // be currently visiting thread state. If this is not a GC helper, the GC
631 // can't be visiting thread state because it's waiting for this thread to
632 // check in.
633 thread->ClearReusableHandles();
634 }
635
636 auto group = thread->isolate_group_;
637 auto thread_registry = group->thread_registry();
638
639 MonitorLocker ml(thread_registry->threads_lock());
640
641 if (!bypass_safepoint) {
642 // There may be a pending safepoint operation on another thread that is
643 // waiting for us to check-in.
644 //
645 // Though notice we're holding the thread registry's threads_lock, which
646 // means if this other thread runs code as part of a safepoint operation it
647 // will still wait for us to finish here before it tries to iterate the
648 // active mutators (e.g. when GC starts/stops incremental marking).
649 //
650 // The thread is empty and the corresponding isolate (if any) is therefore
651 // at event-loop boundary (or shutting down). We participate in reload in
652 // those scenarios.
653 //
654 // (It may be that an active [RELOAD_OPERATION_SCOPE] sent an OOB message to
655 // this isolate but it didn't handle the OOB due to shutting down, so we'll
656 // still have to notify the reloading thread that it's ok to continue)
657 RawReloadParticipationScope enable_reload(thread);
658 thread->EnterSafepoint();
659 }
660
661 thread->isolate_ = nullptr;
662 thread->isolate_group_ = nullptr;
663 thread->scheduled_dart_mutator_isolate_ = nullptr;
664 thread->set_execution_state(Thread::kThreadInNative);
665 thread->stack_limit_.store(0);
666 thread->safepoint_state_ = 0;
667
668 thread->AssertEmptyThreadInvariants();
669 thread_registry->ReturnThreadLocked(thread);
670}
671
672void Thread::ReleaseStoreBuffer() {
673 ASSERT(IsAtSafepoint() || OwnsSafepoint());
674 if (store_buffer_block_ == nullptr || store_buffer_block_->IsEmpty()) {
675 return; // Nothing to release.
676 }
677 // Prevent scheduling another GC by ignoring the threshold.
678 StoreBufferRelease(StoreBuffer::kIgnoreThreshold);
679 // Make sure to get an *empty* block; the isolate needs all entries
680 // at GC time.
681 // TODO(koda): Replace with an epilogue (PrepareAfterGC) that acquires.
682 store_buffer_block_ = isolate_group()->store_buffer()->PopEmptyBlock();
683}
684
685void Thread::SetStackLimit(uword limit) {
686 // The thread setting the stack limit is not necessarily the thread whose
687 // stack limit is being set.
688 MonitorLocker ml(&thread_lock_);
689 if (!HasScheduledInterrupts()) {
690 // No interrupt pending, set stack_limit_ too.
691 stack_limit_.store(limit);
692 }
693 saved_stack_limit_ = limit;
694}
695
696void Thread::ClearStackLimit() {
697 SetStackLimit(OSThread::kInvalidStackLimit);
698}
699
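// Interrupts are delivered by swapping stack_limit_ to a special marker value
// (kInterruptStackLimit) whose low bits (kInterruptsMask) carry the pending
// interrupt flags, so the stack-overflow check in generated code also trips
// on pending interrupts and ends up in HandleInterrupts(). IsInterruptLimit()
// tests whether a limit value is that marker, with any interrupt bits set.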
700static bool IsInterruptLimit(uword limit) {
701 return (limit & ~Thread::kInterruptsMask) ==
702 (kInterruptStackLimit & ~Thread::kInterruptsMask);
703}
704
705void Thread::ScheduleInterrupts(uword interrupt_bits) {
706 ASSERT((interrupt_bits & ~kInterruptsMask) == 0); // Must fit in mask.
707
708 uword old_limit = stack_limit_.load();
709 uword new_limit;
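  // Compare-and-swap loop: if an interrupt limit is already installed, OR in
  // the new bits; otherwise replace the real stack limit with the interrupt
  // marker carrying the requested bits. Retries until the swap succeeds, e.g.
  // when another thread updates stack_limit_ concurrently.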
710 do {
711 if (IsInterruptLimit(old_limit)) {
712 new_limit = old_limit | interrupt_bits;
713 } else {
714 new_limit = (kInterruptStackLimit & ~kInterruptsMask) | interrupt_bits;
715 }
716 } while (!stack_limit_.compare_exchange_weak(old_limit, new_limit));
717}
718
719uword Thread::GetAndClearInterrupts() {
720 uword interrupt_bits = 0;
721 uword old_limit = stack_limit_.load();
722 uword new_limit = saved_stack_limit_;
723 do {
724 if (IsInterruptLimit(old_limit)) {
725 interrupt_bits = interrupt_bits | (old_limit & kInterruptsMask);
726 } else {
727 return interrupt_bits;
728 }
729 } while (!stack_limit_.compare_exchange_weak(old_limit, new_limit));
730
731 return interrupt_bits;
732}
733
734ErrorPtr Thread::HandleInterrupts() {
735 uword interrupt_bits = GetAndClearInterrupts();
736 if ((interrupt_bits & kVMInterrupt) != 0) {
737 CheckForSafepoint();
738 if (isolate_group()->store_buffer()->Overflowed()) {
739 // Evacuate: If the popular store buffer targets are copied instead of
740 // promoted, the store buffer won't shrink and a second scavenge will
741 // occur that does promote them.
742 heap()->CollectGarbage(this, GCType::kEvacuate, GCReason::kStoreBuffer);
743 }
744 heap()->CheckFinalizeMarking(this);
745
746#if !defined(PRODUCT)
747 if (isolate()->TakeHasCompletedBlocks()) {
748 Profiler::ProcessCompletedBlocks(isolate());
749 }
750#endif // !defined(PRODUCT)
751
752#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
753 HeapProfileSampler& sampler = heap_sampler();
754 if (sampler.ShouldSetThreadSamplingInterval()) {
755 sampler.SetThreadSamplingInterval();
756 }
757 if (sampler.ShouldUpdateThreadEnable()) {
758 sampler.UpdateThreadEnable();
759 }
760#endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
761 }
762 if ((interrupt_bits & kMessageInterrupt) != 0) {
763 MessageHandler::MessageStatus status =
764 isolate()->message_handler()->HandleOOBMessages();
765 if (status != MessageHandler::kOK) {
766 // False result from HandleOOBMessages signals that the isolate should
767 // be terminating.
768 if (FLAG_trace_isolates) {
769 OS::PrintErr(
770 "[!] Terminating isolate due to OOB message:\n"
771 "\tisolate: %s\n",
772 isolate()->name());
773 }
774 return StealStickyError();
775 }
776 }
777 return Error::null();
778}
779
780uword Thread::GetAndClearStackOverflowFlags() {
781 uword stack_overflow_flags = stack_overflow_flags_;
782 stack_overflow_flags_ = 0;
783 return stack_overflow_flags;
784}
785
786void Thread::StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy) {
787 StoreBufferRelease(policy);
788 StoreBufferAcquire();
789}
790
791void Thread::StoreBufferAddObject(ObjectPtr obj) {
792 ASSERT(this == Thread::Current());
793 store_buffer_block_->Push(obj);
794 if (store_buffer_block_->IsFull()) {
795 StoreBufferBlockProcess(StoreBuffer::kCheckThreshold);
796 }
797}
798
799void Thread::StoreBufferAddObjectGC(ObjectPtr obj) {
800 store_buffer_block_->Push(obj);
801 if (store_buffer_block_->IsFull()) {
802 StoreBufferBlockProcess(StoreBuffer::kIgnoreThreshold);
803 }
804}
805
806void Thread::StoreBufferRelease(StoreBuffer::ThresholdPolicy policy) {
807 StoreBufferBlock* block = store_buffer_block_;
808 store_buffer_block_ = nullptr;
809 isolate_group()->store_buffer()->PushBlock(block, policy);
810}
811
812void Thread::StoreBufferAcquire() {
813 store_buffer_block_ = isolate_group()->store_buffer()->PopNonFullBlock();
814}
815
816void Thread::MarkingStackBlockProcess() {
817 MarkingStackRelease();
818 MarkingStackAcquire();
819}
820
821void Thread::DeferredMarkingStackBlockProcess() {
822 DeferredMarkingStackRelease();
823 DeferredMarkingStackAcquire();
824}
825
826void Thread::MarkingStackAddObject(ObjectPtr obj) {
827 marking_stack_block_->Push(obj);
828 if (marking_stack_block_->IsFull()) {
829 MarkingStackBlockProcess();
830 }
831}
832
833void Thread::DeferredMarkingStackAddObject(ObjectPtr obj) {
834 deferred_marking_stack_block_->Push(obj);
835 if (deferred_marking_stack_block_->IsFull()) {
836 DeferredMarkingStackBlockProcess();
837 }
838}
839
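// Acquiring a marking stack block also widens this thread's
// write_barrier_mask_ to include the incremental barrier bit while concurrent
// marking is in progress; releasing the block narrows it back to the purely
// generational barrier (see MarkingStackRelease/MarkingStackAcquire below).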
840void Thread::MarkingStackRelease() {
841 MarkingStackBlock* block = marking_stack_block_;
842 marking_stack_block_ = nullptr;
843 write_barrier_mask_ = UntaggedObject::kGenerationalBarrierMask;
844 isolate_group()->marking_stack()->PushBlock(block);
845}
846
847void Thread::MarkingStackAcquire() {
848 marking_stack_block_ = isolate_group()->marking_stack()->PopEmptyBlock();
849 write_barrier_mask_ = UntaggedObject::kGenerationalBarrierMask |
850 UntaggedObject::kIncrementalBarrierMask;
851}
852
853void Thread::MarkingStackFlush() {
854 isolate_group()->marking_stack()->PushBlock(marking_stack_block_);
855 marking_stack_block_ = isolate_group()->marking_stack()->PopEmptyBlock();
856}
857
858void Thread::DeferredMarkingStackRelease() {
859 MarkingStackBlock* block = deferred_marking_stack_block_;
860 deferred_marking_stack_block_ = nullptr;
861 isolate_group()->deferred_marking_stack()->PushBlock(block);
862}
863
864void Thread::DeferredMarkingStackAcquire() {
865 deferred_marking_stack_block_ =
866 isolate_group()->deferred_marking_stack()->PopEmptyBlock();
867}
868
869void Thread::DeferredMarkingStackFlush() {
870 isolate_group()->deferred_marking_stack()->PushBlock(
871 deferred_marking_stack_block_);
872 deferred_marking_stack_block_ =
873 isolate_group()->deferred_marking_stack()->PopEmptyBlock();
874}
875
876Heap* Thread::heap() const {
877 return isolate_group_->heap();
878}
879
880bool Thread::IsExecutingDartCode() const {
881 return (top_exit_frame_info() == 0) && VMTag::IsDartTag(vm_tag());
882}
883
884bool Thread::HasExitedDartCode() const {
885 return (top_exit_frame_info() != 0) && !VMTag::IsDartTag(vm_tag());
886}
887
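// Carves one handle out of the thread's reusable_handles_ block and
// initializes it to null; invoked through the REUSABLE_HANDLE_ALLOCATION
// macro in InitVMConstants() above.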
888template <class C>
889C* Thread::AllocateReusableHandle() {
890 C* handle = reinterpret_cast<C*>(reusable_handles_.AllocateScopedHandle());
891 C::initializeHandle(handle, C::null());
892 return handle;
893}
894
895void Thread::ClearReusableHandles() {
896#define CLEAR_REUSABLE_HANDLE(object) *object##_handle_ = object::null();
897 REUSABLE_HANDLE_LIST(CLEAR_REUSABLE_HANDLE)
898#undef CLEAR_REUSABLE_HANDLE
899}
900
901void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor,
902 ValidationPolicy validation_policy) {
903 ASSERT(visitor != nullptr);
904
905 if (zone() != nullptr) {
906 zone()->VisitObjectPointers(visitor);
907 }
908
909 // Visit objects in thread specific handles area.
910 reusable_handles_.VisitObjectPointers(visitor);
911
912 visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&global_object_pool_));
913 visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&active_exception_));
914 visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&active_stacktrace_));
915 visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&sticky_error_));
916
917 // Visit the api local scope as it has all the api local handles.
918 ApiLocalScope* scope = api_top_scope_;
919 while (scope != nullptr) {
920 scope->local_handles()->VisitObjectPointers(visitor);
921 scope = scope->previous();
922 }
923
924 // Only the mutator thread can run Dart code.
925 if (IsDartMutatorThread()) {
926 // The MarkTask, which calls this method, can run on a different thread. We
927 // therefore assume the mutator is at a safepoint and we can iterate its
928 // stack.
929 // TODO(vm-team): It would be beneficial to be able to ask the mutator
930 // thread whether it is in fact blocked at the moment (at a "safepoint") so
931 // we can safely iterate its stack.
932 //
933 // Unfortunately we cannot use `this->IsAtSafepoint()` here because that
934 // will return `false` even though the mutator thread is waiting for mark
935 // tasks (which iterate its stack) to finish.
936 const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
937 StackFrameIterator::kAllowCrossThreadIteration;
938
939 // Iterate over all the stack frames and visit objects on the stack.
940 StackFrameIterator frames_iterator(top_exit_frame_info(), validation_policy,
941 this, cross_thread_policy);
942 StackFrame* frame = frames_iterator.NextFrame();
943 visitor->set_gc_root_type("frame");
944 while (frame != nullptr) {
945 frame->VisitObjectPointers(visitor);
946 frame = frames_iterator.NextFrame();
947 }
948 visitor->clear_gc_root_type();
949 } else {
950 // We are not on the mutator thread.
951 RELEASE_ASSERT(top_exit_frame_info() == 0);
952 }
953}
954
955class RestoreWriteBarrierInvariantVisitor : public ObjectPointerVisitor {
956 public:
957 RestoreWriteBarrierInvariantVisitor(IsolateGroup* group,
958 Thread* thread,
959 Thread::RestoreWriteBarrierInvariantOp op)
960 : ObjectPointerVisitor(group),
961 thread_(thread),
962 current_(Thread::Current()),
963 op_(op) {}
964
965 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
966 for (; first != last + 1; first++) {
967 ObjectPtr obj = *first;
968 // Stores into new-space objects don't need a write barrier.
969 if (obj->IsImmediateObject()) continue;
970
971 // To avoid adding too much work into the remembered set, skip large
972 // arrays. Write barrier elimination will not remove the barrier
973 // if we can trigger GC between array allocation and store.
974 if (obj->GetClassId() == kArrayCid) {
975 const auto length = Smi::Value(Array::RawCast(obj)->untag()->length());
976 if (length > Array::kMaxLengthForWriteBarrierElimination) {
977 continue;
978 }
979 }
980
981 // Dart code won't store into VM-internal objects except Contexts and
982 // UnhandledExceptions. This assumption is checked by an assertion in
983 // WriteBarrierElimination::UpdateVectorForBlock.
984 if (!obj->IsDartInstance() && !obj->IsContext() &&
985 !obj->IsUnhandledException())
986 continue;
987
988 // Dart code won't store into canonical instances.
989 if (obj->untag()->IsCanonical()) continue;
990
991 // Objects in the VM isolate heap are immutable and won't be
992 // stored into. Check this condition last because there's no bit
993 // in the header for it.
994 if (obj->untag()->InVMIsolateHeap()) continue;
995
996 switch (op_) {
997 case Thread::RestoreWriteBarrierInvariantOp::kAddToRememberedSet:
998 if (obj->IsOldObject()) {
999 obj->untag()->EnsureInRememberedSet(current_);
1000 }
1001 if (current_->is_marking()) {
1002 current_->DeferredMarkingStackAddObject(obj);
1003 }
1004 break;
1005 case Thread::RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack:
1006 // Re-scan obj when finalizing marking.
1007 ASSERT(current_->is_marking());
1008 current_->DeferredMarkingStackAddObject(obj);
1009 break;
1010 }
1011 }
1012 }
1013
1014#if defined(DART_COMPRESSED_POINTERS)
1015 void VisitCompressedPointers(uword heap_base,
1016 CompressedObjectPtr* first,
1017 CompressedObjectPtr* last) override {
1018 UNREACHABLE(); // Stack slots are not compressed.
1019 }
1020#endif
1021
1022 private:
1023 Thread* const thread_;
1024 Thread* const current_;
1025 Thread::RestoreWriteBarrierInvariantOp op_;
1026};
1027
1028// Write barrier elimination assumes that all live temporaries will be
1029// in the remembered set after a scavenge triggered by a non-Dart-call
1030// instruction (see Instruction::CanCallDart()), and additionally they will be
1031// in the deferred marking stack if concurrent marking started. Specifically,
1032// this includes any instruction which will always create an exit frame
1033// below the current frame before any other Dart frames.
1034//
1035// Therefore, to support this assumption, we scan the stack after a scavenge
1036// or when concurrent marking begins and add all live temporaries in
1037// Dart frames preceding an exit frame to the store buffer or deferred
1038// marking stack.
1039void Thread::RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op) {
1040 ASSERT(IsAtSafepoint() || OwnsGCSafepoint() || this == Thread::Current());
1041
1042 const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
1043 StackFrameIterator::kAllowCrossThreadIteration;
1044 StackFrameIterator frames_iterator(top_exit_frame_info(),
1045 ValidationPolicy::kDontValidateFrames,
1046 this, cross_thread_policy);
1047 RestoreWriteBarrierInvariantVisitor visitor(isolate_group(), this, op);
1048 ObjectStore* object_store = isolate_group()->object_store();
1049 bool scan_next_dart_frame = false;
1050 for (StackFrame* frame = frames_iterator.NextFrame(); frame != nullptr;
1051 frame = frames_iterator.NextFrame()) {
1052 if (frame->IsExitFrame()) {
1053 scan_next_dart_frame = true;
1054 } else if (frame->IsEntryFrame()) {
1055 /* Continue searching. */
1056 } else if (frame->IsStubFrame()) {
1057 const uword pc = frame->pc();
1058 if (Code::ContainsInstructionAt(
1059 object_store->init_late_static_field_stub(), pc) ||
1060 Code::ContainsInstructionAt(
1061 object_store->init_late_final_static_field_stub(), pc) ||
1062 Code::ContainsInstructionAt(
1063 object_store->init_late_instance_field_stub(), pc) ||
1064 Code::ContainsInstructionAt(
1065 object_store->init_late_final_instance_field_stub(), pc)) {
1066 scan_next_dart_frame = true;
1067 }
1068 } else {
1069 ASSERT(frame->IsDartFrame(/*validate=*/false));
1070 if (scan_next_dart_frame) {
1071 frame->VisitObjectPointers(&visitor);
1072 }
1073 scan_next_dart_frame = false;
1074 }
1075 }
1076}
1077
1078void Thread::DeferredMarkLiveTemporaries() {
1079 RestoreWriteBarrierInvariant(
1080 RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack);
1081}
1082
1083void Thread::RememberLiveTemporaries() {
1084 RestoreWriteBarrierInvariant(
1085 RestoreWriteBarrierInvariantOp::kAddToRememberedSet);
1086}
1087
1088bool Thread::CanLoadFromThread(const Object& object) {
1089 // In order to allow us to use assembler helper routines with non-[Code]
1090 // objects *before* stubs are initialized, we only loop over the stubs if the
1091 // [object] is in fact a [Code] object.
1092 if (object.IsCode()) {
1093#define CHECK_OBJECT(type_name, member_name, expr, default_init_value) \
1094 if (object.ptr() == expr) { \
1095 return true; \
1096 }
1097 CACHED_VM_STUBS_LIST(CHECK_OBJECT)
1098#undef CHECK_OBJECT
1099 }
1100
1101 // For non-[Code] objects we check if the object equals any of the cached
1102 // non-stub entries.
1103#define CHECK_OBJECT(type_name, member_name, expr, default_init_value) \
1104 if (object.ptr() == expr) { \
1105 return true; \
1106 }
1107 CACHED_NON_VM_STUB_LIST(CHECK_OBJECT)
1108#undef CHECK_OBJECT
1109 return false;
1110}
1111
1112intptr_t Thread::OffsetFromThread(const Object& object) {
1113 // In order to allow us to use assembler helper routines with non-[Code]
1114 // objects *before* stubs are initialized, we only loop over the stubs if the
1115 // [object] is in fact a [Code] object.
1116 if (object.IsCode()) {
1117#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \
1118 ASSERT((expr)->untag()->InVMIsolateHeap()); \
1119 if (object.ptr() == expr) { \
1120 return Thread::member_name##offset(); \
1121 }
1122 CACHED_VM_STUBS_LIST(COMPUTE_OFFSET)
1123#undef COMPUTE_OFFSET
1124 }
1125
1126 // For non-[Code] objects we check if the object equals any of the cached
1127 // non-stub entries.
1128#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \
1129 if (object.ptr() == expr) { \
1130 return Thread::member_name##offset(); \
1131 }
1132 CACHED_NON_VM_STUB_LIST(COMPUTE_OFFSET)
1133#undef COMPUTE_OFFSET
1134
1135 UNREACHABLE();
1136 return -1;
1137}
1138
1139bool Thread::ObjectAtOffset(intptr_t offset, Object* object) {
1140 if (Isolate::Current() == Dart::vm_isolate()) {
1141 // --disassemble-stubs runs before all the references through
1142 // thread have targets
1143 return false;
1144 }
1145
1146#define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \
1147 if (Thread::member_name##offset() == offset) { \
1148 *object = expr; \
1149 return true; \
1150 }
1151 CACHED_VM_OBJECTS_LIST(COMPUTE_OFFSET)
1152#undef COMPUTE_OFFSET
1153 return false;
1154}
1155
1156intptr_t Thread::OffsetFromThread(const RuntimeEntry* runtime_entry) {
1157#define COMPUTE_OFFSET(name) \
1158 if (runtime_entry == &k##name##RuntimeEntry) { \
1159 return Thread::name##_entry_point_offset(); \
1160 }
1161 RUNTIME_ENTRY_LIST(COMPUTE_OFFSET)
1162#undef COMPUTE_OFFSET
1163
1164#define COMPUTE_OFFSET(returntype, name, ...) \
1165 if (runtime_entry == &k##name##RuntimeEntry) { \
1166 return Thread::name##_entry_point_offset(); \
1167 }
1168 LEAF_RUNTIME_ENTRY_LIST(COMPUTE_OFFSET)
1169#undef COMPUTE_OFFSET
1170
1171 UNREACHABLE();
1172 return -1;
1173}
1174
1175#if defined(DEBUG)
1176bool Thread::TopErrorHandlerIsSetJump() const {
1177 if (long_jump_base() == nullptr) return false;
1178 if (top_exit_frame_info_ == 0) return true;
1179#if defined(USING_SIMULATOR) || defined(USING_SAFE_STACK)
1180 // False positives: simulator stack and native stack are unordered.
1181 return true;
1182#else
1183 return reinterpret_cast<uword>(long_jump_base()) < top_exit_frame_info_;
1184#endif
1185}
1186
1187bool Thread::TopErrorHandlerIsExitFrame() const {
1188 if (top_exit_frame_info_ == 0) return false;
1189 if (long_jump_base() == nullptr) return true;
1190#if defined(USING_SIMULATOR) || defined(USING_SAFE_STACK)
1191 // False positives: simulator stack and native stack are unordered.
1192 return true;
1193#else
1194 return top_exit_frame_info_ < reinterpret_cast<uword>(long_jump_base());
1195#endif
1196}
1197#endif // defined(DEBUG)
1198
1199bool Thread::IsValidHandle(Dart_Handle object) const {
1200 return IsValidLocalHandle(object) || IsValidZoneHandle(object) ||
1201 IsValidScopedHandle(object);
1202}
1203
1204bool Thread::IsValidLocalHandle(Dart_Handle object) const {
1205 ApiLocalScope* scope = api_top_scope_;
1206 while (scope != nullptr) {
1207 if (scope->local_handles()->IsValidHandle(object)) {
1208 return true;
1209 }
1210 scope = scope->previous();
1211 }
1212 return false;
1213}
1214
1215intptr_t Thread::CountLocalHandles() const {
1216 intptr_t total = 0;
1217 ApiLocalScope* scope = api_top_scope_;
1218 while (scope != nullptr) {
1219 total += scope->local_handles()->CountHandles();
1220 scope = scope->previous();
1221 }
1222 return total;
1223}
1224
1225int Thread::ZoneSizeInBytes() const {
1226 int total = 0;
1227 ApiLocalScope* scope = api_top_scope_;
1228 while (scope != nullptr) {
1229 total += scope->zone()->SizeInBytes();
1230 scope = scope->previous();
1231 }
1232 return total;
1233}
1234
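// EnterApiScope/ExitApiScope keep one spare ApiLocalScope cached in
// api_reusable_scope_ so the common push/pop pattern of Dart API calls does
// not allocate; only nested scopes fall back to new/delete.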
1235void Thread::EnterApiScope() {
1236 ASSERT(MayAllocateHandles());
1237 ApiLocalScope* new_scope = api_reusable_scope();
1238 if (new_scope == nullptr) {
1239 new_scope = new ApiLocalScope(api_top_scope(), top_exit_frame_info());
1240 ASSERT(new_scope != nullptr);
1241 } else {
1242 new_scope->Reinit(this, api_top_scope(), top_exit_frame_info());
1243 set_api_reusable_scope(nullptr);
1244 }
1245 set_api_top_scope(new_scope); // New scope is now the top scope.
1246}
1247
1248void Thread::ExitApiScope() {
1249 ASSERT(MayAllocateHandles());
1250 ApiLocalScope* scope = api_top_scope();
1251 ApiLocalScope* reusable_scope = api_reusable_scope();
1252 set_api_top_scope(scope->previous()); // Reset top scope to previous.
1253 if (reusable_scope == nullptr) {
1254 scope->Reset(this); // Reset the old scope which we just exited.
1255 set_api_reusable_scope(scope);
1256 } else {
1257 ASSERT(reusable_scope != scope);
1258 delete scope;
1259 }
1260}
1261
1262void Thread::UnwindScopes(uword stack_marker) {
1263 // Unwind all scopes using the same stack_marker, i.e. all scopes allocated
1264 // under the same top_exit_frame_info.
1265 ApiLocalScope* scope = api_top_scope_;
1266 while (scope != nullptr && scope->stack_marker() != 0 &&
1267 scope->stack_marker() == stack_marker) {
1268 api_top_scope_ = scope->previous();
1269 delete scope;
1270 scope = api_top_scope_;
1271 }
1272}
1273
1274void Thread::EnterSafepointUsingLock() {
1275 isolate_group()->safepoint_handler()->EnterSafepointUsingLock(this);
1276}
1277
1278void Thread::ExitSafepointUsingLock() {
1279 isolate_group()->safepoint_handler()->ExitSafepointUsingLock(this);
1280}
1281
1282void Thread::BlockForSafepoint() {
1283 isolate_group()->safepoint_handler()->BlockForSafepoint(this);
1284}
1285
1286bool Thread::OwnsGCSafepoint() const {
1287 return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
1288 this) <= SafepointLevel::kGCAndDeopt;
1289}
1290
1291bool Thread::OwnsDeoptSafepoint() const {
1292 return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
1293 this) == SafepointLevel::kGCAndDeopt;
1294}
1295
1296bool Thread::OwnsReloadSafepoint() const {
1297 return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
1298 this) <= SafepointLevel::kGCAndDeoptAndReload;
1299}
1300
1301bool Thread::OwnsSafepoint() const {
1302 return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
1303 this) != SafepointLevel::kNoSafepoint;
1304}
1305
1306bool Thread::CanAcquireSafepointLocks() const {
1307 // A thread may acquire locks and then enter a safepoint operation (e.g.
1308 // holding program lock, allocating objects which triggers GC).
1309 //
1310 // So if this code is called inside safepoint operation, we generally have to
1311 // assume other threads may hold locks and are blocked on the safepoint,
1312 // meaning we cannot hold safepoint and acquire locks (deadlock!).
1313 //
1314 // Though if we own a reload safepoint operation it means all other mutators
1315 // are blocked in very specific places, where we know no locks are held. As
1316 // such we allow the current thread to acquire locks.
1317 //
1318 // Example: We own reload safepoint operation, load kernel, which allocates
1319 // symbols, where the symbol implementation acquires the symbol lock (we know
1320 // other mutators at reload safepoint do not hold symbol lock).
1321 return isolate_group()->safepoint_handler()->InnermostSafepointOperation(
1322 this) >= SafepointLevel::kGCAndDeoptAndReload;
1323}
1324
1325void Thread::SetupState(TaskKind kind) {
1326 task_kind_ = kind;
1327}
1328
1329void Thread::ResetState() {
1330 task_kind_ = kUnknownTask;
1331 vm_tag_ = VMTag::kInvalidTagId;
1332}
1333
1334void Thread::SetupMutatorState(TaskKind kind) {
1335 ASSERT(store_buffer_block_ == nullptr);
1336
1337 if (isolate_group()->marking_stack() != nullptr) {
1338 // Concurrent mark in progress. Enable barrier for this thread.
1339 MarkingStackAcquire();
1340 DeferredMarkingStackAcquire();
1341 }
1342
1343 // TODO(koda): Use StoreBufferAcquire once we properly flush
1344 // before Scavenge.
1345 if (kind == kMutatorTask) {
1346 StoreBufferAcquire();
1347 } else {
1348 store_buffer_block_ = isolate_group()->store_buffer()->PopEmptyBlock();
1349 }
1350}
1351
1352void Thread::ResetMutatorState() {
1353 ASSERT(execution_state() == Thread::kThreadInVM);
1354 ASSERT(store_buffer_block_ != nullptr);
1355
1356 if (is_marking()) {
1357 MarkingStackRelease();
1358 DeferredMarkingStackRelease();
1359 }
1360 StoreBufferRelease();
1361}
1362
1363void Thread::SetupDartMutatorState(Isolate* isolate) {
1364 field_table_values_ = isolate->field_table_->table();
1365 isolate->mutator_thread_ = this;
1366
1367 SetupDartMutatorStateDependingOnSnapshot(isolate->group());
1368}
1369
1370void Thread::SetupDartMutatorStateDependingOnSnapshot(IsolateGroup* group) {
1371 // The snapshot may or may not have been read at this point (on isolate group
1372 // creation, the first isolate is entered for the first time before the
1373 // snapshot is read).
1374 //
1375 // So we call this code explicitly after snapshot reading time and whenever we
1376 // enter an isolate with a new thread object.
1377#if defined(DART_PRECOMPILED_RUNTIME)
1378 auto object_store = group->object_store();
1379 if (object_store != nullptr) {
1380 global_object_pool_ = object_store->global_object_pool();
1381
1382 auto dispatch_table = group->dispatch_table();
1383 if (dispatch_table != nullptr) {
1384 dispatch_table_array_ = dispatch_table->ArrayOrigin();
1385 }
1386#define INIT_ENTRY_POINT(name) \
1387 if (object_store->name() != Object::null()) { \
1388 name##_entry_point_ = Function::EntryPointOf(object_store->name()); \
1389 }
1390 CACHED_FUNCTION_ENTRY_POINTS_LIST(INIT_ENTRY_POINT)
1391#undef INIT_ENTRY_POINT
1392 }
1393#endif // defined(DART_PRECOMPILED_RUNTIME)
1394}
1395
1396void Thread::ResetDartMutatorState(Isolate* isolate) {
1397 ASSERT(execution_state() == Thread::kThreadInVM);
1398
1399 isolate->mutator_thread_ = nullptr;
1400 is_unwind_in_progress_ = false;
1401
1402 field_table_values_ = nullptr;
1403 ONLY_IN_PRECOMPILED(global_object_pool_ = ObjectPool::null());
1404 ONLY_IN_PRECOMPILED(dispatch_table_array_ = nullptr);
1405}
1406
1407#if !defined(PRODUCT)
1408DisableThreadInterruptsScope::DisableThreadInterruptsScope(Thread* thread)
1409 : StackResource(thread) {
1410 if (thread != nullptr) {
1411 OSThread* os_thread = thread->os_thread();
1412 ASSERT(os_thread != nullptr);
1413 os_thread->DisableThreadInterrupts();
1414 }
1415}
1416
1417DisableThreadInterruptsScope::~DisableThreadInterruptsScope() {
1418 if (thread() != nullptr) {
1419 OSThread* os_thread = thread()->os_thread();
1420 ASSERT(os_thread != nullptr);
1421 os_thread->EnableThreadInterrupts();
1422 }
1423}
1424#endif
1425
1426NoReloadScope::NoReloadScope(Thread* thread) : ThreadStackResource(thread) {
1427#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1428 thread->no_reload_scope_depth_++;
1429 ASSERT(thread->no_reload_scope_depth_ >= 0);
1430#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1431}
1432
1433NoReloadScope::~NoReloadScope() {
1434#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1435 thread()->no_reload_scope_depth_ -= 1;
1436 ASSERT(thread()->no_reload_scope_depth_ >= 0);
1437 auto isolate = thread()->isolate();
1438 const intptr_t state = thread()->safepoint_state();
1439
1440 if (thread()->no_reload_scope_depth_ == 0) {
1441 // If we were asked to go to a reload safepoint & block for a reload
1442 // safepoint operation on another thread - *while* being inside
1443 // [NoReloadScope] - we may have handled & ignored the OOB message telling
1444 // us to reload.
1445 //
1446 // Since we're exiting now the [NoReloadScope], we'll make another OOB
1447 // reload request message to ourselves, which will be handled in
1448 // well-defined place where we can perform reload.
1449 if (isolate != nullptr &&
1450 Thread::IsSafepointLevelRequested(
1451 state, SafepointLevel::kGCAndDeoptAndReload)) {
1452 isolate->SendInternalLibMessage(Isolate::kCheckForReload, /*capability=*/-1);
1453 }
1454 }
1455#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
1456}
1457
1458} // namespace dart