Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
field_table.cc
Go to the documentation of this file.
1// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/field_table.h"
6
7#include "platform/atomic.h"
8#include "vm/flags.h"
9#include "vm/growable_array.h"
10#include "vm/heap/heap.h"
11#include "vm/object.h"
12#include "vm/object_graph.h"
13#include "vm/object_store.h"
14#include "vm/raw_object.h"
15#include "vm/visitor.h"
16
17namespace dart {
18
  // NOTE(review): the opening line of this definition (evidently the
  // FieldTable destructor, per the allocation comments below) was lost in
  // extraction; the statements are kept unchanged.
  delete old_tables_;  // Allocated in FieldTable::FieldTable()
  free(table_);        // Allocated in FieldTable::Grow()
}
24
      // Reading the flag is legal while the group is reloading, or while the
      // current thread holds the program lock as a reader.
      // NOTE(review): the `DEBUG_ASSERT(` opening line of this condition was
      // lost in extraction; only the continuation lines remain.
      IsolateGroup::Current()->IsReloading() ||
      IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());
  return is_ready_to_use_;
}
31
  // The isolate will mark its field table ready-to-use upon initialization of
  // the isolate. Only after it was marked as ready-to-use will it participate
  // in new static field registrations.
  //
  // By requiring a read lock here we ensure no other thread is registering a
  // new static field at this moment (it would need an exclusive writer lock).
      // NOTE(review): the `DEBUG_ASSERT(` opening line of this check was lost
      // in extraction; only the continuation line remains.
      IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());
  ASSERT(!is_ready_to_use_);
  is_ready_to_use_ = true;
}
44
  // NOTE(review): the opening signature line of this definition was lost in
  // extraction. Body drains the retained list of stale backing stores that
  // Grow() accumulated; entries were malloc'ed, so free() is the matching
  // deallocator.
  while (old_tables_->length() > 0) {
    free(old_tables_->RemoveLast());
  }
}
50
51intptr_t FieldTable::FieldOffsetFor(intptr_t field_id) {
52 return field_id * sizeof(ObjectPtr); // NOLINT
53}
54
// Assigns a field id to |field| and reserves its slot in the table,
// initializing the slot to the sentinel value. Returns true iff the backing
// store was reallocated (callers can then refresh cached table pointers).
bool FieldTable::Register(const Field& field, intptr_t expected_field_id) {
      // Registration mutates the table, so the caller must hold the program
      // lock exclusively.
      // NOTE(review): the `DEBUG_ASSERT(` opening line of this check was lost
      // in extraction; only the continuation line remains.
      IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter());
  ASSERT(is_ready_to_use_);

  // Fast path: no recycled slots available, append at |top_|.
  if (free_head_ < 0) {
    bool grown_backing_store = false;
    if (top_ == capacity_) {
      const intptr_t new_capacity = capacity_ + kCapacityIncrement;
      Grow(new_capacity);
      grown_backing_store = true;
    }

    ASSERT(top_ < capacity_);
    // -1 means "no particular id expected"; otherwise the caller demands
    // the next sequential slot.
    ASSERT(expected_field_id == -1 || expected_field_id == top_);
    field.set_field_id(top_);
    // A freshly registered static field starts out holding the sentinel.
    table_[top_] = Object::sentinel().ptr();

    ++top_;
    return grown_backing_store;
  }

  // Reuse existing free element. This is "slow path" that should only be
  // triggered after hot reload.
  intptr_t reused_free = free_head_;
  // Free slots form an intrusive singly-linked list: each free slot stores
  // the index of the next free slot as a Smi (see Free()).
  free_head_ = Smi::Value(Smi::RawCast(table_[free_head_]));
  field.set_field_id(reused_free);
  table_[reused_free] = Object::sentinel().ptr();
  return false;
}
85
86void FieldTable::Free(intptr_t field_id) {
87 table_[field_id] = Smi::New(free_head_);
88 free_head_ = field_id;
89}
90
91void FieldTable::AllocateIndex(intptr_t index) {
92 if (index >= capacity_) {
93 const intptr_t new_capacity = index + kCapacityIncrement;
94 Grow(new_capacity);
95 }
96
97 ASSERT(table_[index] == ObjectPtr());
98 if (index >= top_) {
99 top_ = index + 1;
100 }
101}
102
// Replaces the backing store with a larger one. The old store is retained
// (not freed) and the new pointer is published with release semantics, so
// the exact statement order below is load-bearing.
void FieldTable::Grow(intptr_t new_capacity) {
  ASSERT(new_capacity > capacity_);

  auto old_table = table_;
  // NOTE(review): malloc result is not checked for nullptr here — presumably
  // allocation failure is fatal elsewhere in this runtime; confirm.
  auto new_table = static_cast<ObjectPtr*>(
      malloc(new_capacity * sizeof(ObjectPtr)));  // NOLINT
  // Copy the live prefix, then fill the rest with default-constructed
  // (null) ObjectPtr values.
  intptr_t i;
  for (i = 0; i < top_; i++) {
    new_table[i] = old_table[i];
  }
  for (; i < new_capacity; i++) {
    new_table[i] = ObjectPtr();
  }
  capacity_ = new_capacity;
  // The old store is kept alive (freed later when the old-tables list is
  // drained) — presumably because concurrent readers may still hold a
  // pointer to it; confirm against readers of table_.
  old_tables_->Add(old_table);
  // Ensure that new_table_ is populated before it is published
  // via store to table_.
  reinterpret_cast<AcqRelAtomic<ObjectPtr*>*>(&table_)->store(new_table);
  if (isolate_ != nullptr && isolate_->mutator_thread() != nullptr) {
    // Keep the mutator thread's cached copy of the table pointer in sync.
    isolate_->mutator_thread()->field_table_values_ = table_;
  }
}
125
      // NOTE(review): this definition's signature line (Clone, taking
      // |for_isolate| — see the cross-reference index) and the
      // `DEBUG_ASSERT(` opening line were lost in extraction; only the
      // continuation line of the lock check remains.
      IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());

  FieldTable* clone = new FieldTable(for_isolate);
  auto new_table =
      static_cast<ObjectPtr*>(malloc(capacity_ * sizeof(ObjectPtr)));  // NOLINT
  // Bulk-copy the full capacity (live slots and free-list slots alike) so
  // the copied free_head_/top_ bookkeeping below stays consistent.
  memmove(new_table, table_, capacity_ * sizeof(ObjectPtr));
  ASSERT(clone->table_ == nullptr);
  clone->table_ = new_table;
  clone->capacity_ = capacity_;
  clone->top_ = top_;
  clone->free_head_ = free_head_;
  return clone;
}
141
  // NOTE(review): the opening signature line of this definition (evidently
  // VisitObjectPointers(ObjectPointerVisitor*), per the cross-reference
  // index) was lost in extraction.
  // GC might try to visit field table before its isolate is done setting it
  // up.
  if (table_ == nullptr) {
    return;
  }

  ASSERT(visitor != nullptr);
  visitor->set_gc_root_type("static fields table");
  // Passing &table_[top_ - 1] implies VisitPointers treats |last| as
  // inclusive. NOTE(review): assumes top_ > 0 whenever table_ is non-null;
  // otherwise this indexes table_[-1] — verify against callers.
  visitor->VisitPointers(&table_[0], &table_[top_ - 1]);
  visitor->clear_gc_root_type();
}
153
154} // namespace dart
SI void store(P *ptr, const T &val)
#define DEBUG_ASSERT(cond)
Definition assert.h:321
bool Register(const Field &field, intptr_t expected_field_id=-1)
void Free(intptr_t index)
void AllocateIndex(intptr_t index)
bool IsReadyToUse() const
FieldTable * Clone(Isolate *for_isolate)
static constexpr int kCapacityIncrement
Definition field_table.h:95
static intptr_t FieldOffsetFor(intptr_t field_id)
void VisitObjectPointers(ObjectPointerVisitor *visitor)
void set_field_id(intptr_t field_id) const
Definition object.h:13262
static IsolateGroup * Current()
Definition isolate.h:534
Thread * mutator_thread() const
Definition isolate.cc:1884
void set_gc_root_type(const char *gc_root_type)
Definition visitor.h:58
virtual void VisitPointers(ObjectPtr *first, ObjectPtr *last)=0
static ObjectPtr RawCast(ObjectPtr obj)
Definition object.h:325
static SmiPtr New(intptr_t value)
Definition object.h:9985
intptr_t Value() const
Definition object.h:9969
#define ASSERT(E)
void * malloc(size_t size)
Definition allocation.cc:19