zone_test.cc
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/zone.h"

#include "platform/assert.h"
#include "vm/dart.h"
#include "vm/isolate.h"
#include "vm/unit_test.h"

namespace dart {

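// Exercises basic zone allocation: many small allocations that spill across
// segments, oversized segments, and sizes near the segment boundary.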
VM_UNIT_TEST_CASE(AllocateZone) {
#if defined(DEBUG)
  FLAG_trace_zones = true;
#endif
  TestCase::CreateTestIsolate();
  Thread* thread = Thread::Current();
  EXPECT(thread->zone() == nullptr);
  {
    TransitionNativeToVM transition(thread);
    StackZone stack_zone(thread);
    EXPECT(thread->zone() != nullptr);
    Zone* zone = stack_zone.GetZone();
    uintptr_t allocated_size = 0;

    // The loop is to make sure we overflow one segment and go on
    // to the next segment.
    for (int i = 0; i < 1000; i++) {
      uword first = reinterpret_cast<uword>(zone->AllocUnsafe(2 * kWordSize));
      uword second = reinterpret_cast<uword>(zone->AllocUnsafe(3 * kWordSize));
      EXPECT(first != second);
      allocated_size = ((2 + 3) * kWordSize);
    }
    EXPECT_LE(allocated_size, zone->SizeInBytes());

    // Test for allocation of large segments.
    const uword kLargeSize = 1 * MB;
    const uword kSegmentSize = 64 * KB;
    ASSERT(kLargeSize > kSegmentSize);
    for (int i = 0; i < 10; i++) {
      EXPECT(zone->AllocUnsafe(kLargeSize) != 0);
      allocated_size += kLargeSize;
    }
    EXPECT_LE(allocated_size, zone->SizeInBytes());

    // Test corner cases of kSegmentSize.
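    // Writing the last byte of each buffer checks that the allocation is
    // usable all the way to its requested size.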
    uint8_t* buffer = nullptr;
    buffer =
        reinterpret_cast<uint8_t*>(zone->AllocUnsafe(kSegmentSize - kWordSize));
    EXPECT(buffer != nullptr);
    buffer[(kSegmentSize - kWordSize) - 1] = 0;
    allocated_size += (kSegmentSize - kWordSize);
    EXPECT_LE(allocated_size, zone->SizeInBytes());

    buffer = reinterpret_cast<uint8_t*>(
        zone->AllocUnsafe(kSegmentSize - (2 * kWordSize)));
    EXPECT(buffer != nullptr);
    buffer[(kSegmentSize - (2 * kWordSize)) - 1] = 0;
    allocated_size += (kSegmentSize - (2 * kWordSize));
    EXPECT_LE(allocated_size, zone->SizeInBytes());

    buffer =
        reinterpret_cast<uint8_t*>(zone->AllocUnsafe(kSegmentSize + kWordSize));
    EXPECT(buffer != nullptr);
    buffer[(kSegmentSize + kWordSize) - 1] = 0;
    allocated_size += (kSegmentSize + kWordSize);
    EXPECT_LE(allocated_size, zone->SizeInBytes());
  }
  EXPECT(thread->zone() == nullptr);
  Dart_ShutdownIsolate();
}

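// Allocates a typed array with the Alloc<T> template and checks that the
// zone grew by at least the requested number of bytes.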
VM_UNIT_TEST_CASE(AllocGeneric_Success) {
#if defined(DEBUG)
  FLAG_trace_zones = true;
#endif
  TestCase::CreateTestIsolate();
  Thread* thread = Thread::Current();
  EXPECT(thread->zone() == nullptr);
  {
    TransitionNativeToVM transition(thread);
    StackZone zone(thread);
    EXPECT(thread->zone() != nullptr);
    uintptr_t allocated_size = 0;

    const intptr_t kNumElements = 1000;
    zone.GetZone()->Alloc<uint32_t>(kNumElements);
    allocated_size += sizeof(uint32_t) * kNumElements;
    EXPECT_LE(allocated_size, zone.SizeInBytes());
  }
  EXPECT(thread->zone() == nullptr);
  Dart_ShutdownIsolate();
}

// This test is expected to crash.
VM_UNIT_TEST_CASE_WITH_EXPECTATION(AllocGeneric_Overflow, "Crash") {
#if defined(DEBUG)
  FLAG_trace_zones = true;
#endif
  TestCase::CreateTestIsolate();
  Thread* thread = Thread::Current();
  EXPECT(thread->zone() == nullptr);
  {
    StackZone zone(thread);
    EXPECT(thread->zone() != nullptr);

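    // kNumElements is chosen so that kNumElements * sizeof(uint32_t)
    // overflows intptr_t, which must make Alloc<uint32_t> crash rather
    // than return a too-small allocation.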
    const intptr_t kNumElements = (kIntptrMax / sizeof(uint32_t)) + 1;
    zone.GetZone()->Alloc<uint32_t>(kNumElements);
  }
  Dart_ShutdownIsolate();
}

VM_UNIT_TEST_CASE(ZoneRealloc) {
  TestCase::CreateTestIsolate();
  Thread* thread = Thread::Current();
  {
    TransitionNativeToVM transition(thread);
    StackZone stack_zone(thread);
    auto zone = thread->zone();

    const intptr_t kOldLen = 32;
    const intptr_t kNewLen = 16;
    const intptr_t kNewLen2 = 16;

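    // Shrinking the most recent zone allocation in place is expected to
    // return the original pointer instead of a fresh block.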
    auto data_old = zone->Alloc<uint8_t>(kOldLen);
    auto data_new = zone->Realloc<uint8_t>(data_old, kOldLen, kNewLen);
    RELEASE_ASSERT(data_old == data_new);

    auto data_new2 = zone->Realloc<uint8_t>(data_old, kNewLen, kNewLen2);
    RELEASE_ASSERT(data_old == data_new2);
  }
  Dart_ShutdownIsolate();
}

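// Checks that objects deriving from ZoneAllocated are placed in the zone
// and that their constructors run.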
VM_UNIT_TEST_CASE(ZoneAllocated) {
#if defined(DEBUG)
  FLAG_trace_zones = true;
#endif
  TestCase::CreateTestIsolate();
  Thread* thread = Thread::Current();
  EXPECT(thread->zone() == nullptr);
  static int marker;

  class SimpleZoneObject : public ZoneAllocated {
   public:
    SimpleZoneObject() : slot(marker++) {}
    virtual ~SimpleZoneObject() {}
    virtual int GetSlot() { return slot; }
    int slot;
  };

  // Reset the marker.
  marker = 0;

  // Create a few zone allocated objects.
  {
    TransitionNativeToVM transition(thread);
    StackZone zone(thread);
    EXPECT_EQ(0UL, zone.SizeInBytes());
    SimpleZoneObject* first = new SimpleZoneObject();
    EXPECT(first != nullptr);
    SimpleZoneObject* second = new SimpleZoneObject();
    EXPECT(second != nullptr);
    EXPECT(first != second);
    uintptr_t expected_size = (2 * sizeof(SimpleZoneObject));
    EXPECT_LE(expected_size, zone.SizeInBytes());

    // Make sure the constructors were invoked.
    EXPECT_EQ(0, first->slot);
    EXPECT_EQ(1, second->slot);

    // Make sure we can write to the members of the zone objects.
    first->slot = 42;
    second->slot = 87;
    EXPECT_EQ(42, first->slot);
    EXPECT_EQ(87, second->slot);
  }
  EXPECT(thread->zone() == nullptr);
  Dart_ShutdownIsolate();
}

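// PrintToString formats into a zone-allocated C string.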
TEST_CASE(PrintToString) {
  TransitionNativeToVM transition(thread);
  StackZone zone(thread);
  const char* result = zone.GetZone()->PrintToString("Hello %s!", "World");
  EXPECT_STREQ("Hello World!", result);
}

#if !defined(PRODUCT) && !defined(USING_ADDRESS_SANITIZER) &&                  \
    !defined(USING_MEMORY_SANITIZER)
// RSS hooks are absent in PRODUCT mode. Scudo's quarantine interferes with
// RSS measurements under the sanitizers. The slack allows for limited
// pooling in the malloc implementation.
static constexpr int64_t kRssSlack = 20 * MB;
#define CHECK_RSS
#endif

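// Allocation sizes sitting exactly at, just above, and just below common
// segment sizes, to exercise the allocator's boundary cases.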
// clang-format off
static const size_t kSizes[] = {
    64 * KB,
    64 * KB + 2 * kWordSize,
    64 * KB - 2 * kWordSize,
    128 * KB,
    128 * KB + 2 * kWordSize,
    128 * KB - 2 * kWordSize,
    256 * KB,
    256 * KB + 2 * kWordSize,
    256 * KB - 2 * kWordSize,
    512 * KB,
    512 * KB + 2 * kWordSize,
    512 * KB - 2 * kWordSize,
};
// clang-format on

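// Churns roughly 3 GB of malloc/free traffic in segment-sized chunks and
// checks that RSS stays within the allowed slack.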
TEST_CASE(StressMallocDirectly) {
#if defined(CHECK_RSS)
  int64_t start_rss = Service::CurrentRSS();
#endif

  void* allocations[ARRAY_SIZE(kSizes)];
  for (size_t i = 0; i < ((3u * GB) / (512u * KB)); i++) {
    for (size_t j = 0; j < ARRAY_SIZE(kSizes); j++) {
      allocations[j] = malloc(kSizes[j]);
    }
    for (size_t j = 0; j < ARRAY_SIZE(kSizes); j++) {
      free(allocations[j]);
    }
  }

#if defined(CHECK_RSS)
  int64_t stop_rss = Service::CurrentRSS();
  EXPECT_LT(stop_rss, start_rss + kRssSlack);
#endif
}

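// The same traffic pattern, but routed through short-lived zones so that
// segment allocation and reuse are exercised instead of raw malloc/free.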
ISOLATE_UNIT_TEST_CASE(StressMallocThroughZones) {
#if defined(CHECK_RSS)
  int64_t start_rss = Service::CurrentRSS();
#endif

  for (size_t i = 0; i < ((3u * GB) / (512u * KB)); i++) {
    StackZone stack_zone(Thread::Current());
    Zone* zone = stack_zone.GetZone();
    for (size_t j = 0; j < ARRAY_SIZE(kSizes); j++) {
      zone->Alloc<uint8_t>(kSizes[j]);
    }
  }

#if defined(CHECK_RSS)
  int64_t stop_rss = Service::CurrentRSS();
  EXPECT_LT(stop_rss, start_rss + kRssSlack);
#endif
}

#if defined(DART_COMPRESSED_POINTERS)
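// Zone segments live outside the Dart object heap, so zone allocation
// should not be capped by the 4 GB compressed-pointer address space.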
ISOLATE_UNIT_TEST_CASE(ZonesNotLimitedByCompressedHeap) {
  StackZone stack_zone(Thread::Current());
  Zone* zone = stack_zone.GetZone();

  size_t total = 0;
  while (total <= (4u * GB)) {
    size_t chunk_size = 512u * MB;
    zone->AllocUnsafe(chunk_size);
    total += chunk_size;
  }
}
#endif  // defined(DART_COMPRESSED_POINTERS)

ISOLATE_UNIT_TEST_CASE(ZoneVerificationScaling) {
  // This ought to complete in O(n), not O(n^2).
  const intptr_t n = 1000000;

  StackZone stack_zone(thread);
  Zone* zone = stack_zone.GetZone();

  {
    HANDLESCOPE(thread);
    for (intptr_t i = 0; i < n; i++) {
      const Object& a = Object::Handle(zone);
      DEBUG_ASSERT(!a.IsNotTemporaryScopedHandle());
      USE(a);
      const Object& b = Object::ZoneHandle(zone);
      DEBUG_ASSERT(b.IsNotTemporaryScopedHandle());
      USE(b);
    }
    // Leaves lots of HandleBlocks for recycling.
  }

  for (intptr_t i = 0; i < n; i++) {
    HANDLESCOPE(thread);
    const Object& a = Object::Handle(zone);
    DEBUG_ASSERT(!a.IsNotTemporaryScopedHandle());
    USE(a);
    const Object& b = Object::ZoneHandle(zone);
    DEBUG_ASSERT(b.IsNotTemporaryScopedHandle());
    USE(b);
    // Should not visit those recyclable blocks over and over again.
  }
}

}  // namespace dart