Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
pages.h
Go to the documentation of this file.
1// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_HEAP_PAGES_H_
6#define RUNTIME_VM_HEAP_PAGES_H_
7
8#include "platform/atomic.h"
9#include "vm/globals.h"
10#include "vm/heap/freelist.h"
11#include "vm/heap/page.h"
12#include "vm/heap/spaces.h"
13#include "vm/lockers.h"
14#include "vm/ring_buffer.h"
15#include "vm/thread.h"
16#include "vm/virtual_memory.h"
17
18namespace dart {
19
20DECLARE_FLAG(bool, write_protect_code);
21
22// Forward declarations.
23class Heap;
24class JSONObject;
26class ObjectSet;
27class ForwardingPage;
28class GCMarker;
29
30// The history holds the timing information of the last garbage collection
31// runs.
// NOTE(review): extraction dropped original line 32 (the class declaration —
// presumably `class <GCHistory> {` — confirm against the upstream header) and
// lines 34-35/39 (constructor/destructor and one accessor). Restore before use.
33 public:
36
// Records one GC's [start, end) timestamps (microsecond int64s) into the
// fixed-size ring buffer below; oldest entries are overwritten after
// kHistoryLength records.
 37 void AddGarbageCollectionTime(int64_t start, int64_t end);
 38
 40
// True when no GC has been recorded yet (ring buffer empty).
 41 bool IsEmpty() const { return history_.Size() == 0; }
 42
 43 private:
// One timing sample: start/end timestamps of a single collection.
 44 struct Entry {
 45 int64_t start;
 46 int64_t end;
 47 };
// Only the last 4 collections are retained.
 48 static constexpr intptr_t kHistoryLength = 4;
 49 RingBuffer<Entry, kHistoryLength> history_;
 50
// Stack/embedded use only — heap allocation of this helper is disallowed.
 51 DISALLOW_ALLOCATION();
 53};
54
55// PageSpaceController controls the heap size.
// NOTE(review): extraction dropped original line 56 (`class PageSpaceController ... {`)
// and line 60 (the constructor's first line, `PageSpaceController(Heap* heap,` —
// inferred from the trailing parameter list below; confirm upstream).
57 public:
 58 // The heap is passed in for recording stats only. The controller does not
 59 // invoke GC by itself.
 61 int heap_growth_ratio,
 62 int heap_growth_max,
 63 int garbage_collection_time_ratio);
 65
 66 // Returns whether growing to 'after' should trigger a GC.
 67 // This method can be called before allocation (e.g., pretenuring) or after
 68 // (e.g., promotion), as it does not change the state of the controller.
 69 bool ReachedHardThreshold(SpaceUsage after) const;
 70 bool ReachedSoftThreshold(SpaceUsage after) const;
 71
 72 // Returns whether an idle GC is worthwhile.
 73 bool ReachedIdleThreshold(SpaceUsage current) const;
 74
 75 // Should be called after each collection to update the controller state.
// NOTE(review): line 76 — the opening of this signature — was elided; per the
// Doxygen index it is `void EvaluateGarbageCollection(SpaceUsage before,`.
 77 SpaceUsage after,
 78 int64_t start,
 79 int64_t end);
 81
 82 void set_last_usage(SpaceUsage current) { last_usage_ = current; }
 83
 84 private:
 85 friend class PageSpace; // For MergeOtherPageSpaceController
 86
// Two overloads tracing threshold updates; the reason string is for logging.
 87 void RecordUpdate(SpaceUsage before, SpaceUsage after, const char* reason);
 88 void RecordUpdate(SpaceUsage before,
 89 SpaceUsage after,
 90 intptr_t growth_in_pages,
 91 const char* reason);
 92
 93 Heap* heap_;
 94
 95 // Usage after last evaluated GC or last enabled.
 96 SpaceUsage last_usage_;
 97
 98 // If the garbage collector was not able to free more than heap_growth_ratio_
 99 // memory, then the heap is grown. Otherwise garbage collection is performed.
 100 const int heap_growth_ratio_;
 101
 102 // The desired percent of heap in-use after a garbage collection.
 103 // Equivalent to \frac{100-heap_growth_ratio_}{100}.
 104 const double desired_utilization_;
 105
 106 // Max number of pages we grow.
 107 const int heap_growth_max_;
 108
 109 // If the relative GC time goes above garbage_collection_time_ratio_ %,
 110 // we grow the heap more aggressively.
 111 const int garbage_collection_time_ratio_;
 112
 113 // Perform a stop-the-world GC when usage exceeds this amount.
 114 intptr_t hard_gc_threshold_in_words_;
 115
 116 // Begin concurrent marking when usage exceeds this amount.
 117 intptr_t soft_gc_threshold_in_words_;
 118
 119 // Run idle GC if time permits when usage exceeds this amount.
 120 intptr_t idle_gc_threshold_in_words_;
 121
// NOTE(review): lines 122-124 (likely a member + DISALLOW macro) were elided.
 123
 125};
126
// NOTE(review): extraction dropped original line 127 (`class PageSpace {`) and
// lines 129-136, which declared the GrowthPolicy and Phase enums used below —
// per the Doxygen index, `kAwaitingFinalization` sat at line 133. Restore from
// the upstream header before compiling.
128 public:
 137
// max_capacity_in_words == 0 means growth is unlimited (see
// CanIncreaseCapacityInWordsLocked below).
 138 PageSpace(Heap* heap, intptr_t max_capacity_in_words);
 139 ~PageSpace();
 140
141 uword TryAllocate(intptr_t size,
142 bool is_executable = false,
143 GrowthPolicy growth_policy = kControlGrowth) {
144 bool is_protected = (is_executable) && FLAG_write_protect_code;
145 bool is_locked = false;
146 return TryAllocateInternal(
147 size, &freelists_[is_executable ? kExecutableFreelist : kDataFreelist],
148 is_executable, growth_policy, is_protected, is_locked);
149 }
150 DART_FORCE_INLINE
// Fast path for promotion during scavenge: bump-allocate from the freelist's
// bump block, falling back to the slow path on failure.
// NOTE(review): original lines 152-153 were elided here — based on the brace
// structure (note the extra closing `}` at 157) and the index entries for
// LIKELY/IsAllocatableViaFreeLists, they presumably declared `uword result;`
// inside a guard such as `if (LIKELY(IsAllocatableViaFreeLists(size))) {` —
// confirm against the upstream header.
 151 uword TryAllocatePromoLocked(FreeList* freelist, intptr_t size) {
 154 if (freelist->TryAllocateBumpLocked(size, &result)) {
 155 return result;
 156 }
 157 }
 158 return TryAllocatePromoLockedSlow(freelist, size);
 159 }
 160 DART_FORCE_INLINE
// Same bump-allocate fast path, used while writing a snapshot; lines 162-163
// were likewise elided (see note above).
 161 uword AllocateSnapshotLocked(FreeList* freelist, intptr_t size) {
 164 if (freelist->TryAllocateBumpLocked(size, &result)) {
 165 return result;
 166 }
 167 }
 168 return AllocateSnapshotLockedSlow(freelist, size);
 169 }
170
// NOTE(review): original line 171 was elided; per the Doxygen index this was
// the declaration of TryReleaseReservation() — confirm upstream.
 172 bool MarkReservation();
 173 void TryReserveForOOM();
 174 void VisitRoots(ObjectPointerVisitor* visitor);
 175
// The three accessors below forward the space's current usage_ to the
// controller's corresponding threshold checks.
 176 bool ReachedHardThreshold() const {
 177 return page_space_controller_.ReachedHardThreshold(usage_);
 178 }
 179 bool ReachedSoftThreshold() const {
 180 return page_space_controller_.ReachedSoftThreshold(usage_);
 181 }
 182 bool ReachedIdleThreshold() const {
 183 return page_space_controller_.ReachedIdleThreshold(usage_);
 184 }
// NOTE(review): line 185, elided here, opened this method — per the Doxygen
// index it is `void EvaluateAfterLoading() {`.
 186 page_space_controller_.EvaluateAfterLoading(usage_);
 187 }
188
 189 intptr_t UsedInWords() const { return usage_.used_in_words; }
// capacity_in_words is mutated by the concurrent sweeper, so reads go through
// pages_lock_ (see the NOTE on usage_ below).
 190 intptr_t CapacityInWords() const {
 191 MutexLocker ml(&pages_lock_);
 192 return usage_.capacity_in_words;
 193 }
 194 void IncreaseCapacityInWords(intptr_t increase_in_words) {
 195 MutexLocker ml(&pages_lock_);
 196 IncreaseCapacityInWordsLocked(increase_in_words);
 197 }
// Caller must hold pages_lock_. NOTE(review): lines 199 and 201 were elided —
// presumably a lock-ownership DEBUG_ASSERT and a post-update assert/call
// (the index lists UpdateMaxCapacityLocked nearby) — confirm upstream.
 198 void IncreaseCapacityInWordsLocked(intptr_t increase_in_words) {
 200 usage_.capacity_in_words += increase_in_words;
 202 }
 203
// NOTE(review): line 204 was elided; likely the UpdateMaxCapacityLocked()
// declaration referenced by the Doxygen index.
 205 void UpdateMaxUsed();
 206
 207 intptr_t ExternalInWords() const { return usage_.external_in_words; }
// NOTE(review): line 208, elided here, opened this method — per the Doxygen
// index it is `SpaceUsage GetCurrentUsage() const {`.
 209 MutexLocker ml(&pages_lock_);
 210 return usage_;
 211 }
// Sums the sizes of all image pages; result is in words.
 212 intptr_t ImageInWords() const {
 213 intptr_t size = 0;
 214 MutexLocker ml(&pages_lock_);
 215 for (Page* page = image_pages_; page != nullptr; page = page->next()) {
 216 size += page->memory_->size();
 217 }
 218 return size >> kWordSizeLog2;
 219 }
220
// Address-membership queries over the space's page lists.
221 bool Contains(uword addr) const;
 222 bool ContainsUnsafe(uword addr) const;
 223 bool CodeContains(uword addr) const;
 224 bool DataContains(uword addr) const;
 225 bool IsValidAddress(uword addr) const { return Contains(addr); }
 226
// Object/pointer iteration over the space; "Unsafe" variants skip locking
// (see definitions in pages.cc).
 227 void VisitObjects(ObjectVisitor* visitor) const;
 228 void VisitObjectsNoImagePages(ObjectVisitor* visitor) const;
 229 void VisitObjectsImagePages(ObjectVisitor* visitor) const;
 230 void VisitObjectsUnsafe(ObjectVisitor* visitor) const;
 231 void VisitObjectPointers(ObjectPointerVisitor* visitor) const;
 232
 233 void VisitRememberedCards(ObjectPointerVisitor* visitor) const;
 234 void ResetProgressBars() const;
 235
 236 // Collect the garbage in the page space using mark-sweep or mark-compact.
 237 void CollectGarbage(Thread* thread, bool compact, bool finalize);
 238
 239 void AddRegionsToObjectSet(ObjectSet* set) const;
 240
 241 // Note: Code pages are made executable/non-executable when 'read_only' is
 242 // true/false, respectively.
 243 void WriteProtect(bool read_only);
 244 void WriteProtectCode(bool read_only);
 245
// Idle-time / incremental GC hooks; deadlines are int64 timestamps.
 246 bool ShouldStartIdleMarkSweep(int64_t deadline);
 247 bool ShouldPerformIdleMarkCompact(int64_t deadline);
 248 void IncrementalMarkWithSizeBudget(intptr_t size);
 249 void IncrementalMarkWithTimeBudget(int64_t deadline);
 250 void AssistTasks(MonitorLocker* ml);
 251
// Simple GC bookkeeping counters.
 252 void AddGCTime(int64_t micros) { gc_time_micros_ += micros; }
 253
 254 int64_t gc_time_micros() const { return gc_time_micros_; }
 255
 256 void IncrementCollections() { collections_++; }
 257
 258 intptr_t collections() const { return collections_; }
 259
// Heap introspection output; compiled out of PRODUCT builds.
 260#ifndef PRODUCT
 261 void PrintToJSONObject(JSONObject* object) const;
 262 void PrintHeapMapToJSONStream(IsolateGroup* isolate_group,
 263 JSONStream* stream) const;
 264#endif // PRODUCT
265
266 void AllocateBlack(intptr_t size) {
267 allocated_black_in_words_.fetch_add(size >> kWordSizeLog2);
268 }
269
270 // Tracks an external allocation by incrementing the old space's total
271 // external size tracker. Returns false without incrementing the tracker if
272 // this allocation will make it exceed kMaxAddrSpaceInWords.
273 bool AllocatedExternal(intptr_t size) {
274 ASSERT(size >= 0);
275 intptr_t size_in_words = size >> kWordSizeLog2;
276 intptr_t expected = usage_.external_in_words.load();
277 intptr_t desired;
278 do {
279 desired = expected + size_in_words;
280 if (desired < 0 || desired > kMaxAddrSpaceInWords) {
281 return false;
282 }
283 ASSERT(desired >= 0);
284 } while (
285 !usage_.external_in_words.compare_exchange_weak(expected, desired));
286 return true;
287 }
288 void FreedExternal(intptr_t size) {
289 ASSERT(size >= 0);
290 intptr_t size_in_words = size >> kWordSizeLog2;
291 usage_.external_in_words -= size_in_words;
292 ASSERT(usage_.external_in_words >= 0);
293 }
294
 295 // Bulk data allocation.
// Returns the i-th data-page freelist (see num_freelists_ below).
 296 FreeList* DataFreeList(intptr_t i = 0) {
 297 return &freelists_[kDataFreelist + i];
 298 }
 299 void AcquireLock(FreeList* freelist);
 300 void ReleaseLock(FreeList* freelist);
 301
// NOTE(review): lines 302-305 were elided — per the Doxygen index they held
// the Pause/Resume/YieldConcurrentMarking declarations and, at line 305, the
// opening of `void PushDependencyToConcurrentMarking() {` whose body follows.
// The fetch_or(0) is a read-modify-write on the acquire/release atomic.
 306 pause_concurrent_marking_.fetch_or(0);
 307 }
 308
 309 Monitor* tasks_lock() const { return &tasks_lock_; }
 310 intptr_t tasks() const { return tasks_; }
 311 void set_tasks(intptr_t val) {
 312 ASSERT(val >= 0);
 313 tasks_ = val;
 314 }
// NOTE(review): single lines elided at 316, 321 (presumably lock-ownership
// asserts) and full signatures at 324-325, 328-330, 333 — the index places
// concurrent_marker_tasks_active() at 324, its setter at 328, and
// `bool pause_concurrent_marking() const {` at 333. Confirm upstream.
 315 intptr_t concurrent_marker_tasks() const {
 317 return concurrent_marker_tasks_;
 318 }
 319 void set_concurrent_marker_tasks(intptr_t val) {
 320 ASSERT(val >= 0);
 322 concurrent_marker_tasks_ = val;
 323 }
 326 return concurrent_marker_tasks_active_;
 327 }
 329 ASSERT(val >= 0);
 331 concurrent_marker_tasks_active_ = val;
 332 }
 334 return pause_concurrent_marking_.load() != 0;
 335 }
 336 Phase phase() const { return phase_; }
 337 void set_phase(Phase val) { phase_ = val; }
 338
 339 void SetupImagePage(void* pointer, uword size, bool is_executable);
 340
 341 // Return any bump allocation block to the freelist.
// NOTE(review): declarations at lines 342 and 344 were elided — per the index,
// ReleaseBumpAllocation() and AbandonMarkingForShutdown().
 343 // Have threads release marking stack blocks, etc.
 345
 346 bool enable_concurrent_mark() const { return enable_concurrent_mark_; }
// NOTE(review): line 347, elided, opened
// `void set_enable_concurrent_mark(bool enable_concurrent_mark) {` per index.
 348 enable_concurrent_mark_ = enable_concurrent_mark;
 349 }
 350
// NOTE(review): line 351 elided — the index lists IsObjectFromImagePages here.
 352
 353 GCMarker* marker() const { return marker_; }
 354
355 private:
 356 // Ids for time and data records in Heap::GCStats.
 357 enum {
 358 // Time
 359 kConcurrentSweep = 0,
 360 kSafePoint = 1,
 361 kMarkObjects = 2,
 362 kResetFreeLists = 3,
 363 kSweepPages = 4,
 364 kSweepLargePages = 5,
 365 };
366
367 uword TryAllocateDataLocked(FreeList* freelist,
368 intptr_t size,
369 GrowthPolicy growth_policy) {
370 bool is_executable = false;
371 bool is_protected = false;
372 bool is_locked = true;
373 return TryAllocateInternal(size, freelist, is_executable, growth_policy,
374 is_protected, is_locked);
375 }
// Core allocation entry: dispatches to freelist / fresh-page / large-page
// paths (definitions in pages.cc).
376 uword TryAllocateInternal(intptr_t size,
 377 FreeList* freelist,
 378 bool is_executable,
 379 GrowthPolicy growth_policy,
 380 bool is_protected,
 381 bool is_locked);
 382 uword TryAllocateInFreshPage(intptr_t size,
 383 FreeList* freelist,
 384 bool is_executable,
 385 GrowthPolicy growth_policy,
 386 bool is_locked);
 387 uword TryAllocateInFreshLargePage(intptr_t size,
 388 bool is_executable,
 389 GrowthPolicy growth_policy);
 390
 391 // Attempt to allocate from bump block rather than normal freelist.
 392 uword TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size);
// Slow paths backing the inline bump-allocation fast paths declared above.
 393 uword TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size);
 394 uword AllocateSnapshotLockedSlow(FreeList* freelist, intptr_t size);
 395
 396 // Makes bump block walkable; do not call concurrently with mutator.
 397 void MakeIterable() const;
 398
// Page-list maintenance; all require pages_lock_ per their Locked suffix.
 399 void AddPageLocked(Page* page);
 400 void AddLargePageLocked(Page* page);
 401 void AddExecPageLocked(Page* page);
 402 void RemovePageLocked(Page* page, Page* previous_page);
 403 void RemoveLargePageLocked(Page* page, Page* previous_page);
 404 void RemoveExecPageLocked(Page* page, Page* previous_page);
 405
 406 Page* AllocatePage(bool is_executable, bool link = true);
 407 Page* AllocateLargePage(intptr_t size, bool is_executable);
 408
 409 void TruncateLargePage(Page* page, intptr_t new_object_size_in_bytes);
 410 void FreePage(Page* page, Page* previous_page);
 411 void FreeLargePage(Page* page, Page* previous_page);
 412 void FreePages(Page* pages);
 413
// GC phases: mark/sweep/compact internals (definitions in pages.cc).
 414 void CollectGarbageHelper(Thread* thread, bool compact, bool finalize);
 415 void SweepNew();
 416 void SweepLarge();
 417 void Sweep(bool exclusive);
 418 void ConcurrentSweep(IsolateGroup* isolate_group);
 419 void Compact(Thread* thread);
 420
 421 static intptr_t LargePageSizeInWordsFor(intptr_t size);
422
423 bool CanIncreaseCapacityInWordsLocked(intptr_t increase_in_words) {
424 if (max_capacity_in_words_ == 0) {
425 // Unlimited.
426 return true;
427 }
428 intptr_t free_capacity_in_words =
429 (max_capacity_in_words_ - usage_.capacity_in_words);
430 return ((free_capacity_in_words > 0) &&
431 (increase_in_words <= free_capacity_in_words));
432 }
433
// Owning heap; never reassigned after construction.
 434 Heap* const heap_;
 435
 436 // One list for executable pages at freelists_[kExecutableFreelist].
 437 // FLAG_scavenger_tasks count of lists for data pages starting at
 438 // freelists_[kDataFreelist]. The sweeper inserts into the data page
 439 // freelists round-robin. The scavenger workers each use one of the data
 440 // page freelists without locking.
 441 const intptr_t num_freelists_;
 442 enum {
 443 kExecutableFreelist = 0,
 444 kDataFreelist = 1,
 445 };
 446 FreeList* freelists_;
// Memory held back so an OOM can still be reported gracefully; see
// MarkReservation()/TryReserveForOOM() above.
 447 static constexpr intptr_t kOOMReservationSize = 32 * KB;
 448 FreeListElement* oom_reservation_ = nullptr;
 449
 450 // Use ExclusivePageIterator for safe access to these.
 451 mutable Mutex pages_lock_;
 452 Page* pages_ = nullptr;
 453 Page* pages_tail_ = nullptr;
 454 Page* exec_pages_ = nullptr;
 455 Page* exec_pages_tail_ = nullptr;
 456 Page* large_pages_ = nullptr;
 457 Page* large_pages_tail_ = nullptr;
 458 Page* image_pages_ = nullptr;
// Sweep cursors for the concurrent sweeper.
 459 Page* sweep_regular_ = nullptr;
 460 Page* sweep_large_ = nullptr;
 461
 462 // Various sizes being tracked for this generation.
 463 intptr_t max_capacity_in_words_;
 464
 465 // NOTE: The capacity component of usage_ is updated by the concurrent
 466 // sweeper. Use (Increase)CapacityInWords(Locked) for thread-safe access.
 467 SpaceUsage usage_;
 468 RelaxedAtomic<intptr_t> allocated_black_in_words_;
 469
 470 // Keep track of running MarkSweep tasks.
 471 mutable Monitor tasks_lock_;
 472 intptr_t tasks_;
 473 intptr_t concurrent_marker_tasks_;
 474 intptr_t concurrent_marker_tasks_active_;
 475 AcqRelAtomic<uword> pause_concurrent_marking_;
 476 Phase phase_;
 477
 478#if defined(DEBUG)
 479 Thread* iterating_thread_;
 480#endif
 481 PageSpaceController page_space_controller_;
 482 GCMarker* marker_;
 483
 484 int64_t gc_time_micros_;
 485 intptr_t collections_;
 486 intptr_t mark_words_per_micro_;
 487
 488 bool enable_concurrent_mark_;
 489
 490 friend class BasePageIterator;
// NOTE(review): friend declarations at lines 491-493 were elided — the Doxygen
// index places `friend class ExclusiveLargePageIterator` at 493; confirm the
// others upstream. Lines 496-497 (index: `friend class PageSpaceController` at
// 496) and line 502 (likely a DISALLOW_* macro) were elided as well.
 494 friend class HeapIterationScope;
 495 friend class HeapSnapshotWriter;
 498 friend class GCCompactor;
 499 friend class CompactorTask;
 500 friend class Code;
 501
 503};
504
505} // namespace dart
506
507#endif // RUNTIME_VM_HEAP_PAGES_H_
#define DEBUG_ASSERT(cond)
Definition assert.h:321
T fetch_or(T arg, std::memory_order order=std::memory_order_acq_rel)
Definition atomic.h:114
T load(std::memory_order order=std::memory_order_acquire) const
Definition atomic.h:101
DART_FORCE_INLINE bool TryAllocateBumpLocked(intptr_t size, uword *result)
Definition freelist.h:122
bool IsOwnedByCurrentThread() const
Definition os_thread.h:370
bool IsOwnedByCurrentThread() const
Definition os_thread.h:401
bool ReachedSoftThreshold(SpaceUsage after) const
Definition pages.cc:1395
bool ReachedHardThreshold(SpaceUsage after) const
Definition pages.cc:1385
bool ReachedIdleThreshold(SpaceUsage current) const
Definition pages.cc:1405
void set_last_usage(SpaceUsage current)
Definition pages.h:82
void EvaluateAfterLoading(SpaceUsage after)
Definition pages.cc:1517
void EvaluateGarbageCollection(SpaceUsage before, SpaceUsage after, int64_t start, int64_t end)
Definition pages.cc:1412
void AddGarbageCollectionTime(int64_t start, int64_t end)
Definition pages.cc:1587
void set_concurrent_marker_tasks_active(intptr_t val)
Definition pages.h:328
void WriteProtectCode(bool read_only)
Definition pages.cc:805
FreeList * DataFreeList(intptr_t i=0)
Definition pages.h:296
void PrintHeapMapToJSONStream(IsolateGroup *isolate_group, JSONStream *stream) const
Definition pages.cc:765
intptr_t UsedInWords() const
Definition pages.h:189
bool ReachedSoftThreshold() const
Definition pages.h:179
void AllocateBlack(intptr_t size)
Definition pages.h:266
intptr_t concurrent_marker_tasks_active() const
Definition pages.h:324
bool ShouldStartIdleMarkSweep(int64_t deadline)
Definition pages.cc:826
intptr_t tasks() const
Definition pages.h:310
void IncrementalMarkWithSizeBudget(intptr_t size)
Definition pages.cc:892
void AddGCTime(int64_t micros)
Definition pages.h:252
DART_FORCE_INLINE uword TryAllocatePromoLocked(FreeList *freelist, intptr_t size)
Definition pages.h:151
bool Contains(uword addr) const
Definition pages.cc:601
int64_t gc_time_micros() const
Definition pages.h:254
void WriteProtect(bool read_only)
Definition pages.cc:715
uword TryAllocate(intptr_t size, bool is_executable=false, GrowthPolicy growth_policy=kControlGrowth)
Definition pages.h:141
void TryReleaseReservation()
Definition pages.cc:918
void AcquireLock(FreeList *freelist)
Definition pages.cc:426
bool ReachedIdleThreshold() const
Definition pages.h:182
void VisitRememberedCards(ObjectPointerVisitor *visitor) const
Definition pages.cc:679
void IncrementCollections()
Definition pages.h:256
bool ShouldPerformIdleMarkCompact(int64_t deadline)
Definition pages.cc:853
void PushDependencyToConcurrentMarking()
Definition pages.h:305
bool pause_concurrent_marking() const
Definition pages.h:333
friend class ExclusiveLargePageIterator
Definition pages.h:493
void set_tasks(intptr_t val)
Definition pages.h:311
bool enable_concurrent_mark() const
Definition pages.h:346
void set_concurrent_marker_tasks(intptr_t val)
Definition pages.h:319
@ kAwaitingFinalization
Definition pages.h:133
void IncrementalMarkWithTimeBudget(int64_t deadline)
Definition pages.cc:898
intptr_t collections() const
Definition pages.h:258
void VisitObjects(ObjectVisitor *visitor) const
Definition pages.cc:645
void CollectGarbage(Thread *thread, bool compact, bool finalize)
Definition pages.cc:961
void ReleaseLock(FreeList *freelist)
Definition pages.cc:430
void VisitObjectsNoImagePages(ObjectVisitor *visitor) const
Definition pages.cc:651
void UpdateMaxCapacityLocked()
Definition pages.cc:586
SpaceUsage GetCurrentUsage() const
Definition pages.h:208
bool DataContains(uword addr) const
Definition pages.cc:628
void IncreaseCapacityInWordsLocked(intptr_t increase_in_words)
Definition pages.h:198
bool ReachedHardThreshold() const
Definition pages.h:176
bool CodeContains(uword addr) const
Definition pages.cc:619
void EvaluateAfterLoading()
Definition pages.h:185
bool MarkReservation()
Definition pages.cc:928
friend class HeapSnapshotWriter
Definition pages.h:495
DART_FORCE_INLINE uword AllocateSnapshotLocked(FreeList *freelist, intptr_t size)
Definition pages.h:161
void ReleaseBumpAllocation()
Definition pages.cc:574
bool ContainsUnsafe(uword addr) const
Definition pages.cc:610
intptr_t ImageInWords() const
Definition pages.h:212
void SetupImagePage(void *pointer, uword size, bool is_executable)
Definition pages.cc:1323
void UpdateMaxUsed()
Definition pages.cc:594
intptr_t CapacityInWords() const
Definition pages.h:190
void ResumeConcurrentMarking()
Definition pages.cc:446
intptr_t concurrent_marker_tasks() const
Definition pages.h:315
void YieldConcurrentMarking()
Definition pages.cc:453
void AddRegionsToObjectSet(ObjectSet *set) const
Definition pages.cc:637
void AbandonMarkingForShutdown()
Definition pages.cc:581
void FreedExternal(intptr_t size)
Definition pages.h:288
void PrintToJSONObject(JSONObject *object) const
Definition pages.cc:728
void VisitObjectsImagePages(ObjectVisitor *visitor) const
Definition pages.cc:659
void set_phase(Phase val)
Definition pages.h:337
void TryReserveForOOM()
Definition pages.cc:939
bool AllocatedExternal(intptr_t size)
Definition pages.h:273
void IncreaseCapacityInWords(intptr_t increase_in_words)
Definition pages.h:194
void PauseConcurrentMarking()
Definition pages.cc:437
bool IsValidAddress(uword addr) const
Definition pages.h:225
bool IsObjectFromImagePages(ObjectPtr object)
Definition pages.cc:1357
void ResetProgressBars() const
Definition pages.cc:709
void AssistTasks(MonitorLocker *ml)
Definition pages.cc:904
void VisitRoots(ObjectPointerVisitor *visitor)
Definition pages.cc:949
Monitor * tasks_lock() const
Definition pages.h:309
void VisitObjectPointers(ObjectPointerVisitor *visitor) const
Definition pages.cc:673
intptr_t ExternalInWords() const
Definition pages.h:207
GCMarker * marker() const
Definition pages.h:353
void set_enable_concurrent_mark(bool enable_concurrent_mark)
Definition pages.h:347
friend class PageSpaceController
Definition pages.h:496
void VisitObjectsUnsafe(ObjectVisitor *visitor) const
Definition pages.cc:667
Phase phase() const
Definition pages.h:336
T load(std::memory_order order=std::memory_order_relaxed) const
Definition atomic.h:21
T fetch_add(T arg, std::memory_order order=std::memory_order_relaxed)
Definition atomic.h:35
bool compare_exchange_weak(T &expected, T desired, std::memory_order order=std::memory_order_relaxed)
Definition atomic.h:52
int64_t Size() const
Definition ring_buffer.h:28
RelaxedAtomic< intptr_t > external_in_words
Definition spaces.h:22
RelaxedAtomic< intptr_t > capacity_in_words
Definition spaces.h:20
RelaxedAtomic< intptr_t > used_in_words
Definition spaces.h:21
#define ASSERT(E)
glong glong end
GAsyncResult * result
#define DECLARE_FLAG(type, name)
Definition flags.h:14
constexpr intptr_t kWordSizeLog2
Definition globals.h:507
constexpr intptr_t KB
Definition globals.h:528
uintptr_t uword
Definition globals.h:501
bool IsAllocatableViaFreeLists(intptr_t size)
Definition spaces.h:60
const intptr_t kMaxAddrSpaceInWords
Definition globals.h:50
#define LIKELY(cond)
Definition globals.h:260
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName)
Definition globals.h:593
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition globals.h:581