5#ifndef RUNTIME_VM_FLAG_LIST_H_
6#define RUNTIME_VM_FLAG_LIST_H_
// NOTE(review): these two contradictory definitions of USING_PRODUCT are
// presumably the PRODUCT / non-PRODUCT branches of an `#if defined(PRODUCT)`
// conditional whose guard lines are not visible in this excerpt — confirm
// against the full header before editing.
#define USING_PRODUCT true
#define USING_PRODUCT false
17#if defined(DART_PRECOMPILED_RUNTIME)
#if defined(FORCE_INCLUDE_DISASSEMBLER)
// Disassembler support forced in: each disassembler flag is declared through
// P(name, type, default, doc), i.e. as an ordinary settable flag.
#define DISASSEMBLE_FLAGS(P, R, C, D) \
  P(disassemble, bool, false, "Disassemble dart code.") \
  P(disassemble_optimized, bool, false, "Disassemble optimized code.") \
  P(disassemble_relative, bool, false, "Use offsets instead of absolute PCs") \
  P(disassemble_stubs, bool, false, "Disassemble generated stubs.") \
  P(support_disassembler, bool, true, "Support the disassembler.")
// NOTE(review): the `#else`/`#endif` of the conditional above are not visible
// in this excerpt; the second definition below is presumably the alternate
// branch. Here the same flags go through R(name, fixed_value, type, default,
// doc) — judging by usage, the extra leading `false` pins each flag's value
// in builds where the disassembler is compiled out. Confirm against the
// P/R/C/D registration macros defined elsewhere in the runtime.
#define DISASSEMBLE_FLAGS(P, R, C, D) \
  R(disassemble, false, bool, false, "Disassemble dart code.") \
  R(disassemble_optimized, false, bool, false, "Disassemble optimized code.") \
  R(disassemble_relative, false, bool, false, \
    "Use offsets instead of absolute PCs") \
  R(disassemble_stubs, false, bool, false, "Disassemble generated stubs.") \
  R(support_disassembler, false, bool, true, "Support the disassembler.")
45#if defined(INCLUDE_IL_PRINTER)
// Flags declared VM-wide (the name suggests they apply globally rather than
// per-isolate — confirm where P/R/C/D are defined). Entry shapes, inferred
// from usage in this header:
//   P(name, type, default, doc)              ordinary flag
//   R(name, fixed_value, type, default, doc) flag with an extra build-fixed
//                                            value — TODO confirm semantics
#define VM_GLOBAL_FLAG_LIST(P, R, C, D) \
  P(code_comments, bool, false, "Include comments into code and disassembly.") \
  P(dwarf_stack_traces_mode, bool, false, \
    "Use --[no-]dwarf-stack-traces instead.") \
  R(dedup_instructions, true, bool, false, \
    "Canonicalize instructions when precompiling.")
// Master list of VM flags. A client expands FLAG_LIST with four macros, one
// per flag kind; the argument shapes below are inferred from usage in this
// header (confirm against the flag-registration macros):
//   P(name, type, default, doc)                   ordinary flag
//   R(name, fixed_value, type, default, doc)      flag with a build-fixed value
//   C(name, v1, v2, type, default, doc)           flag with two extra
//                                                 per-build-mode values
//   D(name, type, default, doc)                   debug-only flag
// Fixes in this revision (help-text only; no flag name/type/default changed):
//  - restored the missing "methods." continuation that left the
//    huge_method_cutoff_in_ast_nodes entry unterminated,
//  - "Deoptimizes we are about to return" -> "Deoptimizes when we are ...",
//  - stray trailing comma in the max_equality_polymorphic_checks help,
//  - missing space that rendered as "unlimited,e.g:" in old_gen_heap_size.
#define FLAG_LIST(P, R, C, D) \
  VM_GLOBAL_FLAG_LIST(P, R, C, D) \
  DISASSEMBLE_FLAGS(P, R, C, D) \
  P(abort_on_oom, bool, false, \
    "Abort if memory allocation fails - use only with --old-gen-heap-size") \
  P(add_readonly_data_symbols, bool, false, \
    "Add static symbols for objects in snapshot read-only data") \
  P(background_compilation, bool, true, \
    "Run optimizing compilation in background") \
  P(check_token_positions, bool, false, \
    "Check validity of token positions while compiling flow graphs") \
  P(collect_dynamic_function_names, bool, true, \
    "Collects all dynamic function names to identify unique targets") \
  P(compactor_tasks, int, 2, \
    "The number of tasks to use for parallel compaction.") \
  P(concurrent_mark, bool, true, "Concurrent mark for old generation.") \
  P(concurrent_sweep, bool, true, "Concurrent sweep for old generation.") \
  C(deoptimize_alot, false, false, bool, false, \
    "Deoptimizes when we are about to return to Dart code from native " \
    "entries.") \
  C(deoptimize_every, 0, 0, int, 0, \
    "Deoptimize on every N stack overflow checks") \
  P(deoptimize_on_runtime_call_every, int, 0, \
    "Deoptimize functions on every runtime call.") \
  P(dontneed_on_sweep, bool, false, \
    "madvise(DONTNEED) free areas in partially used heap regions") \
  R(dump_megamorphic_stats, false, bool, false, \
    "Dump megamorphic cache statistics") \
  R(dump_symbol_stats, false, bool, false, "Dump symbol table statistics") \
  P(enable_asserts, bool, false, "Enable assert statements.") \
  P(inline_alloc, bool, true, "Whether to use inline allocation fast paths.") \
  P(enable_mirrors, bool, true, \
    "Disable to make importing dart:mirrors an error.") \
  P(enable_ffi, bool, true, "Disable to make importing dart:ffi an error.") \
  P(force_clone_compiler_objects, bool, false, \
    "Force cloning of objects needed in compiler (ICData and Field).") \
  P(guess_icdata_cid, bool, true, \
    "Artificially create type feedback for arithmetic etc. operations") \
  P(huge_method_cutoff_in_ast_nodes, int, 10000, \
    "Huge method cutoff in AST nodes: Disables optimizations for huge " \
    "methods.") \
  P(idle_timeout_micros, int, 61 * kMicrosecondsPerSecond, \
    "Consider thread pool isolates for idle tasks after this long.") \
  P(idle_duration_micros, int, kMaxInt32, \
    "Allow idle tasks to run for this long.") \
  P(interpret_irregexp, bool, false, "Use irregexp bytecode interpreter") \
  P(link_natives_lazily, bool, false, "Link native calls lazily") \
  R(log_marker_tasks, false, bool, false, \
    "Log debugging information for old gen GC marking tasks.") \
  P(scavenger_tasks, int, 2, \
    "The number of tasks to spawn during scavenging and incremental " \
    "compaction (0 means perform all work on the main thread).") \
  P(mark_when_idle, bool, false, \
    "The Dart thread will assist in concurrent marking during idle time and " \
    "is counted as one marker task") \
  P(marker_tasks, int, 2, \
    "The number of tasks to spawn during old gen GC marking (0 means " \
    "perform all marking on main thread).") \
  P(hash_map_probes_limit, int, kMaxInt32, \
    "Limit number of probes while doing lookups in hash maps.") \
  P(max_polymorphic_checks, int, 4, \
    "Maximum number of polymorphic check, otherwise it is megamorphic.") \
  P(max_equality_polymorphic_checks, int, 32, \
    "Maximum number of polymorphic checks in equality operator.") \
  P(new_gen_semi_max_size, int, kDefaultNewGenSemiMaxSize, \
    "Max size of new gen semi space in MB") \
  P(new_gen_semi_initial_size, int, (kWordSize <= 4) ? 1 : 2, \
    "Initial size of new gen semi space in MB") \
  P(optimization_counter_threshold, int, kDefaultOptimizationCounterThreshold, \
    "Function's usage-counter value before it is optimized, -1 means never") \
  P(optimization_level, int, 2, \
    "Optimization level: 1 (favor size), 2 (default), 3 (favor speed)") \
  P(old_gen_heap_size, int, kDefaultMaxOldGenHeapSize, \
    "Max size of old gen heap size in MB, or 0 for unlimited, " \
    "e.g: --old_gen_heap_size=1024 allows up to 1024MB old gen heap") \
  R(pause_isolates_on_start, false, bool, false, \
    "Pause isolates before starting.") \
  R(pause_isolates_on_exit, false, bool, false, "Pause isolates exiting.") \
  R(pause_isolates_on_unhandled_exceptions, false, bool, false, \
    "Pause isolates on unhandled exceptions.") \
  P(polymorphic_with_deopt, bool, true, \
    "Polymorphic calls with deoptimization / megamorphic call") \
  P(precompiled_mode, bool, false, "Precompilation compiler mode") \
  P(print_snapshot_sizes, bool, false, "Print sizes of generated snapshots.") \
  P(print_snapshot_sizes_verbose, bool, false, \
    "Print cluster sizes of generated snapshots.") \
  R(print_ssa_liveranges, false, bool, false, \
    "Print live ranges after allocation.") \
  R(print_stacktrace_at_api_error, false, bool, false, \
    "Attempt to print a native stack trace when an API error is created.") \
  D(print_variable_descriptors, bool, false, \
    "Print variable descriptors in disassembly.") \
  R(profiler, false, bool, false, "Enable the profiler.") \
  R(profiler_native_memory, false, bool, false, \
    "Enable native memory statistic collection.") \
  P(reorder_basic_blocks, bool, true, "Reorder basic blocks") \
  C(stress_async_stacks, false, false, bool, false, \
    "Stress test async stack traces") \
  P(retain_function_objects, bool, true, \
    "Serialize function objects for all code objects even if not otherwise " \
    "needed in the precompiled runtime.") \
  P(retain_code_objects, bool, true, \
    "Serialize all code objects even if not otherwise " \
    "needed in the precompiled runtime.") \
  P(show_invisible_frames, bool, false, \
    "Show invisible frames in stack traces.") \
  P(target_unknown_cpu, bool, false, \
    "Generate code for a generic CPU, unknown at compile time") \
  D(trace_cha, bool, false, "Trace CHA operations") \
  R(trace_field_guards, false, bool, false, "Trace changes in field's cids.") \
  D(trace_finalizers, bool, false, "Traces finalizers.") \
  D(trace_ic, bool, false, "Trace IC handling") \
  D(trace_ic_miss_in_optimized, bool, false, \
    "Trace IC miss in optimized code") \
  C(trace_irregexp, false, false, bool, false, "Trace irregexps.") \
  D(trace_intrinsified_natives, bool, false, \
    "Report if any of the intrinsified natives are called") \
  D(trace_isolates, bool, false, "Trace isolate creation and shut down.") \
  D(trace_handles, bool, false, "Traces allocation of handles.") \
  D(trace_kernel_binary, bool, false, "Trace Kernel reader/writer.") \
  D(trace_natives, bool, false, "Trace invocation of natives") \
  D(trace_optimization, bool, false, "Print optimization details.") \
  R(trace_profiler, false, bool, false, "Profiler trace") \
  D(trace_profiler_verbose, bool, false, "Verbose profiler trace") \
  D(trace_runtime_calls, bool, false, "Trace runtime calls.") \
  R(trace_ssa_allocator, false, bool, false, \
    "Trace register allocation over SSA.") \
  P(trace_strong_mode_types, bool, false, \
    "Trace optimizations based on strong mode types.") \
  D(trace_type_checks, bool, false, "Trace runtime type checks.") \
  D(trace_type_checks_verbose, bool, false, \
    "Enable verbose trace of runtime type checks.") \
  D(trace_patching, bool, false, "Trace patching of code.") \
  D(trace_optimized_ic_calls, bool, false, \
    "Trace IC calls in optimized code.") \
  D(trace_zones, bool, false, "Traces allocation sizes in the zone.") \
  P(truncating_left_shift, bool, true, \
    "Optimize left shift to truncate if possible") \
  P(use_compactor, bool, false, "Compact the heap during old-space GC.") \
  P(use_incremental_compactor, bool, true, \
    "Compact the heap during old-space GC.") \
  P(use_cha_deopt, bool, true, \
    "Use class hierarchy analysis even if it can cause deoptimization.") \
  P(use_field_guards, bool, true, "Use field guards and track field types") \
  C(use_osr, false, true, bool, true, "Use OSR") \
  P(use_slow_path, bool, false, "Whether to avoid inlined fast paths.") \
  P(verbose_gc, bool, false, "Enables verbose GC.") \
  P(verbose_gc_hdr, int, 40, "Print verbose GC header interval.") \
  R(verify_after_gc, false, bool, false, \
    "Enables heap verification after GC.") \
  R(verify_before_gc, false, bool, false, \
    "Enables heap verification before GC.") \
  R(verify_store_buffer, false, bool, false, \
    "Enables store buffer verification before and after scavenges.") \
  R(verify_after_marking, false, bool, false, \
    "Enables heap verification after marking.") \
  P(enable_slow_path_sharing, bool, true, "Enable sharing of slow-path code.") \
  P(shared_slow_path_triggers_gc, bool, false, \
    "TESTING: slow-path triggers a GC.") \
  P(enable_multiple_entrypoints, bool, true, \
    "Enable multiple entrypoints per-function and related optimizations.") \
  P(enable_testing_pragmas, bool, false, \
    "Enable magical pragmas for testing purposes. Use at your own risk!") \
  R(eliminate_type_checks, true, bool, true, \
    "Eliminate type checks when allowed by static type analysis.") \
  D(support_rr, bool, false, "Support running within RR.") \
  P(verify_entry_points, bool, false, \
    "Throw API error on invalid member access through native API. See " \
    "entry_point_pragma.md") \
  C(branch_coverage, false, false, bool, false, "Enable branch coverage") \
  C(coverage, false, false, bool, true, "Enable coverage")
constexpr bool FLAG_support_il_printer
constexpr intptr_t kDefaultOptimizationCounterThreshold
constexpr bool kDartPrecompiledRuntime