compiler_pass.cc

// Copyright (c) 2018, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/compiler_pass.h"

#include "vm/compiler/backend/block_scheduler.h"
#include "vm/compiler/backend/branch_optimizer.h"
#include "vm/compiler/backend/constant_propagator.h"
#include "vm/compiler/backend/flow_graph_checker.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/compiler/backend/il_serializer.h"
#include "vm/compiler/backend/inliner.h"
#include "vm/compiler/backend/linearscan.h"
#include "vm/compiler/backend/range_analysis.h"
#include "vm/compiler/backend/redundancy_elimination.h"
#include "vm/compiler/backend/type_propagator.h"
#include "vm/compiler/call_specializer.h"
#include "vm/compiler/compiler_timings.h"
#include "vm/compiler/write_barrier_elimination.h"
#if defined(DART_PRECOMPILER)
#include "vm/compiler/aot/precompiler.h"
#endif
#include "vm/thread.h"
#include "vm/timeline.h"

#define COMPILER_PASS_REPEAT(Name, Body)                                       \
  class CompilerPass_##Name : public CompilerPass {                            \
   public:                                                                     \
    CompilerPass_##Name() : CompilerPass(k##Name, #Name) {}                    \
                                                                               \
    static bool Register() {                                                   \
      return true;                                                             \
    }                                                                          \
                                                                               \
   protected:                                                                  \
    virtual bool DoBody(CompilerPassState* state) const {                      \
      FlowGraph* flow_graph = state->flow_graph();                             \
      USE(flow_graph);                                                         \
      Body;                                                                    \
    }                                                                          \
  };                                                                           \
  static CompilerPass_##Name compiler_pass_##Name;

#define COMPILER_PASS(Name, Body)                                              \
  COMPILER_PASS_REPEAT(Name, {                                                 \
    Body;                                                                      \
    return false;                                                              \
  })
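
// Illustrative note (not part of the original source): COMPILER_PASS(Foo, {...})
// defines a CompilerPass_Foo subclass whose DoBody() runs the body once and
// returns false (no repeat request), plus a static CompilerPass_Foo instance
// constructed with (kFoo, "Foo"). COMPILER_PASS_REPEAT differs only in that the
// body supplies the return value itself, so a pass can ask Run() for another
// round. For example, the ComputeSSA pass later in this file is declared as:
//
//   COMPILER_PASS(ComputeSSA, {
//     // Transform to SSA (no inlining arguments).
//     flow_graph->ComputeSSA(nullptr);
//   });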

namespace dart {

CompilerPassState::CompilerPassState(
    Thread* thread,
    FlowGraph* flow_graph,
    SpeculativeInliningPolicy* speculative_policy,
    Precompiler* precompiler)
    : thread(thread),
      precompiler(precompiler),
      inlining_depth(0),
      sinking(nullptr),
      call_specializer(nullptr),
      speculative_policy(speculative_policy),
      sticky_flags(0),
      flow_graph_(flow_graph) {
  // Top scope function is at inlining id 0.
  inline_id_to_function.Add(&flow_graph->function());
  // Top scope function has no caller (-1).
  caller_inline_id.Add(-1);
  // We do not add a token position for the top scope function to
  // |inline_id_to_token_pos| because it is not (currently) inlined into
  // another graph at a given token position. A side effect of this is that
  // the length of |inline_id_to_function| and |caller_inline_id| is always
  // larger than the length of |inline_id_to_token_pos| by one.
}

CompilerPass* CompilerPass::passes_[CompilerPass::kNumPasses] = {nullptr};
uint8_t CompilerPass::flags_[CompilerPass::kNumPasses] = {0};

DEFINE_OPTION_HANDLER(CompilerPass::ParseFiltersFromFlag,
                      compiler_passes,
                      "List of comma separated compilation passes flags. "
                      "Use -Name to disable a pass, Name to print IL after it. "
                      "Do --compiler-passes=help for more information.");
DECLARE_FLAG(bool, print_flow_graph);
DECLARE_FLAG(bool, print_flow_graph_optimized);
DEFINE_FLAG(bool, test_il_serialization, false, "Test IL serialization.");

void CompilerPassState::set_flow_graph(FlowGraph* flow_graph) {
  flow_graph_ = flow_graph;
  if (call_specializer != nullptr) {
    call_specializer->set_flow_graph(flow_graph);
  }
}

static const char* kCompilerPassesUsage =
    "=== How to use --compiler-passes flag\n"
    "\n"
    "Pass the list of comma separated compiler pass filter flags.\n"
    "\n"
    "For the given pass Name the following flags are supported:\n"
    "\n"
    "     -Name          disable the pass\n"
    "     ]Name or Name  print IL after the pass\n"
    "     [Name          print IL before the pass\n"
    "     *Name          print IL before and after the pass\n"
    "     *              print IL after each pass.\n"
    "\n"
    " The flag can be followed by '+' which makes it sticky, e.g. Inlining+\n"
    " would cause IL to be printed after all passes that follow inlining and\n"
    " are not disabled.\n"
    "\n"
    "List of compiler passes:\n";

void CompilerPass::ParseFiltersFromFlag(const char* filter) {
  ParseFilters(filter, flags_);
}

uint8_t* CompilerPass::ParseFiltersFromPragma(const char* filter) {
  auto flags =
      ThreadState::Current()->zone()->Alloc<uint8_t>(CompilerPass::kNumPasses);
  ParseFilters(filter, flags);
  return flags;
}

void CompilerPass::ParseFilters(const char* filter, uint8_t* pass_flags) {
  if (filter == nullptr || *filter == 0) {
    return;
  }

  if (strcmp(filter, "help") == 0) {
    OS::PrintErr("%s", kCompilerPassesUsage);
    for (intptr_t i = 0; i < kNumPasses; i++) {
      if (passes_[i] != nullptr) {
        OS::PrintErr("  %s\n", passes_[i]->name());
      }
    }
    return;
  }

  // Clear all flags.
  memset(pass_flags, 0, CompilerPass::kNumPasses);

  for (const char *start = filter, *end = filter; *end != 0;
       start = (end + 1)) {
    // Search forward until the separator ',' or the end of filter is reached.
    end = start;
    while (*end != ',' && *end != '\0') {
      end++;
    }
    if (start == end) {
      OS::PrintErr("Ignoring empty compiler pass flag\n");
      continue;
    }

    ParseOneFilter(start, end, pass_flags);
  }
}

void CompilerPass::ParseOneFilter(const char* start,
                                  const char* end,
                                  uint8_t* pass_flags) {
  uint8_t flags = 0;
  if (*start == '-') {
    flags = kDisabled;
  } else if (*start == ']') {
    flags = kTraceAfter;
  } else if (*start == '[') {
    flags = kTraceBefore;
  } else if (*start == '*') {
    flags = kTraceBeforeOrAfter;
  }
  if (flags == 0) {
    flags |= kTraceAfter;
  } else {
    start++;  // Skip the modifier
  }

  size_t suffix = 0;
  if (end[-1] == '+') {
    if (start == (end - 1)) {
      OS::PrintErr("Sticky modifier '+' should follow pass name\n");
      return;
    }
    flags |= kSticky;
    suffix = 1;
  }

  size_t length = (end - start) - suffix;
  if (length != 0) {
    char* pass_name = Utils::StrNDup(start, length);
    CompilerPass* pass = FindPassByName(pass_name);
    if (pass != nullptr) {
      pass_flags[pass->id()] |= flags;
    } else {
      OS::PrintErr("Unknown compiler pass: %s\n", pass_name);
    }
    free(pass_name);
  } else if (flags == kTraceBeforeOrAfter) {
    for (intptr_t i = 0; i < kNumPasses; i++) {
      pass_flags[i] = kTraceAfter;
    }
  }
}
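
// Worked example (illustrative only): for --compiler-passes=-Inlining,*CSE+
// ParseFilters() splits the string at ',' and ParseOneFilter() records
// kDisabled for the Inlining pass and (kTraceBeforeOrAfter | kSticky) for the
// CSE pass, storing the bits in pass_flags indexed by CompilerPass::Id.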

void CompilerPass::Run(CompilerPassState* state) const {
  if ((flags() & kDisabled) != 0) {
    return;
  }

  if ((flags() & kSticky) != 0) {
    state->sticky_flags |= flags();
  }

  const intptr_t kMaxRounds = 2;
  Thread* thread = state->thread;
  bool repeat = true;
  for (intptr_t round = 1; round <= kMaxRounds && repeat; round++) {
    if (round > 1) {
      Get(kCanonicalize)->Run(state);
    }

    COMPILER_TIMINGS_PASS_TIMER_SCOPE(thread, id());
    PrintGraph(state, kTraceBefore, round);
    {
      TIMELINE_DURATION(thread, CompilerVerbose, name());
      {
        CompilerState::Current().set_current_pass(this, state);
        repeat = DoBody(state);
      }
      thread->CheckForSafepoint();
    }
    PrintGraph(state, kTraceAfter, round);
#if defined(DEBUG)
    if (CompilerState::Current().is_optimizing()) {
      FlowGraphChecker(state->flow_graph(), state->inline_id_to_function)
          .Check(name());
    }
#endif
    CompilerState::Current().set_current_pass(nullptr, nullptr);
  }
}
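
// Note: DoBody() returning true asks for another round; Run() then invokes the
// Canonicalize pass and re-runs the body, bounded by kMaxRounds (2). Passes
// declared with COMPILER_PASS always return false, while COMPILER_PASS_REPEAT
// passes such as ConstantPropagation below decide for themselves.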

void CompilerPass::PrintGraph(CompilerPassState* state,
                              Flag mask,
                              intptr_t round) const {
  FlowGraph* flow_graph = state->flow_graph();
  const uint8_t* graph_flags = flow_graph->compiler_pass_filters();
  const uint8_t current_flags =
      (graph_flags != nullptr ? graph_flags[id()] : flags()) |
      state->sticky_flags;

  if ((FLAG_print_flow_graph || FLAG_print_flow_graph_optimized) &&
      flow_graph->should_print() && ((current_flags & mask) != 0)) {
    Zone* zone = state->thread->zone();
    const char* when = mask == kTraceBefore ? "Before" : "After";
    const char* phase =
        round == 1
            ? zone->PrintToString("%s %s", when, name())
            : zone->PrintToString("%s %s (round %" Pd ")", when, name(), round);

    FlowGraphPrinter::PrintGraph(phase, flow_graph);
  }
}

#define INVOKE_PASS(Name)                                                      \
  CompilerPass::Get(CompilerPass::k##Name)->Run(pass_state);

#if defined(DART_PRECOMPILER)
#define INVOKE_PASS_AOT(Name)                                                  \
  if (mode == kAOT) {                                                          \
    INVOKE_PASS(Name);                                                         \
  }
#else
#define INVOKE_PASS_AOT(Name)
#endif
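
// Expansion sketch (illustrative): within the pipeline functions below,
//   INVOKE_PASS(CSE);
// expands to
//   CompilerPass::Get(CompilerPass::kCSE)->Run(pass_state);
// and INVOKE_PASS_AOT(CSE) produces the same call wrapped in
// `if (mode == kAOT) { ... }` in DART_PRECOMPILER builds, or nothing at all
// in other builds.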

void CompilerPass::RunGraphIntrinsicPipeline(CompilerPassState* pass_state) {
  INVOKE_PASS(AllocateRegistersForGraphIntrinsic);
}

void CompilerPass::RunInliningPipeline(PipelineMode mode,
                                       CompilerPassState* pass_state) {
  INVOKE_PASS(ApplyClassIds);
  INVOKE_PASS(TypePropagation);
  INVOKE_PASS(ApplyICData);
  INVOKE_PASS(Canonicalize);
  // Run constant propagation to make sure we specialize for
  // (optional) constant arguments passed into the inlined method.
  INVOKE_PASS(ConstantPropagation);
  // Constant propagation removes unreachable basic blocks and
  // may open more opportunities for call specialization.
  // Call specialization during inlining may cause more call
  // sites to be discovered and more functions inlined.
  INVOKE_PASS_AOT(ApplyClassIds);
  // Optimize (a << b) & c patterns, merge instructions. Must occur
  // before 'SelectRepresentations' which inserts conversion nodes.
  INVOKE_PASS(TryOptimizePatterns);
}

FlowGraph* CompilerPass::RunPipeline(PipelineMode mode,
                                     CompilerPassState* pass_state,
                                     bool compute_ssa) {
  if (compute_ssa) {
    INVOKE_PASS(ComputeSSA);
  }
  INVOKE_PASS_AOT(ApplyClassIds);
  INVOKE_PASS_AOT(TypePropagation);
  INVOKE_PASS(ApplyICData);
  INVOKE_PASS(TryOptimizePatterns);
  INVOKE_PASS(SetOuterInliningId);
  INVOKE_PASS(TypePropagation);
  INVOKE_PASS(ApplyClassIds);
  INVOKE_PASS(Inlining);
  INVOKE_PASS(TypePropagation);
  INVOKE_PASS(ApplyClassIds);
  INVOKE_PASS(TypePropagation);
  INVOKE_PASS(ApplyICData);
  INVOKE_PASS(Canonicalize);
  INVOKE_PASS(BranchSimplify);
  INVOKE_PASS(IfConvert);
  INVOKE_PASS(Canonicalize);
  INVOKE_PASS(ConstantPropagation);
  INVOKE_PASS(OptimisticallySpecializeSmiPhis);
  INVOKE_PASS(TypePropagation);
  // The extra call specialization pass in AOT is able to specialize more
  // calls after ConstantPropagation, which removes unreachable code, and
  // TypePropagation, which can infer more accurate types after removing
  // unreachable code.
  INVOKE_PASS_AOT(ApplyICData);
  INVOKE_PASS_AOT(OptimizeTypedDataAccesses);
  INVOKE_PASS(SelectRepresentations);
  INVOKE_PASS(CSE);
  INVOKE_PASS(Canonicalize);
  INVOKE_PASS(LICM);
  INVOKE_PASS(TryOptimizePatterns);
  INVOKE_PASS(DSE);
  INVOKE_PASS(TypePropagation);
  INVOKE_PASS(RangeAnalysis);
  INVOKE_PASS(OptimizeBranches);
  INVOKE_PASS(TypePropagation);
  INVOKE_PASS(TryCatchOptimization);
  INVOKE_PASS(EliminateEnvironments);
  INVOKE_PASS(EliminateDeadPhis);
  // Currently DCE assumes that EliminateEnvironments has already been run,
  // so it should not be lifted earlier than that pass.
  INVOKE_PASS(DCE);
  INVOKE_PASS(Canonicalize);
  INVOKE_PASS_AOT(DelayAllocations);
  // Repeat branches optimization after DCE, as it could make more
  // empty blocks.
  INVOKE_PASS(OptimizeBranches);
  INVOKE_PASS(AllocationSinking_Sink);
  INVOKE_PASS(EliminateDeadPhis);
  INVOKE_PASS(DCE);
  INVOKE_PASS(Canonicalize);
  INVOKE_PASS(TypePropagation);
  INVOKE_PASS(SelectRepresentations_Final);
  INVOKE_PASS(UseTableDispatch);
  INVOKE_PASS(EliminateStackOverflowChecks);
  INVOKE_PASS(Canonicalize);
  INVOKE_PASS(AllocationSinking_DetachMaterializations);
  INVOKE_PASS(EliminateWriteBarriers);
  // This must be done after all other possible intra-block code motion.
  INVOKE_PASS(LoweringAfterCodeMotionDisabled);
  INVOKE_PASS(FinalizeGraph);
  INVOKE_PASS(Canonicalize);
  INVOKE_PASS(ReorderBlocks);
  INVOKE_PASS(AllocateRegisters);
  INVOKE_PASS(TestILSerialization);  // Must be last.
  return pass_state->flow_graph();
}

FlowGraph* CompilerPass::RunPipelineWithPasses(
    CompilerPassState* state,
    std::initializer_list<CompilerPass::Id> passes) {
  for (auto pass_id : passes) {
    passes_[pass_id]->Run(state);
  }
  return state->flow_graph();
}
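
// Usage sketch (illustrative only, assuming a populated CompilerPassState):
//
//   FlowGraph* graph = CompilerPass::RunPipelineWithPasses(
//       pass_state,
//       {CompilerPass::kComputeSSA, CompilerPass::kTypePropagation});
//
// This runs only the listed passes, in order, and returns the flow graph held
// by the state (which a pass such as TestILSerialization may have replaced).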

COMPILER_PASS(ComputeSSA, {
  // Transform to SSA (no inlining arguments).
  flow_graph->ComputeSSA(nullptr);
});

COMPILER_PASS(ApplyICData, { state->call_specializer->ApplyICData(); });

COMPILER_PASS(TryOptimizePatterns, { flow_graph->TryOptimizePatterns(); });

COMPILER_PASS(SetOuterInliningId,
              { FlowGraphInliner::SetInliningId(flow_graph, 0); });

COMPILER_PASS(Inlining, {
  FlowGraphInliner inliner(
      flow_graph, &state->inline_id_to_function, &state->inline_id_to_token_pos,
      &state->caller_inline_id, state->speculative_policy, state->precompiler);
  state->inlining_depth = inliner.Inline();
});

COMPILER_PASS(TypePropagation,
              { FlowGraphTypePropagator::Propagate(flow_graph); });

COMPILER_PASS(ApplyClassIds, { state->call_specializer->ApplyClassIds(); });

COMPILER_PASS(EliminateStackOverflowChecks, {
  if (!flow_graph->IsCompiledForOsr()) {
    CheckStackOverflowElimination::EliminateStackOverflow(flow_graph);
  }
});

COMPILER_PASS(Canonicalize, {
  // Do optimizations that depend on the propagated type information.
  if (flow_graph->Canonicalize()) {
    flow_graph->Canonicalize();
  }
});

COMPILER_PASS(BranchSimplify, { BranchSimplifier::Simplify(flow_graph); });

COMPILER_PASS(IfConvert, { IfConverter::Simplify(flow_graph); });

COMPILER_PASS_REPEAT(ConstantPropagation, {
  ConstantPropagator::Optimize(flow_graph);
  return true;
});

// Optimistically convert loop phis that have a single non-smi input
// coming from the loop pre-header into smi-phis.
COMPILER_PASS(OptimisticallySpecializeSmiPhis, {
  LICM licm(flow_graph);
  licm.OptimisticallySpecializeSmiPhis();
});

COMPILER_PASS(SelectRepresentations, {
  // Unbox doubles. Performed after constant propagation to minimize
  // interference from phis merging double values and tagged
  // values coming from dead paths.
  flow_graph->SelectRepresentations();
});

COMPILER_PASS(SelectRepresentations_Final, {
  // Final selection of representations. After this pass
  // representations of inputs/outputs should match.
  flow_graph->SelectRepresentations();
  flow_graph->disallow_unmatched_representations();
});

COMPILER_PASS(UseTableDispatch, {
  state->call_specializer->ReplaceInstanceCallsWithDispatchTableCalls();
});

COMPILER_PASS(CSE, { DominatorBasedCSE::Optimize(flow_graph); });

COMPILER_PASS(LICM, {
  flow_graph->RenameUsesDominatedByRedefinitions();
  DEBUG_ASSERT(flow_graph->VerifyRedefinitions());
  LICM licm(flow_graph);
  licm.Optimize();
  flow_graph->RemoveRedefinitions(/*keep_checks*/ true);
});

COMPILER_PASS(DSE, { DeadStoreElimination::Optimize(flow_graph); });

COMPILER_PASS(RangeAnalysis, {
  // We have to perform range analysis after LICM because it
  // optimistically moves CheckSmi through phis into loop preheaders
  // making some phis smi.
  RangeAnalysis range_analysis(flow_graph);
  range_analysis.Analyze();
});

COMPILER_PASS(OptimizeBranches, {
  // Constant propagation can use information from range analysis to
  // find unreachable branch targets and eliminate branches that have
  // the same true- and false-target.
  ConstantPropagator::OptimizeBranches(flow_graph);
});

COMPILER_PASS(OptimizeTypedDataAccesses,
              { TypedDataSpecializer::Optimize(flow_graph); });

COMPILER_PASS(TryCatchOptimization, {
  OptimizeCatchEntryStates(flow_graph,
                           /*is_aot=*/CompilerState::Current().is_aot());
});

COMPILER_PASS(EliminateEnvironments, { flow_graph->EliminateEnvironments(); });

COMPILER_PASS(EliminateDeadPhis,
              { DeadCodeElimination::EliminateDeadPhis(flow_graph); });

COMPILER_PASS(DCE, { DeadCodeElimination::EliminateDeadCode(flow_graph); });

COMPILER_PASS(DelayAllocations, { DelayAllocations::Optimize(flow_graph); });

COMPILER_PASS(AllocationSinking_Sink, {
  // TODO(vegorov): Support allocation sinking with try-catch.
  if (flow_graph->graph_entry()->catch_entries().is_empty()) {
    state->sinking = new AllocationSinking(flow_graph);
    state->sinking->Optimize();
  }
});

COMPILER_PASS(AllocationSinking_DetachMaterializations, {
  if (state->sinking != nullptr) {
    // Remove all MaterializeObject instructions inserted by allocation
    // sinking from the flow graph and let them float on the side
    // referenced only from environments. Register allocator will consider
    // them as part of a deoptimization environment.
    state->sinking->DetachMaterializations();
  }
});

COMPILER_PASS(AllocateRegisters, {
  flow_graph->InsertMoveArguments();
  // Ensure loop hierarchy has been computed.
  flow_graph->GetLoopHierarchy();
  // Perform register allocation on the SSA graph.
  FlowGraphAllocator allocator(*flow_graph);
  allocator.AllocateRegisters();
});

COMPILER_PASS(AllocateRegistersForGraphIntrinsic, {
  flow_graph->set_max_argument_slot_count(0);
  // Ensure loop hierarchy has been computed.
  flow_graph->GetLoopHierarchy();
  // Perform register allocation on the SSA graph.
  FlowGraphAllocator allocator(*flow_graph, /*intrinsic_mode=*/true);
  allocator.AllocateRegisters();
});

COMPILER_PASS(ReorderBlocks, { BlockScheduler::ReorderBlocks(flow_graph); });

COMPILER_PASS(EliminateWriteBarriers, { EliminateWriteBarriers(flow_graph); });

COMPILER_PASS(FinalizeGraph, {
  // At the end of the pipeline, force recomputing and caching graph
  // information (instruction and call site counts) for the (assumed)
  // non-specialized case with better values, for future inlining.
  intptr_t instruction_count = 0;
  intptr_t call_site_count = 0;
  FlowGraphInliner::CollectGraphInfo(flow_graph,
                                     /*constants_count*/ 0,
                                     /*force*/ true, &instruction_count,
                                     &call_site_count);
  flow_graph->function().set_inlining_depth(state->inlining_depth);
  // Remove redefinitions for the rest of the pipeline.
  flow_graph->RemoveRedefinitions();
});

COMPILER_PASS(TestILSerialization, {
  // This is the last compiler pass.
  // Test that round-trip IL serialization works before generating code.
  if (FLAG_test_il_serialization && CompilerState::Current().is_aot()) {
    Zone* zone = flow_graph->zone();
    auto* detached_defs = new (zone) ZoneGrowableArray<Definition*>(zone, 0);
    flow_graph->CompactSSA(detached_defs);

    ZoneWriteStream write_stream(flow_graph->zone(), 1024);
    FlowGraphSerializer serializer(&write_stream);
    serializer.WriteFlowGraph(*flow_graph, *detached_defs);
    ReadStream read_stream(write_stream.buffer(), write_stream.bytes_written());
    FlowGraphDeserializer deserializer(flow_graph->parsed_function(),
                                       &read_stream);
    state->set_flow_graph(deserializer.ReadFlowGraph());
  }
});

COMPILER_PASS(LoweringAfterCodeMotionDisabled,
              { flow_graph->ExtractNonInternalTypedDataPayloads(); });

COMPILER_PASS(GenerateCode, { state->graph_compiler->CompileGraph(); });

}  // namespace dart