Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
compiler_pass.cc
Go to the documentation of this file.
1// Copyright (c) 2018, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6
21#if defined(DART_PRECOMPILER)
24#endif
25#include "vm/thread.h"
26#include "vm/timeline.h"
27
// Defines a concrete CompilerPass subclass named CompilerPass_<Name> whose
// DoBody executes |Body| with |flow_graph| bound to the state's current
// graph, then instantiates a single static instance of it so the pass is
// created at program startup. |Body| must end in a return statement: the
// boolean result signals whether the pass wants to be re-run (presumably
// consumed by CompilerPass::Run's repeat loop — Run's body is only
// partially visible in this copy of the file; confirm upstream).
#define COMPILER_PASS_REPEAT(Name, Body)                                       \
  class CompilerPass_##Name : public CompilerPass {                            \
   public:                                                                     \
    CompilerPass_##Name() : CompilerPass(k##Name, #Name) {}                    \
                                                                               \
    static bool Register() {                                                   \
      return true;                                                             \
    }                                                                          \
                                                                               \
   protected:                                                                  \
    virtual bool DoBody(CompilerPassState* state) const {                      \
      FlowGraph* flow_graph = state->flow_graph();                             \
      USE(flow_graph);                                                         \
      Body;                                                                    \
    }                                                                          \
  };                                                                           \
  static CompilerPass_##Name compiler_pass_##Name;
45
// Non-repeating wrapper around COMPILER_PASS_REPEAT: runs |Body| once and
// returns false so the pass never asks to be re-run.
#define COMPILER_PASS(Name, Body)                                              \
  COMPILER_PASS_REPEAT(Name, {                                                 \
    Body;                                                                      \
    return false;                                                              \
  })
51
52namespace dart {
53
55 Thread* thread,
56 FlowGraph* flow_graph,
57 SpeculativeInliningPolicy* speculative_policy,
58 Precompiler* precompiler)
59 : thread(thread),
60 precompiler(precompiler),
61 inlining_depth(0),
62 sinking(nullptr),
63 call_specializer(nullptr),
64 speculative_policy(speculative_policy),
65 sticky_flags(0),
66 flow_graph_(flow_graph) {
67 // Top scope function is at inlining id 0.
69 // Top scope function has no caller (-1).
71 // We do not add a token position for the top scope function to
72 // |inline_id_to_token_pos| because it is not (currently) inlined into
73 // another graph at a given token position. A side effect of this is that
74 // the length of |inline_id_to_function| and |caller_inline_id| is always
75 // larger than the length of |inline_id_to_token_pos| by one.
76}
77
// Table of pass instances indexed by CompilerPass::Id, and the global
// per-pass filter flags parsed from --compiler-passes (see ParseFilters).
CompilerPass* CompilerPass::passes_[CompilerPass::kNumPasses] = {nullptr};
uint8_t CompilerPass::flags_[CompilerPass::kNumPasses] = {0};
80
82 compiler_passes,
83 "List of comma separated compilation passes flags. "
84 "Use -Name to disable a pass, Name to print IL after it. "
85 "Do --compiler-passes=help for more information.");
// Print flags consulted by CompilerPass::PrintGraph below.
DECLARE_FLAG(bool, print_flow_graph);
DECLARE_FLAG(bool, print_flow_graph_optimized);
// Enables the TestILSerialization pass (round-trips IL through
// serialization before code generation; AOT only — see that pass).
DEFINE_FLAG(bool, test_il_serialization, false, "Test IL serialization.");
89
91 flow_graph_ = flow_graph;
92 if (call_specializer != nullptr) {
94 }
95}
96
// Help text for --compiler-passes=help; documents the filter grammar
// accepted by ParseFilters/ParseOneFilter below.
static const char* kCompilerPassesUsage =
    "=== How to use --compiler-passes flag\n"
    "\n"
    "Pass the list of comma separated compiler pass filter flags.\n"
    "\n"
    "For the given pass Name the following flags are supported:\n"
    "\n"
    "     -Name          disable the pass\n"
    "     ]Name or Name  print IL after the pass\n"
    "     [Name          print IL before the pass\n"
    "     *Name          print IL before and after the pass\n"
    "     *              print IL after each pass.\n"
    "\n"
    " The flag can be followed by '+' which makes it sticky, e.g. Inlining+\n"
    " would cause IL to be printed after all passes that follow inlining and\n"
    " are not disabled.\n"
    "\n"
    "List of compiler passes:\n";
115
// Parses the --compiler-passes command-line flag value into the global
// per-pass flag table |flags_|.
void CompilerPass::ParseFiltersFromFlag(const char* filter) {
  ParseFilters(filter, flags_);
}
119
// Parses a per-function pass-filter string (coming from a pragma) into a
// freshly allocated flag table and returns it.
uint8_t* CompilerPass::ParseFiltersFromPragma(const char* filter) {
  // NOTE(review): the initializer of |flags| is truncated in this copy of
  // the file; upstream allocates CompilerPass::kNumPasses bytes from the
  // current thread's zone here — restore before compiling.
  auto flags =
  ParseFilters(filter, flags);
  return flags;
}
126
// Parses a comma separated list of pass-filter specs (see
// kCompilerPassesUsage for the grammar) into |pass_flags|, one byte of
// Flag bits per pass id. The special value "help" lists the registered
// passes instead of parsing.
void CompilerPass::ParseFilters(const char* filter, uint8_t* pass_flags) {
  // An absent or empty filter leaves |pass_flags| untouched.
  if (filter == nullptr || *filter == 0) {
    return;
  }

  if (strcmp(filter, "help") == 0) {
    // NOTE(review): upstream prints kCompilerPassesUsage at this point; the
    // statement appears to be missing from this copy of the file — confirm.
    for (intptr_t i = 0; i < kNumPasses; i++) {
      if (passes_[i] != nullptr) {
        OS::PrintErr(" %s\n", passes_[i]->name());
      }
    }
    return;
  }

  // Clear all flags.
  memset(pass_flags, 0, CompilerPass::kNumPasses);

  // Split on ',' and hand each non-empty piece to ParseOneFilter.
  for (const char *start = filter, *end = filter; *end != 0;
       start = (end + 1)) {
    // Search forward until the separator ',' or the end of filter is reached.
    end = start;
    while (*end != ',' && *end != '\0') {
      end++;
    }
    if (start == end) {
      OS::PrintErr("Ignoring empty compiler pass flag\n");
      continue;
    }

    ParseOneFilter(start, end, pass_flags);
  }
}
160
162 const char* end,
163 uint8_t* pass_flags) {
164 uint8_t flags = 0;
165 if (*start == '-') {
167 } else if (*start == ']') {
169 } else if (*start == '[') {
171 } else if (*start == '*') {
173 }
174 if (flags == 0) {
176 } else {
177 start++; // Skip the modifier
178 }
179
180 size_t suffix = 0;
181 if (end[-1] == '+') {
182 if (start == (end - 1)) {
183 OS::PrintErr("Sticky modifier '+' should follow pass name\n");
184 return;
185 }
186 flags |= kSticky;
187 suffix = 1;
188 }
189
190 size_t length = (end - start) - suffix;
191 if (length != 0) {
192 char* pass_name = Utils::StrNDup(start, length);
193 CompilerPass* pass = FindPassByName(pass_name);
194 if (pass != nullptr) {
195 pass_flags[pass->id()] |= flags;
196 } else {
197 OS::PrintErr("Unknown compiler pass: %s\n", pass_name);
198 }
199 free(pass_name);
200 } else if (flags == kTraceBeforeOrAfter) {
201 for (intptr_t i = 0; i < kNumPasses; i++) {
202 pass_flags[i] = kTraceAfter;
203 }
204 }
205}
206
208 if ((flags() & kDisabled) != 0) {
209 return;
210 }
211
212 if ((flags() & kSticky) != 0) {
213 state->sticky_flags |= flags();
214 }
215
216 const intptr_t kMaxRounds = 2;
217 Thread* thread = state->thread;
218 bool repeat = true;
219 for (intptr_t round = 1; round <= kMaxRounds && repeat; round++) {
220 if (round > 1) {
221 Get(kCanonicalize)->Run(state);
222 }
223
225 PrintGraph(state, kTraceBefore, round);
226 {
227 TIMELINE_DURATION(thread, CompilerVerbose, name());
228 {
231 }
232 thread->CheckForSafepoint();
233 }
234 PrintGraph(state, kTraceAfter, round);
235#if defined(DEBUG)
236 if (CompilerState::Current().is_optimizing()) {
237 FlowGraphChecker(state->flow_graph(), state->inline_id_to_function)
238 .Check(name());
239 }
240#endif
241 CompilerState::Current().set_current_pass(nullptr, nullptr);
242 }
243}
244
245void CompilerPass::PrintGraph(CompilerPassState* state,
246 Flag mask,
247 intptr_t round) const {
248 FlowGraph* flow_graph = state->flow_graph();
249 const uint8_t* graph_flags = flow_graph->compiler_pass_filters();
250 const uint8_t current_flags =
251 (graph_flags != nullptr ? graph_flags[id()] : flags()) |
252 state->sticky_flags;
253
254 if ((FLAG_print_flow_graph || FLAG_print_flow_graph_optimized) &&
255 flow_graph->should_print() && ((current_flags & mask) != 0)) {
256 Zone* zone = state->thread->zone();
257 const char* when = mask == kTraceBefore ? "Before" : "After";
258 const char* phase =
259 round == 1
260 ? zone->PrintToString("%s %s", when, name())
261 : zone->PrintToString("%s %s (round %" Pd ")", when, name(), round);
262
263 FlowGraphPrinter::PrintGraph(phase, flow_graph);
264 }
265}
266
// Runs a single registered pass against |pass_state| (expected to be in
// scope at the expansion site).
#define INVOKE_PASS(Name)                                                      \
  CompilerPass::Get(CompilerPass::k##Name)->Run(pass_state);

// AOT-only variant: in precompiler builds it runs the pass only when the
// expansion site's |mode| is kAOT; otherwise it expands to nothing.
#if defined(DART_PRECOMPILER)
#define INVOKE_PASS_AOT(Name)                                                  \
  if (mode == kAOT) {                                                          \
    INVOKE_PASS(Name);                                                         \
  }
#else
#define INVOKE_PASS_AOT(Name)
#endif
278
280 INVOKE_PASS(AllocateRegistersForGraphIntrinsic);
281}
282
284 CompilerPassState* pass_state) {
285 INVOKE_PASS(ApplyClassIds);
286 INVOKE_PASS(TypePropagation);
287 INVOKE_PASS(ApplyICData);
289 // Run constant propagation to make sure we specialize for
290 // (optional) constant arguments passed into the inlined method.
291 INVOKE_PASS(ConstantPropagation);
292 // Constant propagation removes unreachable basic blocks and
293 // may open more opportunities for call specialization.
294 // Call specialization during inlining may cause more call
295 // sites to be discovered and more functions inlined.
296 INVOKE_PASS_AOT(ApplyClassIds);
297 // Optimize (a << b) & c patterns, merge instructions. Must occur
298 // before 'SelectRepresentations' which inserts conversion nodes.
299 INVOKE_PASS(TryOptimizePatterns);
300}
301
303 CompilerPassState* pass_state) {
304 INVOKE_PASS(ComputeSSA);
305 INVOKE_PASS_AOT(ApplyClassIds);
306 INVOKE_PASS_AOT(TypePropagation);
307 INVOKE_PASS(ApplyICData);
308 INVOKE_PASS(TryOptimizePatterns);
309 INVOKE_PASS(SetOuterInliningId);
310 INVOKE_PASS(TypePropagation);
311 INVOKE_PASS(ApplyClassIds);
312 INVOKE_PASS(Inlining);
313 INVOKE_PASS(TypePropagation);
314 INVOKE_PASS(ApplyClassIds);
315 INVOKE_PASS(TypePropagation);
316 INVOKE_PASS(ApplyICData);
318 INVOKE_PASS(BranchSimplify);
319 INVOKE_PASS(IfConvert);
321 INVOKE_PASS(ConstantPropagation);
322 INVOKE_PASS(OptimisticallySpecializeSmiPhis);
323 INVOKE_PASS(TypePropagation);
324 // The extra call specialization pass in AOT is able to specialize more
325 // calls after ConstantPropagation, which removes unreachable code, and
326 // TypePropagation, which can infer more accurate types after removing
327 // unreachable code.
328 INVOKE_PASS_AOT(ApplyICData);
329 INVOKE_PASS_AOT(OptimizeTypedDataAccesses);
330 INVOKE_PASS(WidenSmiToInt32);
331 INVOKE_PASS(SelectRepresentations);
332 INVOKE_PASS(CSE);
335 INVOKE_PASS(TryOptimizePatterns);
336 INVOKE_PASS(DSE);
337 INVOKE_PASS(TypePropagation);
339 INVOKE_PASS(OptimizeBranches);
340 INVOKE_PASS(TypePropagation);
341 INVOKE_PASS(TryCatchOptimization);
342 INVOKE_PASS(EliminateEnvironments);
343 INVOKE_PASS(EliminateDeadPhis);
344 // Currently DCE assumes that EliminateEnvironments has already been run,
345 // so it should not be lifted earlier than that pass.
346 INVOKE_PASS(DCE);
349 // Repeat branches optimization after DCE, as it could make more
350 // empty blocks.
351 INVOKE_PASS(OptimizeBranches);
352 INVOKE_PASS(AllocationSinking_Sink);
353 INVOKE_PASS(EliminateDeadPhis);
354 INVOKE_PASS(DCE);
356 INVOKE_PASS(TypePropagation);
357 INVOKE_PASS(SelectRepresentations_Final);
358 INVOKE_PASS(UseTableDispatch);
359 INVOKE_PASS(EliminateStackOverflowChecks);
361 INVOKE_PASS(AllocationSinking_DetachMaterializations);
363 // This must be done after all other possible intra-block code motion.
364 INVOKE_PASS(LoweringAfterCodeMotionDisabled);
365 INVOKE_PASS(FinalizeGraph);
367 INVOKE_PASS(ReorderBlocks);
368 INVOKE_PASS(AllocateRegisters);
369 INVOKE_PASS(TestILSerialization); // Must be last.
370 return pass_state->flow_graph();
371}
372
375 std::initializer_list<CompilerPass::Id> passes) {
376 for (auto pass_id : passes) {
377 passes_[pass_id]->Run(state);
378 }
379 return state->flow_graph();
380}
381
// Converts the flow graph into SSA form.
COMPILER_PASS(ComputeSSA, {
  // Transform to SSA (no inlining arguments).
  flow_graph->ComputeSSA(nullptr);
});
386
387COMPILER_PASS(ApplyICData, { state->call_specializer->ApplyICData(); });
388
389COMPILER_PASS(TryOptimizePatterns, { flow_graph->TryOptimizePatterns(); });
390
// Tags the graph with inlining id 0, i.e. the outermost (top scope)
// function, before the inliner runs.
COMPILER_PASS(SetOuterInliningId,
              { FlowGraphInliner::SetInliningId(flow_graph, 0); });
393
// Runs the inliner. The state's inline-id bookkeeping tables are handed to
// the inliner by pointer (so it can extend them), and the resulting
// inlining depth is recorded on |state| for FinalizeGraph.
COMPILER_PASS(Inlining, {
  FlowGraphInliner inliner(
      flow_graph, &state->inline_id_to_function, &state->inline_id_to_token_pos,
      &state->caller_inline_id, state->speculative_policy, state->precompiler);
  state->inlining_depth = inliner.Inline();
});
400
// Propagates type information through the graph
// (FlowGraphTypePropagator::Propagate).
COMPILER_PASS(TypePropagation,
              { FlowGraphTypePropagator::Propagate(flow_graph); });
403
404COMPILER_PASS(ApplyClassIds, { state->call_specializer->ApplyClassIds(); });
405
// Eliminates stack-overflow check instructions. Skipped entirely when the
// graph is compiled for on-stack replacement (OSR).
COMPILER_PASS(EliminateStackOverflowChecks, {
  if (!flow_graph->IsCompiledForOsr()) {
    CheckStackOverflowElimination::EliminateStackOverflow(flow_graph);
  }
});
411
413 // Do optimizations that depend on the propagated type information.
414 if (flow_graph->Canonicalize()) {
415 flow_graph->Canonicalize();
416 }
417});
418
419COMPILER_PASS(BranchSimplify, { BranchSimplifier::Simplify(flow_graph); });
420
421COMPILER_PASS(IfConvert, { IfConverter::Simplify(flow_graph); });
422
423COMPILER_PASS_REPEAT(ConstantPropagation, {
425 return true;
426});
427
428// Optimistically convert loop phis that have a single non-smi input
429// coming from the loop pre-header into smi-phis.
430COMPILER_PASS(OptimisticallySpecializeSmiPhis, {
431 LICM licm(flow_graph);
433});
434
// Smi-to-Int32 widening (FlowGraph::WidenSmiToInt32).
COMPILER_PASS(WidenSmiToInt32, {
  // Where beneficial convert Smi operations into Int32 operations.
  // Only meaningful for 32bit platforms right now.
  flow_graph->WidenSmiToInt32();
});
440
// First representation-selection pass; a second, final round runs later as
// SelectRepresentations_Final.
COMPILER_PASS(SelectRepresentations, {
  // Unbox doubles. Performed after constant propagation to minimize
  // interference from phis merging double values and tagged
  // values coming from dead paths.
  flow_graph->SelectRepresentations();
});
447
448COMPILER_PASS(SelectRepresentations_Final, {
449 // Final selection of representations. After this pass
450 // representations of inputs/outputs should match.
451 flow_graph->SelectRepresentations();
453});
454
// Rewrites instance calls to go through the dispatch table
// (CallSpecializer::ReplaceInstanceCallsWithDispatchTableCalls).
COMPILER_PASS(UseTableDispatch, {
  state->call_specializer->ReplaceInstanceCallsWithDispatchTableCalls();
});
458
460
463 DEBUG_ASSERT(flow_graph->VerifyRedefinitions());
464 LICM licm(flow_graph);
465 licm.Optimize();
466 flow_graph->RemoveRedefinitions(/*keep_checks*/ true);
467});
468
470
472 // We have to perform range analysis after LICM because it
473 // optimistically moves CheckSmi through phis into loop preheaders
474 // making some phis smi.
475 RangeAnalysis range_analysis(flow_graph);
476 range_analysis.Analyze();
477});
478
479COMPILER_PASS(OptimizeBranches, {
480 // Constant propagation can use information from range analysis to
481 // find unreachable branch targets and eliminate branches that have
482 // the same true- and false-target.
484});
485
// Specializes typed-data accesses (TypedDataSpecializer::Optimize).
COMPILER_PASS(OptimizeTypedDataAccesses,
              { TypedDataSpecializer::Optimize(flow_graph); });
488
// Optimizes the state captured at catch entries; behavior differs between
// JIT and AOT, hence the is_aot argument.
COMPILER_PASS(TryCatchOptimization, {
  OptimizeCatchEntryStates(flow_graph,
                           /*is_aot=*/CompilerState::Current().is_aot());
});
493
494COMPILER_PASS(EliminateEnvironments, { flow_graph->EliminateEnvironments(); });
495
496COMPILER_PASS(EliminateDeadPhis,
498
500
502
// First half of allocation sinking: runs the optimization (unless the graph
// has catch entries — see TODO) and stashes the AllocationSinking object on
// |state| so AllocationSinking_DetachMaterializations can finish later.
COMPILER_PASS(AllocationSinking_Sink, {
  // TODO(vegorov): Support allocation sinking with try-catch.
  if (flow_graph->graph_entry()->catch_entries().is_empty()) {
    state->sinking = new AllocationSinking(flow_graph);
    state->sinking->Optimize();
  }
});
510
// Second half of allocation sinking; a no-op when the Sink pass did not run
// (state->sinking stays nullptr in that case).
COMPILER_PASS(AllocationSinking_DetachMaterializations, {
  if (state->sinking != nullptr) {
    // Remove all MaterializeObject instructions inserted by allocation
    // sinking from the flow graph and let them float on the side
    // referenced only from environments. Register allocator will consider
    // them as part of a deoptimization environment.
    state->sinking->DetachMaterializations();
  }
});
520
// Prepares the graph for codegen: inserts explicit argument moves, ensures
// loop information is available, then runs the register allocator.
COMPILER_PASS(AllocateRegisters, {
  flow_graph->InsertMoveArguments();
  // Ensure loop hierarchy has been computed.
  flow_graph->GetLoopHierarchy();
  // Perform register allocation on the SSA graph.
  FlowGraphAllocator allocator(*flow_graph);
  allocator.AllocateRegisters();
});
529
// Register-allocation variant used for graph intrinsics: no argument slots
// are reserved and the allocator runs in intrinsic mode.
COMPILER_PASS(AllocateRegistersForGraphIntrinsic, {
  flow_graph->set_max_argument_slot_count(0);
  // Ensure loop hierarchy has been computed.
  flow_graph->GetLoopHierarchy();
  // Perform register allocation on the SSA graph.
  FlowGraphAllocator allocator(*flow_graph, /*intrinsic_mode=*/true);
  allocator.AllocateRegisters();
});
538
539COMPILER_PASS(ReorderBlocks, { BlockScheduler::ReorderBlocks(flow_graph); });
540
542
543COMPILER_PASS(FinalizeGraph, {
544 // At the end of the pipeline, force recomputing and caching graph
545 // information (instruction and call site counts) for the (assumed)
546 // non-specialized case with better values, for future inlining.
547 intptr_t instruction_count = 0;
548 intptr_t call_site_count = 0;
550 /*constants_count*/ 0,
551 /*force*/ true, &instruction_count,
552 &call_site_count);
553 flow_graph->function().set_inlining_depth(state->inlining_depth);
554 // Remove redefinitions for the rest of the pipeline.
555 flow_graph->RemoveRedefinitions();
556});
557
// Testing-only round-trip check (--test_il_serialization, AOT only):
// serializes the flow graph, deserializes it again, and installs the
// round-tripped graph on |state| so codegen runs against the copy.
COMPILER_PASS(TestILSerialization, {
  // This is the last compiler pass.
  // Test that round-trip IL serialization works before generating code.
  if (FLAG_test_il_serialization && CompilerState::Current().is_aot()) {
    Zone* zone = flow_graph->zone();
    // CompactSSA collects detached definitions so they survive the trip.
    auto* detached_defs = new (zone) ZoneGrowableArray<Definition*>(zone, 0);
    flow_graph->CompactSSA(detached_defs);

    ZoneWriteStream write_stream(flow_graph->zone(), 1024);
    FlowGraphSerializer serializer(&write_stream);
    serializer.WriteFlowGraph(*flow_graph, *detached_defs);
    ReadStream read_stream(write_stream.buffer(), write_stream.bytes_written());
    FlowGraphDeserializer deserializer(flow_graph->parsed_function(),
                                       &read_stream);
    state->set_flow_graph(deserializer.ReadFlowGraph());
  }
});
575
// Late lowering that must run after all intra-block code motion is done
// (see its invocation in RunPipeline): extracts payloads of non-internal
// typed data.
COMPILER_PASS(LoweringAfterCodeMotionDisabled,
              { flow_graph->ExtractNonInternalTypedDataPayloads(); });
578
579COMPILER_PASS(GenerateCode, { state->graph_compiler->CompileGraph(); });
580
581} // namespace dart
static void round(SkPoint *p)
static unsigned repeat(SkFixed fx, int max)
#define DEBUG_ASSERT(cond)
Definition assert.h:321
void Add(const T &value)
DART_FORCE_INLINE intptr_t bytes_written() const
Definition datastream.h:338
static void ReorderBlocks(FlowGraph *flow_graph)
static void Simplify(FlowGraph *flow_graph)
void set_flow_graph(FlowGraph *flow_graph)
void Run(CompilerPassState *state) const
static void ParseOneFilter(const char *start, const char *end, uint8_t *flags)
static CompilerPass * Get(Id id)
virtual bool DoBody(CompilerPassState *state) const =0
static uint8_t * ParseFiltersFromPragma(const char *filter)
static void RunGraphIntrinsicPipeline(CompilerPassState *state)
static DART_WARN_UNUSED_RESULT FlowGraph * RunPipeline(PipelineMode mode, CompilerPassState *state)
static void ParseFilters(const char *filter, uint8_t *flags)
static void RunInliningPipeline(PipelineMode mode, CompilerPassState *state)
static constexpr intptr_t kNumPasses
static void ParseFiltersFromFlag(const char *filter)
static DART_WARN_UNUSED_RESULT FlowGraph * RunPipelineWithPasses(CompilerPassState *state, std::initializer_list< CompilerPass::Id > passes)
uint8_t flags() const
const char * name() const
void set_current_pass(const CompilerPass *pass, const CompilerPassState *pass_state)
static CompilerState & Current()
static void Optimize(FlowGraph *graph)
static void OptimizeBranches(FlowGraph *graph)
static void EliminateDeadPhis(FlowGraph *graph)
static void EliminateDeadCode(FlowGraph *graph)
static void Optimize(FlowGraph *graph)
static void Optimize(FlowGraph *graph)
static bool Optimize(FlowGraph *graph, bool run_load_optimization=true)
static void SetInliningId(FlowGraph *flow_graph, intptr_t inlining_id)
Definition inliner.cc:2449
static void CollectGraphInfo(FlowGraph *flow_graph, intptr_t num_constant_args, bool force, intptr_t *instruction_count, intptr_t *call_site_count)
Definition inliner.cc:2415
static void PrintGraph(const char *phase, FlowGraph *flow_graph)
void WriteFlowGraph(const FlowGraph &flow_graph, const ZoneGrowableArray< Definition * > &detached_defs)
GraphEntryInstr * graph_entry() const
Definition flow_graph.h:268
bool should_print() const
Definition flow_graph.h:505
bool VerifyRedefinitions()
void CompactSSA(ZoneGrowableArray< Definition * > *detached_defs=nullptr)
bool IsCompiledForOsr() const
Definition flow_graph.h:460
Zone * zone() const
Definition flow_graph.h:261
const uint8_t * compiler_pass_filters() const
Definition flow_graph.h:506
void RemoveRedefinitions(bool keep_checks=false)
void set_max_argument_slot_count(intptr_t count)
Definition flow_graph.h:568
void InsertMoveArguments()
void EliminateEnvironments()
void disallow_unmatched_representations()
Definition flow_graph.h:417
const LoopHierarchy & GetLoopHierarchy()
Definition flow_graph.h:430
const Function & function() const
Definition flow_graph.h:130
const ParsedFunction & parsed_function() const
Definition flow_graph.h:129
void RenameUsesDominatedByRedefinitions()
void TryOptimizePatterns()
void ComputeSSA(ZoneGrowableArray< Definition * > *inlining_parameters)
void SelectRepresentations()
void ExtractNonInternalTypedDataPayloads()
const GrowableArray< CatchBlockEntryInstr * > & catch_entries() const
Definition il.h:1997
static void Simplify(FlowGraph *flow_graph)
void OptimisticallySpecializeSmiPhis()
static void static void PrintErr(const char *format,...) PRINTF_ATTRIBUTE(1
const Function & function() const
Definition parser.h:73
Zone * zone() const
static ThreadState * Current()
void CheckForSafepoint()
Definition thread.h:1091
static void Optimize(FlowGraph *flow_graph)
static char * StrNDup(const char *s, intptr_t n)
char * PrintToString(const char *format,...) PRINTF_ATTRIBUTE(2
Definition zone.cc:313
ElementType * Alloc(intptr_t length)
#define COMPILER_PASS(Name, Body)
#define INVOKE_PASS_AOT(Name)
#define INVOKE_PASS(Name)
#define COMPILER_PASS_REPEAT(Name, Body)
#define COMPILER_TIMINGS_PASS_TIMER_SCOPE(thread, pass_id)
AtkStateType state
glong glong end
#define DECLARE_FLAG(type, name)
Definition flags.h:14
#define DEFINE_FLAG(type, name, default_value, comment)
Definition flags.h:16
#define DEFINE_OPTION_HANDLER(handler, name, comment)
Definition flags.h:23
size_t length
void OptimizeCatchEntryStates(FlowGraph *flow_graph, bool is_aot)
static const char * kCompilerPassesUsage
void EliminateWriteBarriers(FlowGraph *flow_graph)
#define Pd
Definition globals.h:408
CompilerPassState(Thread *thread, FlowGraph *flow_graph, SpeculativeInliningPolicy *speculative_policy, Precompiler *precompiler=nullptr)
CallSpecializer * call_specializer
FlowGraph * flow_graph() const
GrowableArray< intptr_t > caller_inline_id
GrowableArray< const Function * > inline_id_to_function
void set_flow_graph(FlowGraph *flow_graph)
#define TIMELINE_DURATION(thread, stream, name)
Definition timeline.h:39