Flutter Engine
flow_graph_compiler.h
1// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_COMPILER_H_
6#define RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_COMPILER_H_
7
9#if defined(DART_PRECOMPILED_RUNTIME)
10#error "AOT runtime should not use compiler sources (including header files)"
11#endif // defined(DART_PRECOMPILED_RUNTIME)
12
13#include <functional>
14
15#include "vm/allocation.h"
16#include "vm/code_descriptors.h"
22#include "vm/runtime_entry.h"
23
24namespace dart {
25
26// Forward declarations.
27class CatchEntryMovesMapBuilder;
28class Code;
29class DeoptInfoBuilder;
30class FlowGraph;
31class FlowGraphCompiler;
32class Function;
33template <typename T>
34class GrowableArray;
35class ParsedFunction;
36class SpeculativeInliningPolicy;
37
38namespace compiler {
39struct TableSelector;
40}
41
42// Used in methods which need conditional access to a temporary register.
43// May only be used to allocate a single temporary register.
44class TemporaryRegisterAllocator : public ValueObject {
45 public:
46 virtual ~TemporaryRegisterAllocator() {}
47 virtual Register AllocateTemporary() = 0;
48 virtual void ReleaseTemporary() = 0;
49};
50
51class ConstantTemporaryAllocator : public TemporaryRegisterAllocator {
52 public:
53 explicit ConstantTemporaryAllocator(Register tmp) : tmp_(tmp) {}
54
55 Register AllocateTemporary() override { return tmp_; }
56 void ReleaseTemporary() override {}
57
58 private:
59 Register const tmp_;
60};
61
62class NoTemporaryAllocator : public TemporaryRegisterAllocator {
63 public:
64 Register AllocateTemporary() override { UNREACHABLE(); }
65 void ReleaseTemporary() override { UNREACHABLE(); }
66};
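// Illustrative sketch (hypothetical names): a backend that knows a scratch
// register is free at a call site can wrap it in a ConstantTemporaryAllocator
// and hand it to FlowGraphCompiler::EmitMove (declared later in this header),
// while NoTemporaryAllocator documents that no temporary may be requested:
//
//   ConstantTemporaryAllocator temp_alloc(TMP2);  // TMP2: assumed-free register
//   compiler->EmitMove(dst_location, src_location, &temp_alloc);
//
//   NoTemporaryAllocator no_temp;
//   compiler->EmitMove(dst_location, src_location, &no_temp);  // UNREACHABLE()
//                                                 // if a temporary is requested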
67
68// Used for describing a deoptimization point after call (lazy deoptimization).
69// For deoptimization before instruction use class CompilerDeoptInfoWithStub.
70class CompilerDeoptInfo : public ZoneAllocated {
71 public:
72 CompilerDeoptInfo(intptr_t deopt_id,
73 ICData::DeoptReasonId reason,
74 uint32_t flags,
75 Environment* deopt_env)
76 : pc_offset_(-1),
77 deopt_id_(deopt_id),
78 reason_(reason),
79 flags_(flags),
80 deopt_env_(deopt_env) {
81 ASSERT(deopt_env != nullptr);
82 }
83 virtual ~CompilerDeoptInfo() {}
84
85 TypedDataPtr CreateDeoptInfo(FlowGraphCompiler* compiler,
86 DeoptInfoBuilder* builder,
87 const Array& deopt_table);
88
89 // No code needs to be generated.
90 virtual void GenerateCode(FlowGraphCompiler* compiler, intptr_t stub_ix) {}
91
92 intptr_t pc_offset() const { return pc_offset_; }
93 void set_pc_offset(intptr_t offset) { pc_offset_ = offset; }
94
95 intptr_t deopt_id() const { return deopt_id_; }
96 ICData::DeoptReasonId reason() const { return reason_; }
97 uint32_t flags() const { return flags_; }
98 const Environment* deopt_env() const { return deopt_env_; }
99
100 private:
101 void EmitMaterializations(Environment* env, DeoptInfoBuilder* builder);
102
103 void AllocateOutgoingArguments(Environment* env);
104
105 intptr_t pc_offset_;
106 const intptr_t deopt_id_;
107 const ICData::DeoptReasonId reason_;
108 const uint32_t flags_;
109 Environment* deopt_env_;
110
111 DISALLOW_COPY_AND_ASSIGN(CompilerDeoptInfo);
112};
113
114class CompilerDeoptInfoWithStub : public CompilerDeoptInfo {
115 public:
116 CompilerDeoptInfoWithStub(intptr_t deopt_id,
117 ICData::DeoptReasonId reason,
118 uint32_t flags,
119 Environment* deopt_env)
120 : CompilerDeoptInfo(deopt_id, reason, flags, deopt_env), entry_label_() {
121 ASSERT(reason != ICData::kDeoptAtCall);
122 }
123
124 compiler::Label* entry_label() { return &entry_label_; }
125
126 // Implementation is in architecture specific file.
127 virtual void GenerateCode(FlowGraphCompiler* compiler, intptr_t stub_ix);
128
129 const char* Name() const {
130 const char* kFormat = "Deopt stub for id %d, reason: %s";
131 const intptr_t len = Utils::SNPrint(nullptr, 0, kFormat, deopt_id(),
132 DeoptReasonToCString(reason())) +
133 1;
134 char* chars = Thread::Current()->zone()->Alloc<char>(len);
135 Utils::SNPrint(chars, len, kFormat, deopt_id(),
136 DeoptReasonToCString(reason()));
137 return chars;
138 }
139
140 private:
141 compiler::Label entry_label_;
142
143 DISALLOW_COPY_AND_ASSIGN(CompilerDeoptInfoWithStub);
144};
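// Illustrative sketch (hypothetical names): backends obtain these stubs via
// FlowGraphCompiler::AddDeoptStub (declared below), which records a
// CompilerDeoptInfoWithStub and returns its entry label:
//
//   compiler::Label* deopt =
//       compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
//   __ BranchIfNotSmi(value_reg, deopt);  // value_reg is a stand-in
//
// GenerateCode() later emits the out-of-line deoptimization sequence for every
// stub that was requested while compiling the graph.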
145
146class SlowPathCode : public ZoneAllocated {
147 public:
148 explicit SlowPathCode(Instruction* instruction)
149 : instruction_(instruction), entry_label_(), exit_label_() {}
150 virtual ~SlowPathCode() {}
151
152 Instruction* instruction() const { return instruction_; }
153 compiler::Label* entry_label() { return &entry_label_; }
154 compiler::Label* exit_label() { return &exit_label_; }
155
156 void GenerateCode(FlowGraphCompiler* compiler) {
157 EmitNativeCode(compiler);
158 ASSERT(entry_label_.IsBound());
159 }
160
161 private:
162 virtual void EmitNativeCode(FlowGraphCompiler* compiler) = 0;
163
164 Instruction* instruction_;
165 compiler::Label entry_label_;
166 compiler::Label exit_label_;
167
168 DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
169};
170
171template <typename T>
172class TemplateSlowPathCode : public SlowPathCode {
173 public:
174 explicit TemplateSlowPathCode(T* instruction) : SlowPathCode(instruction) {}
175
176 T* instruction() const {
177 return static_cast<T*>(SlowPathCode::instruction());
178 }
179};
180
181class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
182 public:
183 BoxAllocationSlowPath(Instruction* instruction,
184 const Class& cls,
185 Register result)
186 : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {}
187
188 virtual void EmitNativeCode(FlowGraphCompiler* compiler);
189
190 static void Allocate(FlowGraphCompiler* compiler,
191 Instruction* instruction,
192 const Class& cls,
193 Register result,
194 Register temp);
195
196 private:
197 const Class& cls_;
198 const Register result_;
199};
200
201class DoubleToIntegerSlowPath
202 : public TemplateSlowPathCode<DoubleToIntegerInstr> {
203 public:
204 DoubleToIntegerSlowPath(DoubleToIntegerInstr* instruction,
205 FpuRegister value_reg)
206 : TemplateSlowPathCode(instruction), value_reg_(value_reg) {}
207
208 virtual void EmitNativeCode(FlowGraphCompiler* compiler);
209
210 private:
211 FpuRegister value_reg_;
212};
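// Illustrative sketch (hypothetical names): the usual life cycle of a slow
// path. The fast path branches to entry_label() and execution rejoins at
// exit_label(); EmitNativeCode() runs out of line when deferred code is
// generated and must bind the entry label (see the ASSERT in GenerateCode()):
//
//   auto* slow_path =
//       new BoxAllocationSlowPath(instruction, box_cls, result_reg);
//   compiler->AddSlowPathCode(slow_path);
//   // ... fast-path allocation that jumps to slow_path->entry_label() on
//   // failure goes here ...
//   __ Bind(slow_path->exit_label());
//
// BoxAllocationSlowPath::Allocate() wraps this pattern for the common case.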
213
214// Slow path code which calls runtime entry to throw an exception.
215class ThrowErrorSlowPathCode : public TemplateSlowPathCode<Instruction> {
216 public:
217 ThrowErrorSlowPathCode(Instruction* instruction,
218 const RuntimeEntry& runtime_entry)
219 : TemplateSlowPathCode(instruction), runtime_entry_(runtime_entry) {}
220
221 // This name appears in disassembly.
222 virtual const char* name() = 0;
223
224 // Subclasses can override these methods to customize slow path code.
225 virtual void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) {}
226 virtual void AddMetadataForRuntimeCall(FlowGraphCompiler* compiler) {}
227 virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler* compiler) {}
228
229 // Returns number of arguments for runtime call (if shared stub is not used).
230 virtual intptr_t GetNumberOfArgumentsForRuntimeCall() { return 0; }
231
232 virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
233 bool save_fpu_registers) {
234 UNREACHABLE();
235 }
236
237 virtual void EmitNativeCode(FlowGraphCompiler* compiler);
238
239 private:
240 const RuntimeEntry& runtime_entry_;
241};
242
243class NullErrorSlowPath : public ThrowErrorSlowPathCode {
244 public:
245 explicit NullErrorSlowPath(CheckNullInstr* instruction)
246 : ThrowErrorSlowPathCode(instruction,
247 GetRuntimeEntry(instruction->exception_type())) {
248 }
249
250 CheckNullInstr::ExceptionType exception_type() const {
251 return instruction()->AsCheckNull()->exception_type();
252 }
253
254 const char* name() override;
255
256 void EmitSharedStubCall(FlowGraphCompiler* compiler,
257 bool save_fpu_registers) override;
258
259 void AddMetadataForRuntimeCall(FlowGraphCompiler* compiler) override {
260 CheckNullInstr::AddMetadataForRuntimeCall(instruction()->AsCheckNull(),
261 compiler);
262 }
263
264 static CodePtr GetStub(FlowGraphCompiler* compiler,
265 CheckNullInstr::ExceptionType exception_type,
266 bool save_fpu_registers);
267
268 private:
269 static const RuntimeEntry& GetRuntimeEntry(
270 CheckNullInstr::ExceptionType exception_type);
271};
272
273class RangeErrorSlowPath : public ThrowErrorSlowPathCode {
274 public:
275 explicit RangeErrorSlowPath(GenericCheckBoundInstr* instruction)
276 : ThrowErrorSlowPathCode(
277 instruction,
278 GenericCheckBoundInstr::UseUnboxedRepresentation()
279 ? kRangeErrorUnboxedInt64RuntimeEntry
280 : kRangeErrorRuntimeEntry) {}
281 virtual const char* name() { return "check bound"; }
282
283 virtual intptr_t GetNumberOfArgumentsForRuntimeCall() {
284 if (GenericCheckBoundInstr::UseUnboxedRepresentation()) {
285 return 0; // Unboxed arguments are passed through Thread.
286 }
287 return 2; // length and index
288 }
289
290 virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler* compiler);
291
292 virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
293 bool save_fpu_registers);
294};
295
296class WriteErrorSlowPath : public ThrowErrorSlowPathCode {
297 public:
298 explicit WriteErrorSlowPath(CheckWritableInstr* instruction)
299 : ThrowErrorSlowPathCode(instruction, kWriteErrorRuntimeEntry) {}
300 virtual const char* name() { return "check writable"; }
301
302 virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
303 bool save_fpu_registers);
304
305 virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler* compiler);
306
307 virtual intptr_t GetNumberOfArgumentsForRuntimeCall() {
308 return 2; // receiver, kind
309 }
310};
311
312class LateInitializationErrorSlowPath : public ThrowErrorSlowPathCode {
313 public:
314 explicit LateInitializationErrorSlowPath(Instruction* instruction)
315 : ThrowErrorSlowPathCode(instruction,
316 kLateFieldNotInitializedErrorRuntimeEntry) {
317 ASSERT(instruction->IsLoadField() || instruction->IsLoadStaticField());
318 }
319 virtual const char* name() { return "late initialization error"; }
320
321 virtual intptr_t GetNumberOfArgumentsForRuntimeCall() {
322 return 1; // field
323 }
324
325 virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler* compiler);
326
327 virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
328 bool save_fpu_registers);
329
330 private:
331 FieldPtr OriginalField() const {
332 return instruction()->IsLoadField()
333 ? instruction()->AsLoadField()->slot().field().Original()
334 : instruction()->AsLoadStaticField()->field().Original();
335 }
336};
337
338class FlowGraphCompiler : public ValueObject {
339 private:
340 class BlockInfo : public ZoneAllocated {
341 public:
342 BlockInfo()
343 : block_label_(),
344 jump_label_(&block_label_),
345 next_nonempty_label_(nullptr),
346 is_marked_(false) {}
347
348 // The label to jump to when control is transferred to this block. For
349 // nonempty blocks it is the label of the block itself. For empty
350 // blocks it is the label of the first nonempty successor block.
351 compiler::Label* jump_label() const { return jump_label_; }
352 void set_jump_label(compiler::Label* label) { jump_label_ = label; }
353
354 // The label of the first nonempty block after this one in the block
355 // order, or nullptr if there is no nonempty block following this one.
356 compiler::Label* next_nonempty_label() const {
357 return next_nonempty_label_;
358 }
359 void set_next_nonempty_label(compiler::Label* label) {
360 next_nonempty_label_ = label;
361 }
362
363 bool WasCompacted() const { return jump_label_ != &block_label_; }
364
365 // Block compaction is recursive. Block info for already-compacted
366 // blocks is marked so as to avoid cycles in the graph.
367 bool is_marked() const { return is_marked_; }
368 void mark() { is_marked_ = true; }
369
370 private:
371 compiler::Label block_label_;
372
373 compiler::Label* jump_label_;
374 compiler::Label* next_nonempty_label_;
375
376 bool is_marked_;
377 };
378
379 public:
380 FlowGraphCompiler(compiler::Assembler* assembler,
381 FlowGraph* flow_graph,
382 const ParsedFunction& parsed_function,
383 bool is_optimizing,
384 SpeculativeInliningPolicy* speculative_policy,
385 const GrowableArray<const Function*>& inline_id_to_function,
386 const GrowableArray<TokenPosition>& inline_id_to_token_pos,
387 const GrowableArray<intptr_t>& caller_inline_id,
388 ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data,
389 CodeStatistics* stats = nullptr);
390
391 void ArchSpecificInitialization();
392
393 ~FlowGraphCompiler();
394
395 static bool SupportsUnboxedSimd128();
396 static bool CanConvertInt64ToDouble();
397
398 // Accessors.
399 compiler::Assembler* assembler() const { return assembler_; }
400 const ParsedFunction& parsed_function() const { return parsed_function_; }
401 const Function& function() const { return parsed_function_.function(); }
402 const GrowableArray<BlockEntryInstr*>& block_order() const {
403 return block_order_;
404 }
405 const GrowableArray<const compiler::TableSelector*>&
406 dispatch_table_call_targets() const {
407 return dispatch_table_call_targets_;
408 }
409
410 // If 'ForcedOptimization()' returns 'true', we are compiling in optimized
411 // mode for a function which cannot deoptimize. Certain optimizations, e.g.
412 // speculative optimizations and call patching are disabled.
413 bool ForcedOptimization() const { return function().ForceOptimize(); }
414
415 const FlowGraph& flow_graph() const {
416 return intrinsic_mode() ? *intrinsic_flow_graph_ : flow_graph_;
417 }
418
419 BlockEntryInstr* current_block() const { return current_block_; }
420 void set_current_block(BlockEntryInstr* value) { current_block_ = value; }
421
422 Instruction* current_instruction() const { return current_instruction_; }
423
424 bool CanOptimize() const;
425 bool CanOptimizeFunction() const;
426 bool CanOSRFunction() const;
427 bool is_optimizing() const { return is_optimizing_; }
428
431
432 // The function was fully intrinsified, so the body is unreachable.
433 //
434 // We still need to compile the body in unoptimized mode because the
435 // 'ICData's are added to the function's 'ic_data_array_' when instance
436 // calls are compiled.
438 return fully_intrinsified_ && is_optimizing();
439 }
440
443 bool intrinsic_mode() const { return intrinsic_mode_; }
444
446 intrinsic_flow_graph_ = &flow_graph;
447 }
448
450 ASSERT(intrinsic_slow_path_label_ == nullptr || label == nullptr);
451 intrinsic_slow_path_label_ = label;
452 }
454 ASSERT(intrinsic_slow_path_label_ != nullptr);
455 return intrinsic_slow_path_label_;
456 }
457
459
460 const GrowableArray<BlockInfo*>& block_info() const { return block_info_; }
461
462 void StatsBegin(Instruction* instr) {
463 if (stats_ != nullptr) stats_->Begin(instr);
464 }
465
466 void StatsEnd(Instruction* instr) {
467 if (stats_ != nullptr) stats_->End(instr);
468 }
469
470 void SpecialStatsBegin(intptr_t tag) {
471 if (stats_ != nullptr) stats_->SpecialBegin(tag);
472 }
473
474 void SpecialStatsEnd(intptr_t tag) {
475 if (stats_ != nullptr) stats_->SpecialEnd(tag);
476 }
477
479 return used_static_fields_;
480 }
481
482 // Constructor is lightweight, major initialization work should occur here.
483 // This makes it easier to measure time spent in the compiler.
484 void InitCompiler();
485
486 void CompileGraph();
487
489
490 void VisitBlocks();
491
493
494 // Bail out of the flow graph compiler. Does not return to the caller.
495 void Bailout(const char* reason);
496
497 // Returns 'true' if regular code generation should be skipped.
498 bool TryIntrinsify();
499
500 // Emits code for a generic move from a location 'src' to a location 'dst'.
501 //
502 // Note that Location does not include a size (that can only be deduced from
503 // a Representation), so these moves might overapproximate the size needed
504 // to move. The maximal overapproximation is moving 8 bytes instead of 4 on
505 // 64 bit architectures. This overapproximation is not a problem, because
506 // the Dart calling convention only uses word-sized stack slots.
507 //
508 // TODO(dartbug.com/40400): Express this in terms of EmitMove(NativeLocation
509 // NativeLocation) to remove code duplication.
510 void EmitMove(Location dst, Location src, TemporaryRegisterAllocator* temp);
511
512 // Emits code for a move from a location `src` to a location `dst`.
513 //
514 // Takes into account the payload and container representations of `dst` and
515 // `src` to do the smallest move possible, and sign (or zero) extend or
516 // truncate if needed.
517 //
518 // Makes use of TMP, FpuTMP, and `temp`.
522
523 // Helper method to move from a Location to a NativeLocation.
525 Location src_loc,
526 Representation src_type,
528
529 // Helper method to move from a NativeLocation to a Location.
530 void EmitMoveFromNative(Location dst_loc,
531 Representation dst_type,
534
535 // Helper method to move a Dart const to a native location.
538 Representation src_type,
540
542 const LocationSummary& locs);
543
544 void GenerateAssertAssignable(CompileType* receiver_type,
546 intptr_t deopt_id,
548 const String& dst_name,
549 LocationSummary* locs);
550
551#if !defined(TARGET_ARCH_IA32)
553 const AbstractType& dst_type,
555
557 intptr_t deopt_id,
559 Register reg_with_type,
560 const AbstractType& dst_type,
561 const String& dst_name,
562 LocationSummary* locs);
563
565 Register reg_with_type,
566 intptr_t sub_type_cache_index);
567#endif
568
570 const Code& stub,
572 LocationSummary* locs,
573 intptr_t deopt_id,
575
578 const Code& stub,
580 LocationSummary* locs,
581 ObjectPool::SnapshotBehavior snapshot_behavior =
583
586 const Code& stub,
588 LocationSummary* locs,
589 ObjectPool::SnapshotBehavior snapshot_behavior =
591
592 void GenerateDartCall(intptr_t deopt_id,
594 const Code& stub,
596 LocationSummary* locs,
598
600 intptr_t deopt_id,
603 LocationSummary* locs,
604 const Function& target,
606
608 intptr_t deopt_id,
610 const AbstractType& type,
611 LocationSummary* locs);
612
613 void GenerateInstanceCall(intptr_t deopt_id,
615 LocationSummary* locs,
616 const ICData& ic_data,
617 Code::EntryKind entry_kind,
618 bool receiver_can_be_smi);
619
621 intptr_t deopt_id,
623 const Function& function,
624 ArgumentsInfo args_info,
625 LocationSummary* locs,
626 const ICData& ic_data_in,
627 ICData::RebindRule rebind_rule,
629
630 void GenerateNumberTypeCheck(Register kClassIdReg,
631 const AbstractType& type,
632 compiler::Label* is_instance_lbl,
633 compiler::Label* is_not_instance_lbl);
634 void GenerateStringTypeCheck(Register kClassIdReg,
635 compiler::Label* is_instance_lbl,
636 compiler::Label* is_not_instance_lbl);
637 void GenerateListTypeCheck(Register kClassIdReg,
638 compiler::Label* is_instance_lbl);
639
640 // Returns true if no further checks are necessary but the code coming after
641 // the emitted code here is still required to do a runtime call (for the negative
642 // case of throwing an exception).
643 bool GenerateSubtypeRangeCheck(Register class_id_reg,
644 const Class& type_class,
645 compiler::Label* is_subtype_lbl);
646
647 // We test up to 4 different cid ranges; if we would need to test more in
648 // order to get a definite answer we fall back to the old mechanism (namely
649 // of going into the subtyping cache).
650 static constexpr intptr_t kMaxNumberOfCidRangesToTest = 4;
651
652 // If [fall_through_if_inside] is `true`, then [outside_range_lbl] must be
653 // supplied, since it will be jumped to in the last case if the cid is outside
654 // the range.
655 //
656 // Returns whether [class_id_reg] is clobbered by the check.
657 static bool GenerateCidRangesCheck(
659 Register class_id_reg,
660 const CidRangeVector& cid_ranges,
661 compiler::Label* inside_range_lbl,
662 compiler::Label* outside_range_lbl = nullptr,
663 bool fall_through_if_inside = false);
664
666 const Code& stub,
667 const ICData& ic_data,
668 intptr_t deopt_id,
670 LocationSummary* locs,
672
673 void EmitInstanceCallJIT(const Code& stub,
674 const ICData& ic_data,
675 intptr_t deopt_id,
677 LocationSummary* locs,
678 Code::EntryKind entry_kind);
679
681 const CallTargets& targets,
682 ArgumentsInfo args_info,
683 intptr_t deopt_id,
685 LocationSummary* locs,
686 bool complete,
687 intptr_t total_call_count,
688 bool receiver_can_be_smi = true);
689
691 intptr_t deopt_id,
693 LocationSummary* locs) {
694 const String& name = String::Handle(icdata.target_name());
695 const Array& arguments_descriptor =
697 EmitMegamorphicInstanceCall(name, arguments_descriptor, deopt_id, source,
698 locs);
699 }
700
702 const Array& arguments_descriptor,
703 intptr_t deopt_id,
705 LocationSummary* locs);
706
708 const ICData& ic_data,
709 intptr_t deopt_id,
711 LocationSummary* locs,
713 bool receiver_can_be_smi = true);
714
715 void EmitTestAndCall(const CallTargets& targets,
716 const String& function_name,
717 ArgumentsInfo args_info,
718 compiler::Label* failed,
719 compiler::Label* match_found,
720 intptr_t deopt_id,
721 const InstructionSource& source_index,
722 LocationSummary* locs,
723 bool complete,
724 intptr_t total_ic_calls,
726
727 void EmitDispatchTableCall(int32_t selector_offset,
728 const Array& arguments_descriptor);
729
731 const Object& obj,
732 bool needs_number_check,
734 intptr_t deopt_id);
736 Register right,
737 bool needs_number_check,
739 intptr_t deopt_id);
741
743
744 void EmitEdgeCounter(intptr_t edge_id);
745
747
748 void EmitCallToStub(const Code& stub,
749 ObjectPool::SnapshotBehavior snapshot_behavior =
751 void EmitJumpToStub(const Code& stub);
752 void EmitTailCallToStub(const Code& stub);
753
754 void EmitDropArguments(intptr_t count);
755
756 // Emits the following metadata for the current PC:
757 //
758 // * Attaches current try index
759 // * Attaches stackmaps
760 // * Attaches catch entry moves (in AOT)
761 // * Deoptimization information (in JIT)
762 //
763 // If [env] is not `nullptr` it will be used instead of the
764 // `pending_deoptimization_env`.
766 intptr_t deopt_id,
768 LocationSummary* locs,
770
772 intptr_t yield_index);
773
774 void EmitComment(Instruction* instr);
775
776 // Returns stack size (number of variables on stack for unoptimized
777 // code, or number of spill slots for optimized code).
778 intptr_t StackSize() const;
779
780 // Returns the number of extra stack slots used during an Osr entry
781 // (values for all [ParameterInstr]s, representing local variables
782 // and expression stack values, are already on the stack).
783 intptr_t ExtraStackSlotsOnOsrEntry() const;
784
785#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
786 // Changes the base register of this Location if this allows us to utilize
787 // a better addressing mode. For RISC-V, this is the wider range of compressed
788 // instructions available for SP-relative load compared to FP-relative loads.
789 // Assumes `StackSize` accounts for everything at the point of use.
790 Location RebaseIfImprovesAddressing(Location loc) const;
791#endif // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
792
793 // Returns assembler label associated with the given block entry.
794 compiler::Label* GetJumpLabel(BlockEntryInstr* block_entry) const;
795 bool WasCompacted(BlockEntryInstr* block_entry) const;
796
797 // Returns the label of the fall-through of the current block.
799
800 // Returns true if there is a next block after the current one in
801 // the block order and if it is the given block.
802 bool CanFallThroughTo(BlockEntryInstr* block_entry) const;
803
804 // Return true-, false- and fall-through label for a branch instruction.
806
808 void SetNeedsStackTrace(intptr_t try_index);
810 intptr_t deopt_id,
812 void AddDescriptor(
814 intptr_t pc_offset,
815 intptr_t deopt_id,
817 intptr_t try_index,
818 intptr_t yield_index = UntaggedPcDescriptors::kInvalidYieldIndex);
819
820 // Add NullCheck information for the current PC.
821 void AddNullCheck(const InstructionSource& source, const String& name);
822
824 intptr_t slow_path_argument_count = 0);
825
826 compiler::Label* AddDeoptStub(intptr_t deopt_id,
828 uint32_t flags = 0);
829
832
833 void AddSlowPathCode(SlowPathCode* slow_path);
834
836 void FinalizePcDescriptors(const Code& code);
838 void FinalizeStackMaps(const Code& code);
839 void FinalizeVarDescriptors(const Code& code);
842 void FinalizeCodeSourceMap(const Code& code);
843
844 const Class& double_class() const { return double_class_; }
845 const Class& mint_class() const { return mint_class_; }
846 const Class& float32x4_class() const { return float32x4_class_; }
847 const Class& float64x2_class() const { return float64x2_class_; }
848 const Class& int32x4_class() const { return int32x4_class_; }
849
850 const Class& BoxClassFor(Representation rep);
851
854#if defined(DEBUG)
855 void ClobberDeadTempRegisters(LocationSummary* locs);
856#endif
857
858 // Returns a new environment based on [env] which accounts for the new
859 // locations of values in the slow path call.
861 intptr_t num_slow_path_args) {
862 if (inst->env() == nullptr && is_optimizing()) {
863 if (pending_deoptimization_env_ == nullptr) {
864 return nullptr;
865 }
866 return SlowPathEnvironmentFor(pending_deoptimization_env_, inst->locs(),
867 num_slow_path_args);
868 }
869 return SlowPathEnvironmentFor(inst->env(), inst->locs(),
870 num_slow_path_args);
871 }
872
874 LocationSummary* locs,
875 intptr_t num_slow_path_args);
876
877 intptr_t CurrentTryIndex() const {
878 if (current_block_ == nullptr) {
879 return kInvalidTryIndex;
880 }
881 return current_block_->try_index();
882 }
883
884 bool may_reoptimize() const { return may_reoptimize_; }
885
886 // Use in unoptimized compilation to preserve/reuse ICData.
887 //
888 // If [binary_smi_target] is non-null and we have to create the ICData, the
889 // ICData will get an (kSmiCid, kSmiCid, binary_smi_target) entry.
890 const ICData* GetOrAddInstanceCallICData(intptr_t deopt_id,
891 const String& target_name,
892 const Array& arguments_descriptor,
893 intptr_t num_args_tested,
894 const AbstractType& receiver_type,
895 const Function& binary_smi_target);
896
897 const ICData* GetOrAddStaticCallICData(intptr_t deopt_id,
898 const Function& target,
899 const Array& arguments_descriptor,
900 intptr_t num_args_tested,
901 ICData::RebindRule rebind_rule);
902
904 intptr_t cid,
905 const String& selector,
906 const Array& args_desc_array);
907
909 return *deopt_id_to_ic_data_;
910 }
911
912 Thread* thread() const { return thread_; }
913 IsolateGroup* isolate_group() const { return thread_->isolate_group(); }
914 Zone* zone() const { return zone_; }
915
916 void AddStubCallTarget(const Code& code);
918
919 ArrayPtr edge_counters_array() const { return edge_counters_array_.ptr(); }
920
921 ArrayPtr InliningIdToFunction() const;
922
925
926 static bool LookupMethodFor(int class_id,
927 const String& name,
928 const ArgumentsDescriptor& args_desc,
929 Function* fn_return,
930 bool* class_is_abstract_return = nullptr);
931
932 // Returns new class-id bias.
933 //
934 // TODO(kustermann): We should move this code out of the [FlowGraphCompiler]!
936 compiler::Label* label,
937 Register class_id_reg,
938 const CidRangeValue& range,
939 int bias,
940 bool jump_on_miss = true);
941
942 bool IsEmptyBlock(BlockEntryInstr* block) const;
943
945 const Function& function,
946 const Array& arguments_descriptor,
947 intptr_t size_with_type_args,
948 intptr_t deopt_id,
950 LocationSummary* locs,
952
953 private:
954 friend class BoxInt64Instr; // For AddPcRelativeCallStubTarget().
955 friend class CheckNullInstr; // For AddPcRelativeCallStubTarget().
956 friend class NullErrorSlowPath; // For AddPcRelativeCallStubTarget().
957 friend class CheckStackOverflowInstr; // For AddPcRelativeCallStubTarget().
958 friend class StoreIndexedInstr; // For AddPcRelativeCallStubTarget().
959 friend class StoreFieldInstr; // For AddPcRelativeCallStubTarget().
960 friend class CheckStackOverflowSlowPath; // For pending_deoptimization_env_.
961 friend class GraphIntrinsicCodeGenScope; // For optimizing_.
962
963 // Architecture specific implementation of simple native moves.
964 void EmitNativeMoveArchitecture(const compiler::ffi::NativeLocation& dst,
966 void EmitNativeLoad(Register dst,
968 intptr_t offset,
970
971 void EmitFrameEntry();
972
973 bool TryIntrinsifyHelper();
974 void AddPcRelativeCallTarget(const Function& function,
975 Code::EntryKind entry_kind);
976 void AddPcRelativeCallStubTarget(const Code& stub_code);
977 void AddPcRelativeTailCallStubTarget(const Code& stub_code);
978 void AddPcRelativeTTSCallTypeTarget(const AbstractType& type);
979 void AddStaticCallTarget(const Function& function,
980 Code::EntryKind entry_kind);
981
982 void GenerateDeferredCode();
983
984 void EmitInstructionPrologue(Instruction* instr);
985 void EmitInstructionEpilogue(Instruction* instr);
986
987 // Emit code to load a Value into register 'dst'.
988 void LoadValue(Register dst, Value* value);
989
990 void EmitUnoptimizedStaticCall(
991 intptr_t size_with_type_args,
992 intptr_t deopt_id,
994 LocationSummary* locs,
995 const ICData& ic_data,
997
998 // Helper for TestAndCall that calculates a good bias that
999 // allows more compact instructions to be emitted.
1000 intptr_t ComputeGoodBiasForCidComparison(const CallTargets& sorted,
1001 intptr_t max_immediate);
1002
1003 // More helpers for EmitTestAndCall.
1004
1005 static Register EmitTestCidRegister();
1006
1007 void EmitTestAndCallLoadReceiver(intptr_t count_without_type_args,
1008 const Array& arguments_descriptor);
1009
1010 void EmitTestAndCallSmiBranch(compiler::Label* label, bool jump_if_smi);
1011
1012 void EmitTestAndCallLoadCid(Register class_id_reg);
1013
1014 // Type checking helper methods.
1015 void CheckClassIds(Register class_id_reg,
1016 const GrowableArray<intptr_t>& class_ids,
1017 compiler::Label* is_instance_lbl,
1018 compiler::Label* is_not_instance_lbl);
1019
1020 SubtypeTestCachePtr GenerateInlineInstanceof(
1022 const AbstractType& type,
1023 compiler::Label* is_instance_lbl,
1024 compiler::Label* is_not_instance_lbl);
1025
1026 SubtypeTestCachePtr GenerateInstantiatedTypeWithArgumentsTest(
1028 const AbstractType& dst_type,
1029 compiler::Label* is_instance_lbl,
1030 compiler::Label* is_not_instance_lbl);
1031
1032 bool GenerateInstantiatedTypeNoArgumentsTest(
1034 const AbstractType& dst_type,
1035 compiler::Label* is_instance_lbl,
1036 compiler::Label* is_not_instance_lbl);
1037
1038 SubtypeTestCachePtr GenerateUninstantiatedTypeTest(
1040 const AbstractType& dst_type,
1041 compiler::Label* is_instance_lbl,
1042 compiler::Label* is_not_instance_label);
1043
1044 SubtypeTestCachePtr GenerateFunctionTypeTest(
1046 const AbstractType& dst_type,
1047 compiler::Label* is_instance_lbl,
1048 compiler::Label* is_not_instance_label);
1049
1050 SubtypeTestCachePtr GenerateSubtype1TestCacheLookup(
1052 const Class& type_class,
1053 compiler::Label* is_instance_lbl,
1054 compiler::Label* is_not_instance_lbl);
1055
1056 enum class TypeTestStubKind {
1057 // Just check the instance cid (no closures).
1058 kTestTypeOneArg = 1,
1059 // Also check the instance type arguments.
1060 kTestTypeTwoArgs = 2,
1061 // Also check the instantiator type arguments for the destination type.
1062 kTestTypeThreeArgs = 3,
1063 // Also check the function type arguments for the destination type.
1064 kTestTypeFourArgs = 4,
1065 // Also check the parent function and delayed type arguments for a closure.
1066 kTestTypeSixArgs = 6,
1067 // Also check the destination type, as it is not known at compile time.
1068 kTestTypeSevenArgs = 7,
1069 };
1070
1071 static_assert(static_cast<intptr_t>(TypeTestStubKind::kTestTypeSevenArgs) ==
1073 "Need to adjust kTestTypeMaxArgs");
1074 static constexpr TypeTestStubKind kTestTypeMaxArgs =
1075 TypeTestStubKind::kTestTypeSevenArgs;
1076
1077 // Returns the number of used inputs for a given type test stub kind.
1078 intptr_t UsedInputsForTTSKind(TypeTestStubKind kind) {
1079 return static_cast<intptr_t>(kind);
1080 }
1081
1082 // Returns type test stub kind for a type test against type parameter type.
1083 TypeTestStubKind GetTypeTestStubKindForTypeParameter(
1084 const TypeParameter& type_param);
1085
1086 // Takes input from TypeTestABI registers (or stack on IA32), see
1087 // StubCodeCompiler::GenerateSubtypeNTestCacheStub for caller-save registers.
1088 SubtypeTestCachePtr GenerateCallSubtypeTestStub(
1089 TypeTestStubKind test_kind,
1090 compiler::Label* is_instance_lbl,
1091 compiler::Label* is_not_instance_lbl);
1092
1093 void GenerateBoolToJump(Register bool_reg,
1094 compiler::Label* is_true,
1095 compiler::Label* is_false);
1096
1097 // Perform a greedy local register allocation. Consider all registers free.
1098 void AllocateRegistersLocally(Instruction* instr);
1099
1100 // Map a block number in a forward iteration into the block number in the
1101 // corresponding reverse iteration. Used to obtain an index into
1102 // block_order for reverse iterations.
1103 intptr_t reverse_index(intptr_t index) const {
1104 return block_order_.length() - index - 1;
1105 }
1106
1107 void set_current_instruction(Instruction* current_instruction) {
1108 current_instruction_ = current_instruction;
1109 }
1110
1111 void CompactBlock(BlockEntryInstr* block);
1112 void CompactBlocks();
1113
1114 bool IsListClass(const Class& cls) const {
1115 return cls.ptr() == list_class_.ptr();
1116 }
1117
1118 void EmitSourceLine(Instruction* instr);
1119
1120 intptr_t GetOptimizationThreshold() const;
1121
1122#if defined(DEBUG)
1123 void FrameStateUpdateWith(Instruction* instr);
1124 void FrameStatePush(Definition* defn);
1125 void FrameStatePop(intptr_t count);
1126 bool FrameStateIsSafeToCall();
1127 void FrameStateClear();
1128#endif
1129
1130 // Returns true if instruction lookahead (window size one)
1131 // is amenable to a peephole optimization.
1132 bool IsPeephole(Instruction* instr) const;
1133
1134#if defined(DEBUG)
1135 bool CanCallDart() const {
1136 return current_instruction_ == nullptr ||
1137 current_instruction_->CanCallDart();
1138 }
1139#else
1140 bool CanCallDart() const { return true; }
1141#endif
1142
1143 bool CanPcRelativeCall(const Function& target) const;
1144 bool CanPcRelativeCall(const Code& target) const;
1145 bool CanPcRelativeCall(const AbstractType& target) const;
1146
1147 // This struct contains either function or code, the other one being nullptr.
1148 class StaticCallsStruct : public ZoneAllocated {
1149 public:
1150 Code::CallKind call_kind;
1151 Code::CallEntryPoint entry_point;
1152 const intptr_t offset;
1153 const Function* function; // Can be nullptr.
1154 const Code* code; // Can be nullptr.
1155 const AbstractType* dst_type; // Can be nullptr.
1156 StaticCallsStruct(Code::CallKind call_kind,
1157 Code::CallEntryPoint entry_point,
1158 intptr_t offset_arg,
1159 const Function* function_arg,
1160 const Code* code_arg,
1161 const AbstractType* dst_type)
1162 : call_kind(call_kind),
1163 entry_point(entry_point),
1164 offset(offset_arg),
1165 function(function_arg),
1166 code(code_arg),
1167 dst_type(dst_type) {
1168 DEBUG_ASSERT(function == nullptr ||
1169 function->IsNotTemporaryScopedHandle());
1170 DEBUG_ASSERT(code == nullptr || code->IsNotTemporaryScopedHandle());
1171 DEBUG_ASSERT(dst_type == nullptr ||
1172 dst_type->IsNotTemporaryScopedHandle());
1173 ASSERT(code == nullptr || dst_type == nullptr);
1174 }
1175
1176 private:
1177 DISALLOW_COPY_AND_ASSIGN(StaticCallsStruct);
1178 };
1179
1180 Thread* thread_;
1181 Zone* zone_;
1182 compiler::Assembler* assembler_;
1183 const ParsedFunction& parsed_function_;
1184 const FlowGraph& flow_graph_;
1185 const FlowGraph* intrinsic_flow_graph_ = nullptr;
1186 const GrowableArray<BlockEntryInstr*>& block_order_;
1187
1188#if defined(DEBUG)
1189 GrowableArray<Representation> frame_state_;
1190#endif
1191
1192 // Compiler specific per-block state. Indexed by postorder block number
1193 // for convenience. This is not the block's index in the block order,
1194 // which is reverse postorder.
1195 BlockEntryInstr* current_block_;
1196 ExceptionHandlerList* exception_handlers_list_;
1197 DescriptorList* pc_descriptors_list_;
1198 CompressedStackMapsBuilder* compressed_stackmaps_builder_;
1199 CodeSourceMapBuilder* code_source_map_builder_;
1200 CatchEntryMovesMapBuilder* catch_entry_moves_maps_builder_;
1201 GrowableArray<BlockInfo*> block_info_;
1202 GrowableArray<CompilerDeoptInfo*> deopt_infos_;
1203 GrowableArray<SlowPathCode*> slow_path_code_;
1204 // Fields that were referenced by generated code.
1205 // This list is needed by precompiler to ensure they are retained.
1206 GrowableArray<const Field*> used_static_fields_;
1207 // Stores static call targets as well as stub targets.
1208 // TODO(srdjan): Evaluate if we should store allocation stub targets into a
1209 // separate table?
1210 GrowableArray<StaticCallsStruct*> static_calls_target_table_;
1211 // The table selectors of all dispatch table calls in the current function.
1212 GrowableArray<const compiler::TableSelector*> dispatch_table_call_targets_;
1213 GrowableArray<IndirectGotoInstr*> indirect_gotos_;
1214 bool is_optimizing_;
1215 SpeculativeInliningPolicy* speculative_policy_;
1216 // Set to true if optimized code has IC calls.
1217 bool may_reoptimize_;
1218 // True while emitting intrinsic code.
1219 bool intrinsic_mode_;
1220 compiler::Label* intrinsic_slow_path_label_ = nullptr;
1221 bool fully_intrinsified_ = false;
1222 CodeStatistics* stats_;
1223
1224 // The definition whose value is supposed to be at the top of the
1225 // expression stack. Used by peephole optimization (window size one)
1226 // to eliminate redundant push/pop pairs.
1227 Definition* top_of_stack_ = nullptr;
1228
1229 const Class& double_class_;
1230 const Class& mint_class_;
1231 const Class& float32x4_class_;
1232 const Class& float64x2_class_;
1233 const Class& int32x4_class_;
1234 const Class& list_class_;
1235
1236 // Currently instructions generate deopt stubs internally by
1237 // calling AddDeoptStub. To communicate deoptimization environment
1238 // that should be used when deoptimizing we store it in this variable.
1239 // In future AddDeoptStub should be moved out of the instruction template.
1240 Environment* pending_deoptimization_env_;
1241
1242 ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data_;
1243 Array& edge_counters_array_;
1244
1245 // Instruction currently running EmitNativeCode().
1246 Instruction* current_instruction_ = nullptr;
1247
1248 DISALLOW_COPY_AND_ASSIGN(FlowGraphCompiler);
1249};
1250
1251} // namespace dart
1252
1253#endif // RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_COMPILER_H_
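For orientation, a rough sketch of how a compilation pipeline can drive this
class, using only entry points declared above (the construction of the
assembler, flow graph, inlining-id arrays and the resulting Code object is
elided; the local names are illustrative, not the exact Dart VM driver code):

  FlowGraphCompiler compiler(&assembler, flow_graph, parsed_function,
                             optimized, &speculative_policy,
                             inline_id_to_function, inline_id_to_token_pos,
                             caller_inline_id, deopt_id_to_ic_data);
  compiler.InitCompiler();   // heavyweight setup, kept out of the constructor
  compiler.CompileGraph();   // emits the body block by block
  // ... create a Code object from the assembler, then attach metadata:
  compiler.FinalizePcDescriptors(code);
  compiler.FinalizeStackMaps(code);
  compiler.FinalizeVarDescriptors(code);
  compiler.FinalizeCodeSourceMap(code);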