Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
Classes | Namespaces | Macros | Typedefs | Enumerations
il.h File Reference
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>
#include "vm/allocation.h"
#include "vm/code_descriptors.h"
#include "vm/compiler/backend/compile_type.h"
#include "vm/compiler/backend/il_serializer.h"
#include "vm/compiler/backend/locations.h"
#include "vm/compiler/backend/slot.h"
#include "vm/compiler/compiler_pass.h"
#include "vm/compiler/compiler_state.h"
#include "vm/compiler/ffi/marshaller.h"
#include "vm/compiler/ffi/native_calling_convention.h"
#include "vm/compiler/ffi/native_location.h"
#include "vm/compiler/ffi/native_type.h"
#include "vm/compiler/method_recognizer.h"
#include "vm/dart_entry.h"
#include "vm/flags.h"
#include "vm/growable_array.h"
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/parser.h"
#include "vm/runtime_entry.h"
#include "vm/static_type_exactness_state.h"
#include "vm/token_position.h"

Go to the source code of this file.

Classes

class  dart::Value
 
class  dart::Value::Iterator
 
struct  dart::CidRange
 
struct  dart::CidRangeValue
 
class  dart::CidRangeVectorUtils
 
class  dart::HierarchyInfo
 
class  dart::EmbeddedArray< T, N >
 
class  dart::EmbeddedArray< T, 0 >
 
struct  dart::InstrAttrs
 
struct  dart::unwrap_enum< T, is_enum >
 
struct  dart::unwrap_enum< T, true >
 
struct  dart::unwrap_enum< T, false >
 
struct  dart::TargetInfo
 
class  dart::Cids
 
class  dart::CallTargets
 
class  dart::BinaryFeedback
 
class  dart::InstructionIndexedPropertyIterable< Trait >
 
struct  dart::InstructionIndexedPropertyIterable< Trait >::Iterator
 
class  dart::ValueListIterable
 
struct  dart::ValueListIterable::Iterator
 
class  dart::Instruction
 
struct  dart::Instruction::InputsTrait
 
struct  dart::Instruction::SuccessorsTrait
 
struct  dart::BranchLabels
 
class  dart::PureInstruction
 
struct  dart::Throws
 
struct  dart::NoThrow
 
struct  dart::Pure< DefaultBase, PureBase >
 
struct  dart::NoCSE< DefaultBase, PureBase >
 
class  dart::TemplateInstruction< N, ThrowsTrait, CSETrait >
 
class  dart::MoveOperands
 
class  dart::ParallelMoveInstr
 
class  dart::BlockEntryInstr
 
class  dart::BlockEntryInstr::InstructionsIterable
 
class  dart::ForwardInstructionIterator
 
class  dart::BackwardInstructionIterator
 
class  dart::BlockEntryWithInitialDefs
 
class  dart::GraphEntryInstr
 
class  dart::JoinEntryInstr
 
class  dart::PhiIterator
 
class  dart::TargetEntryInstr
 
class  dart::FunctionEntryInstr
 
class  dart::NativeEntryInstr
 
class  dart::OsrEntryInstr
 
class  dart::IndirectEntryInstr
 
class  dart::CatchBlockEntryInstr
 
class  dart::AliasIdentity
 
class  dart::Definition
 
class  dart::PureDefinition
 
class  dart::TemplateDefinition< N, ThrowsTrait, CSETrait >
 
class  dart::VariadicDefinition
 
class  dart::PhiInstr
 
class  dart::ParameterInstr
 
class  dart::NativeParameterInstr
 
class  dart::StoreIndexedUnsafeInstr
 
class  dart::LoadIndexedUnsafeInstr
 
class  dart::MemoryCopyInstr
 
class  dart::TailCallInstr
 
class  dart::MoveArgumentInstr
 
class  dart::ReturnBaseInstr
 
class  dart::DartReturnInstr
 
class  dart::NativeReturnInstr
 
class  dart::ThrowInstr
 
class  dart::ReThrowInstr
 
class  dart::StopInstr
 
class  dart::GotoInstr
 
class  dart::IndirectGotoInstr
 
class  dart::ComparisonInstr
 
class  dart::PureComparison
 
class  dart::TemplateComparison< N, ThrowsTrait, CSETrait >
 
class  dart::BranchInstr
 
class  dart::DeoptimizeInstr
 
class  dart::RedefinitionInstr
 
class  dart::ReachabilityFenceInstr
 
class  dart::ConstraintInstr
 
class  dart::ConstantInstr
 
class  dart::UnboxedConstantInstr
 
class  dart::AssertSubtypeInstr
 
class  dart::AssertAssignableInstr
 
class  dart::AssertBooleanInstr
 
struct  dart::ArgumentsInfo
 
class  dart::TemplateDartCall< kExtraInputs >
 
class  dart::ClosureCallInstr
 
class  dart::InstanceCallBaseInstr
 
class  dart::InstanceCallInstr
 
class  dart::PolymorphicInstanceCallInstr
 
class  dart::DispatchTableCallInstr
 
class  dart::StrictCompareInstr
 
class  dart::TestSmiInstr
 
class  dart::TestCidsInstr
 
class  dart::TestRangeInstr
 
class  dart::EqualityCompareInstr
 
class  dart::RelationalOpInstr
 
class  dart::IfThenElseInstr
 
class  dart::StaticCallInstr
 
class  dart::CachableIdempotentCallInstr
 
class  dart::LoadLocalInstr
 
class  dart::DropTempsInstr
 
class  dart::MakeTempInstr
 
class  dart::StoreLocalInstr
 
class  dart::NativeCallInstr
 
class  dart::FfiCallInstr
 
class  dart::LeafRuntimeCallInstr
 
class  dart::DebugStepCheckInstr
 
class  dart::StoreFieldInstr
 
class  dart::GuardFieldInstr
 
class  dart::GuardFieldClassInstr
 
class  dart::GuardFieldLengthInstr
 
class  dart::GuardFieldTypeInstr
 
class  dart::TemplateLoadField< N >
 
class  dart::LoadStaticFieldInstr
 
class  dart::StoreStaticFieldInstr
 
class  dart::LoadIndexedInstr
 
class  dart::LoadCodeUnitsInstr
 
class  dart::OneByteStringFromCharCodeInstr
 
class  dart::StringToCharCodeInstr
 
class  dart::Utf8ScanInstr
 
class  dart::StoreIndexedInstr
 
class  dart::RecordCoverageInstr
 
class  dart::BooleanNegateInstr
 
class  dart::BoolToIntInstr
 
class  dart::IntToBoolInstr
 
class  dart::InstanceOfInstr
 
class  dart::AllocationInstr
 
class  dart::TemplateAllocation< N >
 
class  dart::AllocateObjectInstr
 
class  dart::AllocateClosureInstr
 
class  dart::AllocateUninitializedContextInstr
 
class  dart::AllocateRecordInstr
 
class  dart::AllocateSmallRecordInstr
 
class  dart::MaterializeObjectInstr
 
class  dart::ArrayAllocationInstr
 
class  dart::TemplateArrayAllocation< N >
 
class  dart::CreateArrayInstr
 
class  dart::AllocateTypedDataInstr
 
class  dart::LoadUntaggedInstr
 
class  dart::CalculateElementAddressInstr
 
class  dart::LoadClassIdInstr
 
class  dart::LoadFieldInstr
 
class  dart::InstantiateTypeInstr
 
class  dart::InstantiateTypeArgumentsInstr
 
class  dart::AllocateContextInstr
 
class  dart::CloneContextInstr
 
class  dart::CheckEitherNonSmiInstr
 
struct  dart::Boxing
 
class  dart::BoxInstr
 
class  dart::BoxIntegerInstr
 
class  dart::BoxSmallIntInstr
 
class  dart::BoxInteger32Instr
 
class  dart::BoxInt32Instr
 
class  dart::BoxUint32Instr
 
class  dart::BoxInt64Instr
 
class  dart::UnboxInstr
 
class  dart::UnboxIntegerInstr
 
class  dart::UnboxInteger32Instr
 
class  dart::UnboxUint32Instr
 
class  dart::UnboxInt32Instr
 
class  dart::UnboxInt64Instr
 
class  dart::CaseInsensitiveCompareInstr
 
class  dart::MathMinMaxInstr
 
class  dart::BinaryDoubleOpInstr
 
class  dart::DoubleTestOpInstr
 
class  dart::HashDoubleOpInstr
 
class  dart::HashIntegerOpInstr
 
class  dart::UnaryIntegerOpInstr
 
class  dart::UnarySmiOpInstr
 
class  dart::UnaryUint32OpInstr
 
class  dart::UnaryInt64OpInstr
 
class  dart::BinaryIntegerOpInstr
 
class  dart::BinarySmiOpInstr
 
class  dart::BinaryInt32OpInstr
 
class  dart::BinaryUint32OpInstr
 
class  dart::BinaryInt64OpInstr
 
class  dart::ShiftIntegerOpInstr
 
class  dart::ShiftInt64OpInstr
 
class  dart::SpeculativeShiftInt64OpInstr
 
class  dart::ShiftUint32OpInstr
 
class  dart::SpeculativeShiftUint32OpInstr
 
class  dart::UnaryDoubleOpInstr
 
class  dart::CheckStackOverflowInstr
 
class  dart::SmiToDoubleInstr
 
class  dart::Int32ToDoubleInstr
 
class  dart::Int64ToDoubleInstr
 
class  dart::DoubleToIntegerInstr
 
class  dart::DoubleToSmiInstr
 
class  dart::DoubleToFloatInstr
 
class  dart::FloatToDoubleInstr
 
class  dart::FloatCompareInstr
 
class  dart::InvokeMathCFunctionInstr
 
class  dart::ExtractNthOutputInstr
 
class  dart::MakePairInstr
 
class  dart::UnboxLaneInstr
 
class  dart::BoxLanesInstr
 
class  dart::TruncDivModInstr
 
class  dart::CheckClassInstr
 
class  dart::CheckSmiInstr
 
class  dart::CheckNullInstr
 
class  dart::CheckClassIdInstr
 
class  dart::CheckBoundBaseInstr
 
class  dart::CheckArrayBoundInstr
 
class  dart::GenericCheckBoundInstr
 
class  dart::CheckWritableInstr
 
class  dart::CheckConditionInstr
 
class  dart::IntConverterInstr
 
class  dart::BitCastInstr
 
class  dart::LoadThreadInstr
 
class  dart::SimdOpInstr
 
class  dart::Call1ArgStubInstr
 
class  dart::SuspendInstr
 
class  dart::Environment
 
class  dart::Environment::ShallowIterator
 
class  dart::Environment::DeepIterator
 
class  dart::InstructionVisitor
 
class  dart::FlowGraphVisitor
 

Namespaces

namespace  dart
 
namespace  dart::compiler
 

Macros

#define FOR_EACH_INSTRUCTION(M)
 
#define FOR_EACH_ABSTRACT_INSTRUCTION(M)
 
#define FORWARD_DECLARATION(type, attrs)   class type##Instr;
 
#define DEFINE_INSTRUCTION_TYPE_CHECK(type)
 
#define DECLARE_INSTRUCTION_NO_BACKEND(type)
 
#define DECLARE_INSTRUCTION_BACKEND()
 
#define DECLARE_INSTRUCTION(type)
 
#define DECLARE_ABSTRACT_INSTRUCTION(type)
 
#define DECLARE_COMPARISON_METHODS
 
#define DECLARE_COMPARISON_INSTRUCTION(type)
 
#define WRITE_INSTRUCTION_FIELD(type, name)
 
#define READ_INSTRUCTION_FIELD(type, name)
 
#define DECLARE_INSTRUCTION_FIELD(type, name)   type name;
 
#define DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Instr, BaseClass, FieldList)
 
#define DECLARE_CUSTOM_SERIALIZATION(Instr)
 
#define DECLARE_EMPTY_SERIALIZATION(Instr, BaseClass)
 
#define DECLARE_EXTRA_SERIALIZATION
 
#define PRINT_TO_SUPPORT
 
#define PRINT_OPERANDS_TO_SUPPORT
 
#define DECLARE_ATTRIBUTE(Attribute)
 
#define DECLARE_ATTRIBUTES_NAMED(names, values)
 
#define DECLARE_TAG(type, attrs)   k##type,
 
#define DECLARE_INSTRUCTION_TYPE_CHECK(Name, Type)
 
#define INSTRUCTION_TYPE_CHECK(Name, Attrs)    DECLARE_INSTRUCTION_TYPE_CHECK(Name, Name##Instr)
 
#define FIELD_LIST(F)   F(ZoneGrowableArray<PhiInstr*>*, phis_)
 
#define FIELD_LIST(F)   F(double, edge_weight_)
 
#define FIELD_LIST(F)   F(const compiler::ffi::CallbackMarshaller&, marshaller_)
 
#define FIELD_LIST(F)   F(const intptr_t, indirect_id_)
 
#define FOR_EACH_ALIAS_IDENTITY_VALUE(V)
 
#define VALUE_CASE(name, val)
 
#define VALUE_DEFN(name, val)   k##name = val,
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const intptr_t, offset_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const Code&, code_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const compiler::ffi::CallbackMarshaller&, marshaller_)
 
#define FIELD_LIST(F)   F(const TokenPosition, token_pos_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const char*, message_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(ComparisonInstr*, comparison_)
 
#define FIELD_LIST(F)   F(const ICData::DeoptReasonId, deopt_reason_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(Range*, constraint_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const TokenPosition, token_pos_)
 
#define FOR_EACH_ASSERT_ASSIGNABLE_KIND(V)
 
#define KIND_DEFN(name)   k##name,
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const TokenPosition, token_pos_)
 
#define FIELD_LIST(F)   F(const Function&, target_function_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const ZoneGrowableArray<intptr_t>&, cid_results_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const SpeculativeMode, speculative_mode_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(ConstantInstr*, null_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const Field&, field_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const Field&, field_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const intptr_t, cid_)
 
#define FIELD_LIST(F)   F(const Slot&, scan_flags_field_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const intptr_t, num_context_variables_)
 
#define FIELD_LIST(F)   F(const RecordShape, shape_)
 
#define FIELD_LIST(F)   F(const RecordShape, shape_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const classid_t, class_id_)
 
#define FIELD_LIST(F)   F(const intptr_t, offset_)
 
#define FIELD_LIST(F)   F(const intptr_t, index_scale_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const ZoneGrowableArray<const Slot*>&, context_slots_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const Representation, from_representation_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(bool, is_truncating_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const MethodRecognizer::Kind, op_kind_)
 
#define FIELD_LIST(F)   F(const bool, smi_)
 
#define FIELD_LIST(F)   F(const Token::Kind, op_kind_)
 
#define FIELD_LIST(F)   F(const SpeculativeMode, speculative_mode_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(Range*, right_range_)
 
#define FIELD_LIST(F)   F(const SpeculativeMode, speculative_mode_)
 
#define FIELD_LIST(F)   F(Range*, shift_range_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const TokenPosition, token_pos_)
 
#define FIELD_LIST(F)   F(const SpeculativeMode, speculative_mode_)
 
#define FIELD_LIST(F)   F(const MethodRecognizer::Kind, recognized_kind_)
 
#define FIELD_LIST(F)   F(const SpeculativeMode, speculative_mode_)
 
#define FIELD_LIST(F)   F(const Token::Kind, op_kind_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const Representation, from_representation_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(const TokenPosition, token_pos_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)   F(CidRangeValue, cids_)
 
#define FIELD_LIST(F)   F(bool, generalized_)
 
#define FIELD_LIST(F)   F(const Kind, kind_)
 
#define FIELD_LIST(F)   F(ComparisonInstr*, comparison_)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define SIMD_BINARY_OP(M, T, Name)   M(2, _, T##Name, (T, T), T)
 
#define SIMD_BINARY_FLOAT_OP_LIST(M, OP, T)
 
#define SIMD_BINARY_INTEGER_OP_LIST(M, OP, T)
 
#define SIMD_PER_COMPONENT_XYZW(M, Arity, Name, Inputs, Output)
 
#define SIMD_CONVERSION(M, FromType, ToType)    M(1, _, FromType##To##ToType, (FromType), ToType)
 
#define SIMD_OP_LIST(M, BINARY_OP)
 
#define DECLARE_ENUM(Arity, Mask, Name, ...)   k##Name,
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define FIELD_LIST(F)
 
#define DECLARE_VISIT_INSTRUCTION(ShortName, Attrs)    virtual void Visit##ShortName(ShortName##Instr* instr) {}
 
#define DEFINE_UNIMPLEMENTED_INSTRUCTION(Name)
 

Typedefs

typedef MallocGrowableArray< CidRangeValue > dart::CidRangeVector
 
template<typename T >
using dart::serializable_type_t = typename unwrap_enum< std::remove_cv_t< T >, std::is_enum< T >::value >::type
 
typedef GrowableArray< Value * > dart::InputsArray
 
typedef ZoneGrowableArray< MoveArgumentInstr * > dart::MoveArgumentsArray
 

Enumerations

enum class  dart::InnerPointerAccess { dart::kNotUntagged , dart::kMayBeInnerPointer , dart::kCannotBeInnerPointer }
 
enum  dart::StoreBarrierType { dart::kNoStoreBarrier , dart::kEmitStoreBarrier }
 
enum  dart::AlignmentType { dart::kUnalignedAccess , dart::kAlignedAccess }
 

Macro Definition Documentation

◆ DECLARE_ABSTRACT_INSTRUCTION

#define DECLARE_ABSTRACT_INSTRUCTION (   type)
Value:
/* Prevents allocating an instance of abstract instruction */ \
/* even if it has a concrete base class. */ \
virtual Tag tag() const = 0; \
DEFINE_INSTRUCTION_TYPE_CHECK(type)

Definition at line 602 of file il.h.

618 {};
619
620template <typename T>
621struct unwrap_enum<T, true> {
622 using type = std::underlying_type_t<T>;
623};
624
625template <typename T>
626struct unwrap_enum<T, false> {
627 using type = T;
628};
629
630template <typename T>
632 typename unwrap_enum<std::remove_cv_t<T>, std::is_enum<T>::value>::type;
633
// Serialization helpers expanded over an instruction's FIELD_LIST.
// Writes one field, converting enums to their underlying integral type
// via serializable_type_t.
#define WRITE_INSTRUCTION_FIELD(type, name)                                    \
  s->Write<serializable_type_t<type>>(                                         \
      static_cast<serializable_type_t<type>>(name));
// Reads one field back as a constructor member initializer. Note the
// leading comma: this is appended to an existing initializer list.
#define READ_INSTRUCTION_FIELD(type, name)                                     \
  , name(static_cast<std::remove_cv_t<type>>(                                  \
        d->Read<serializable_type_t<type>>()))
// Declares the backing data member itself.
#define DECLARE_INSTRUCTION_FIELD(type, name) type name;
641
// Every instruction class should declare its serialization via
// DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS, DECLARE_EMPTY_SERIALIZATION
// or DECLARE_CUSTOM_SERIALIZATION.
// If instruction class has fields which reference other instructions,
// then it should also use DECLARE_EXTRA_SERIALIZATION and serialize
// those references in WriteExtra/ReadExtra methods.
//
// Expands, for a given FieldList, into: a WriteTo override that serializes
// the base class then every field, a deserializing constructor that reads
// them back in the same order, and the private member declarations.
#define DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Instr, BaseClass, FieldList)   \
 public:                                                                       \
  virtual void WriteTo(FlowGraphSerializer* s) {                               \
    BaseClass::WriteTo(s);                                                     \
    FieldList(WRITE_INSTRUCTION_FIELD)                                         \
  }                                                                            \
  explicit Instr(FlowGraphDeserializer* d)                                     \
      : BaseClass(d) FieldList(READ_INSTRUCTION_FIELD) {}                      \
                                                                               \
 private:                                                                      \
  FieldList(DECLARE_INSTRUCTION_FIELD)
659
// Declares WriteTo and the deserializing constructor only; the instruction
// provides the definitions out of line.
#define DECLARE_CUSTOM_SERIALIZATION(Instr)                                    \
 public:                                                                       \
  virtual void WriteTo(FlowGraphSerializer* s);                                \
  explicit Instr(FlowGraphDeserializer* d);

// For instructions with no serializable fields of their own: forwards
// deserialization straight to the base class.
#define DECLARE_EMPTY_SERIALIZATION(Instr, BaseClass)                          \
 public:                                                                       \
  explicit Instr(FlowGraphDeserializer* d) : BaseClass(d) {}

// Declares the WriteExtra/ReadExtra hooks used to serialize references to
// other instructions (see the comment above DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS).
#define DECLARE_EXTRA_SERIALIZATION                                            \
 public:                                                                       \
  virtual void WriteExtra(FlowGraphSerializer* s);                             \
  virtual void ReadExtra(FlowGraphDeserializer* d);
673
#if defined(INCLUDE_IL_PRINTER)
// Printing hooks, compiled in only when the IL printer is included.
#define PRINT_TO_SUPPORT virtual void PrintTo(BaseTextBuffer* f) const;
#define PRINT_OPERANDS_TO_SUPPORT                                              \
  virtual void PrintOperandsTo(BaseTextBuffer* f) const;
// Used for an instruction with a single attribute where the name of the
// attribute should be derived from the expression. See
// IlTestPrinter::AttributesSerializer::WriteAttributeName for more info.
#define DECLARE_ATTRIBUTE(Attribute)                                           \
  auto GetAttributes() const {                                                 \
    return std::make_tuple(Attribute);                                         \
  }                                                                            \
  static auto GetAttributeNames() {                                            \
    return std::make_tuple(#Attribute);                                        \
  }
// Used for instructions with either multiple attributes or where the name of
// the attribute should not be derived from the expression.
#define DECLARE_ATTRIBUTES_NAMED(names, values)                                \
  auto GetAttributes() const {                                                 \
    return std::make_tuple values;                                             \
  }                                                                            \
  static auto GetAttributeNames() {                                            \
    return std::make_tuple names;                                              \
  }
#else
// Without the IL printer, all printing/attribute hooks expand to nothing.
#define PRINT_TO_SUPPORT
#define PRINT_OPERANDS_TO_SUPPORT
#define DECLARE_ATTRIBUTE(Attribute)
#define DECLARE_ATTRIBUTES_NAMED(names, values)
#endif  // defined(INCLUDE_IL_PRINTER)
703
704// Together with CidRange, this represents a mapping from a range of class-ids
705// to a method for a given selector (method name). Also can contain an
706// indication of how frequently a given method has been called at a call site.
707// This information can be harvested from the inline caches (ICs).
708struct TargetInfo : public CidRange {
709 TargetInfo(intptr_t cid_start_arg,
710 intptr_t cid_end_arg,
711 const Function* target_arg,
712 intptr_t count_arg,
713 StaticTypeExactnessState exactness)
714 : CidRange(cid_start_arg, cid_end_arg),
715 target(target_arg),
716 count(count_arg),
717 exactness(exactness) {
718 DEBUG_ASSERT(target->IsNotTemporaryScopedHandle());
719 }
720 const Function* target;
721 intptr_t count;
722 StaticTypeExactnessState exactness;
723
724 DISALLOW_COPY_AND_ASSIGN(TargetInfo);
725};
726
727// A set of class-ids, arranged in ranges. Used for the CheckClass
728// and PolymorphicInstanceCall instructions.
729class Cids : public ZoneAllocated {
730 public:
731 explicit Cids(Zone* zone) : cid_ranges_(zone, 6) {}
732 // Creates the off-heap Cids object that reflects the contents
733 // of the on-VM-heap IC data.
734 // Ranges of Cids are merged if there is only one target function and
735 // it is used for all cids in the gaps between ranges.
736 static Cids* CreateForArgument(Zone* zone,
737 const BinaryFeedback& binary_feedback,
738 int argument_number);
739 static Cids* CreateMonomorphic(Zone* zone, intptr_t cid);
740
741 bool Equals(const Cids& other) const;
742
743 bool HasClassId(intptr_t cid) const;
744
745 void Add(CidRange* target) { cid_ranges_.Add(target); }
746
747 CidRange& operator[](intptr_t index) const { return *cid_ranges_[index]; }
748
749 CidRange* At(int index) const { return cid_ranges_[index]; }
750
751 intptr_t length() const { return cid_ranges_.length(); }
752
753 void SetLength(intptr_t len) { cid_ranges_.SetLength(len); }
754
755 bool is_empty() const { return cid_ranges_.is_empty(); }
756
757 void Sort(int compare(CidRange* const* a, CidRange* const* b)) {
758 cid_ranges_.Sort(compare);
759 }
760
761 bool IsMonomorphic() const;
762 intptr_t MonomorphicReceiverCid() const;
763 intptr_t ComputeLowestCid() const;
764 intptr_t ComputeHighestCid() const;
765
766 protected:
767 GrowableArray<CidRange*> cid_ranges_;
768
769 private:
771};
772
773class CallTargets : public Cids {
774 public:
775 explicit CallTargets(Zone* zone) : Cids(zone) {}
776
777 static const CallTargets* CreateMonomorphic(Zone* zone,
778 intptr_t receiver_cid,
779 const Function& target);
780
781 // Creates the off-heap CallTargets object that reflects the contents
782 // of the on-VM-heap IC data.
783 static const CallTargets* Create(Zone* zone, const ICData& ic_data);
784
785 // This variant also expands the class-ids to neighbouring classes that
786 // inherit the same method.
787 static const CallTargets* CreateAndExpand(Zone* zone, const ICData& ic_data);
788
789 TargetInfo* TargetAt(int i) const { return static_cast<TargetInfo*>(At(i)); }
790
791 intptr_t AggregateCallCount() const;
792
793 StaticTypeExactnessState MonomorphicExactness() const;
794 bool HasSingleTarget() const;
795 bool HasSingleRecognizedTarget() const;
796 const Function& FirstTarget() const;
797 const Function& MostPopularTarget() const;
798
799 void Print() const;
800
801 bool ReceiverIs(intptr_t cid) const {
802 return IsMonomorphic() && MonomorphicReceiverCid() == cid;
803 }
804 bool ReceiverIsSmiOrMint() const {
805 if (cid_ranges_.is_empty()) {
806 return false;
807 }
808 for (intptr_t i = 0, n = cid_ranges_.length(); i < n; i++) {
809 for (intptr_t j = cid_ranges_[i]->cid_start; j <= cid_ranges_[i]->cid_end;
810 j++) {
811 if (j != kSmiCid && j != kMintCid) {
812 return false;
813 }
814 }
815 }
816 return true;
817 }
818
819 void Write(FlowGraphSerializer* s) const;
820 explicit CallTargets(FlowGraphDeserializer* d);
821
822 private:
823 void CreateHelper(Zone* zone, const ICData& ic_data);
824 void MergeIntoRanges();
825};
826
827// Represents type feedback for the binary operators, and a few recognized
828// static functions (see MethodRecognizer::NumArgsCheckedForStaticCall).
829class BinaryFeedback : public ZoneAllocated {
830 public:
831 explicit BinaryFeedback(Zone* zone) : feedback_(zone, 2) {}
832
833 static const BinaryFeedback* Create(Zone* zone, const ICData& ic_data);
834 static const BinaryFeedback* CreateMonomorphic(Zone* zone,
835 intptr_t receiver_cid,
836 intptr_t argument_cid);
837
838 bool ArgumentIs(intptr_t cid) const {
839 if (feedback_.is_empty()) {
840 return false;
841 }
842 for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
843 if (feedback_[i].second != cid) {
844 return false;
845 }
846 }
847 return true;
848 }
849
850 bool OperandsAreEither(intptr_t cid_a, intptr_t cid_b) const {
851 if (feedback_.is_empty()) {
852 return false;
853 }
854 for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
855 if ((feedback_[i].first != cid_a) && (feedback_[i].first != cid_b)) {
856 return false;
857 }
858 if ((feedback_[i].second != cid_a) && (feedback_[i].second != cid_b)) {
859 return false;
860 }
861 }
862 return true;
863 }
864 bool OperandsAreSmiOrNull() const {
865 return OperandsAreEither(kSmiCid, kNullCid);
866 }
867 bool OperandsAreSmiOrMint() const {
868 return OperandsAreEither(kSmiCid, kMintCid);
869 }
870 bool OperandsAreSmiOrDouble() const {
871 return OperandsAreEither(kSmiCid, kDoubleCid);
872 }
873
874 bool OperandsAre(intptr_t cid) const {
875 if (feedback_.length() != 1) return false;
876 return (feedback_[0].first == cid) && (feedback_[0].second == cid);
877 }
878
879 bool IncludesOperands(intptr_t cid) const {
880 for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
881 if ((feedback_[i].first == cid) && (feedback_[i].second == cid)) {
882 return true;
883 }
884 }
885 return false;
886 }
887
888 private:
889 GrowableArray<std::pair<intptr_t, intptr_t>> feedback_;
890
891 friend class Cids;
892};
893
894typedef GrowableArray<Value*> InputsArray;
895typedef ZoneGrowableArray<MoveArgumentInstr*> MoveArgumentsArray;
896
897template <typename Trait>
898class InstructionIndexedPropertyIterable {
899 public:
900 struct Iterator {
901 const Instruction* instr;
902 intptr_t index;
903
904 decltype(Trait::At(instr, index)) operator*() const {
905 return Trait::At(instr, index);
906 }
907 Iterator& operator++() {
908 index++;
909 return *this;
910 }
911
912 bool operator==(const Iterator& other) {
913 return instr == other.instr && index == other.index;
914 }
915
916 bool operator!=(const Iterator& other) { return !(*this == other); }
917 };
918
919 explicit InstructionIndexedPropertyIterable(const Instruction* instr)
920 : instr_(instr) {}
921
922 Iterator begin() const { return {instr_, 0}; }
923 Iterator end() const { return {instr_, Trait::Length(instr_)}; }
924
925 private:
926 const Instruction* instr_;
927};
928
929class ValueListIterable {
930 public:
931 struct Iterator {
932 Value* value;
933
934 Value* operator*() const { return value; }
935
936 Iterator& operator++() {
937 value = value->next_use();
938 return *this;
939 }
940
941 bool operator==(const Iterator& other) { return value == other.value; }
942
943 bool operator!=(const Iterator& other) { return !(*this == other); }
944 };
945
946 explicit ValueListIterable(Value* value) : value_(value) {}
947
948 Iterator begin() const { return {value_}; }
949 Iterator end() const { return {nullptr}; }
950
951 private:
952 Value* value_;
953};
954
955class Instruction : public ZoneAllocated {
956 public:
957#define DECLARE_TAG(type, attrs) k##type,
958 enum Tag { FOR_EACH_INSTRUCTION(DECLARE_TAG) kNumInstructions };
959#undef DECLARE_TAG
960
961 static const intptr_t kInstructionAttrs[kNumInstructions];
962
963 enum SpeculativeMode {
964 // Types of inputs should be checked when unboxing for this instruction.
965 kGuardInputs,
966 // Each input is guaranteed to have a valid type for the input
967 // representation and its type should not be checked when unboxing.
968 kNotSpeculative
969 };
970
971 // If the source has the inlining ID of the root function, then don't set
972 // the inlining ID to that; instead, treat it as unset.
973 explicit Instruction(const InstructionSource& source,
974 intptr_t deopt_id = DeoptId::kNone)
975 : deopt_id_(deopt_id), inlining_id_(source.inlining_id) {}
976
977 explicit Instruction(intptr_t deopt_id = DeoptId::kNone)
978 : Instruction(InstructionSource(), deopt_id) {}
979
980 virtual ~Instruction() {}
981
982 virtual Tag tag() const = 0;
983
984 virtual intptr_t statistics_tag() const { return tag(); }
985
986 intptr_t deopt_id() const {
987 ASSERT(ComputeCanDeoptimize() || ComputeCanDeoptimizeAfterCall() ||
988 CanBecomeDeoptimizationTarget() || MayThrow() ||
989 CompilerState::Current().is_aot());
990 return GetDeoptId();
991 }
992
993 static const ICData* GetICData(
994 const ZoneGrowableArray<const ICData*>& ic_data_array,
995 intptr_t deopt_id,
996 bool is_static_call);
997
998 virtual TokenPosition token_pos() const { return TokenPosition::kNoSource; }
999
1000 // Returns the source information for this instruction.
1001 InstructionSource source() const {
1002 return InstructionSource(token_pos(), inlining_id());
1003 }
1004
1005 virtual intptr_t InputCount() const = 0;
1006 virtual Value* InputAt(intptr_t i) const = 0;
1007 void SetInputAt(intptr_t i, Value* value) {
1008 ASSERT(value != nullptr);
1009 value->set_instruction(this);
1010 value->set_use_index(i);
1011 RawSetInputAt(i, value);
1012 }
1013
1014 struct InputsTrait {
1015 static Definition* At(const Instruction* instr, intptr_t index) {
1016 return instr->InputAt(index)->definition();
1017 }
1018
1019 static intptr_t Length(const Instruction* instr) {
1020 return instr->InputCount();
1021 }
1022 };
1023
1024 using InputsIterable = InstructionIndexedPropertyIterable<InputsTrait>;
1025
1026 InputsIterable inputs() { return InputsIterable(this); }
1027
1028 // Remove all inputs (including in the environment) from their
1029 // definition's use lists.
1030 void UnuseAllInputs();
1031
1032 // Call instructions override this function and return the number of
1033 // pushed arguments.
1034 virtual intptr_t ArgumentCount() const { return 0; }
1035 inline Value* ArgumentValueAt(intptr_t index) const;
1036 inline Definition* ArgumentAt(intptr_t index) const;
1037
1038 // Sets array of MoveArgument instructions.
1039 virtual void SetMoveArguments(MoveArgumentsArray* move_arguments) {
1040 UNREACHABLE();
1041 }
1042 // Returns array of MoveArgument instructions
1043 virtual MoveArgumentsArray* GetMoveArguments() const {
1044 UNREACHABLE();
1045 return nullptr;
1046 }
1047 // Replace inputs with separate MoveArgument instructions detached from call.
1048 virtual void ReplaceInputsWithMoveArguments(
1049 MoveArgumentsArray* move_arguments) {
1050 UNREACHABLE();
1051 }
1052 bool HasMoveArguments() const { return GetMoveArguments() != nullptr; }
1053
1054 // Replaces direct uses of arguments with uses of corresponding MoveArgument
1055 // instructions.
1056 void RepairArgumentUsesInEnvironment() const;
1057
1058 // Returns true, if this instruction can deoptimize with its current inputs.
1059 // This property can change if we add or remove redefinitions that constrain
1060 // the type or the range of input operands during compilation.
1061 virtual bool ComputeCanDeoptimize() const = 0;
1062
1063 virtual bool ComputeCanDeoptimizeAfterCall() const {
1064 // TODO(dartbug.com/45213): Incrementally migrate IR instructions from using
1065 // [ComputeCanDeoptimize] to [ComputeCanDeoptimizeAfterCall] if they
1066 // can only lazy deoptimize.
1067 return false;
1068 }
1069
1070 // Once we removed the deopt environment, we assume that this
1071 // instruction can't deoptimize.
1072 bool CanDeoptimize() const {
1073 return env() != nullptr &&
1074 (ComputeCanDeoptimize() || ComputeCanDeoptimizeAfterCall());
1075 }
1076
1077 // Visiting support.
1078 virtual void Accept(InstructionVisitor* visitor) = 0;
1079
1080 Instruction* previous() const { return previous_; }
1081 void set_previous(Instruction* instr) {
1082 ASSERT(!IsBlockEntry());
1083 previous_ = instr;
1084 }
1085
1086 Instruction* next() const { return next_; }
1087 void set_next(Instruction* instr) {
1088 ASSERT(!IsGraphEntry());
1089 ASSERT(!IsReturnBase());
1090 ASSERT(!IsBranch() || (instr == nullptr));
1091 ASSERT(!IsPhi());
1092 ASSERT(instr == nullptr || !instr->IsBlockEntry());
1093 // TODO(fschneider): Also add Throw and ReThrow to the list of instructions
1094 // that do not have a successor. Currently, the graph builder will continue
1095 // to append instruction in case of a Throw inside an expression. This
1096 // condition should be handled in the graph builder
1097 next_ = instr;
1098 }
1099
1100 // Link together two instruction.
1101 void LinkTo(Instruction* next) {
1102 ASSERT(this != next);
1103 this->set_next(next);
1104 next->set_previous(this);
1105 }
1106
  // Removes this instruction from the graph, after use lists have been
  // computed. If the instruction is a definition with uses, those uses are
  // unaffected (so the instruction can be reinserted, e.g., hoisting).
  Instruction* RemoveFromGraph(bool return_previous = true);

  // Normal instructions can have 0 (inside a block) or 1 (last instruction in
  // a block) successors. Branch instruction with >1 successors override this
  // function.
  virtual intptr_t SuccessorCount() const;
  virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;

  // Adapter exposing the successor list to
  // InstructionIndexedPropertyIterable.
  struct SuccessorsTrait {
    static BlockEntryInstr* At(const Instruction* instr, intptr_t index) {
      return instr->SuccessorAt(index);
    }

    static intptr_t Length(const Instruction* instr) {
      return instr->SuccessorCount();
    }
  };

  using SuccessorsIterable =
      InstructionIndexedPropertyIterable<SuccessorsTrait>;

  // Range over successor blocks, usable with range-based for loops.
  inline SuccessorsIterable successors() const {
    return SuccessorsIterable(this);
  }
1134
  // Jumps to [entry]; defined out of line (see il.cc for linking details).
  void Goto(JoinEntryInstr* entry);

  // Human-readable instruction name for printing and debugging.
  virtual const char* DebugName() const = 0;

#if defined(DEBUG)
  // Checks that the field stored in an instruction has proper form:
  // - must be a zone-handle
  // - In background compilation, must be cloned.
  // Aborts if field is not OK.
  void CheckField(const Field& field) const;
#else
  void CheckField(const Field& field) const {}
#endif  // DEBUG

  // Printing support.
  const char* ToCString() const;

// Declares the Is##Name()/As##Name() checked-cast trio: the const As##Name()
// is virtual (returns nullptr here, overridden by the matching class); the
// non-const overload forwards to the const one.
#define DECLARE_INSTRUCTION_TYPE_CHECK(Name, Type)                             \
  bool Is##Name() const { return (As##Name() != nullptr); }                    \
  Type* As##Name() {                                                           \
    auto const_this = static_cast<const Instruction*>(this);                   \
    return const_cast<Type*>(const_this->As##Name());                          \
  }                                                                            \
  virtual const Type* As##Name() const { return nullptr; }
#define INSTRUCTION_TYPE_CHECK(Name, Attrs)                                    \
  DECLARE_INSTRUCTION_TYPE_CHECK(Name, Name##Instr)

  DECLARE_INSTRUCTION_TYPE_CHECK(Definition, Definition)
  DECLARE_INSTRUCTION_TYPE_CHECK(BlockEntryWithInitialDefs,
                                 BlockEntryWithInitialDefs)

#undef INSTRUCTION_TYPE_CHECK
#undef DECLARE_INSTRUCTION_TYPE_CHECK
1172
  // Unchecked downcast; use only when the dynamic type is already known
  // (e.g. after an Is##Name() check).
  template <typename T>
  T* Cast() {
    return static_cast<T*>(this);
  }

  template <typename T>
  const T* Cast() const {
    return static_cast<const T*>(this);
  }

  // Returns structure describing location constraints required
  // to emit native code for this instruction.
  LocationSummary* locs() {
    ASSERT(locs_ != nullptr);
    return locs_;
  }

  bool HasLocs() const { return locs_ != nullptr; }

  // Creates (but does not install) the location summary; overridden per
  // instruction/architecture.
  virtual LocationSummary* MakeLocationSummary(Zone* zone,
                                               bool is_optimizing) const = 0;

  // Creates and installs the location summary; may be called at most once.
  void InitializeLocationSummary(Zone* zone, bool optimizing) {
    ASSERT(locs_ == nullptr);
    locs_ = MakeLocationSummary(zone, optimizing);
  }

  // Makes a new call location summary (or uses `locs`) and initializes the
  // output register constraints depending on the representation of [instr].
  static LocationSummary* MakeCallSummary(Zone* zone,
                                          const Instruction* instr,
                                          LocationSummary* locs = nullptr);

  virtual void EmitNativeCode(FlowGraphCompiler* compiler) { UNIMPLEMENTED(); }

  // Deoptimization environment attached to this instruction (nullptr if
  // none); see CanDeoptimize()/NeedsEnvironment().
  Environment* env() const { return env_; }
  void SetEnvironment(Environment* deopt_env);
  void RemoveEnvironment();
  void ReplaceInEnvironment(Definition* current, Definition* replacement);

  // NOTE(review): semantics defined by overriders; default is 0.
  virtual intptr_t NumberOfInputsConsumedBeforeCall() const { return 0; }

  // Different compiler passes can assign pass specific ids to the instruction.
  // Only one id can be stored at a time. Reading an id stored by a different
  // pass yields kNoId.
  intptr_t GetPassSpecificId(CompilerPass::Id pass) const {
    return (PassSpecificId::DecodePass(pass_specific_id_) == pass)
               ? PassSpecificId::DecodeId(pass_specific_id_)
               : PassSpecificId::kNoId;
  }
  void SetPassSpecificId(CompilerPass::Id pass, intptr_t id) {
    pass_specific_id_ = PassSpecificId::Encode(pass, id);
  }
  bool HasPassSpecificId(CompilerPass::Id pass) const {
    return (PassSpecificId::DecodePass(pass_specific_id_) == pass) &&
           (PassSpecificId::DecodeId(pass_specific_id_) !=
            PassSpecificId::kNoId);
  }
1230
  // Whether some input is used in a representation other than the one this
  // instruction requires for it (defined out of line).
  bool HasUnmatchedInputRepresentations() const;

  // Returns representation expected for the input operand at the given index.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    return kTagged;
  }

  // Aggregate over all inputs: guard if any single input must be guarded.
  SpeculativeMode SpeculativeModeOfInputs() const {
    for (intptr_t i = 0; i < InputCount(); i++) {
      if (SpeculativeModeOfInput(i) == kGuardInputs) {
        return kGuardInputs;
      }
    }
    return kNotSpeculative;
  }

  // By default, instructions should check types of inputs when unboxing
  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kGuardInputs;
  }

  // Representation of the value produced by this computation.
  virtual Representation representation() const { return kTagged; }

  // An instruction unlinked from its block has a cleared next() pointer.
  bool WasEliminated() const { return next() == nullptr; }

  // Returns deoptimization id that corresponds to the deoptimization target
  // that input operands conversions inserted for this instruction can jump
  // to.
  virtual intptr_t DeoptimizationTarget() const {
    UNREACHABLE();
    return DeoptId::kNone;
  }

  // Returns a replacement for the instruction or nullptr if the instruction
  // can be eliminated. By default returns this instruction, which means no
  // change.
  virtual Instruction* Canonicalize(FlowGraph* flow_graph);
1269
  // Insert this instruction before 'next' after use lists are computed.
  // Instructions cannot be inserted before a block entry or any other
  // instruction without a previous instruction.
  void InsertBefore(Instruction* next) { InsertAfter(next->previous()); }

  // Insert this instruction after 'prev' after use lists are computed.
  void InsertAfter(Instruction* prev);

  // Append an instruction to the current one and return the tail.
  // This function updates def-use chains of the newly appended
  // instruction.
  Instruction* AppendInstruction(Instruction* tail);

  // Returns true if CSE and LICM are allowed for this instruction.
  virtual bool AllowsCSE() const { return false; }

  // Returns true if this instruction has any side-effects besides storing.
  // See StoreFieldInstr::HasUnknownSideEffects() for rationale.
  virtual bool HasUnknownSideEffects() const = 0;

  // Whether this instruction can call Dart code without going through
  // the runtime.
  //
  // Must be true for any instruction which can call Dart code without
  // first creating an exit frame to transition into the runtime.
  //
  // See also WriteBarrierElimination and Thread::RememberLiveTemporaries().
  virtual bool CanCallDart() const { return false; }

  virtual bool CanTriggerGC() const;

  // Get the block entry for this instruction.
  virtual BlockEntryInstr* GetBlock();

  // Inlining id; negative means "not set" (see has_inlining_id()).
  virtual intptr_t inlining_id() const { return inlining_id_; }
  virtual void set_inlining_id(intptr_t value) {
    ASSERT(value >= 0);
    // Once set, the id may not change to a different value.
    ASSERT(!has_inlining_id() || inlining_id_ == value);
    inlining_id_ = value;
  }
  virtual bool has_inlining_id() const { return inlining_id_ >= 0; }

  // Returns a hash code for use with hash maps.
  virtual uword Hash() const;

  // Compares two instructions. Returns true, iff:
  // 1. They have the same tag.
  // 2. All input operands are Equals.
  // 3. They satisfy AttributesEqual.
  bool Equals(const Instruction& other) const;

  // Compare attributes of the instructions (except input operands and tag).
  // All instructions that participate in CSE have to override this function.
  // This function can assume that the argument has the same type as this.
  virtual bool AttributesEqual(const Instruction& other) const {
    UNREACHABLE();
    return false;
  }

  void InheritDeoptTarget(Zone* zone, Instruction* other);

  // Whether a deopt environment must be attached: the instruction can
  // deoptimize (eagerly or after a call), be a deopt target, or throw.
  bool NeedsEnvironment() const {
    return ComputeCanDeoptimize() || ComputeCanDeoptimizeAfterCall() ||
           CanBecomeDeoptimizationTarget() || MayThrow();
  }
1335
  // Overridden (to return true) by instructions that can serve as
  // deoptimization targets, e.g. block entries.
  virtual bool CanBecomeDeoptimizationTarget() const { return false; }

  void InheritDeoptTargetAfter(FlowGraph* flow_graph,
                               Definition* call,
                               Definition* result);

  virtual bool MayThrow() const = 0;

  // Returns true if instruction may have a "visible" effect.
  virtual bool MayHaveVisibleEffect() const {
    return HasUnknownSideEffects() || MayThrow();
  }

  // Returns true if this instruction can be eliminated if its result is not
  // used without changing the behavior of the program. For Definitions,
  // overwrite CanReplaceWithConstant() instead.
  virtual bool CanEliminate(const BlockEntryInstr* block) const;
  bool CanEliminate() { return CanEliminate(GetBlock()); }

  bool IsDominatedBy(Instruction* dom);

  // Drops the environment pointer. NOTE(review): unlike RemoveEnvironment(),
  // this does not appear to touch use lists — confirm in il.cc before use.
  void ClearEnv() { env_ = nullptr; }

  void Unsupported(FlowGraphCompiler* compiler);

  // Whether slow paths may be shared between instructions: only when
  // precompiling optimized code, and never on IA32.
  static bool SlowPathSharingSupported(bool is_optimizing) {
#if defined(TARGET_ARCH_IA32)
    return false;
#else
    return FLAG_enable_slow_path_sharing && FLAG_precompiled_mode &&
           is_optimizing;
#endif
  }

  virtual bool UseSharedSlowPathStub(bool is_optimizing) const { return false; }

  // 'RegisterKindForResult()' returns the register kind necessary to hold the
  // result.
  //
  // This is not virtual because instructions should override representation()
  // instead.
  Location::Kind RegisterKindForResult() const {
    const Representation rep = representation();
    if ((rep == kUnboxedFloat) || (rep == kUnboxedDouble) ||
        (rep == kUnboxedFloat32x4) || (rep == kUnboxedInt32x4) ||
        (rep == kUnboxedFloat64x2)) {
      return Location::kFpuRegister;
    }
    return Location::kRegister;
  }
1386
  DECLARE_CUSTOM_SERIALIZATION(Instruction)

 protected:
  // GetDeoptId and/or CopyDeoptIdFrom.
  friend class CallSiteInliner;
  friend class LICM;
  friend class ComparisonInstr;
  friend class Scheduler;
  friend class BlockEntryInstr;
  friend class CatchBlockEntryInstr;  // deopt_id_
  friend class DebugStepCheckInstr;   // deopt_id_
  friend class StrictCompareInstr;    // deopt_id_

  // Fetch deopt id without checking if this computation can deoptimize.
  intptr_t GetDeoptId() const { return deopt_id_; }

  virtual void CopyDeoptIdFrom(const Instruction& instr) {
    deopt_id_ = instr.deopt_id_;
  }

  // Write/read locs and environment, but not inputs.
  // Used when one instruction embeds another and reuses their inputs
  // (e.g. Branch/IfThenElse/CheckCondition wrap Comparison).
  void WriteExtraWithoutInputs(FlowGraphSerializer* s);
  void ReadExtraWithoutInputs(FlowGraphDeserializer* d);

 private:
  friend class BranchInstr;          // For RawSetInputAt.
  friend class IfThenElseInstr;      // For RawSetInputAt.
  friend class CheckConditionInstr;  // For RawSetInputAt.

  virtual void RawSetInputAt(intptr_t i, Value* value) = 0;

  // Packs a (pass, id) pair into a single intptr_t: the pass occupies the
  // low kPassBits bits and the id the remaining high bits.
  class PassSpecificId {
   public:
    static intptr_t Encode(CompilerPass::Id pass, intptr_t id) {
      return (id << kPassBits) | pass;
    }

    static CompilerPass::Id DecodePass(intptr_t value) {
      return static_cast<CompilerPass::Id>(value & Utils::NBitMask(kPassBits));
    }

    static intptr_t DecodeId(intptr_t value) { return (value >> kPassBits); }

    static constexpr intptr_t kNoId = -1;

   private:
    static constexpr intptr_t kPassBits = 8;
    static_assert(CompilerPass::kNumPasses <= (1 << kPassBits),
                  "Pass Id does not fit into the bit field");
  };

  intptr_t deopt_id_ = DeoptId::kNone;
  intptr_t pass_specific_id_ = PassSpecificId::kNoId;
  Instruction* previous_ = nullptr;
  Instruction* next_ = nullptr;
  Environment* env_ = nullptr;
  LocationSummary* locs_ = nullptr;
  // NOTE(review): no in-class initializer; presumably set to a negative
  // value by the constructor (has_inlining_id() checks >= 0) — confirm.
  intptr_t inlining_id_;

  DISALLOW_COPY_AND_ASSIGN(Instruction);
1450};
1451
// Branch targets used when emitting code for a condition: where to jump when
// it is true, when it is false, and which label (if either) immediately
// follows the emitted code (the fall-through).
struct BranchLabels {
  compiler::Label* true_label;
  compiler::Label* false_label;
  compiler::Label* fall_through;
};
1457
// Base class for instructions that participate in CSE: they allow CSE and
// report no unknown side effects.
class PureInstruction : public Instruction {
 public:
  explicit PureInstruction(intptr_t deopt_id) : Instruction(deopt_id) {}
  explicit PureInstruction(const InstructionSource& source, intptr_t deopt_id)
      : Instruction(source, deopt_id) {}

  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  DECLARE_EMPTY_SERIALIZATION(PureInstruction, Instruction)
};
1469
// Types to be used as ThrowsTrait for TemplateInstruction/TemplateDefinition.
// The instruction may throw.
struct Throws {
  static constexpr bool kCanThrow = true;
};

// The instruction never throws.
struct NoThrow {
  static constexpr bool kCanThrow = false;
};
1478
// Types to be used as CSETrait for TemplateInstruction/TemplateDefinition.
// Pure instructions are those that allow CSE and have no effects and
// no dependencies. The trait simply selects which base class the template
// instruction derives from.
template <typename DefaultBase, typename PureBase>
struct Pure {
  using Base = PureBase;
};

template <typename DefaultBase, typename PureBase>
struct NoCSE {
  using Base = DefaultBase;
};
1491
// Instruction with a fixed number (N) of input operands stored inline.
// ThrowsTrait (Throws/NoThrow) determines MayThrow(); CSETrait (NoCSE/Pure)
// selects the base class (Instruction or PureInstruction).
template <intptr_t N,
          typename ThrowsTrait,
          template <typename Default, typename Pure> class CSETrait = NoCSE>
class TemplateInstruction
    : public CSETrait<Instruction, PureInstruction>::Base {
 public:
  using BaseClass = typename CSETrait<Instruction, PureInstruction>::Base;

  explicit TemplateInstruction(intptr_t deopt_id = DeoptId::kNone)
      : BaseClass(deopt_id), inputs_() {}

  TemplateInstruction(const InstructionSource& source,
                      intptr_t deopt_id = DeoptId::kNone)
      : BaseClass(source, deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }

  DECLARE_EMPTY_SERIALIZATION(TemplateInstruction, BaseClass)

 protected:
  EmbeddedArray<Value*, N> inputs_;

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
1520
1521class MoveOperands : public ZoneAllocated {
1522 public:
1523 MoveOperands(Location dest, Location src) : dest_(dest), src_(src) {}
1524 MoveOperands(const MoveOperands& other)
1525 : ZoneAllocated(), dest_(other.dest_), src_(other.src_) {}
1526
1527 MoveOperands& operator=(const MoveOperands& other) {
1528 dest_ = other.dest_;
1529 src_ = other.src_;
1530 return *this;
1531 }
1532
1533 Location src() const { return src_; }
1534 Location dest() const { return dest_; }
1535
1536 Location* src_slot() { return &src_; }
1537 Location* dest_slot() { return &dest_; }
1538
1539 void set_src(const Location& value) { src_ = value; }
1540 void set_dest(const Location& value) { dest_ = value; }
1541
1542 // The parallel move resolver marks moves as "in-progress" by clearing the
1543 // destination (but not the source).
1544 Location MarkPending() {
1545 ASSERT(!IsPending());
1546 Location dest = dest_;
1547 dest_ = Location::NoLocation();
1548 return dest;
1549 }
1550
1551 void ClearPending(Location dest) {
1552 ASSERT(IsPending());
1553 dest_ = dest;
1554 }
1555
1556 bool IsPending() const {
1557 ASSERT(!src_.IsInvalid() || dest_.IsInvalid());
1558 return dest_.IsInvalid() && !src_.IsInvalid();
1559 }
1560
1561 // True if this move a move from the given location.
1562 bool Blocks(Location loc) const {
1563 return !IsEliminated() && src_.Equals(loc);
1564 }
1565
1566 // A move is redundant if it's been eliminated, if its source and
1567 // destination are the same, or if its destination is unneeded.
1568 bool IsRedundant() const {
1569 return IsEliminated() || dest_.IsInvalid() || src_.Equals(dest_);
1570 }
1571
1572 // We clear both operands to indicate move that's been eliminated.
1573 void Eliminate() { src_ = dest_ = Location::NoLocation(); }
1574 bool IsEliminated() const {
1575 ASSERT(!src_.IsInvalid() || dest_.IsInvalid());
1576 return src_.IsInvalid();
1577 }
1578
1579 void Write(FlowGraphSerializer* s) const;
1580 explicit MoveOperands(FlowGraphDeserializer* d);
1581
1582 private:
1583 Location dest_;
1584 Location src_;
1585};
1586
// A group of moves that must behave as if performed simultaneously. Used by
// the linear scan register allocator to connect live ranges at the start of
// a block (see BlockEntryInstr::parallel_move()).
class ParallelMoveInstr : public TemplateInstruction<0, NoThrow> {
 public:
  ParallelMoveInstr() : moves_(4) {}

  DECLARE_INSTRUCTION(ParallelMove)

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const {
    UNREACHABLE();  // This instruction never visited by optimization passes.
    return false;
  }

  const GrowableArray<MoveOperands*>& moves() const { return moves_; }

  // Appends a dest <- src move and returns it (zone-allocated).
  MoveOperands* AddMove(Location dest, Location src) {
    MoveOperands* move = new MoveOperands(dest, src);
    moves_.Add(move);
    return move;
  }

  MoveOperands* MoveOperandsAt(intptr_t index) const { return moves_[index]; }

  intptr_t NumMoves() const { return moves_.length(); }

  // True when the parallel move performs no useful work (defined in il.cc).
  bool IsRedundant() const;

  virtual TokenPosition token_pos() const {
    return TokenPosition::kParallelMove;
  }

  // Linearized execution order for the moves; must be set before querying.
  const MoveSchedule& move_schedule() const {
    ASSERT(move_schedule_ != nullptr);
    return *move_schedule_;
  }

  void set_move_schedule(const MoveSchedule& schedule) {
    move_schedule_ = &schedule;
  }

  DECLARE_EMPTY_SERIALIZATION(ParallelMoveInstr, TemplateInstruction)

 private:
  GrowableArray<MoveOperands*> moves_;  // Elements cannot be null.
  const MoveSchedule* move_schedule_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveInstr);
};
1637
// Basic block entries are administrative nodes. There is a distinguished
// graph entry with no predecessor. Joins are the only nodes with multiple
// predecessors. Targets are all other basic block entries. The types
// enforce edge-split form---joins are forbidden as the successors of
// branches.
class BlockEntryInstr : public TemplateInstruction<0, NoThrow> {
 public:
  virtual intptr_t PredecessorCount() const = 0;
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const = 0;

  // Depth-first preorder/postorder numbers (-1 until computed, see
  // DiscoverBlock()).
  intptr_t preorder_number() const { return preorder_number_; }
  void set_preorder_number(intptr_t number) { preorder_number_ = number; }

  intptr_t postorder_number() const { return postorder_number_; }
  void set_postorder_number(intptr_t number) { postorder_number_ = number; }

  intptr_t block_id() const { return block_id_; }

  // NOTE: These are SSA positions and not token positions. These are used by
  // the register allocator.
  void set_start_pos(intptr_t pos) { start_pos_ = pos; }
  intptr_t start_pos() const { return start_pos_; }
  void set_end_pos(intptr_t pos) { end_pos_ = pos; }
  intptr_t end_pos() const { return end_pos_; }

  // Immediate dominator (nullptr for the graph entry).
  BlockEntryInstr* dominator() const { return dominator_; }
  BlockEntryInstr* ImmediateDominator() const;

  const GrowableArray<BlockEntryInstr*>& dominated_blocks() {
    return dominated_blocks_;
  }

  // Records [block] as immediately dominated by this block and sets its
  // dominator link back to this block.
  void AddDominatedBlock(BlockEntryInstr* block) {
    ASSERT(!block->IsFunctionEntry() || this->IsGraphEntry());
    block->set_dominator(this);
    dominated_blocks_.Add(block);
  }
  void ClearDominatedBlocks() { dominated_blocks_.Clear(); }

  bool Dominates(BlockEntryInstr* other) const;

  Instruction* last_instruction() const { return last_instruction_; }
  void set_last_instruction(Instruction* instr) { last_instruction_ = instr; }

  // Parallel move used by the linear scan register allocator to connect
  // live ranges at the start of this block (created lazily, see
  // GetParallelMove()).
  ParallelMoveInstr* parallel_move() const { return parallel_move_; }

  bool HasParallelMove() const { return parallel_move_ != nullptr; }

  bool HasNonRedundantParallelMove() const {
    return HasParallelMove() && !parallel_move()->IsRedundant();
  }

  ParallelMoveInstr* GetParallelMove() {
    if (parallel_move_ == nullptr) {
      parallel_move_ = new ParallelMoveInstr();
    }
    return parallel_move_;
  }

  // Discover basic-block structure of the current block. Must be called
  // on all graph blocks in preorder to yield valid results. As a side effect,
  // the block entry instructions in the graph are assigned preorder numbers.
  // The array 'preorder' maps preorder block numbers to the block entry
  // instruction with that number. The depth first spanning tree is recorded
  // in the array 'parent', which maps preorder block numbers to the preorder
  // number of the block's spanning-tree parent. As a side effect of this
  // function, the set of basic block predecessors (e.g., block entry
  // instructions of predecessor blocks) and also the last instruction in the
  // block is recorded in each entry instruction. Returns true when called the
  // first time on this particular block within one graph traversal, and false
  // on all successive calls.
  bool DiscoverBlock(BlockEntryInstr* predecessor,
                     GrowableArray<BlockEntryInstr*>* preorder,
                     GrowableArray<intptr_t>* parent);

  virtual bool CanBecomeDeoptimizationTarget() const {
    // BlockEntry environment is copied to Goto and Branch instructions
    // when we insert new blocks targeting this block.
    return true;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  intptr_t try_index() const { return try_index_; }
  void set_try_index(intptr_t index) { try_index_ = index; }

  // True for blocks inside a try { } region.
  bool InsideTryBlock() const { return try_index_ != kInvalidTryIndex; }

  // Loop related methods.
  LoopInfo* loop_info() const { return loop_info_; }
  void set_loop_info(LoopInfo* loop_info) { loop_info_ = loop_info; }
  bool IsLoopHeader() const;
  intptr_t NestingDepth() const;

  virtual BlockEntryInstr* GetBlock() { return this; }

  virtual TokenPosition token_pos() const {
    return TokenPosition::kControlFlow;
  }

  // Helper to mutate the graph during inlining. This block should be
  // replaced with new_block as a predecessor of all of this block's
  // successors.
  void ReplaceAsPredecessorWith(BlockEntryInstr* new_block);

  void set_block_id(intptr_t block_id) { block_id_ = block_id; }

  // Stack-based IR bookkeeping.
  intptr_t stack_depth() const { return stack_depth_; }
  void set_stack_depth(intptr_t s) { stack_depth_ = s; }

  // For all instruction in this block: Remove all inputs (including in the
  // environment) from their definition's use lists for all instructions.
  void ClearAllInstructions();

  // Adapter so the block body can be traversed with a range-based for loop.
  class InstructionsIterable {
   public:
    explicit InstructionsIterable(BlockEntryInstr* block) : block_(block) {}

    inline ForwardInstructionIterator begin() const;
    inline ForwardInstructionIterator end() const;

   private:
    BlockEntryInstr* block_;
  };

  InstructionsIterable instructions() { return InstructionsIterable(this); }

  DECLARE_CUSTOM_SERIALIZATION(BlockEntryInstr)

 protected:
  BlockEntryInstr(intptr_t block_id,
                  intptr_t try_index,
                  intptr_t deopt_id,
                  intptr_t stack_depth)
      : TemplateInstruction(deopt_id),
        block_id_(block_id),
        try_index_(try_index),
        stack_depth_(stack_depth),
        dominated_blocks_(1) {}

  // Perform a depth first search to find OSR entry and
  // link it to the given graph entry.
  bool FindOsrEntryAndRelink(GraphEntryInstr* graph_entry,
                             Instruction* parent,
                             BitVector* block_marks);

 private:
  virtual void ClearPredecessors() = 0;
  virtual void AddPredecessor(BlockEntryInstr* predecessor) = 0;

  void set_dominator(BlockEntryInstr* instr) { dominator_ = instr; }

  intptr_t block_id_;
  intptr_t try_index_;
  intptr_t preorder_number_ = -1;
  intptr_t postorder_number_ = -1;
  // Expected stack depth on entry (for stack-based IR only).
  intptr_t stack_depth_;
  // Starting and ending lifetime positions for this block. Used by
  // the linear scan register allocator.
  intptr_t start_pos_ = -1;
  intptr_t end_pos_ = -1;
  // Immediate dominator, nullptr for graph entry.
  BlockEntryInstr* dominator_ = nullptr;
  // TODO(fschneider): Optimize the case of one child to save space.
  GrowableArray<BlockEntryInstr*> dominated_blocks_;
  Instruction* last_instruction_ = nullptr;

  // Parallel move that will be used by linear scan register allocator to
  // connect live ranges at the start of the block.
  ParallelMoveInstr* parallel_move_ = nullptr;

  // Closest enveloping loop in loop hierarchy (nullptr at nesting depth 0).
  LoopInfo* loop_info_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(BlockEntryInstr);
};
1822
1823class ForwardInstructionIterator {
1824 public:
1825 ForwardInstructionIterator(const ForwardInstructionIterator& other) = default;
1826 ForwardInstructionIterator& operator=(
1827 const ForwardInstructionIterator& other) = default;
1828
1829 ForwardInstructionIterator() : current_(nullptr) {}
1830
1831 explicit ForwardInstructionIterator(BlockEntryInstr* block_entry)
1832 : current_(block_entry) {
1833 Advance();
1834 }
1835
1836 void Advance() {
1837 ASSERT(!Done());
1838 current_ = current_->next();
1839 }
1840
1841 bool Done() const { return current_ == nullptr; }
1842
1843 // Removes 'current_' from graph and sets 'current_' to previous instruction.
1844 void RemoveCurrentFromGraph();
1845
1846 Instruction* Current() const { return current_; }
1847
1848 Instruction* operator*() const { return Current(); }
1849
1850 bool operator==(const ForwardInstructionIterator& other) const {
1851 return current_ == other.current_;
1852 }
1853
1854 bool operator!=(const ForwardInstructionIterator& other) const {
1855 return !(*this == other);
1856 }
1857
1858 ForwardInstructionIterator& operator++() {
1859 Advance();
1860 return *this;
1861 }
1862
1863 private:
1864 Instruction* current_;
1865};
1866
// Out-of-line definitions of the range accessors: they must follow the full
// definition of ForwardInstructionIterator because they return it by value.
ForwardInstructionIterator BlockEntryInstr::InstructionsIterable::begin()
    const {
  return ForwardInstructionIterator(block_);
}

ForwardInstructionIterator BlockEntryInstr::InstructionsIterable::end() const {
  return ForwardInstructionIterator();
}
1875
// Iterates backwards over the instructions of a basic block, starting at the
// block's last instruction and stopping before the block entry itself.
class BackwardInstructionIterator : public ValueObject {
 public:
  explicit BackwardInstructionIterator(BlockEntryInstr* block_entry)
      : block_entry_(block_entry), current_(block_entry->last_instruction()) {
    // The block entry has no previous link, making it a natural sentinel
    // for Done().
    ASSERT(block_entry_->previous() == nullptr);
  }

  void Advance() {
    ASSERT(!Done());
    current_ = current_->previous();
  }

  bool Done() const { return current_ == block_entry_; }

  // Removes the current instruction from the graph (see il.cc for where the
  // iterator is repositioned afterwards).
  void RemoveCurrentFromGraph();

  Instruction* Current() const { return current_; }

 private:
  BlockEntryInstr* block_entry_;
  Instruction* current_;
};
1898
// Base class shared by all block entries which define initial definitions.
//
// The initial definitions define parameters, special parameters and constants.
class BlockEntryWithInitialDefs : public BlockEntryInstr {
 public:
  BlockEntryWithInitialDefs(intptr_t block_id,
                            intptr_t try_index,
                            intptr_t deopt_id,
                            intptr_t stack_depth)
      : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth) {}

  // Definitions (parameters, constants, ...) owned by this block entry.
  GrowableArray<Definition*>* initial_definitions() {
    return &initial_definitions_;
  }
  const GrowableArray<Definition*>* initial_definitions() const {
    return &initial_definitions_;
  }

  virtual BlockEntryWithInitialDefs* AsBlockEntryWithInitialDefs() {
    return this;
  }
  virtual const BlockEntryWithInitialDefs* AsBlockEntryWithInitialDefs() const {
    return this;
  }

  DECLARE_CUSTOM_SERIALIZATION(BlockEntryWithInitialDefs)

 protected:
  // Prints the initial definitions to [f] (helper for subclass printers).
  void PrintInitialDefinitionsTo(BaseTextBuffer* f) const;

 private:
  GrowableArray<Definition*> initial_definitions_;

  DISALLOW_COPY_AND_ASSIGN(BlockEntryWithInitialDefs);
};
1935
// The unique entry block of a flow graph. Has no predecessors; its
// successors include the normal/unchecked/OSR function entries and any
// catch block entries. Inherits ownership of the initial definitions from
// BlockEntryWithInitialDefs.
class GraphEntryInstr : public BlockEntryWithInitialDefs {
 public:
  GraphEntryInstr(const ParsedFunction& parsed_function, intptr_t osr_id);

  DECLARE_INSTRUCTION(GraphEntry)

  // The graph entry never has predecessors.
  virtual intptr_t PredecessorCount() const { return 0; }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    UNREACHABLE();
    return nullptr;
  }
  virtual intptr_t SuccessorCount() const;
  virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;

  void AddCatchEntry(CatchBlockEntryInstr* entry) { catch_entries_.Add(entry); }

  CatchBlockEntryInstr* GetCatchEntry(intptr_t index);

  void AddIndirectEntry(IndirectEntryInstr* entry) {
    indirect_entries_.Add(entry);
  }

  ConstantInstr* constant_null();

  // On-stack replacement (OSR) support.
  void RelinkToOsrEntry(Zone* zone, intptr_t max_block_id);
  bool IsCompiledForOsr() const;
  intptr_t osr_id() const { return osr_id_; }

  intptr_t entry_count() const { return entry_count_; }
  void set_entry_count(intptr_t count) { entry_count_ = count; }

  // Number of stack slots needed for spilling (presumably set by the
  // register allocator — see callers).
  intptr_t spill_slot_count() const { return spill_slot_count_; }
  void set_spill_slot_count(intptr_t count) {
    ASSERT(count >= 0);
    spill_slot_count_ = count;
  }

  // Returns true if this flow graph needs a stack frame.
  bool NeedsFrame() const { return needs_frame_; }
  void MarkFrameless() { needs_frame_ = false; }

  // Number of stack slots reserved for compiling try-catch. For functions
  // without try-catch, this is 0. Otherwise, it is the number of local
  // variables.
  intptr_t fixed_slot_count() const { return fixed_slot_count_; }
  void set_fixed_slot_count(intptr_t count) {
    ASSERT(count >= 0);
    fixed_slot_count_ = count;
  }
  FunctionEntryInstr* normal_entry() const { return normal_entry_; }
  FunctionEntryInstr* unchecked_entry() const { return unchecked_entry_; }
  void set_normal_entry(FunctionEntryInstr* entry) { normal_entry_ = entry; }
  void set_unchecked_entry(FunctionEntryInstr* target) {
    unchecked_entry_ = target;
  }
  OsrEntryInstr* osr_entry() const { return osr_entry_; }
  void set_osr_entry(OsrEntryInstr* entry) { osr_entry_ = entry; }

  const ParsedFunction& parsed_function() const { return parsed_function_; }

  const GrowableArray<CatchBlockEntryInstr*>& catch_entries() const {
    return catch_entries_;
  }

  const GrowableArray<IndirectEntryInstr*>& indirect_entries() const {
    return indirect_entries_;
  }

  // True when the function can only be entered through the normal entry:
  // no catch entries and no unchecked entry.
  bool HasSingleEntryPoint() const {
    return catch_entries().is_empty() && unchecked_entry() == nullptr;
  }

  DECLARE_CUSTOM_SERIALIZATION(GraphEntryInstr)

 private:
  GraphEntryInstr(const ParsedFunction& parsed_function,
                  intptr_t osr_id,
                  intptr_t deopt_id);

  virtual void ClearPredecessors() {}
  virtual void AddPredecessor(BlockEntryInstr* predecessor) { UNREACHABLE(); }

  const ParsedFunction& parsed_function_;
  FunctionEntryInstr* normal_entry_ = nullptr;
  FunctionEntryInstr* unchecked_entry_ = nullptr;
  OsrEntryInstr* osr_entry_ = nullptr;
  GrowableArray<CatchBlockEntryInstr*> catch_entries_;
  // Indirect targets are blocks reachable only through indirect gotos.
  GrowableArray<IndirectEntryInstr*> indirect_entries_;
  const intptr_t osr_id_;
  intptr_t entry_count_;
  intptr_t spill_slot_count_;
  intptr_t fixed_slot_count_;  // For try-catch in optimized code.
  bool needs_frame_ = true;

  DISALLOW_COPY_AND_ASSIGN(GraphEntryInstr);
};
2035
// Basic-block entry with multiple predecessors. Carries the phi functions
// that merge dataflow values arriving along the incoming edges.
class JoinEntryInstr : public BlockEntryInstr {
 public:
  JoinEntryInstr(intptr_t block_id,
                 intptr_t try_index,
                 intptr_t deopt_id,
                 intptr_t stack_depth = 0)
      : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth),
        phis_(nullptr),
        predecessors_(2)  // Two is assumed to be the common case.
  {}

  DECLARE_INSTRUCTION(JoinEntry)

  virtual intptr_t PredecessorCount() const { return predecessors_.length(); }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    return predecessors_[index];
  }

  // Returns -1 if pred is not in the list.
  intptr_t IndexOfPredecessor(BlockEntryInstr* pred) const;

  // May be nullptr when the join has no phis.
  ZoneGrowableArray<PhiInstr*>* phis() const { return phis_; }

  // Creates (if necessary) and inserts a phi for the environment slot
  // [var_index]; [var_count] is the total number of environment slots.
  PhiInstr* InsertPhi(intptr_t var_index, intptr_t var_count);
  void RemoveDeadPhis(Definition* replacement);

  void InsertPhi(PhiInstr* phi);
  void RemovePhi(PhiInstr* phi);

  virtual bool HasUnknownSideEffects() const { return false; }

#define FIELD_LIST(F) F(ZoneGrowableArray<PhiInstr*>*, phis_)

  // NOTE(review): the head of a DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(
  // JoinEntryInstr, ...) macro invocation appears truncated in this
  // rendering -- confirm against the upstream il.h.
                                 BlockEntryInstr,
                                 FIELD_LIST)
#undef FIELD_LIST

 private:
  // Classes that have access to predecessors_ when inlining.
  friend class BlockEntryInstr;
  friend class InlineExitCollector;
  friend class PolymorphicInliner;
  friend class IndirectEntryInstr;  // Access in il_printer.cc.

  // Direct access to phis_ in order to resize it due to phi elimination.
  friend class ConstantPropagator;
  friend class DeadCodeElimination;

  virtual void ClearPredecessors() { predecessors_.Clear(); }
  virtual void AddPredecessor(BlockEntryInstr* predecessor);

  GrowableArray<BlockEntryInstr*> predecessors_;

  DISALLOW_COPY_AND_ASSIGN(JoinEntryInstr);
};
2095
2096class PhiIterator : public ValueObject {
2097 public:
2098 explicit PhiIterator(JoinEntryInstr* join) : phis_(join->phis()), index_(0) {}
2099
2100 void Advance() {
2101 ASSERT(!Done());
2102 index_++;
2103 }
2104
2105 bool Done() const {
2106 return (phis_ == nullptr) || (index_ >= phis_->length());
2107 }
2108
2109 PhiInstr* Current() const { return (*phis_)[index_]; }
2110
2111 // Removes current phi from graph and sets current to previous phi.
2112 void RemoveCurrentFromGraph();
2113
2114 private:
2115 ZoneGrowableArray<PhiInstr*>* phis_;
2116 intptr_t index_;
2117};
2118
// Basic-block entry with exactly one predecessor, e.g. one arm of a branch.
class TargetEntryInstr : public BlockEntryInstr {
 public:
  TargetEntryInstr(intptr_t block_id,
                   intptr_t try_index,
                   intptr_t deopt_id,
                   intptr_t stack_depth = 0)
      : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth),
        edge_weight_(0.0) {}

  DECLARE_INSTRUCTION(TargetEntry)

  // Profile-derived weight of the incoming edge, used for block ordering.
  double edge_weight() const { return edge_weight_; }
  void set_edge_weight(double weight) { edge_weight_ = weight; }
  void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; }

  virtual intptr_t PredecessorCount() const {
    return (predecessor_ == nullptr) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT((index == 0) && (predecessor_ != nullptr));
    return predecessor_;
  }

#define FIELD_LIST(F) F(double, edge_weight_)
  // NOTE(review): the head of a DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(
  // TargetEntryInstr, ...) macro invocation appears truncated in this
  // rendering -- confirm against the upstream il.h.
                                 BlockEntryInstr,
                                 FIELD_LIST)
#undef FIELD_LIST

 private:
  friend class BlockEntryInstr;  // Access to predecessor_ when inlining.

  virtual void ClearPredecessors() { predecessor_ = nullptr; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    ASSERT(predecessor_ == nullptr);
    predecessor_ = predecessor;
  }

  // Not serialized, set in DiscoverBlocks.
  BlockEntryInstr* predecessor_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(TargetEntryInstr);
};
2164
// Represents an entrypoint to a function which callers can invoke (i.e. not
// used for OSR entries).
//
// The flow graph builder might decide to create multiple entrypoints
// (e.g. checked/unchecked entrypoints) and will attach those to the
// [GraphEntryInstr].
//
// Every entrypoint has its own initial definitions. The SSA renaming
// will insert phi's for parameter instructions if necessary.
class FunctionEntryInstr : public BlockEntryWithInitialDefs {
 public:
  FunctionEntryInstr(GraphEntryInstr* graph_entry,
                     intptr_t block_id,
                     intptr_t try_index,
                     intptr_t deopt_id)
      : BlockEntryWithInitialDefs(block_id,
                                  try_index,
                                  deopt_id,
                                  /*stack_depth=*/0),
        graph_entry_(graph_entry) {}

  DECLARE_INSTRUCTION(FunctionEntry)

  // The only predecessor of a function entry is the graph entry, if set.
  virtual intptr_t PredecessorCount() const {
    return (graph_entry_ == nullptr) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT(index == 0 && graph_entry_ != nullptr);
    return graph_entry_;
  }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  DECLARE_CUSTOM_SERIALIZATION(FunctionEntryInstr)

 private:
  virtual void ClearPredecessors() { graph_entry_ = nullptr; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    ASSERT(graph_entry_ == nullptr && predecessor->IsGraphEntry());
    graph_entry_ = predecessor->AsGraphEntry();
  }

  GraphEntryInstr* graph_entry_;

  DISALLOW_COPY_AND_ASSIGN(FunctionEntryInstr);
};
2212
// Represents entry into a function from native code.
//
// Native entries are not allowed to have regular parameters. They should use
// NativeParameter instead (which doesn't count as an initial definition).
class NativeEntryInstr : public FunctionEntryInstr {
 public:
  NativeEntryInstr(const compiler::ffi::CallbackMarshaller& marshaller,
                   GraphEntryInstr* graph_entry,
                   intptr_t block_id,
                   intptr_t try_index,
                   intptr_t deopt_id)
      : FunctionEntryInstr(graph_entry, block_id, try_index, deopt_id),
        marshaller_(marshaller) {}

  DECLARE_INSTRUCTION(NativeEntry)

#define FIELD_LIST(F) F(const compiler::ffi::CallbackMarshaller&, marshaller_)

  // NOTE(review): the head of a DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(
  // NativeEntryInstr, ...) macro invocation appears truncated in this
  // rendering -- confirm against the upstream il.h.
                                 FunctionEntryInstr,
                                 FIELD_LIST)
#undef FIELD_LIST

 private:
  // Helpers used during code generation to spill incoming native arguments.
  void SaveArguments(FlowGraphCompiler* compiler) const;
  void SaveArgument(FlowGraphCompiler* compiler,
                    const compiler::ffi::NativeLocation& loc) const;
};
2243
// Represents an OSR entrypoint to a function.
//
// The OSR entry has its own initial definitions.
class OsrEntryInstr : public BlockEntryWithInitialDefs {
 public:
  OsrEntryInstr(GraphEntryInstr* graph_entry,
                intptr_t block_id,
                intptr_t try_index,
                intptr_t deopt_id,
                intptr_t stack_depth)
      : BlockEntryWithInitialDefs(block_id, try_index, deopt_id, stack_depth),
        graph_entry_(graph_entry) {}

  DECLARE_INSTRUCTION(OsrEntry)

  // The only predecessor of an OSR entry is the graph entry, if set.
  virtual intptr_t PredecessorCount() const {
    return (graph_entry_ == nullptr) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT(index == 0 && graph_entry_ != nullptr);
    return graph_entry_;
  }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  DECLARE_CUSTOM_SERIALIZATION(OsrEntryInstr)

 private:
  virtual void ClearPredecessors() { graph_entry_ = nullptr; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    ASSERT(graph_entry_ == nullptr && predecessor->IsGraphEntry());
    graph_entry_ = predecessor->AsGraphEntry();
  }

  GraphEntryInstr* graph_entry_;

  DISALLOW_COPY_AND_ASSIGN(OsrEntryInstr);
};
2283
// Join entry for a block reachable only through indirect gotos; identified
// among its siblings by [indirect_id].
class IndirectEntryInstr : public JoinEntryInstr {
 public:
  IndirectEntryInstr(intptr_t block_id,
                     intptr_t indirect_id,
                     intptr_t try_index,
                     intptr_t deopt_id)
      : JoinEntryInstr(block_id, try_index, deopt_id),
        indirect_id_(indirect_id) {}

  DECLARE_INSTRUCTION(IndirectEntry)

  intptr_t indirect_id() const { return indirect_id_; }

#define FIELD_LIST(F) F(const intptr_t, indirect_id_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(IndirectEntryInstr,
                                          JoinEntryInstr,
                                          FIELD_LIST)
#undef FIELD_LIST
};
2306
// Entry block of a catch handler. Holds the variables into which the thrown
// exception and stack trace are materialized on entry to the handler.
class CatchBlockEntryInstr : public BlockEntryWithInitialDefs {
 public:
  CatchBlockEntryInstr(bool is_generated,
                       intptr_t block_id,
                       intptr_t try_index,
                       GraphEntryInstr* graph_entry,
                       const Array& handler_types,
                       intptr_t catch_try_index,
                       bool needs_stacktrace,
                       intptr_t deopt_id,
                       const LocalVariable* exception_var,
                       const LocalVariable* stacktrace_var,
                       const LocalVariable* raw_exception_var,
                       const LocalVariable* raw_stacktrace_var)
      : BlockEntryWithInitialDefs(block_id,
                                  try_index,
                                  deopt_id,
                                  /*stack_depth=*/0),
        graph_entry_(graph_entry),
        predecessor_(nullptr),
        catch_handler_types_(Array::ZoneHandle(handler_types.ptr())),
        catch_try_index_(catch_try_index),
        exception_var_(exception_var),
        stacktrace_var_(stacktrace_var),
        raw_exception_var_(raw_exception_var),
        raw_stacktrace_var_(raw_stacktrace_var),
        needs_stacktrace_(needs_stacktrace),
        is_generated_(is_generated) {}

  DECLARE_INSTRUCTION(CatchBlockEntry)

  virtual intptr_t PredecessorCount() const {
    return (predecessor_ == nullptr) ? 0 : 1;
  }
  virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
    ASSERT((index == 0) && (predecessor_ != nullptr));
    return predecessor_;
  }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  const LocalVariable* exception_var() const { return exception_var_; }
  const LocalVariable* stacktrace_var() const { return stacktrace_var_; }

  const LocalVariable* raw_exception_var() const { return raw_exception_var_; }
  const LocalVariable* raw_stacktrace_var() const {
    return raw_stacktrace_var_;
  }

  // Whether the handler uses the stack trace (and it must be captured).
  bool needs_stacktrace() const { return needs_stacktrace_; }

  // Whether this handler was synthesized by the compiler rather than
  // written by the user.
  bool is_generated() const { return is_generated_; }

  // Returns try index for the try block to which this catch handler
  // corresponds.
  intptr_t catch_try_index() const { return catch_try_index_; }

  const Array& catch_handler_types() const { return catch_handler_types_; }

  DECLARE_CUSTOM_SERIALIZATION(CatchBlockEntryInstr)

 private:
  friend class BlockEntryInstr;  // Access to predecessor_ when inlining.

  virtual void ClearPredecessors() { predecessor_ = nullptr; }
  virtual void AddPredecessor(BlockEntryInstr* predecessor) {
    ASSERT(predecessor_ == nullptr);
    predecessor_ = predecessor;
  }

  GraphEntryInstr* graph_entry_;
  BlockEntryInstr* predecessor_;
  const Array& catch_handler_types_;
  const intptr_t catch_try_index_;
  const LocalVariable* exception_var_;
  const LocalVariable* stacktrace_var_;
  const LocalVariable* raw_exception_var_;
  const LocalVariable* raw_stacktrace_var_;
  const bool needs_stacktrace_;
  bool is_generated_;

  DISALLOW_COPY_AND_ASSIGN(CatchBlockEntryInstr);
};
2391
// If the result of the allocation is not stored into any field, passed
// as an argument or used in a phi then it can't alias with any other
// SSA value.
class AliasIdentity : public ValueObject {
 public:
  // It is unknown if value has aliases.
  static AliasIdentity Unknown() { return AliasIdentity(kUnknown); }

  // It is known that value can have aliases.
  static AliasIdentity Aliased() { return AliasIdentity(kAliased); }

  // It is known that value has no aliases.
  static AliasIdentity NotAliased() { return AliasIdentity(kNotAliased); }

  // It is known that value has no aliases and it was selected by
  // allocation sinking pass as a candidate.
  static AliasIdentity AllocationSinkingCandidate() {
    return AliasIdentity(kAllocationSinkingCandidate);
  }

#define FOR_EACH_ALIAS_IDENTITY_VALUE(V)                                       \
  V(Unknown, 0)                                                                \
  V(NotAliased, 1)                                                             \
  V(Aliased, 2)                                                                \
  V(AllocationSinkingCandidate, 3)

  const char* ToCString() {
    switch (value_) {
#define VALUE_CASE(name, val)                                                  \
  case k##name:                                                                \
    return #name;
      // NOTE(review): a FOR_EACH_ALIAS_IDENTITY_VALUE(VALUE_CASE) expansion
      // appears to be missing here in this rendering -- confirm against the
      // upstream il.h.
#undef VALUE_CASE
      default:
        UNREACHABLE();
        return nullptr;
    }
  }

  bool IsUnknown() const { return value_ == kUnknown; }
  bool IsAliased() const { return value_ == kAliased; }
  // kNotAliased acts as a bit flag: it is also set in
  // kAllocationSinkingCandidate (see the COMPILE_ASSERTs below).
  bool IsNotAliased() const { return (value_ & kNotAliased) != 0; }
  bool IsAllocationSinkingCandidate() const {
    return value_ == kAllocationSinkingCandidate;
  }

  AliasIdentity(const AliasIdentity& other)
      : ValueObject(), value_(other.value_) {}

  AliasIdentity& operator=(const AliasIdentity& other) {
    value_ = other.value_;
    return *this;
  }

  void Write(FlowGraphSerializer* s) const;
  explicit AliasIdentity(FlowGraphDeserializer* d);

 private:
  explicit AliasIdentity(intptr_t value) : value_(value) {}

#define VALUE_DEFN(name, val) k##name = val,
  // NOTE(review): the enum definition expanding
  // FOR_EACH_ALIAS_IDENTITY_VALUE(VALUE_DEFN) appears truncated in this
  // rendering -- confirm against the upstream il.h.
#undef VALUE_DEFN

// Undef the FOR_EACH helper macro, since the enum is private.
#undef FOR_EACH_ALIAS_IDENTITY_VALUE

  COMPILE_ASSERT((kUnknown & kNotAliased) == 0);
  COMPILE_ASSERT((kAliased & kNotAliased) == 0);
  COMPILE_ASSERT((kAllocationSinkingCandidate & kNotAliased) != 0);

  intptr_t value_;
};
2465
// Abstract super-class of all instructions that define a value (Bind, Phi).
class Definition : public Instruction {
 public:
  explicit Definition(intptr_t deopt_id = DeoptId::kNone)
      : Instruction(deopt_id) {}

  explicit Definition(const InstructionSource& source,
                      intptr_t deopt_id = DeoptId::kNone)
      : Instruction(source, deopt_id) {}

  // Overridden by definitions that have call counts.
  virtual intptr_t CallCount() const { return -1; }

  // Expression-stack temp index used before SSA construction; -1 when the
  // definition has no temp.
  intptr_t temp_index() const { return temp_index_; }
  void set_temp_index(intptr_t index) { temp_index_ = index; }
  void ClearTempIndex() { temp_index_ = -1; }
  bool HasTemp() const { return temp_index_ >= 0; }

  // SSA temp index assigned during SSA renaming; -1 before renaming.
  intptr_t ssa_temp_index() const { return ssa_temp_index_; }
  void set_ssa_temp_index(intptr_t index) {
    ASSERT(index >= 0);
    ssa_temp_index_ = index;
  }
  bool HasSSATemp() const { return ssa_temp_index_ >= 0; }
  void ClearSSATempIndex() { ssa_temp_index_ = -1; }

  // Virtual-register number for the [index]-th location of this definition
  // (pair representations occupy two consecutive vregs).
  intptr_t vreg(intptr_t index) const {
    ASSERT((index >= 0) && (index < location_count()));
    if (ssa_temp_index_ == -1) return -1;
    return ssa_temp_index_ * kMaxLocationCount + index;
  }
  intptr_t location_count() const { return LocationCount(representation()); }
  bool HasPairRepresentation() const { return location_count() == 2; }

  // Compile time type of the definition, which may be requested before type
  // propagation during graph building. Lazily computed on first request.
  CompileType* Type() {
    if (type_ == nullptr) {
      auto type = new CompileType(ComputeType());
      type->set_owner(this);
      set_type(type);
    }
    return type_;
  }

  bool HasType() const { return (type_ != nullptr); }

  inline bool IsInt64Definition();

  bool IsInt32Definition() {
    return IsBinaryInt32Op() || IsBoxInt32() || IsUnboxInt32() ||
           IsIntConverter();
  }

  // Compute compile type for this definition. It is safe to use this
  // approximation even before type propagator was run (e.g. during graph
  // building).
  virtual CompileType ComputeType() const {
    // TODO(vegorov) use range information to improve type if available.
    return CompileType::FromRepresentation(representation());
  }

  // Update CompileType of the definition. Returns true if the type has changed.
  virtual bool RecomputeType() { return false; }

  // Installs [new_type] as this definition's compile type. Returns true if
  // the stored type actually changed.
  bool UpdateType(CompileType new_type) {
    if (type_ == nullptr) {
      auto type = new CompileType(new_type);
      type->set_owner(this);
      set_type(type);
      return true;
    }

    if (type_->IsNone() || !type_->IsEqualTo(&new_type)) {
      *type_ = new_type;
      return true;
    }

    return false;
  }

  bool HasUses() const {
    return (input_use_list_ != nullptr) || (env_use_list_ != nullptr);
  }
  bool HasOnlyUse(Value* use) const;
  bool HasOnlyInputUse(Value* use) const;

  // Heads of the intrusive linked lists of input and environment uses.
  Value* input_use_list() const { return input_use_list_; }
  void set_input_use_list(Value* head) { input_use_list_ = head; }

  Value* env_use_list() const { return env_use_list_; }
  void set_env_use_list(Value* head) { env_use_list_ = head; }

  ValueListIterable input_uses() const {
    return ValueListIterable(input_use_list_);
  }

  void AddInputUse(Value* value) { Value::AddToList(value, &input_use_list_); }
  void AddEnvUse(Value* value) { Value::AddToList(value, &env_use_list_); }

  // Whether an instruction may create an untagged pointer to memory within
  // a GC-movable object. If so, then there must be no GC-triggering
  // instructions between the result and its uses.
  virtual bool MayCreateUnsafeUntaggedPointer() const {
    // To ensure the default is safe, conservatively assume any untagged
    // result may be a GC-movable address.
    return representation() == kUntagged;
  }

  // Returns true if the definition can be replaced with a constant without
  // changing the behavior of the program.
  virtual bool CanReplaceWithConstant() const {
    return !MayHaveVisibleEffect() && !CanDeoptimize();
  }

  virtual bool CanEliminate(const BlockEntryInstr* block) const {
    // Basic blocks should not end in a definition, so treat this as replacing
    // the definition with a constant (that is then unused).
    return CanReplaceWithConstant();
  }

  // Replace uses of this definition with uses of other definition or value.
  // Precondition: use lists must be properly calculated.
  // Postcondition: use lists and use values are still valid.
  void ReplaceUsesWith(Definition* other);

  // Replace this definition with another instruction. Use the provided result
  // definition to replace uses of the original definition. If replacing during
  // iteration, pass the iterator so that the instruction can be replaced
  // without affecting iteration order, otherwise pass a nullptr iterator.
  void ReplaceWithResult(Instruction* replacement,
                         Definition* replacement_for_uses,
                         ForwardInstructionIterator* iterator);

  // Replace this definition and all uses with another definition. If
  // replacing during iteration, pass the iterator so that the instruction
  // can be replaced without affecting iteration order, otherwise pass a
  // nullptr iterator.
  void ReplaceWith(Definition* other, ForwardInstructionIterator* iterator);

  // A value in the constant propagation lattice.
  // - non-constant sentinel
  // - a constant (any non-sentinel value)
  // - unknown sentinel
  Object& constant_value();

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  Range* range() const { return range_; }
  void set_range(const Range&);

  // Definitions can be canonicalized only into definitions to ensure
  // this check statically we override base Canonicalize with a Canonicalize
  // returning Definition (return type is covariant).
  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  static constexpr intptr_t kReplacementMarker = -2;

  // When marked as replaced, temp_index_ is reused to store the replacement
  // pointer (see SetReplacement below).
  Definition* Replacement() {
    if (ssa_temp_index_ == kReplacementMarker) {
      return reinterpret_cast<Definition*>(temp_index_);
    }
    return this;
  }

  void SetReplacement(Definition* other) {
    ASSERT(ssa_temp_index_ >= 0);
    ASSERT(WasEliminated());
    ssa_temp_index_ = kReplacementMarker;
    temp_index_ = reinterpret_cast<intptr_t>(other);
  }

  virtual AliasIdentity Identity() const { return AliasIdentity::Unknown(); }

  virtual void SetIdentity(AliasIdentity identity) { UNREACHABLE(); }

  // Find the original definition of [this] by following through any
  // redefinition and check instructions.
  Definition* OriginalDefinition();

  // If this definition is a redefinition (in a broad sense, this includes
  // CheckArrayBound and CheckNull instructions) return [Value] corresponding
  // to the input which is being redefined.
  // Otherwise return [nullptr].
  virtual Value* RedefinedValue() const;

  // Find the original definition of [this].
  //
  // This is an extension of [OriginalDefinition] which also follows through any
  // boxing/unboxing and constraint instructions.
  Definition* OriginalDefinitionIgnoreBoxingAndConstraints();

  // Helper method to determine if definition denotes an array length.
  static bool IsArrayLength(Definition* def);

  virtual Definition* AsDefinition() { return this; }
  virtual const Definition* AsDefinition() const { return this; }

 protected:
  friend class RangeAnalysis;
  friend class Value;

  Range* range_ = nullptr;

  void set_type(CompileType* type) {
    ASSERT(type->owner() == this);
    type_ = type;
  }

#if defined(INCLUDE_IL_PRINTER)
  const char* TypeAsCString() const {
    return HasType() ? type_->ToCString() : "";
  }
#endif

 private:
  intptr_t temp_index_ = -1;
  intptr_t ssa_temp_index_ = -1;
  Value* input_use_list_ = nullptr;
  Value* env_use_list_ = nullptr;

  // Lazily allocated by constant_value().
  Object* constant_value_ = nullptr;
  CompileType* type_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(Definition);
};
2697
// Change a value's definition after use lists have been computed.
inline void Value::BindTo(Definition* def) {
  // Unlink from the old definition's use list before rebinding; order matters.
  RemoveFromUseList();
  set_definition(def);
  def->AddInputUse(this);
}
2704
// Same as BindTo, but registers this value as an environment use instead of
// an input use.
inline void Value::BindToEnvironment(Definition* def) {
  // Unlink from the old definition's use list before rebinding; order matters.
  RemoveFromUseList();
  set_definition(def);
  def->AddEnvUse(this);
}
2710
// Base class for definitions with no side effects: they allow common
// subexpression elimination.
class PureDefinition : public Definition {
 public:
  explicit PureDefinition(intptr_t deopt_id) : Definition(deopt_id) {}
  explicit PureDefinition(const InstructionSource& source, intptr_t deopt_id)
      : Definition(source, deopt_id) {}

  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  DECLARE_EMPTY_SERIALIZATION(PureDefinition, Definition)
};
2722
2723template <intptr_t N,
2724 typename ThrowsTrait,
2725 template <typename Impure, typename Pure> class CSETrait = NoCSE>
2726class TemplateDefinition : public CSETrait<Definition, PureDefinition>::Base {
2727 public:
2728 using BaseClass = typename CSETrait<Definition, PureDefinition>::Base;
2729
2730 explicit TemplateDefinition(intptr_t deopt_id = DeoptId::kNone)
2731 : BaseClass(deopt_id), inputs_() {}
2732 TemplateDefinition(const InstructionSource& source,
2733 intptr_t deopt_id = DeoptId::kNone)
2734 : BaseClass(source, deopt_id), inputs_() {}
2735
2736 virtual intptr_t InputCount() const { return N; }
2737 virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
2738
2739 virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }
2740
2741 DECLARE_EMPTY_SERIALIZATION(TemplateDefinition, BaseClass)
2742 protected:
2743 EmbeddedArray<Value*, N> inputs_;
2744
2745 private:
2746 friend class BranchInstr;
2747 friend class IfThenElseInstr;
2748
2749 virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
2750};
2751
2752class VariadicDefinition : public Definition {
2753 public:
2754 explicit VariadicDefinition(InputsArray&& inputs,
2755 intptr_t deopt_id = DeoptId::kNone)
2756 : Definition(deopt_id), inputs_(std::move(inputs)) {
2757 for (intptr_t i = 0, n = inputs_.length(); i < n; ++i) {
2758 SetInputAt(i, inputs_[i]);
2759 }
2760 }
2761 VariadicDefinition(InputsArray&& inputs,
2762 const InstructionSource& source,
2763 intptr_t deopt_id = DeoptId::kNone)
2764 : Definition(source, deopt_id), inputs_(std::move(inputs)) {
2765 for (intptr_t i = 0, n = inputs_.length(); i < n; ++i) {
2766 SetInputAt(i, inputs_[i]);
2767 }
2768 }
2769 explicit VariadicDefinition(const intptr_t num_inputs,
2770 intptr_t deopt_id = DeoptId::kNone)
2771 : Definition(deopt_id), inputs_(num_inputs) {
2772 inputs_.EnsureLength(num_inputs, nullptr);
2773 }
2774
2775 intptr_t InputCount() const { return inputs_.length(); }
2776 Value* InputAt(intptr_t i) const { return inputs_[i]; }
2777
2778 DECLARE_CUSTOM_SERIALIZATION(VariadicDefinition)
2779
2780 protected:
2781 InputsArray inputs_;
2782
2783 private:
2784 void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
2785};
2786
// SSA phi function: merges one value per predecessor of its join block.
class PhiInstr : public VariadicDefinition {
 public:
  PhiInstr(JoinEntryInstr* block, intptr_t num_inputs)
      : VariadicDefinition(num_inputs),
        block_(block),
        representation_(kTagged),
        is_alive_(false),
        is_receiver_(kUnknownReceiver) {}

  // Get the block entry for that instruction.
  virtual BlockEntryInstr* GetBlock() { return block(); }
  JoinEntryInstr* block() const { return block_; }

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  // Phi is alive if it reaches a non-environment use.
  bool is_alive() const { return is_alive_; }
  void mark_alive() { is_alive_ = true; }
  void mark_dead() { is_alive_ = false; }

  // All inputs must use the phi's own representation.
  virtual Representation RequiredInputRepresentation(intptr_t i) const {
    return representation_;
  }

  virtual Representation representation() const { return representation_; }

  virtual bool MayCreateUnsafeUntaggedPointer() const {
    // Unsafe untagged pointers should never escape the basic block in which
    // they are defined, so they should never be the input to a Phi node.
    // (This is checked in the FlowGraphChecker.)
    return false;
  }

  virtual void set_representation(Representation r) { representation_ = r; }

  // Only Int32 phis in JIT mode are unboxed optimistically.
  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return (CompilerState::Current().is_aot() ||
            (representation_ != kUnboxedInt32))
               ? kNotSpeculative
               : kGuardInputs;
  }

  // Phis are not hashed for CSE.
  virtual uword Hash() const {
    UNREACHABLE();
    return 0;
  }

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  // Set of definitions reaching this phi, computed during SSA construction.
  BitVector* reaching_defs() const { return reaching_defs_; }

  void set_reaching_defs(BitVector* reaching_defs) {
    reaching_defs_ = reaching_defs;
  }

  virtual bool MayThrow() const { return false; }

  // A phi is redundant if all input operands are the same.
  bool IsRedundant() const;

  // A phi is redundant if all input operands are redefinitions of the same
  // value. Returns the replacement for this phi if it is redundant.
  // The replacement is selected among values redefined by inputs.
  Definition* GetReplacementForRedundantPhi() const;

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  // Tri-state tracking of whether this phi carries the receiver value.
  enum ReceiverType { kUnknownReceiver = -1, kNotReceiver = 0, kReceiver = 1 };

  ReceiverType is_receiver() const {
    return static_cast<ReceiverType>(is_receiver_);
  }

  void set_is_receiver(ReceiverType is_receiver) { is_receiver_ = is_receiver; }

 private:
  // Direct access to inputs_ in order to resize it due to unreachable
  // predecessors.
  friend class ConstantPropagator;

  JoinEntryInstr* block_;
  Representation representation_;
  BitVector* reaching_defs_ = nullptr;
  bool is_alive_;
  int8_t is_receiver_;  // Stores a ReceiverType value.

  DISALLOW_COPY_AND_ASSIGN(PhiInstr);
};
2886
// This instruction represents an incoming parameter for a function entry,
// or incoming value for OSR entry or incoming value for a catch entry.
//
// [env_index] is a position of the parameter in the flow graph environment.
//
// [param_index] is a position of the function parameter, or
// kNotFunctionParameter if this instruction doesn't correspond to a real
// function parameter.
//
// [loc] specifies where the incoming value is located on entry to
// the block. Note: for compound values (e.g. unboxed integers on 32-bit
// values) this will be a Pair location.
class ParameterInstr : public TemplateDefinition<0, NoThrow> {
 public:
  // [param_index] when ParameterInstr doesn't correspond to
  // a function parameter.
  static constexpr intptr_t kNotFunctionParameter = -1;

  ParameterInstr(BlockEntryInstr* block,
                 intptr_t env_index,
                 intptr_t param_index,
                 const Location& loc,
                 Representation representation)
      : env_index_(env_index),
        param_index_(param_index),
        representation_(representation),
        block_(block),
        location_(loc) {}

  DECLARE_INSTRUCTION(Parameter)
  DECLARE_ATTRIBUTES_NAMED(("index", "location"), (index(), location()))

  // Index of the parameter in the flow graph environment.
  intptr_t env_index() const { return env_index_; }
  intptr_t index() const { return env_index(); }

  // Index of the real function parameter
  // (between 0 and function.NumParameters()), or -1.
  intptr_t param_index() const { return param_index_; }

  const Location& location() const { return location_; }

  // Get the block entry for that instruction.
  virtual BlockEntryInstr* GetBlock() { return block_; }
  void set_block(BlockEntryInstr* block) { block_ = block; }

  virtual Representation representation() const { return representation_; }

  // Parameters have no inputs, so input representations are never queried.
  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    UNREACHABLE();
    return kTagged;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  // Parameters are not hashed for CSE.
  virtual uword Hash() const {
    UNREACHABLE();
    return 0;
  }

  virtual CompileType ComputeType() const;

#define FIELD_LIST(F)                                                          \
  F(const intptr_t, env_index_)                                                \
  F(const intptr_t, param_index_)                                              \
  F(const Representation, representation_)

  // NOTE(review): the head of a DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(
  // ParameterInstr, ...) macro invocation appears truncated in this
  // rendering -- confirm against the upstream il.h.
                                 TemplateDefinition,
                                 FIELD_LIST)
#undef FIELD_LIST

 private:
  // Not serialized: reconstructed when the flow graph is rebuilt.
  BlockEntryInstr* block_ = nullptr;
  Location location_;

  DISALLOW_COPY_AND_ASSIGN(ParameterInstr);
};
2970
// Native parameters are not treated as initial definitions because they cannot
// be inlined and are only usable in optimized code. The location must be a
// stack location relative to the position of the stack (SPREG) after
// register-based arguments have been saved on entry to a native call. See
// NativeEntryInstr::EmitNativeCode for more details.
//
// TODO(33549): Unify with ParameterInstr.
class NativeParameterInstr : public TemplateDefinition<0, NoThrow> {
 public:
  NativeParameterInstr(const compiler::ffi::CallbackMarshaller& marshaller,
                       intptr_t def_index)
      : marshaller_(marshaller), def_index_(def_index) {}

  DECLARE_INSTRUCTION(NativeParameter)

  // Representation is determined by the marshaller for this argument slot.
  virtual Representation representation() const {
    return marshaller_.RepInFfiCall(def_index_);
  }

  virtual bool MayCreateUnsafeUntaggedPointer() const {
    // Untagged values flowing into Dart code via callbacks are external
    // pointers that are then converted into Dart objects in the IL.
    return false;
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

#define FIELD_LIST(F)                                                          \
  F(const compiler::ffi::CallbackMarshaller&, marshaller_)                     \
  F(const intptr_t, def_index_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeParameterInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(NativeParameterInstr);
};
3014
3015// Stores a tagged pointer to a slot accessible from a fixed register. It has
3016// the form:
3017//
3018// base_reg[index + #constant] = value
3019//
3020// Input 0: A tagged Smi [index]
3021// Input 1: A tagged pointer [value]
3022// offset: A signed constant offset which fits into 8 bits
3023//
// Currently this instruction pins the register to be FP.
3025//
3026// This low-level instruction is non-inlinable since it makes assumptions about
3027// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
class StoreIndexedUnsafeInstr : public TemplateInstruction<2, NoThrow> {
 public:
  // Stores [value] at FP[index + offset] (offset in bytes, index a tagged
  // Smi).
  StoreIndexedUnsafeInstr(Value* index, Value* value, intptr_t offset)
      : offset_(offset) {
    SetInputAt(kIndexPos, index);
    SetInputAt(kValuePos, value);
  }

  // Input slot indices.
  enum { kIndexPos = 0, kValuePos = 1 };

  DECLARE_INSTRUCTION(StoreIndexedUnsafe)

  // Both the index (a Smi) and the stored value are tagged.
  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == kIndexPos || index == kValuePos);
    return kTagged;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return false; }
  // The store is observable and must not be removed even if unused.
  virtual bool MayHaveVisibleEffect() const { return true; }

  // Two such stores are congruent iff they use the same constant offset.
  virtual bool AttributesEqual(const Instruction& other) const {
    return other.AsStoreIndexedUnsafe()->offset() == offset();
  }

  Value* index() const { return inputs_[kIndexPos]; }
  Value* value() const { return inputs_[kValuePos]; }
  Register base_reg() const { return FPREG; }  // Base register is pinned to FP.
  intptr_t offset() const { return offset_; }

  // NOTE(review): the doc extraction dropped a line here (likely a
  // PRINT_*_SUPPORT-style macro); verify against upstream il.h.

#define FIELD_LIST(F) F(const intptr_t, offset_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreIndexedUnsafeInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(StoreIndexedUnsafeInstr);
};
3069
// Loads a value from a slot accessible from a fixed register. It has
3071// the form:
3072//
3073// base_reg[index + #constant]
3074//
3075// Input 0: A tagged Smi [index]
3076// offset: A signed constant offset which fits into 8 bits
3077//
// Currently this instruction pins the register to be FP.
//
// This low-level instruction is non-inlinable since it makes assumptions about
3081// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
class LoadIndexedUnsafeInstr : public TemplateDefinition<1, NoThrow> {
 public:
  // Loads FP[index + offset] (offset in bytes, index a tagged Smi). The
  // result uses [representation] and is typed as [result_type].
  LoadIndexedUnsafeInstr(Value* index,
                         intptr_t offset,
                         CompileType result_type,
                         Representation representation = kTagged)
      : offset_(offset), representation_(representation) {
    UpdateType(result_type);
    SetInputAt(0, index);
  }

  DECLARE_INSTRUCTION(LoadIndexedUnsafe)

  // The single input is the tagged Smi index.
  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == 0);
    return kTagged;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return false; }

  // Two such loads are congruent iff they use the same constant offset.
  virtual bool AttributesEqual(const Instruction& other) const {
    return other.AsLoadIndexedUnsafe()->offset() == offset();
  }

  virtual Representation representation() const { return representation_; }

  Value* index() const { return InputAt(0); }
  Register base_reg() const { return FPREG; }  // Base register is pinned to FP.
  intptr_t offset() const { return offset_; }

  // NOTE(review): the doc extraction dropped a line here (likely a
  // PRINT_*_SUPPORT-style macro); verify against upstream il.h.

#define FIELD_LIST(F)                                                          \
  F(const intptr_t, offset_)                                                   \
  F(const Representation, representation_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadIndexedUnsafeInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(LoadIndexedUnsafeInstr);
};
3126
3127class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
3128 public:
3129 MemoryCopyInstr(Value* src,
3130 classid_t src_cid,
3131 Value* dest,
3132 classid_t dest_cid,
3133 Value* src_start,
3134 Value* dest_start,
3135 Value* length,
3136 bool unboxed_inputs,
3137 bool can_overlap = true)
3138 : src_cid_(src_cid),
3139 dest_cid_(dest_cid),
3140 element_size_(Instance::ElementSizeFor(src_cid)),
3141 unboxed_inputs_(unboxed_inputs),
3142 can_overlap_(can_overlap) {
3143 ASSERT(IsArrayTypeSupported(src_cid));
3144 ASSERT(IsArrayTypeSupported(dest_cid));
3145 ASSERT_EQUAL(Instance::ElementSizeFor(src_cid),
3146 Instance::ElementSizeFor(dest_cid));
3147 SetInputAt(kSrcPos, src);
3148 SetInputAt(kDestPos, dest);
3149 SetInputAt(kSrcStartPos, src_start);
3150 SetInputAt(kDestStartPos, dest_start);
3151 SetInputAt(kLengthPos, length);
3152 }
3153
3154 enum {
3155 kSrcPos = 0,
3156 kDestPos = 1,
3157 kSrcStartPos = 2,
3158 kDestStartPos = 3,
3159 kLengthPos = 4
3160 };
3161
3162 DECLARE_INSTRUCTION(MemoryCopy)
3163
3164 virtual Representation RequiredInputRepresentation(intptr_t index) const {
3165 if (index == kSrcPos || index == kDestPos) {
3166 // Can be either tagged or untagged.
3167 return kNoRepresentation;
3168 }
3169 ASSERT(index <= kLengthPos);
3170 return unboxed_inputs() ? kUnboxedIntPtr : kTagged;
3171 }
3172
3173 virtual bool ComputeCanDeoptimize() const { return false; }
3174 virtual bool HasUnknownSideEffects() const { return true; }
3175
3176 virtual bool AttributesEqual(const Instruction& other) const {
3177 if (auto* const copy = other.AsMemoryCopy()) {
3178 if (element_size_ != copy->element_size_) return false;
3179 if (unboxed_inputs_ != copy->unboxed_inputs_) return false;
3180 if (can_overlap_ != copy->can_overlap_) return false;
3181 if (src_cid_ != copy->src_cid_) return false;
3182 if (dest_cid_ != copy->dest_cid_) return false;
3183 return true;
3184 }
3185 return false;
3186 }
3187
3188 Value* src() const { return inputs_[kSrcPos]; }
3189 Value* dest() const { return inputs_[kDestPos]; }
3190 Value* src_start() const { return inputs_[kSrcStartPos]; }
3191 Value* dest_start() const { return inputs_[kDestStartPos]; }
3192 Value* length() const { return inputs_[kLengthPos]; }
3193
3194 classid_t src_cid() const { return src_cid_; }
3195 classid_t dest_cid() const { return dest_cid_; }
3196 intptr_t element_size() const { return element_size_; }
3197 bool unboxed_inputs() const { return unboxed_inputs_; }
3198 bool can_overlap() const { return can_overlap_; }
3199
3200 // Optimizes MemoryCopyInstr with constant parameters to use larger moves.
3201 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
3202
3204
3206
3207#define FIELD_LIST(F) \
3208 F(const classid_t, src_cid_) \
3209 F(const classid_t, dest_cid_) \
3210 F(intptr_t, element_size_) \
3211 F(bool, unboxed_inputs_) \
3212 F(const bool, can_overlap_)
3213
3215 TemplateInstruction,
3216 FIELD_LIST)
3217#undef FIELD_LIST
3218
3219 private:
3220 // Set payload_reg to point to the index indicated by start (contained in
3221 // start_loc) of the typed data or string in array (contained in array_reg).
3222 // If array_rep is tagged, then the payload address is retrieved according
3223 // to array_cid, otherwise the register is assumed to already have the
3224 // payload address.
3225 void EmitComputeStartPointer(FlowGraphCompiler* compiler,
3226 classid_t array_cid,
3227 Register array_reg,
3228 Register payload_reg,
3229 Representation array_rep,
3230 Location start_loc);
3231
3232 // Generates an unrolled loop for copying a known amount of data from
3233 // src to dest.
3234 void EmitUnrolledCopy(FlowGraphCompiler* compiler,
3235 Register dest_reg,
3236 Register src_reg,
3237 intptr_t num_elements,
3238 bool reversed);
3239
3240 // Called prior to EmitLoopCopy() to adjust the length register as needed
3241 // for the code emitted by EmitLoopCopy. May jump to done if the emitted
3242 // loop(s) should be skipped.
3243 void PrepareLengthRegForLoop(FlowGraphCompiler* compiler,
3244 Register length_reg,
3245 compiler::Label* done);
3246
3247 // Generates a loop for copying the data from src to dest, for cases where
3248 // either the length is not known at compile time or too large to unroll.
3249 //
3250 // copy_forwards is only provided (not nullptr) when a backwards loop is
3251 // requested. May jump to copy_forwards if backwards iteration is slower than
3252 // forwards iteration and the emitted code verifies no actual overlap exists.
3253 //
3254 // May jump to done if no copying is needed.
3255 //
3256 // Assumes that PrepareLengthRegForLoop() has been called beforehand.
3257 void EmitLoopCopy(FlowGraphCompiler* compiler,
3258 Register dest_reg,
3259 Register src_reg,
3260 Register length_reg,
3261 compiler::Label* done,
3262 compiler::Label* copy_forwards = nullptr);
3263
3264 static bool IsArrayTypeSupported(classid_t array_cid) {
3265 // We don't handle clamping negative values in this instruction, instead
3266 // those are handled via a native call.
3267 if (IsClampedTypedDataBaseClassId(array_cid)) return false;
3268 // We don't support the following cids for the given reasons:
3269 // * kStringCid: doesn't give element size information or information
3270 // about how the payload address is calculated.
3271 // * kPointerCid: doesn't give element size or signedness information.
3272 if (array_cid == kPointerCid || array_cid == kStringCid) return false;
3273 return IsTypedDataBaseClassId(array_cid) || IsStringClassId(array_cid);
3274 }
3275
3276 DISALLOW_COPY_AND_ASSIGN(MemoryCopyInstr);
3277};
3278
3279// Unwinds the current frame and tail calls a target.
3280//
// The return address saved by the original caller of this frame will be in its
// usual location (stack or LR). The arguments descriptor supplied by the
// original caller will be put into ARGS_DESC_REG.
//
// This low-level instruction is non-inlinable since it makes assumptions about
3286// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
3287class TailCallInstr : public TemplateInstruction<1, Throws, Pure> {
3288 public:
3289 TailCallInstr(const Code& code, Value* arg_desc) : code_(code) {
3290 SetInputAt(0, arg_desc);
3291 }
3292
3293 DECLARE_INSTRUCTION(TailCall)
3294
3295 const Code& code() const { return code_; }
3296
3297 // Two tailcalls can be canonicalized into one instruction if both have the
3298 // same destination.
3299 virtual bool AttributesEqual(const Instruction& other) const {
3300 return &other.AsTailCall()->code() == &code();
3301 }
3302
3303 // Since no code after this instruction will be executed, there will be no
3304 // side-effects for the following code.
3305 virtual bool HasUnknownSideEffects() const { return false; }
3306 virtual bool ComputeCanDeoptimize() const { return false; }
3307
3309
3310#define FIELD_LIST(F) F(const Code&, code_)
3311
3313 TemplateInstruction,
3314 FIELD_LIST)
3315#undef FIELD_LIST
3316
3317 private:
3318 DISALLOW_COPY_AND_ASSIGN(TailCallInstr);
3319};
3320
3321// Move the given argument value into the place where callee expects it.
3322//
3323// [location] is expected to either be an SP relative stack slot or a
3324// machine register.
3325//
3326// On 32-bit targets [location] might also be a pair of stack slots or a
3327// pair of machine registers.
3328class MoveArgumentInstr : public TemplateDefinition<1, NoThrow> {
3329 public:
3330 explicit MoveArgumentInstr(Value* value,
3331 Representation representation,
3332 Location location)
3333 : representation_(representation),
3334 is_register_move_(IsRegisterMove(location)),
3335 location_(location) {
3336 ASSERT(IsSupportedLocation(location));
3337 SetInputAt(0, value);
3338 }
3339
3340 DECLARE_INSTRUCTION(MoveArgument)
3341
3342 bool is_register_move() const { return is_register_move_; }
3343
3344 // For stack locations returns the SP relative index corresponding
3345 // to the first slot allocated for the argument.
3346 intptr_t sp_relative_index() const {
3347 ASSERT(!is_register_move());
3348 Location loc = location();
3349 if (loc.IsPairLocation()) {
3350 loc = loc.AsPairLocation()->At(0);
3351 }
3352 return loc.stack_index();
3353 }
3354
3355 Location location() const { return location_; }
3356 Location* location_slot() { return &location_; }
3357
3358 Value* value() const { return InputAt(0); }
3359
3360 virtual bool ComputeCanDeoptimize() const { return false; }
3361
3362 virtual bool HasUnknownSideEffects() const { return false; }
3363
3364 virtual TokenPosition token_pos() const {
3365 return TokenPosition::kMoveArgument;
3366 }
3367
3368 virtual Representation representation() const { return representation_; }
3369
3370 virtual Representation RequiredInputRepresentation(intptr_t index) const {
3371 ASSERT(index == 0);
3372 return representation();
3373 }
3374
3376
3377#define FIELD_LIST(F) \
3378 F(const Representation, representation_) \
3379 F(const bool, is_register_move_)
3380
3382 TemplateDefinition,
3383 FIELD_LIST)
3384#undef FIELD_LIST
3385
3387
3388 private:
3389 static bool IsSupportedLocation(Location loc, bool can_be_fpu_value = true) {
3390#if defined(TARGET_ARCH_IS_32_BIT)
3391 if (loc.IsPairLocation()) {
3392 auto pair_loc = loc.AsPairLocation();
3393 return IsSupportedLocation(pair_loc->At(0), /*can_be_fpu_value=*/false) &&
3394 IsSupportedLocation(pair_loc->At(1), /*can_be_fpu_value=*/false);
3395 }
3396#endif
3397 if (loc.IsStackSlot() || (can_be_fpu_value && loc.IsDoubleStackSlot())) {
3398 return loc.base_reg() == SPREG;
3399 } else if (loc.IsRegister() || (can_be_fpu_value && loc.IsFpuRegister())) {
3400 return true;
3401 }
3402 return false;
3403 }
3404
3405 static bool IsRegisterMove(Location loc) {
3406 return loc.IsMachineRegister() ||
3407 (loc.IsPairLocation() &&
3408 loc.AsPairLocation()->At(0).IsMachineRegister());
3409 }
3410
3411 Location location_;
3412
3413 DISALLOW_COPY_AND_ASSIGN(MoveArgumentInstr);
3414};
3415
3416inline Value* Instruction::ArgumentValueAt(intptr_t index) const {
3417 MoveArgumentsArray* move_arguments = GetMoveArguments();
3418 return move_arguments != nullptr ? (*move_arguments)[index]->value()
3419 : InputAt(index);
3420}
3421
3422inline Definition* Instruction::ArgumentAt(intptr_t index) const {
3423 return ArgumentValueAt(index)->definition();
3424}
3425
// Common base class for instructions that return from the current frame.
class ReturnBaseInstr : public Instruction {
 public:
  explicit ReturnBaseInstr(const InstructionSource& source,
                           intptr_t deopt_id = DeoptId::kNone)
      : Instruction(source, deopt_id) {}

  ReturnBaseInstr() : Instruction(DeoptId::kNone) {}

  // Returning cannot deoptimize, has no side effects, and cannot throw.
  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool MayThrow() const { return false; }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }

  // NOTE(review): the doc extraction dropped a line here; verify against
  // upstream il.h.

  DECLARE_EMPTY_SERIALIZATION(ReturnBaseInstr, Instruction)

 private:
  DISALLOW_COPY_AND_ASSIGN(ReturnBaseInstr);
};
3451
3452class DartReturnInstr : public ReturnBaseInstr {
3453 public:
3454 DartReturnInstr(const InstructionSource& source,
3455 Value* value,
3456 intptr_t deopt_id,
3457 Representation representation = kTagged)
3458 : ReturnBaseInstr(source, deopt_id),
3459 token_pos_(source.token_pos),
3460 representation_(representation) {
3461 SetInputAt(0, value);
3462 }
3463
3464 DECLARE_INSTRUCTION(DartReturn)
3465
3466 virtual TokenPosition token_pos() const { return token_pos_; }
3467 Value* value() const { return inputs_[0]; }
3468
3469 virtual bool CanBecomeDeoptimizationTarget() const {
3470 // Return instruction might turn into a Goto instruction after inlining.
3471 // Every Goto must have an environment.
3472 return true;
3473 }
3474
3475 virtual bool AttributesEqual(const Instruction& other) const {
3476 auto const other_return = other.AsDartReturn();
3477 return token_pos() == other_return->token_pos();
3478 }
3479
3480 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
3481
3482 virtual Representation representation() const { return representation_; }
3483
3484 virtual Representation RequiredInputRepresentation(intptr_t index) const {
3485 ASSERT(index == 0);
3486 return representation_;
3487 }
3488
3489 virtual intptr_t InputCount() const { return 1; }
3490
3491 virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
3492
3493#define FIELD_LIST(F) \
3494 F(const TokenPosition, token_pos_) \
3495 F(const Representation, representation_)
3496
3498 ReturnBaseInstr,
3499 FIELD_LIST)
3500#undef FIELD_LIST
3501
3502 protected:
3503 EmbeddedArray<Value*, 1> inputs_;
3504
3505 private:
3506 const Code& GetReturnStub(FlowGraphCompiler* compiler) const;
3507
3508 virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
3509
3510 DISALLOW_COPY_AND_ASSIGN(DartReturnInstr);
3511};
3512
3513// Represents a return from a Dart function into native code.
3514class NativeReturnInstr : public ReturnBaseInstr {
3515 public:
3516 NativeReturnInstr(Value* value,
3517 const compiler::ffi::CallbackMarshaller& marshaller)
3518 : ReturnBaseInstr(), marshaller_(marshaller) {
3519 SetInputAt(0, value);
3520 inputs_[1] = nullptr;
3521 }
3522
3523 NativeReturnInstr(Value* typed_data_base,
3524 Value* offset,
3525 const compiler::ffi::CallbackMarshaller& marshaller)
3526 : ReturnBaseInstr(), marshaller_(marshaller) {
3527 SetInputAt(0, typed_data_base);
3528 SetInputAt(1, offset);
3529 }
3530
3531 DECLARE_INSTRUCTION(NativeReturn)
3532
3534
3535 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
3536 if (idx == 0) {
3537 return marshaller_.RepInFfiCall(compiler::ffi::kResultIndex);
3538 } else {
3539 ASSERT_EQUAL(idx, 1);
3540 ASSERT_EQUAL(InputCount(), 2);
3541 // Offset in bytes for compounds.
3542 return kUnboxedWord;
3543 }
3544 }
3545
3546 virtual bool CanBecomeDeoptimizationTarget() const {
3547 // Unlike DartReturnInstr, NativeReturnInstr cannot be inlined (because it's
3548 // returning into native code).
3549 return false;
3550 }
3551
3552 virtual intptr_t InputCount() const {
3553 return marshaller_.NumReturnDefinitions();
3554 }
3555
3556 virtual bool AttributesEqual(const Instruction& other) const {
3557 auto const other_return = other.AsNativeReturn();
3558 return token_pos() == other_return->token_pos();
3559 }
3560
3561 virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
3562
3563#define FIELD_LIST(F) F(const compiler::ffi::CallbackMarshaller&, marshaller_)
3564
3566 ReturnBaseInstr,
3567 FIELD_LIST)
3568#undef FIELD_LIST
3569
3570 protected:
3571 EmbeddedArray<Value*, 2> inputs_;
3572
3573 private:
3574 void EmitReturnMoves(FlowGraphCompiler* compiler);
3575
3576 virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
3577
3578 DISALLOW_COPY_AND_ASSIGN(NativeReturnInstr);
3579};
3580
3581class ThrowInstr : public TemplateInstruction<1, Throws> {
3582 public:
3583 explicit ThrowInstr(const InstructionSource& source,
3584 intptr_t deopt_id,
3585 Value* exception)
3586 : TemplateInstruction(source, deopt_id), token_pos_(source.token_pos) {
3587 SetInputAt(0, exception);
3588 }
3589
3590 DECLARE_INSTRUCTION(Throw)
3591
3592 virtual TokenPosition token_pos() const { return token_pos_; }
3593 Value* exception() const { return inputs_[0]; }
3594
3595 virtual bool ComputeCanDeoptimize() const { return false; }
3596 virtual bool ComputeCanDeoptimizeAfterCall() const {
3597 return !CompilerState::Current().is_aot();
3598 }
3599
3600 virtual bool HasUnknownSideEffects() const { return false; }
3601
3602#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
3603
3605 TemplateInstruction,
3606 FIELD_LIST)
3607#undef FIELD_LIST
3608
3609 private:
3610 DISALLOW_COPY_AND_ASSIGN(ThrowInstr);
3611};
3612
3613class ReThrowInstr : public TemplateInstruction<2, Throws> {
3614 public:
3615 // 'catch_try_index' can be kInvalidTryIndex if the
3616 // rethrow has been artificially generated by the parser.
3617 ReThrowInstr(const InstructionSource& source,
3618 intptr_t catch_try_index,
3619 intptr_t deopt_id,
3620 Value* exception,
3621 Value* stacktrace)
3622 : TemplateInstruction(source, deopt_id),
3623 token_pos_(source.token_pos),
3624 catch_try_index_(catch_try_index) {
3625 SetInputAt(0, exception);
3626 SetInputAt(1, stacktrace);
3627 }
3628
3629 DECLARE_INSTRUCTION(ReThrow)
3630
3631 virtual TokenPosition token_pos() const { return token_pos_; }
3632 intptr_t catch_try_index() const { return catch_try_index_; }
3633 Value* exception() const { return inputs_[0]; }
3634 Value* stacktrace() const { return inputs_[1]; }
3635
3636 virtual bool ComputeCanDeoptimize() const { return false; }
3637 virtual bool ComputeCanDeoptimizeAfterCall() const {
3638 return !CompilerState::Current().is_aot();
3639 }
3640
3641 virtual bool HasUnknownSideEffects() const { return false; }
3642
3643#define FIELD_LIST(F) \
3644 F(const TokenPosition, token_pos_) \
3645 F(const intptr_t, catch_try_index_)
3646
3648 TemplateInstruction,
3649 FIELD_LIST)
3650#undef FIELD_LIST
3651
3652 private:
3653 DISALLOW_COPY_AND_ASSIGN(ReThrowInstr);
3654};
3655
3656class StopInstr : public TemplateInstruction<0, NoThrow> {
3657 public:
3658 explicit StopInstr(const char* message) : message_(message) {
3659 ASSERT(message != nullptr);
3660 }
3661
3662 const char* message() const { return message_; }
3663
3664 DECLARE_INSTRUCTION(Stop);
3665
3666 virtual bool ComputeCanDeoptimize() const { return false; }
3667
3668 virtual bool HasUnknownSideEffects() const { return false; }
3669
3670#define FIELD_LIST(F) F(const char*, message_)
3671
3673 TemplateInstruction,
3674 FIELD_LIST)
3675#undef FIELD_LIST
3676
3677 private:
3678 DISALLOW_COPY_AND_ASSIGN(StopInstr);
3679};
3680
3681class GotoInstr : public TemplateInstruction<0, NoThrow> {
3682 public:
3683 explicit GotoInstr(JoinEntryInstr* entry, intptr_t deopt_id)
3684 : TemplateInstruction(deopt_id),
3685 edge_weight_(0.0),
3686 parallel_move_(nullptr),
3687 successor_(entry) {}
3688
3690
3691 BlockEntryInstr* block() const { return block_; }
3692 void set_block(BlockEntryInstr* block) { block_ = block; }
3693
3694 JoinEntryInstr* successor() const { return successor_; }
3695 void set_successor(JoinEntryInstr* successor) { successor_ = successor; }
3696 virtual intptr_t SuccessorCount() const;
3697 virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;
3698
3699 double edge_weight() const { return edge_weight_; }
3700 void set_edge_weight(double weight) { edge_weight_ = weight; }
3701 void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; }
3702
3703 virtual bool CanBecomeDeoptimizationTarget() const {
3704 // Goto instruction can be used as a deoptimization target when LICM
3705 // hoists instructions out of the loop.
3706 return true;
3707 }
3708
3709 // May require a deoptimization target for int32 Phi input conversions.
3710 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
3711
3712 virtual bool ComputeCanDeoptimize() const { return false; }
3713
3714 virtual bool HasUnknownSideEffects() const { return false; }
3715
3716 ParallelMoveInstr* parallel_move() const { return parallel_move_; }
3717
3718 bool HasParallelMove() const { return parallel_move_ != nullptr; }
3719
3720 bool HasNonRedundantParallelMove() const {
3721 return HasParallelMove() && !parallel_move()->IsRedundant();
3722 }
3723
3724 ParallelMoveInstr* GetParallelMove() {
3725 if (parallel_move_ == nullptr) {
3726 parallel_move_ = new ParallelMoveInstr();
3727 }
3728 return parallel_move_;
3729 }
3730
3731 virtual TokenPosition token_pos() const {
3732 return TokenPosition::kControlFlow;
3733 }
3734
3736
3737#define FIELD_LIST(F) \
3738 F(double, edge_weight_) \
3739 /* Parallel move that will be used by linear scan register allocator to */ \
3740 /* connect live ranges at the end of the block and resolve phis. */ \
3741 F(ParallelMoveInstr*, parallel_move_)
3742
3744 TemplateInstruction,
3745 FIELD_LIST)
3746#undef FIELD_LIST
3748
3749 private:
3750 BlockEntryInstr* block_ = nullptr;
3751 JoinEntryInstr* successor_ = nullptr;
3752
3753 DISALLOW_COPY_AND_ASSIGN(GotoInstr);
3754};
3755
3756// IndirectGotoInstr represents a dynamically computed jump. Only
3757// IndirectEntryInstr targets are valid targets of an indirect goto. The
3758// concrete target index to jump to is given as a parameter to the indirect
3759// goto.
3760//
3761// In order to preserve split-edge form, an indirect goto does not itself point
3762// to its targets. Instead, for each possible target, the successors_ field
3763// will contain an ordinary goto instruction that jumps to the target.
3764// TODO(zerny): Implement direct support instead of embedding gotos.
3765//
3766// The input to the [IndirectGotoInstr] is the target index to jump to.
3767// All targets of the [IndirectGotoInstr] are added via [AddSuccessor] and get
3768// increasing indices.
3769//
3770// The FlowGraphCompiler will - as a post-processing step - invoke
3771// [ComputeOffsetTable] of all [IndirectGotoInstr]s. In there we initialize a
3772// TypedDataInt32Array containing offsets of all [IndirectEntryInstr]s (the
3773// offsets are relative to start of the instruction payload).
3774//
3775// => See `FlowGraphCompiler::CompileGraph()`
3776// => See `IndirectGotoInstr::ComputeOffsetTable`
class IndirectGotoInstr : public TemplateInstruction<1, NoThrow> {
 public:
  // [target_count] is the number of possible targets; the offset table is
  // allocated eagerly in old space. [target_index] is the input computing
  // which target to jump to.
  IndirectGotoInstr(intptr_t target_count, Value* target_index)
      : offsets_(TypedData::ZoneHandle(TypedData::New(kTypedDataInt32ArrayCid,
                                                      target_count,
                                                      Heap::kOld))) {
    SetInputAt(0, target_index);
  }

  DECLARE_INSTRUCTION(IndirectGoto)

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kTagged;
  }

  // Registers [successor] as a possible target. Each successor must be a
  // target entry that immediately Goto's an IndirectEntry (split-edge form).
  void AddSuccessor(TargetEntryInstr* successor) {
    ASSERT(successor->next()->IsGoto());
    ASSERT(successor->next()->AsGoto()->successor()->IsIndirectEntry());
    successors_.Add(successor);
  }

  virtual intptr_t SuccessorCount() const { return successors_.length(); }
  virtual TargetEntryInstr* SuccessorAt(intptr_t index) const {
    ASSERT(index < SuccessorCount());
    return successors_[index];
  }

  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool CanBecomeDeoptimizationTarget() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  // The target-index input.
  Value* offset() const { return inputs_[0]; }
  // Fills offsets_ with the payload-relative offsets of all IndirectEntry
  // targets; invoked by the FlowGraphCompiler as a post-processing step.
  void ComputeOffsetTable(FlowGraphCompiler* compiler);

  // NOTE(review): the doc extraction dropped a line here; verify against
  // upstream il.h.

  DECLARE_CUSTOM_SERIALIZATION(IndirectGotoInstr)

 private:
  GrowableArray<TargetEntryInstr*> successors_;
  const TypedData& offsets_;

  DISALLOW_COPY_AND_ASSIGN(IndirectGotoInstr);
};
3824
3825class ComparisonInstr : public Definition {
3826 public:
3827 Value* left() const { return InputAt(0); }
3828 Value* right() const { return InputAt(1); }
3829
3830 virtual TokenPosition token_pos() const { return token_pos_; }
3831 Token::Kind kind() const { return kind_; }
3832 DECLARE_ATTRIBUTE(kind())
3833
3834 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right) = 0;
3835
3836 // Emits instructions to do the comparison and branch to the true or false
3837 // label depending on the result. This implementation will call
3838 // EmitComparisonCode and then generate the branch instructions afterwards.
3839 virtual void EmitBranchCode(FlowGraphCompiler* compiler, BranchInstr* branch);
3840
3841 // Used by EmitBranchCode and EmitNativeCode depending on whether the boolean
3842 // is to be turned into branches or instantiated. May return a valid
3843 // condition in which case the caller is expected to emit a branch to the
3844 // true label based on that condition (or a branch to the false label on the
3845 // opposite condition). May also branch directly to the labels.
3846 virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler,
3847 BranchLabels labels) = 0;
3848
3849 // Emits code that generates 'true' or 'false', depending on the comparison.
3850 // This implementation will call EmitComparisonCode. If EmitComparisonCode
3851 // does not use the labels (merely returning a condition) then EmitNativeCode
3852 // may be able to use the condition to avoid a branch.
3853 virtual void EmitNativeCode(FlowGraphCompiler* compiler);
3854
3855 void SetDeoptId(const Instruction& instr) { CopyDeoptIdFrom(instr); }
3856
3857 // Operation class id is computed from collected ICData.
3858 void set_operation_cid(intptr_t value) { operation_cid_ = value; }
3859 intptr_t operation_cid() const { return operation_cid_; }
3860
3861 virtual void NegateComparison() { kind_ = Token::NegateComparison(kind_); }
3862
3863 virtual bool CanBecomeDeoptimizationTarget() const { return true; }
3864 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
3865
3866 virtual bool AttributesEqual(const Instruction& other) const {
3867 auto const other_comparison = other.AsComparison();
3868 return kind() == other_comparison->kind() &&
3869 (operation_cid() == other_comparison->operation_cid());
3870 }
3871
3872 // Detects comparison with a constant and returns constant and the other
3873 // operand.
3874 bool IsComparisonWithConstant(Value** other_operand,
3875 ConstantInstr** constant_operand) {
3876 if (right()->BindsToConstant(constant_operand)) {
3877 *other_operand = left();
3878 return true;
3879 } else if (left()->BindsToConstant(constant_operand)) {
3880 *other_operand = right();
3881 return true;
3882 } else {
3883 return false;
3884 }
3885 }
3886
3888
3889#define FIELD_LIST(F) \
3890 F(const TokenPosition, token_pos_) \
3891 F(Token::Kind, kind_) \
3892 /* Set by optimizer. */ \
3893 F(intptr_t, operation_cid_)
3894
3896 Definition,
3897 FIELD_LIST)
3898#undef FIELD_LIST
3899
3900 protected:
3901 ComparisonInstr(const InstructionSource& source,
3902 Token::Kind kind,
3903 intptr_t deopt_id = DeoptId::kNone)
3904 : Definition(source, deopt_id),
3905 token_pos_(source.token_pos),
3906 kind_(kind),
3907 operation_cid_(kIllegalCid) {}
3908
3909 private:
3910 DISALLOW_COPY_AND_ASSIGN(ComparisonInstr);
3911};
3912
// A comparison that is pure: eligible for CSE and free of side effects.
class PureComparison : public ComparisonInstr {
 public:
  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  DECLARE_EMPTY_SERIALIZATION(PureComparison, ComparisonInstr)
 protected:
  PureComparison(const InstructionSource& source,
                 Token::Kind kind,
                 intptr_t deopt_id)
      : ComparisonInstr(source, kind, deopt_id) {}
};
3925
// Boilerplate base for comparisons with a fixed number N of inputs.
// CSETrait selects the base class: ComparisonInstr (impure) or
// PureComparison (CSE-able).
template <intptr_t N,
          typename ThrowsTrait,
          template <typename Impure, typename Pure> class CSETrait = NoCSE>
class TemplateComparison
    : public CSETrait<ComparisonInstr, PureComparison>::Base {
 public:
  using BaseClass = typename CSETrait<ComparisonInstr, PureComparison>::Base;

  TemplateComparison(const InstructionSource& source,
                     Token::Kind kind,
                     intptr_t deopt_id = DeoptId::kNone)
      : BaseClass(source, kind, deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }

  DECLARE_EMPTY_SERIALIZATION(TemplateComparison, BaseClass)

 protected:
  // Fixed-size storage for the N input values.
  EmbeddedArray<Value*, N> inputs_;

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
3952
// Two-way conditional branch. It embeds a ComparisonInstr that computes the
// condition and transfers control to true_successor() or false_successor().
// The branch has no inputs of its own: input, argument, representation and
// deopt queries are all forwarded to the embedded comparison.
3953class BranchInstr : public Instruction {
 3954 public:
 3955 explicit BranchInstr(ComparisonInstr* comparison, intptr_t deopt_id)
 3956 : Instruction(deopt_id), comparison_(comparison) {
 3957 ASSERT(comparison->env() == nullptr);
// Re-point the comparison's inputs at this branch so their uses are
// attributed to the branch instruction itself.
 3958 for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
 3959 comparison->InputAt(i)->set_instruction(this);
 3960 }
 3961 }
 3962
 3963 DECLARE_INSTRUCTION(Branch)
 3964
// Argument bookkeeping is delegated to the embedded comparison.
 3965 virtual intptr_t ArgumentCount() const {
 3966 return comparison()->ArgumentCount();
 3967 }
 3968 virtual void SetMoveArguments(MoveArgumentsArray* move_arguments) {
 3969 comparison()->SetMoveArguments(move_arguments);
 3970 }
 3971 virtual MoveArgumentsArray* GetMoveArguments() const {
 3972 return comparison()->GetMoveArguments();
 3973 }
 3974
 3975 intptr_t InputCount() const { return comparison()->InputCount(); }
 3976
 3977 Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }
 3978
// Source position and inlining info also come from the comparison.
 3979 virtual TokenPosition token_pos() const { return comparison_->token_pos(); }
 3980 virtual intptr_t inlining_id() const { return comparison_->inlining_id(); }
 3981 virtual void set_inlining_id(intptr_t value) {
 3982 return comparison_->set_inlining_id(value);
 3983 }
 3984 virtual bool has_inlining_id() const {
 3985 return comparison_->has_inlining_id();
 3986 }
 3987
 3988 virtual bool ComputeCanDeoptimize() const {
 3989 return comparison()->ComputeCanDeoptimize();
 3990 }
 3991
 3992 virtual bool CanBecomeDeoptimizationTarget() const {
 3993 return comparison()->CanBecomeDeoptimizationTarget();
 3994 }
 3995
 3996 virtual bool HasUnknownSideEffects() const {
 3997 return comparison()->HasUnknownSideEffects();
 3998 }
 3999
 4000 virtual bool CanCallDart() const { return comparison()->CanCallDart(); }
 4001
 4002 ComparisonInstr* comparison() const { return comparison_; }
 4003 void SetComparison(ComparisonInstr* comp);
 4004
 4005 virtual intptr_t DeoptimizationTarget() const {
 4006 return comparison()->DeoptimizationTarget();
 4007 }
 4008
 4009 virtual Representation RequiredInputRepresentation(intptr_t i) const {
 4010 return comparison()->RequiredInputRepresentation(i);
 4011 }
 4012
 4013 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
 4014
// Records a successor proven to always be taken; must be one of the
// branch's two successors (see the ASSERT below).
 4015 void set_constant_target(TargetEntryInstr* target) {
 4016 ASSERT(target == true_successor() || target == false_successor());
 4017 constant_target_ = target;
 4018 }
 4019 TargetEntryInstr* constant_target() const { return constant_target_; }
 4020
// Propagates the deopt id into both the branch and its comparison.
 4021 virtual void CopyDeoptIdFrom(const Instruction& instr) {
 4022 Instruction::CopyDeoptIdFrom(instr);
 4023 comparison()->CopyDeoptIdFrom(instr);
 4024 }
 4025
 4026 virtual bool MayThrow() const { return comparison()->MayThrow(); }
 4027
 4028 TargetEntryInstr* true_successor() const { return true_successor_; }
 4029 TargetEntryInstr* false_successor() const { return false_successor_; }
 4030
 4031 TargetEntryInstr** true_successor_address() { return &true_successor_; }
 4032 TargetEntryInstr** false_successor_address() { return &false_successor_; }
 4033
 4034 virtual intptr_t SuccessorCount() const;
 4035 virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;
 4036
 4038
 4039#define FIELD_LIST(F) F(ComparisonInstr*, comparison_)
 4040
 4041 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BranchInstr, Instruction, FIELD_LIST)
 4042#undef FIELD_LIST
 4044
 4045 private:
 4046 virtual void RawSetInputAt(intptr_t i, Value* value) {
 4047 comparison()->RawSetInputAt(i, value);
 4048 }
 4049
 4050 TargetEntryInstr* true_successor_ = nullptr;
 4051 TargetEntryInstr* false_successor_ = nullptr;
 4052 TargetEntryInstr* constant_target_ = nullptr;
 4053
 4054 DISALLOW_COPY_AND_ASSIGN(BranchInstr);
 4055};
4056
// Unconditionally forces deoptimization of the surrounding optimized code:
// ComputeCanDeoptimize() is always true. deopt_reason_ records why the
// deoptimization was requested.
4057class DeoptimizeInstr : public TemplateInstruction<0, NoThrow, Pure> {
 4058 public:
 4059 DeoptimizeInstr(ICData::DeoptReasonId deopt_reason, intptr_t deopt_id)
 4060 : TemplateInstruction(deopt_id), deopt_reason_(deopt_reason) {}
 4061
 4062 virtual bool ComputeCanDeoptimize() const { return true; }
 4063
// Attributes compare equal regardless of deopt_reason_.
 4064 virtual bool AttributesEqual(const Instruction& other) const { return true; }
 4065
 4066 DECLARE_INSTRUCTION(Deoptimize)
 4067
 4068#define FIELD_LIST(F) F(const ICData::DeoptReasonId, deopt_reason_)
 4069
 4071 TemplateInstruction,
 4072 FIELD_LIST)
 4073#undef FIELD_LIST
 4074
 4075 private:
 4076 DISALLOW_COPY_AND_ASSIGN(DeoptimizeInstr);
 4077};
4078
// Wraps an existing value in a fresh definition so that a narrower
// compile-time type (constrained_type()) can be attached from this program
// point on, without mutating the original definition. See RedefinedValue().
4079class RedefinitionInstr : public TemplateDefinition<1, NoThrow> {
 4080 public:
 4081 explicit RedefinitionInstr(Value* value,
 4082 bool inserted_by_constant_propagation = false)
 4083 : constrained_type_(nullptr),
 4084 inserted_by_constant_propagation_(inserted_by_constant_propagation) {
 4085 SetInputAt(0, value);
 4086 }
 4087
 4088 DECLARE_INSTRUCTION(Redefinition)
 4089
 4090 Value* value() const { return inputs_[0]; }
 4091
 4092 virtual CompileType ComputeType() const;
 4093 virtual bool RecomputeType();
 4094
 4095 virtual Definition* Canonicalize(FlowGraph* flow_graph);
 4096
// Optional narrowed type valid at this point; nullptr if none attached.
 4097 void set_constrained_type(CompileType* type) { constrained_type_ = type; }
 4098 CompileType* constrained_type() const { return constrained_type_; }
 4099
// True when this redefinition was introduced by constant propagation.
 4100 bool inserted_by_constant_propagation() const {
 4101 return inserted_by_constant_propagation_;
 4102 }
 4103
 4104 virtual bool ComputeCanDeoptimize() const { return false; }
 4105 virtual bool HasUnknownSideEffects() const { return false; }
 4106
 4107 virtual Value* RedefinedValue() const;
 4108
 4110
 4111#define FIELD_LIST(F) \
 4112 F(CompileType*, constrained_type_) \
 4113 F(bool, inserted_by_constant_propagation_)
 4114
 4116 TemplateDefinition,
 4117 FIELD_LIST)
 4118#undef FIELD_LIST
 4119
 4120 private:
 4121 DISALLOW_COPY_AND_ASSIGN(RedefinitionInstr);
 4122};
4123
4124// Keeps the value alive until after this point.
 4125//
 4126// The fence cannot be moved or removed: CanEliminate() answers false.
 4127class ReachabilityFenceInstr : public TemplateInstruction<1, NoThrow> {
 4128 public:
 4129 explicit ReachabilityFenceInstr(Value* value) { SetInputAt(0, value); }
 4130
 4131 DECLARE_INSTRUCTION(ReachabilityFence)
 4132
// The fence does not care how its input is represented.
 4133 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
 4134 return kNoRepresentation;
 4135 }
 4136
 4137 Value* value() const { return inputs_[0]; }
 4138
 4139 virtual bool ComputeCanDeoptimize() const { return false; }
 4140 virtual bool HasUnknownSideEffects() const { return false; }
 4141
// Never eliminable, in any block: removal would shorten the value's
// lifetime.
 4142 virtual bool CanEliminate(const BlockEntryInstr* block) const {
 4143 return false;
 4144 }
 4145
 4147
 4148 DECLARE_EMPTY_SERIALIZATION(ReachabilityFenceInstr, TemplateInstruction)
 4149
 4150 private:
 4151 DISALLOW_COPY_AND_ASSIGN(ReachabilityFenceInstr);
 4152};
4153
// Range-analysis marker definition: equal to value(), but additionally
// carrying a Range constraint() that holds on this control-flow path
// (see InferRange below).
4154class ConstraintInstr : public TemplateDefinition<1, NoThrow> {
 4155 public:
 4156 ConstraintInstr(Value* value, Range* constraint) : constraint_(constraint) {
 4157 SetInputAt(0, value);
 4158 }
 4159
 4160 DECLARE_INSTRUCTION(Constraint)
 4161
 4162 virtual CompileType ComputeType() const;
 4163
 4164 virtual bool ComputeCanDeoptimize() const { return false; }
 4165
 4166 virtual bool HasUnknownSideEffects() const { return false; }
 4167
// Not expected to be queried for CSE purposes.
 4168 virtual bool AttributesEqual(const Instruction& other) const {
 4169 UNREACHABLE();
 4170 return false;
 4171 }
 4172
 4173 Value* value() const { return inputs_[0]; }
 4174 Range* constraint() const { return constraint_; }
 4175
 4176 virtual void InferRange(RangeAnalysis* analysis, Range* range);
 4177
 4178 // Constraints for branches have their target block stored in order
 4179 // to find the comparison that generated the constraint:
 4180 // target->predecessor->last_instruction->comparison.
 4181 void set_target(TargetEntryInstr* target) { target_ = target; }
 4182 TargetEntryInstr* target() const { return target_; }
 4183
 4185
 4186#define FIELD_LIST(F) F(Range*, constraint_)
 4187
 4189 TemplateDefinition,
 4190 FIELD_LIST)
 4191#undef FIELD_LIST
 4193
 4194 private:
 4195 TargetEntryInstr* target_ = nullptr;
 4196
 4197 DISALLOW_COPY_AND_ASSIGN(ConstraintInstr);
 4198};
4199
// A compile-time constant: produces value() and can be freely deduplicated
// (pure, no deopt, no side effects).
4200class ConstantInstr : public TemplateDefinition<0, NoThrow, Pure> {
 4201 public:
 4202 explicit ConstantInstr(const Object& value)
 4203 : ConstantInstr(value, InstructionSource(TokenPosition::kConstant)) {}
 4204 ConstantInstr(const Object& value, const InstructionSource& source);
 4205
 4206 DECLARE_INSTRUCTION(Constant)
 4207 virtual CompileType ComputeType() const;
 4208
 4209 virtual Definition* Canonicalize(FlowGraph* flow_graph);
 4210
 4211 const Object& value() const { return value_; }
 4212
 4213 bool IsSmi() const { return compiler::target::IsSmi(value()); }
 4214
// True when the constant's bit pattern in representation() is all zero
// bits: Smi 0 for tagged/integer representations, or exactly +0.0 for
// unboxed doubles (the uint64 bit_cast rules out -0.0).
 4215 bool HasZeroRepresentation() const {
 4216 switch (representation()) {
 4217 case kTagged:
 4218 case kUnboxedUint8:
 4219 case kUnboxedUint16:
 4220 case kUnboxedUint32:
 4221 case kUnboxedInt32:
 4222 case kUnboxedInt64:
 4223 return IsSmi() && compiler::target::SmiValue(value()) == 0;
 4224 case kUnboxedDouble:
 4225 return compiler::target::IsDouble(value()) &&
 4226 bit_cast<uint64_t>(compiler::target::DoubleValue(value())) == 0;
 4227 default:
 4228 return false;
 4229 }
 4230 }
 4231
 4232 virtual bool ComputeCanDeoptimize() const { return false; }
 4233
 4234 virtual void InferRange(RangeAnalysis* analysis, Range* range);
 4235
 4236 virtual bool AttributesEqual(const Instruction& other) const;
 4237
 4238 virtual TokenPosition token_pos() const { return token_pos_; }
 4239
// Emits code materializing this constant into the given destination
// location; tmp/pair_index support multi-register destinations.
 4240 void EmitMoveToLocation(FlowGraphCompiler* compiler,
 4241 const Location& destination,
 4242 Register tmp = kNoRegister,
 4243 intptr_t pair_index = 0);
 4244
 4247
 4248#define FIELD_LIST(F) \
 4249 F(const Object&, value_) \
 4250 F(const TokenPosition, token_pos_)
 4251
 4253 TemplateDefinition,
 4254 FIELD_LIST)
 4255#undef FIELD_LIST
 4256
 4257 private:
 4258 DISALLOW_COPY_AND_ASSIGN(ConstantInstr);
 4259};
4260
4261// Merged ConstantInstr -> UnboxedXXX into UnboxedConstantInstr.
4262// TODO(srdjan): Implemented currently for doubles only, should implement
4263// for other unboxing instructions.
4264class UnboxedConstantInstr : public ConstantInstr {
4265 public:
4266 explicit UnboxedConstantInstr(const Object& value,
4267 Representation representation);
4268
4269 virtual Representation representation() const { return representation_; }
4270
4271 // Either nullptr or the address of the unboxed constant.
4272 uword constant_address() const { return constant_address_; }
4273
4274 DECLARE_INSTRUCTION(UnboxedConstant)
4275 DECLARE_CUSTOM_SERIALIZATION(UnboxedConstantInstr)
4276
4277 DECLARE_ATTRIBUTES_NAMED(("value", "representation"),
4278 (&value(), representation()))
4279
4280 private:
4281 const Representation representation_;
4282 uword
4283 constant_address_; // Either nullptr or points to the untagged constant.
4284
4285 DISALLOW_COPY_AND_ASSIGN(UnboxedConstantInstr);
4286};
4287
4288// Checks that one type is a subtype of another (e.g. for type parameter bounds
 4289// checking). Throws a TypeError otherwise. Both types are instantiated at
 4290// runtime as necessary.
4291class AssertSubtypeInstr : public TemplateInstruction<5, Throws, Pure> {
 4292 public:
// Fixed input slots, in order.
 4293 enum {
 4294 kInstantiatorTAVPos = 0,
 4295 kFunctionTAVPos = 1,
 4296 kSubTypePos = 2,
 4297 kSuperTypePos = 3,
 4298 kDstNamePos = 4,
 4299 };
 4300
 4301 AssertSubtypeInstr(const InstructionSource& source,
 4302 Value* instantiator_type_arguments,
 4303 Value* function_type_arguments,
 4304 Value* sub_type,
 4305 Value* super_type,
 4306 Value* dst_name,
 4307 intptr_t deopt_id)
 4308 : TemplateInstruction(source, deopt_id), token_pos_(source.token_pos) {
 4309 SetInputAt(kInstantiatorTAVPos, instantiator_type_arguments);
 4310 SetInputAt(kFunctionTAVPos, function_type_arguments);
 4311 SetInputAt(kSubTypePos, sub_type);
 4312 SetInputAt(kSuperTypePos, super_type);
 4313 SetInputAt(kDstNamePos, dst_name);
 4314 }
 4315
 4316 DECLARE_INSTRUCTION(AssertSubtype);
 4317
 4318 Value* instantiator_type_arguments() const {
 4319 return inputs_[kInstantiatorTAVPos];
 4320 }
 4321 Value* function_type_arguments() const { return inputs_[kFunctionTAVPos]; }
 4322 Value* sub_type() const { return inputs_[kSubTypePos]; }
 4323 Value* super_type() const { return inputs_[kSuperTypePos]; }
 4324 Value* dst_name() const { return inputs_[kDstNamePos]; }
 4325
 4326 virtual TokenPosition token_pos() const { return token_pos_; }
 4327
// Cannot deopt eagerly; may lazily deopt after the call in JIT mode only.
 4328 virtual bool ComputeCanDeoptimize() const { return false; }
 4329 virtual bool ComputeCanDeoptimizeAfterCall() const {
 4330 return !CompilerState::Current().is_aot();
 4331 }
 4332 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
 4333 return InputCount();
 4334 }
 4335
 4336 virtual bool CanBecomeDeoptimizationTarget() const { return true; }
 4337
 4338 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
 4339
// The subtype check is fully described by the inputs.
 4340 virtual bool AttributesEqual(const Instruction& other) const { return true; }
 4341
 4343
 4344#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
 4345
 4346 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AssertSubtypeInstr,
 4347 TemplateInstruction,
 4348 FIELD_LIST)
 4349#undef FIELD_LIST
 4350
 4351 private:
 4352 DISALLOW_COPY_AND_ASSIGN(AssertSubtypeInstr);
 4353};
4354
// Checks that value() is assignable to dst_type() (instantiated with the
// given type-argument vectors); may throw on failure (Throws trait).
// kind_ records why the check was inserted (see the kinds below).
4355class AssertAssignableInstr : public TemplateDefinition<4, Throws, Pure> {
 4356 public:
 4357#define FOR_EACH_ASSERT_ASSIGNABLE_KIND(V) \
 4358 V(ParameterCheck) \
 4359 V(InsertedByFrontend) \
 4360 V(FromSource) \
 4361 V(Unknown)
 4362
 4363#define KIND_DEFN(name) k##name,
 4365#undef KIND_DEFN
 4366
 4367 static const char* KindToCString(Kind kind);
 4368 static bool ParseKind(const char* str, Kind* out);
 4369
// Fixed input slots, in order.
 4370 enum {
 4371 kInstancePos = 0,
 4372 kDstTypePos = 1,
 4373 kInstantiatorTAVPos = 2,
 4374 kFunctionTAVPos = 3,
 4375 kNumInputs = 4,
 4376 };
 4377
 4378 AssertAssignableInstr(const InstructionSource& source,
 4379 Value* value,
 4380 Value* dst_type,
 4381 Value* instantiator_type_arguments,
 4382 Value* function_type_arguments,
 4383 const String& dst_name,
 4384 intptr_t deopt_id,
 4385 Kind kind = kUnknown)
 4386 : TemplateDefinition(source, deopt_id),
 4387 token_pos_(source.token_pos),
 4388 dst_name_(dst_name),
 4389 kind_(kind) {
 4390 ASSERT(!dst_name.IsNull());
 4391 SetInputAt(kInstancePos, value);
 4392 SetInputAt(kDstTypePos, dst_type);
 4393 SetInputAt(kInstantiatorTAVPos, instantiator_type_arguments);
 4394 SetInputAt(kFunctionTAVPos, function_type_arguments);
 4395 }
 4396
 4397 virtual intptr_t statistics_tag() const;
 4398
 4399 DECLARE_INSTRUCTION(AssertAssignable)
 4400 virtual CompileType ComputeType() const;
 4401 virtual bool RecomputeType();
 4402
 4403 Value* value() const { return inputs_[kInstancePos]; }
 4404 Value* dst_type() const { return inputs_[kDstTypePos]; }
 4405 Value* instantiator_type_arguments() const {
 4406 return inputs_[kInstantiatorTAVPos];
 4407 }
 4408 Value* function_type_arguments() const { return inputs_[kFunctionTAVPos]; }
 4409
 4410 virtual TokenPosition token_pos() const { return token_pos_; }
 4411 const String& dst_name() const { return dst_name_; }
 4412
// Cannot deopt eagerly; may lazily deopt after the call in JIT mode only.
 4413 virtual bool ComputeCanDeoptimize() const { return false; }
 4414 virtual bool ComputeCanDeoptimizeAfterCall() const {
 4415 return !CompilerState::Current().is_aot();
 4416 }
 4417 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
 4418#if !defined(TARGET_ARCH_IA32)
 4419 return InputCount();
 4420#else
 4421 // The ia32 implementation calls the stub by pushing the input registers
 4422 // in the same order onto the stack thereby making the deopt-env correct.
 4423 // (Due to lack of registers we cannot use all-argument calling convention
 4424 // as in other architectures.)
 4425 return 0;
 4426#endif
 4427 }
 4428
 4429 virtual bool CanBecomeDeoptimizationTarget() const {
 4430 // AssertAssignable instructions that are specialized by the optimizer
 4431 // (e.g. replaced with CheckClass) need a deoptimization descriptor before.
 4432 return true;
 4433 }
 4434
 4435 virtual Definition* Canonicalize(FlowGraph* flow_graph);
 4436
// The check is fully described by the inputs.
 4437 virtual bool AttributesEqual(const Instruction& other) const { return true; }
 4438
 4439 virtual Value* RedefinedValue() const;
 4440
 4441 virtual void InferRange(RangeAnalysis* analysis, Range* range);
 4442
 4444
 4445#define FIELD_LIST(F) \
 4446 F(const TokenPosition, token_pos_) \
 4447 F(const String&, dst_name_) \
 4448 F(const Kind, kind_)
 4449
 4450 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AssertAssignableInstr,
 4451 TemplateDefinition,
 4452 FIELD_LIST)
 4453#undef FIELD_LIST
 4454
 4455 private:
 4456 DISALLOW_COPY_AND_ASSIGN(AssertAssignableInstr);
 4457};
4458
// Checks that value() is a boolean; may throw otherwise (Throws trait).
// Produces the checked value (see RedefinedValue()).
4459class AssertBooleanInstr : public TemplateDefinition<1, Throws, Pure> {
 4460 public:
 4461 AssertBooleanInstr(const InstructionSource& source,
 4462 Value* value,
 4463 intptr_t deopt_id)
 4464 : TemplateDefinition(source, deopt_id), token_pos_(source.token_pos) {
 4465 SetInputAt(0, value);
 4466 }
 4467
 4468 DECLARE_INSTRUCTION(AssertBoolean)
 4469 virtual CompileType ComputeType() const;
 4470
 4471 virtual TokenPosition token_pos() const { return token_pos_; }
 4472 Value* value() const { return inputs_[0]; }
 4473
// Cannot deopt eagerly; may lazily deopt after the call in JIT mode only.
 4474 virtual bool ComputeCanDeoptimize() const { return false; }
 4475 virtual bool ComputeCanDeoptimizeAfterCall() const {
 4476 return !CompilerState::Current().is_aot();
 4477 }
 4478 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
 4479 return InputCount();
 4480 }
 4481
 4482 virtual Definition* Canonicalize(FlowGraph* flow_graph);
 4483
 4484 virtual bool AttributesEqual(const Instruction& other) const { return true; }
 4485
 4486 virtual Value* RedefinedValue() const;
 4487
 4489
 4490#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
 4491
 4492 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AssertBooleanInstr,
 4493 TemplateDefinition,
 4494 FIELD_LIST)
 4495#undef FIELD_LIST
 4496
 4497 private:
 4498 DISALLOW_COPY_AND_ASSIGN(AssertBooleanInstr);
 4499};
4500
4501struct ArgumentsInfo {
4502 ArgumentsInfo(intptr_t type_args_len,
4503 intptr_t count_with_type_args,
4504 intptr_t size_with_type_args,
4505 const Array& argument_names)
4506 : type_args_len(type_args_len),
4507 count_with_type_args(count_with_type_args),
4508 size_with_type_args(size_with_type_args),
4509 count_without_type_args(count_with_type_args -
4510 (type_args_len > 0 ? 1 : 0)),
4511 size_without_type_args(size_with_type_args -
4512 (type_args_len > 0 ? 1 : 0)),
4513 argument_names(argument_names) {}
4514
4515 ArrayPtr ToArgumentsDescriptor() const {
4516 return ArgumentsDescriptor::New(type_args_len, count_without_type_args,
4517 size_without_type_args, argument_names);
4518 }
4519
4520 const intptr_t type_args_len;
4521 const intptr_t count_with_type_args;
4522 const intptr_t size_with_type_args;
4523 const intptr_t count_without_type_args;
4524 const intptr_t size_without_type_args;
4525 const Array& argument_names;
4526};
4527
// Base class for Dart calls: the last kExtraInputs inputs are non-argument
// inputs; the remaining inputs are the call's arguments, with the
// type-argument vector (if any) occupying the first argument slot.
4528template <intptr_t kExtraInputs>
 4529class TemplateDartCall : public VariadicDefinition {
 4530 public:
 4531 TemplateDartCall(intptr_t deopt_id,
 4532 intptr_t type_args_len,
 4533 const Array& argument_names,
 4534 InputsArray&& inputs,
 4535 const InstructionSource& source)
 4536 : VariadicDefinition(std::move(inputs), source, deopt_id),
 4537 type_args_len_(type_args_len),
 4538 argument_names_(argument_names),
 4539 token_pos_(source.token_pos) {
 4540 DEBUG_ASSERT(argument_names.IsNotTemporaryScopedHandle());
 4541 ASSERT(InputCount() >= kExtraInputs);
 4542 }
 4543
 4544 inline StringPtr Selector();
 4545
 4546 virtual bool MayThrow() const { return true; }
 4547 virtual bool CanCallDart() const { return true; }
 4548
// Cannot deopt eagerly; may lazily deopt after the call in JIT mode only.
 4549 virtual bool ComputeCanDeoptimize() const { return false; }
 4550 virtual bool ComputeCanDeoptimizeAfterCall() const {
 4551 return !CompilerState::Current().is_aot();
 4552 }
 4553 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
 4554 return kExtraInputs;
 4555 }
 4556
// Index of the first "real" argument: slot 0 holds the type-argument
// vector when type_args_len_ > 0.
 4557 intptr_t FirstArgIndex() const { return type_args_len_ > 0 ? 1 : 0; }
 4558 Value* Receiver() const { return this->ArgumentValueAt(FirstArgIndex()); }
 4559 intptr_t ArgumentCountWithoutTypeArgs() const {
 4560 return ArgumentCount() - FirstArgIndex();
 4561 }
 4562 intptr_t ArgumentsSizeWithoutTypeArgs() const {
 4563 return ArgumentsSize() - FirstArgIndex();
 4564 }
 4565 // ArgumentCount() includes the type argument vector if any.
 4566 // Caution: Must override Instruction::ArgumentCount().
 4567 intptr_t ArgumentCount() const {
 4568 return move_arguments_ != nullptr ? move_arguments_->length()
 4569 : InputCount() - kExtraInputs;
 4570 }
 4571 virtual intptr_t ArgumentsSize() const { return ArgumentCount(); }
 4572
 4573 virtual void SetMoveArguments(MoveArgumentsArray* move_arguments) {
 4574 ASSERT(move_arguments_ == nullptr);
 4575 move_arguments_ = move_arguments;
 4576 }
 4577 virtual MoveArgumentsArray* GetMoveArguments() const {
 4578 return move_arguments_;
 4579 }
// Detaches the argument inputs (now represented by move_arguments) and
// keeps only the kExtraInputs trailing non-argument inputs.
 4580 virtual void ReplaceInputsWithMoveArguments(
 4581 MoveArgumentsArray* move_arguments) {
 4582 ASSERT(move_arguments_ == nullptr);
 4583 ASSERT(move_arguments->length() == ArgumentCount());
 4584 SetMoveArguments(move_arguments);
 4585 ASSERT(InputCount() == ArgumentCount() + kExtraInputs);
 4586 const intptr_t extra_inputs_base = InputCount() - kExtraInputs;
 4587 for (intptr_t i = 0, n = ArgumentCount(); i < n; ++i) {
 4588 InputAt(i)->RemoveFromUseList();
 4589 }
 4590 for (intptr_t i = 0; i < kExtraInputs; ++i) {
 4591 SetInputAt(i, InputAt(extra_inputs_base + i));
 4592 }
 4593 inputs_.TruncateTo(kExtraInputs);
 4594 }
 4595 intptr_t type_args_len() const { return type_args_len_; }
 4596 const Array& argument_names() const { return argument_names_; }
 4597 virtual TokenPosition token_pos() const { return token_pos_; }
 4598 ArrayPtr GetArgumentsDescriptor() const {
 4599 return ArgumentsDescriptor::New(
 4600 type_args_len(), ArgumentCountWithoutTypeArgs(),
 4601 ArgumentsSizeWithoutTypeArgs(), argument_names());
 4602 }
 4603
 4604 DECLARE_CUSTOM_SERIALIZATION(TemplateDartCall)
 4606
 4607 private:
 4608 const intptr_t type_args_len_;
 4609 const Array& argument_names_;
 4610 const TokenPosition token_pos_;
 4611 MoveArgumentsArray* move_arguments_ = nullptr;
 4612
 4613 DISALLOW_COPY_AND_ASSIGN(TemplateDartCall);
 4614};
4615
// A call through a closure value. One extra input beyond the arguments
// (TemplateDartCall<1>); target_function_ records the static target when
// known.
4616class ClosureCallInstr : public TemplateDartCall<1> {
 4617 public:
 4618 ClosureCallInstr(const Function& target_function,
 4619 InputsArray&& inputs,
 4620 intptr_t type_args_len,
 4621 const Array& argument_names,
 4622 const InstructionSource& source,
 4623 intptr_t deopt_id)
 4624 : TemplateDartCall(deopt_id,
 4625 type_args_len,
 4626 argument_names,
 4627 std::move(inputs),
 4628 source),
 4629 target_function_(target_function) {
 4630 DEBUG_ASSERT(target_function.IsNotTemporaryScopedHandle());
 4631 }
 4632
 4633 DECLARE_INSTRUCTION(ClosureCall)
 4634
 4635 const Function& target_function() const { return target_function_; }
 4636
 4637 // TODO(kmillikin): implement exact call counts for closure calls.
 4638 virtual intptr_t CallCount() const { return 1; }
 4639
 4640 virtual bool HasUnknownSideEffects() const { return true; }
 4641
 4643
 4644#define FIELD_LIST(F) F(const Function&, target_function_)
 4646 TemplateDartCall,
 4647 FIELD_LIST)
 4648#undef FIELD_LIST
 4649
 4650 private:
 4651 DISALLOW_COPY_AND_ASSIGN(ClosureCallInstr);
 4652};
4653
4654// Common base class for various kinds of instance call instructions
 4655// (InstanceCallInstr, PolymorphicInstanceCallInstr).
4656class InstanceCallBaseInstr : public TemplateDartCall<0> {
 4657 public:
 4658 InstanceCallBaseInstr(const InstructionSource& source,
 4659 const String& function_name,
 4660 Token::Kind token_kind,
 4661 InputsArray&& arguments,
 4662 intptr_t type_args_len,
 4663 const Array& argument_names,
 4664 const ICData* ic_data,
 4665 intptr_t deopt_id,
 4666 const Function& interface_target,
 4667 const Function& tearoff_interface_target)
 4668 : TemplateDartCall(deopt_id,
 4669 type_args_len,
 4670 argument_names,
 4671 std::move(arguments),
 4672 source),
 4673 ic_data_(ic_data),
 4674 function_name_(function_name),
 4675 token_kind_(token_kind),
 4676 interface_target_(interface_target),
 4677 tearoff_interface_target_(tearoff_interface_target),
 4678 result_type_(nullptr),
 4679 has_unique_selector_(false),
 4680 entry_kind_(Code::EntryKind::kNormal),
 4681 receiver_is_not_smi_(false),
 4682 is_call_on_this_(false) {
 4683 DEBUG_ASSERT(function_name.IsNotTemporaryScopedHandle());
 4684 DEBUG_ASSERT(interface_target.IsNotTemporaryScopedHandle());
 4685 DEBUG_ASSERT(tearoff_interface_target.IsNotTemporaryScopedHandle());
 4686 ASSERT(InputCount() > 0);
// Only a restricted set of selector kinds is representable.
 4687 ASSERT(Token::IsBinaryOperator(token_kind) ||
 4688 Token::IsEqualityOperator(token_kind) ||
 4689 Token::IsRelationalOperator(token_kind) ||
 4690 Token::IsUnaryOperator(token_kind) ||
 4691 Token::IsIndexOperator(token_kind) ||
 4692 Token::IsTypeTestOperator(token_kind) ||
 4693 Token::IsTypeCastOperator(token_kind) || token_kind == Token::kGET ||
 4694 token_kind == Token::kSET || token_kind == Token::kILLEGAL);
 4695 }
 4696
 4697 const ICData* ic_data() const { return ic_data_; }
 4698 bool HasICData() const {
 4699 return (ic_data() != nullptr) && !ic_data()->IsNull();
 4700 }
 4701
 4702 // ICData can be replaced by optimizer.
 4703 void set_ic_data(const ICData* value) { ic_data_ = value; }
 4704
 4705 const String& function_name() const { return function_name_; }
 4706 Token::Kind token_kind() const { return token_kind_; }
 4707 const Function& interface_target() const { return interface_target_; }
 4708 const Function& tearoff_interface_target() const {
 4709 return tearoff_interface_target_;
 4710 }
 4711
 4712 bool has_unique_selector() const { return has_unique_selector_; }
 4713 void set_has_unique_selector(bool b) { has_unique_selector_ = b; }
 4714
 4715 virtual CompileType ComputeType() const;
 4716
 4717 virtual bool CanBecomeDeoptimizationTarget() const {
 4718 // Instance calls that are specialized by the optimizer need a
 4719 // deoptimization descriptor before the call.
 4720 return true;
 4721 }
 4722
 4723 virtual bool HasUnknownSideEffects() const { return true; }
 4724
// Attaches an inferred result type (zone-allocated copy of new_type).
 4725 void SetResultType(Zone* zone, CompileType new_type) {
 4726 result_type_ = new (zone) CompileType(new_type);
 4727 }
 4728
 4729 CompileType* result_type() const { return result_type_; }
 4730
// Class id of the inferred result, or kDynamicCid when nothing is known.
 4731 intptr_t result_cid() const {
 4732 if (result_type_ == nullptr) {
 4733 return kDynamicCid;
 4734 }
 4735 return result_type_->ToCid();
 4736 }
 4737
 4738 FunctionPtr ResolveForReceiverClass(const Class& cls, bool allow_add = true);
 4739
 4740 Code::EntryKind entry_kind() const { return entry_kind_; }
 4741 void set_entry_kind(Code::EntryKind value) { entry_kind_ = value; }
 4742
 4743 void mark_as_call_on_this() { is_call_on_this_ = true; }
 4744 bool is_call_on_this() const { return is_call_on_this_; }
 4745
 4746 DECLARE_ABSTRACT_INSTRUCTION(InstanceCallBase);
 4747
 4748 bool receiver_is_not_smi() const { return receiver_is_not_smi_; }
 4749 void set_receiver_is_not_smi(bool value) { receiver_is_not_smi_ = value; }
 4750
 4751 // Tries to prove that the receiver will not be a Smi based on the
 4752 // interface target, CompileType and hints from TFA.
 4753 void UpdateReceiverSminess(Zone* zone);
 4754
 4755 bool CanReceiverBeSmiBasedOnInterfaceTarget(Zone* zone) const;
 4756
// Skips the type-argument-vector input (always guarded), then consults
// the interface target's unboxed-parameter info for the rest.
 4757 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
 4758 if (type_args_len() > 0) {
 4759 if (idx == 0) {
 4760 return kGuardInputs;
 4761 }
 4762 idx--;
 4763 }
 4764 if (interface_target_.IsNull()) return kGuardInputs;
 4765 return interface_target_.is_unboxed_parameter_at(idx) ? kNotSpeculative
 4766 : kGuardInputs;
 4767 }
 4768
 4769 virtual intptr_t ArgumentsSize() const;
 4770
 4771 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
 4772
 4773 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
 4774
 4775 virtual Representation representation() const;
 4776
 4777#define FIELD_LIST(F) \
 4778 F(const ICData*, ic_data_) \
 4779 F(const String&, function_name_) \
 4780 /* Binary op, unary op, kGET or kILLEGAL. */ \
 4781 F(const Token::Kind, token_kind_) \
 4782 F(const Function&, interface_target_) \
 4783 F(const Function&, tearoff_interface_target_) \
 4784 /* Inferred result type. */ \
 4785 F(CompileType*, result_type_) \
 4786 F(bool, has_unique_selector_) \
 4787 F(Code::EntryKind, entry_kind_) \
 4788 F(bool, receiver_is_not_smi_) \
 4789 F(bool, is_call_on_this_)
 4790
 4791 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstanceCallBaseInstr,
 4792 TemplateDartCall,
 4793 FIELD_LIST)
 4794#undef FIELD_LIST
 4795
 4796 protected:
 4797 friend class CallSpecializer;
 4798 void set_ic_data(ICData* value) { ic_data_ = value; }
 4799 void set_result_type(CompileType* result_type) { result_type_ = result_type; }
 4800
 4801 private:
 4802 DISALLOW_COPY_AND_ASSIGN(InstanceCallBaseInstr);
 4803};
4804
// An unresolved instance (virtual) call. May carry ICData type feedback
// collected by the JIT, plus optional binary-operation feedback and call
// targets computed during optimization.
4805class InstanceCallInstr : public InstanceCallBaseInstr {
 4806 public:
// Constructor looking up ICData from the given ic_data_array.
 4807 InstanceCallInstr(
 4808 const InstructionSource& source,
 4809 const String& function_name,
 4810 Token::Kind token_kind,
 4811 InputsArray&& arguments,
 4812 intptr_t type_args_len,
 4813 const Array& argument_names,
 4814 intptr_t checked_argument_count,
 4815 const ZoneGrowableArray<const ICData*>& ic_data_array,
 4816 intptr_t deopt_id,
 4817 const Function& interface_target = Function::null_function(),
 4818 const Function& tearoff_interface_target = Function::null_function())
 4819 : InstanceCallBaseInstr(
 4820 source,
 4822 token_kind,
 4823 std::move(arguments),
 4824 type_args_len,
 4825 argument_names,
 4826 GetICData(ic_data_array, deopt_id, /*is_static_call=*/false),
 4827 deopt_id,
 4828 interface_target,
 4829 tearoff_interface_target),
 4830 checked_argument_count_(checked_argument_count),
 4831 receivers_static_type_(nullptr) {}
 4832
// Constructor starting without any ICData.
 4833 InstanceCallInstr(
 4834 const InstructionSource& source,
 4835 const String& function_name,
 4836 Token::Kind token_kind,
 4837 InputsArray&& arguments,
 4838 intptr_t type_args_len,
 4839 const Array& argument_names,
 4840 intptr_t checked_argument_count,
 4841 intptr_t deopt_id,
 4842 const Function& interface_target = Function::null_function(),
 4843 const Function& tearoff_interface_target = Function::null_function())
 4844 : InstanceCallBaseInstr(source,
 4846 token_kind,
 4847 std::move(arguments),
 4848 type_args_len,
 4849 argument_names,
 4850 /*ic_data=*/nullptr,
 4851 deopt_id,
 4852 interface_target,
 4853 tearoff_interface_target),
 4854 checked_argument_count_(checked_argument_count),
 4855 receivers_static_type_(nullptr) {}
 4856
 4857 DECLARE_INSTRUCTION(InstanceCall)
 4858
 4859 intptr_t checked_argument_count() const { return checked_argument_count_; }
 4860
// Aggregate call count from the ICData, or 0 when no feedback exists.
 4861 virtual intptr_t CallCount() const {
 4862 return ic_data() == nullptr ? 0 : ic_data()->AggregateCount();
 4863 }
 4864
 4865 void set_receivers_static_type(const AbstractType* receiver_type) {
 4866 ASSERT(receiver_type != nullptr);
 4867 receivers_static_type_ = receiver_type;
 4868 }
 4869
 4870 virtual Definition* Canonicalize(FlowGraph* flow_graph);
 4873
 4874 bool MatchesCoreName(const String& name);
 4875
 4876 const class BinaryFeedback& BinaryFeedback();
 4877 void SetBinaryFeedback(const class BinaryFeedback* binary) {
 4878 binary_ = binary;
 4879 }
 4880
 4881 const CallTargets& Targets();
 4882 void SetTargets(const CallTargets* targets) { targets_ = targets; }
 4883
 4884 void EnsureICData(FlowGraph* graph);
 4885
 4886#define FIELD_LIST(F) \
 4887 F(const intptr_t, checked_argument_count_) \
 4888 F(const AbstractType*, receivers_static_type_)
 4889
 4891 InstanceCallBaseInstr,
 4892 FIELD_LIST)
 4893#undef FIELD_LIST
 4894
 4895 private:
// Lazily computed; not serialized with the instruction fields above.
 4896 const CallTargets* targets_ = nullptr;
 4897 const class BinaryFeedback* binary_ = nullptr;
 4898
 4899 DISALLOW_COPY_AND_ASSIGN(InstanceCallInstr);
 4900};
4901
// An instance call that the optimizer has specialized to a finite set of
// receiver class-id targets (`targets_`). Created from an existing
// InstanceCallBaseInstr via FromCall(); the constructor is private.
// NOTE(review): this view is an extraction with some original lines elided
// (e.g. a PRINT_* macro and one constructor argument line) — do not treat
// the visible text as the complete upstream declaration.
4902class PolymorphicInstanceCallInstr : public InstanceCallBaseInstr {
4903 public:
4904 // Generate a replacement polymorphic call instruction.
4905 static PolymorphicInstanceCallInstr* FromCall(Zone* zone,
4906 InstanceCallBaseInstr* call,
4907 const CallTargets& targets,
4908 bool complete) {
4909 ASSERT(!call->HasMoveArguments());
 // Deep-copy the argument values (with their types) into the new call.
4910 InputsArray args(zone, call->ArgumentCount());
4911 for (intptr_t i = 0, n = call->ArgumentCount(); i < n; ++i) {
4912 args.Add(call->ArgumentValueAt(i)->CopyWithType(zone));
4913 }
4914 auto new_call = new (zone) PolymorphicInstanceCallInstr(
4915 call->source(), call->function_name(), call->token_kind(),
4916 std::move(args), call->type_args_len(), call->argument_names(),
4917 call->ic_data(), call->deopt_id(), call->interface_target(),
4918 call->tearoff_interface_target(), targets, complete);
 // Carry over metadata from the original call so no information is lost.
4919 new_call->set_result_type(call->result_type());
4920 new_call->set_entry_kind(call->entry_kind());
4921 new_call->set_has_unique_selector(call->has_unique_selector());
4922 if (call->is_call_on_this()) {
4923 new_call->mark_as_call_on_this();
4924 }
4925 return new_call;
4926 }
4927
 // NOTE(review): presumably true when `targets_` covers every possible
 // receiver cid at this call site — confirm against optimizer usage.
4928 bool complete() const { return complete_; }
4929
4930 virtual CompileType ComputeType() const;
4931
4932 bool HasOnlyDispatcherOrImplicitAccessorTargets() const;
4933
4934 const CallTargets& targets() const { return targets_; }
4935 intptr_t NumberOfChecks() const { return targets_.length(); }
4936
4937 bool IsSureToCallSingleRecognizedTarget() const;
4938
4939 virtual intptr_t CallCount() const;
4940
4941 // If this polymorphic call site was created to cover the remaining cids after
4942 // inlining then we need to keep track of the total number of calls including
4943 // the ones that we inlined. This is different from the CallCount above: Eg
4944 // if there were 100 calls originally, distributed across three class-ids in
4945 // the ratio 50, 40, 7, 3. The first two were inlined, so now we have only
4946 // 10 calls in the CallCount above, but the heuristics need to know that the
4947 // last two cids cover 7% and 3% of the calls, not 70% and 30%.
4948 intptr_t total_call_count() { return total_call_count_; }
4949
4950 void set_total_call_count(intptr_t count) { total_call_count_ = count; }
4951
4952 DECLARE_INSTRUCTION(PolymorphicInstanceCall)
4953
4954 virtual Definition* Canonicalize(FlowGraph* graph);
4955
4956 static TypePtr ComputeRuntimeType(const CallTargets& targets);
4957
4959
4960#define FIELD_LIST(F) \
4961 F(const CallTargets&, targets_) \
4962 F(const bool, complete_) \
4963 F(intptr_t, total_call_count_)
4964
4965 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(PolymorphicInstanceCallInstr,
4966 InstanceCallBaseInstr,
4967 FIELD_LIST)
4968#undef FIELD_LIST
4969
4970 private:
4971 PolymorphicInstanceCallInstr(const InstructionSource& source,
4972 const String& function_name,
4973 Token::Kind token_kind,
4974 InputsArray&& arguments,
4975 intptr_t type_args_len,
4976 const Array& argument_names,
4977 const ICData* ic_data,
4978 intptr_t deopt_id,
4979 const Function& interface_target,
4980 const Function& tearoff_interface_target,
4981 const CallTargets& targets,
4982 bool complete)
4983 : InstanceCallBaseInstr(source,
4985 token_kind,
4986 std::move(arguments),
4987 type_args_len,
4988 argument_names,
4989 ic_data,
4990 deopt_id,
4991 interface_target,
4992 tearoff_interface_target),
4993 targets_(targets),
4994 complete_(complete) {
 // A polymorphic call must have at least one target to dispatch to.
4995 ASSERT(targets.length() != 0);
 // Seed the total count from the current ICData-derived count; the
 // inliner may overwrite it via set_total_call_count().
4996 total_call_count_ = CallCount();
4997 }
4998
4999 friend class PolymorphicInliner;
5000
5001 DISALLOW_COPY_AND_ASSIGN(PolymorphicInstanceCallInstr);
5002};
5003
5004// Instance call using the global dispatch table.
5005//
5006// Takes untagged ClassId of the receiver as extra input.
// The extra input is declared via TemplateDartCall<1> and is always the
// last input (see class_id() below).
5007class DispatchTableCallInstr : public TemplateDartCall<1> {
5008 public:
5009 DispatchTableCallInstr(const InstructionSource& source,
5010 const Function& interface_target,
5011 const compiler::TableSelector* selector,
5012 InputsArray&& arguments,
5013 intptr_t type_args_len,
5014 const Array& argument_names)
 // DeoptId::kNone: this call never serves as a deoptimization target
 // (see CanBecomeDeoptimizationTarget/DeoptimizationTarget below).
5015 : TemplateDartCall(DeoptId::kNone,
5016 type_args_len,
5017 argument_names,
5018 std::move(arguments),
5019 source),
5020 interface_target_(interface_target),
5021 selector_(selector) {
5022 ASSERT(selector != nullptr);
5023 DEBUG_ASSERT(interface_target_.IsNotTemporaryScopedHandle());
 // At minimum the class-id input must be present.
5024 ASSERT(InputCount() > 0);
5025 }
5026
 // Builds a dispatch-table call replacing an instance call; `cid` supplies
 // the extra class-id input.
5027 static DispatchTableCallInstr* FromCall(
5028 Zone* zone,
5029 const InstanceCallBaseInstr* call,
5030 Value* cid,
5031 const Function& interface_target,
5032 const compiler::TableSelector* selector);
5033
5034 DECLARE_INSTRUCTION(DispatchTableCall)
5035 DECLARE_ATTRIBUTE(selector_name())
5036
5037 const Function& interface_target() const { return interface_target_; }
5038 const compiler::TableSelector* selector() const { return selector_; }
5039 const char* selector_name() const {
5040 return String::Handle(interface_target().name()).ToCString();
5041 }
5042
 // The receiver class id is always the last input.
5043 Value* class_id() const { return InputAt(InputCount() - 1); }
5044
5045 virtual CompileType ComputeType() const;
5046
5047 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5048
5049 virtual bool CanBecomeDeoptimizationTarget() const { return false; }
5050
5051 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
5052
5053 virtual bool HasUnknownSideEffects() const { return true; }
5054
5055 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
 // Skip the leading type-arguments input (if any) before consulting the
 // target's unboxed-parameter metadata.
5056 if (type_args_len() > 0) {
5057 if (idx == 0) {
5058 return kGuardInputs;
5059 }
5060 idx--;
5061 }
5062 return interface_target_.is_unboxed_parameter_at(idx) ? kNotSpeculative
5063 : kGuardInputs;
5064 }
5065
5066 virtual intptr_t ArgumentsSize() const;
5067
5068 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
5069
5070 virtual Representation representation() const;
5071
5073
5074#define FIELD_LIST(F) \
5075 F(const Function&, interface_target_) \
5076 F(const compiler::TableSelector*, selector_)
5077
5078 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DispatchTableCallInstr,
5079 TemplateDartCall,
5080 FIELD_LIST)
5081#undef FIELD_LIST
5082
5083 private:
5084 DISALLOW_COPY_AND_ASSIGN(DispatchTableCallInstr);
5085};
5086
// Identity comparison of two values. When `needs_number_check_` is set,
// numbers (see FIELD_LIST comment) are compared by value instead of by
// identity. Pure and never deoptimizes.
// NOTE(review): accepted Token::Kind values (presumably kEQ_STRICT /
// kNE_STRICT) are enforced in the out-of-line constructor — confirm in
// il.cc.
5087class StrictCompareInstr : public TemplateComparison<2, NoThrow, Pure> {
5088 public:
5089 StrictCompareInstr(const InstructionSource& source,
5090 Token::Kind kind,
5091 Value* left,
5092 Value* right,
5093 bool needs_number_check,
5094 intptr_t deopt_id);
5095
5096 DECLARE_COMPARISON_INSTRUCTION(StrictCompare)
5097
5098 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
5099
5100 virtual CompileType ComputeType() const;
5101
5102 virtual bool ComputeCanDeoptimize() const { return false; }
5103
5104 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5105
5106 bool needs_number_check() const { return needs_number_check_; }
5107 void set_needs_number_check(bool value) { needs_number_check_ = value; }
5108
5109 bool AttributesEqual(const Instruction& other) const;
5110
5112
5113#define FIELD_LIST(F) \
5114 /* True if the comparison must check for double or Mint and */ \
5115 /* use value comparison instead. */ \
5116 F(bool, needs_number_check_)
5117
5118 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StrictCompareInstr,
5119 TemplateComparison,
5120 FIELD_LIST)
5121#undef FIELD_LIST
5122
5123 private:
 // Code-generation helpers for comparing a register against a constant.
5124 Condition EmitComparisonCodeRegConstant(FlowGraphCompiler* compiler,
5125 BranchLabels labels,
5126 Register reg,
5127 const Object& obj);
 // Attempts to emit a cheap boolean test; on success returns true and
 // stores the resulting condition in *condition_out.
5128 bool TryEmitBoolTest(FlowGraphCompiler* compiler,
5129 BranchLabels labels,
5130 intptr_t input_index,
5131 const Object& obj,
5132 Condition* condition_out);
5133
5134 DISALLOW_COPY_AND_ASSIGN(StrictCompareInstr);
5135};
5136
5137// Comparison instruction that is equivalent to the (left & right) == 0
5138// comparison pattern.
// Inputs stay tagged (Smi bitwise-and); pure, never deoptimizes.
5139class TestSmiInstr : public TemplateComparison<2, NoThrow, Pure> {
5140 public:
5141 TestSmiInstr(const InstructionSource& source,
5142 Token::Kind kind,
5143 Value* left,
5144 Value* right)
5145 : TemplateComparison(source, kind) {
 // Only equality/inequality forms of the mask test are supported.
5146 ASSERT(kind == Token::kEQ || kind == Token::kNE);
5147 SetInputAt(0, left);
5148 SetInputAt(1, right);
5149 }
5150
5152
5153 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
5154
5155 virtual CompileType ComputeType() const;
5156
5157 virtual bool ComputeCanDeoptimize() const { return false; }
5158
5159 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
5160 return kTagged;
5161 }
5162
 // No fields beyond the base class, hence empty serialization.
5163 DECLARE_EMPTY_SERIALIZATION(TestSmiInstr, TemplateComparison)
5164
5165 private:
5166 DISALLOW_COPY_AND_ASSIGN(TestSmiInstr);
5167};
5168
5169// Checks the input value cid against cids stored in a table and returns either
5170// a result or deoptimizes. If the cid is not in the list and there is a deopt
5171// id, then the instruction deoptimizes. If there is no deopt id, all the
5172// results must be the same (all true or all false) and the instruction returns
5173// the opposite for cids not on the list. The first element in the table must
5174// always be the result for the Smi class-id and is allowed to differ from the
5175// other results even in the no-deopt case.
5176class TestCidsInstr : public TemplateComparison<1, NoThrow, Pure> {
5177 public:
5178 TestCidsInstr(const InstructionSource& source,
5179 Token::Kind kind,
5180 Value* value,
5181 const ZoneGrowableArray<intptr_t>& cid_results,
5182 intptr_t deopt_id);
5183
 // NOTE(review): the exact layout of cid_results_ (cid/result interleaving)
 // is defined by the out-of-line constructor and codegen — confirm in il.cc.
5184 const ZoneGrowableArray<intptr_t>& cid_results() const {
5185 return cid_results_;
5186 }
5187
5189
5190 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
5191
5192 virtual CompileType ComputeType() const;
5193
5194 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5195
 // Can deoptimize only when a real deopt id was supplied (see class
 // comment above for the no-deopt behavior).
5196 virtual bool ComputeCanDeoptimize() const {
5197 return GetDeoptId() != DeoptId::kNone;
5198 }
5199
5200 Value* value() const { return inputs_[0]; }
5201 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
5202 return kTagged;
5203 }
5204
5205 virtual bool AttributesEqual(const Instruction& other) const;
5206
5208
5209#define FIELD_LIST(F) F(const ZoneGrowableArray<intptr_t>&, cid_results_)
5210
5212 TemplateComparison,
5213 FIELD_LIST)
5214#undef FIELD_LIST
5215
5216 private:
5217 DISALLOW_COPY_AND_ASSIGN(TestCidsInstr);
5218};
5219
// Comparison that tests whether the (unboxed or tagged, per
// value_representation_) input lies within the [lower_, upper_] range.
// NOTE(review): bound inclusivity is defined by the backend
// implementation — confirm in the architecture-specific il_*.cc.
5220class TestRangeInstr : public TemplateComparison<1, NoThrow, Pure> {
5221 public:
5222 TestRangeInstr(const InstructionSource& source,
5223 Value* value,
5224 uword lower,
5225 uword upper,
5226 Representation value_representation);
5227
5229
5230 uword lower() const { return lower_; }
5231 uword upper() const { return upper_; }
5232
5233 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
5234
5235 virtual CompileType ComputeType() const;
5236
5237 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5238
5239 virtual bool ComputeCanDeoptimize() const { return false; }
5240
5241 Value* value() const { return inputs_[0]; }
 // The input must arrive in the representation fixed at construction.
5242 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
5243 return value_representation_;
5244 }
5245
5246 virtual bool AttributesEqual(const Instruction& other) const;
5247
5249
5250#define FIELD_LIST(F) \
5251 F(const uword, lower_) \
5252 F(const uword, upper_) \
5253 F(const Representation, value_representation_)
5254
5256 TemplateComparison,
5257 FIELD_LIST)
5258#undef FIELD_LIST
5259
5260 private:
5261 DISALLOW_COPY_AND_ASSIGN(TestRangeInstr);
5262};
5263
// Equality comparison (an equality Token::Kind, asserted below) specialized
// to a known operand class id (`operation_cid`). When `null_aware_` is set
// the inputs stay tagged so null can participate in the comparison.
5264class EqualityCompareInstr : public TemplateComparison<2, NoThrow, Pure> {
5265 public:
5266 EqualityCompareInstr(const InstructionSource& source,
5267 Token::Kind kind,
5268 Value* left,
5269 Value* right,
5270 intptr_t cid,
5271 intptr_t deopt_id,
5272 bool null_aware = false,
5273 SpeculativeMode speculative_mode = kGuardInputs)
5274 : TemplateComparison(source, kind, deopt_id),
5275 null_aware_(null_aware),
5276 speculative_mode_(speculative_mode) {
5277 ASSERT(Token::IsEqualityOperator(kind));
5278 SetInputAt(0, left);
5279 SetInputAt(1, right);
5280 set_operation_cid(cid);
5281 }
5282
5283 DECLARE_COMPARISON_INSTRUCTION(EqualityCompare)
5284
5285 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
5286
5287 virtual CompileType ComputeType() const;
5288
5289 virtual bool ComputeCanDeoptimize() const { return false; }
5290
5291 bool is_null_aware() const { return null_aware_; }
5292 void set_null_aware(bool value) { null_aware_ = value; }
5293
 // Input representation is derived from the operation cid; null-aware
 // comparisons always take tagged inputs.
5294 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
5295 ASSERT((idx == 0) || (idx == 1));
5296 if (is_null_aware()) return kTagged;
5297 if (operation_cid() == kDoubleCid) return kUnboxedDouble;
5298 if (operation_cid() == kMintCid) return kUnboxedInt64;
5299 if (operation_cid() == kIntegerCid) return kUnboxedUword;
5300 return kTagged;
5301 }
5302
5303 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
5304 return speculative_mode_;
5305 }
5306
 // Equality for CSE purposes must also match null-awareness and
 // speculation mode, not just the base comparison attributes.
5307 virtual bool AttributesEqual(const Instruction& other) const {
5308 return ComparisonInstr::AttributesEqual(other) &&
5309 (null_aware_ == other.AsEqualityCompare()->null_aware_) &&
5310 (speculative_mode_ == other.AsEqualityCompare()->speculative_mode_);
5311 }
5312
5313 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5314
5316
5317#define FIELD_LIST(F) \
5318 F(bool, null_aware_) \
5319 F(const SpeculativeMode, speculative_mode_)
5320
5321 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(EqualityCompareInstr,
5322 TemplateComparison,
5323 FIELD_LIST)
5324#undef FIELD_LIST
5325
5326 private:
5327 DISALLOW_COPY_AND_ASSIGN(EqualityCompareInstr);
5328};
5329
// Relational comparison (a relational Token::Kind, asserted below)
// specialized to a known operand class id (`operation_cid`).
5330class RelationalOpInstr : public TemplateComparison<2, NoThrow, Pure> {
5331 public:
5332 RelationalOpInstr(const InstructionSource& source,
5333 Token::Kind kind,
5334 Value* left,
5335 Value* right,
5336 intptr_t cid,
5337 intptr_t deopt_id,
5338 SpeculativeMode speculative_mode = kGuardInputs)
5339 : TemplateComparison(source, kind, deopt_id),
5340 speculative_mode_(speculative_mode) {
5341 ASSERT(Token::IsRelationalOperator(kind));
5342 SetInputAt(0, left);
5343 SetInputAt(1, right);
5344 set_operation_cid(cid);
5345 }
5346
5347 DECLARE_COMPARISON_INSTRUCTION(RelationalOp)
5348
5349 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
5350
5351 virtual CompileType ComputeType() const;
5352
5353 virtual bool ComputeCanDeoptimize() const { return false; }
5354
 // Input representation is derived from the operation cid (unboxed for
 // double/Mint operands, tagged otherwise).
5355 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
5356 ASSERT((idx == 0) || (idx == 1));
5357 if (operation_cid() == kDoubleCid) return kUnboxedDouble;
5358 if (operation_cid() == kMintCid) return kUnboxedInt64;
5359 return kTagged;
5360 }
5361
5362 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
5363 return speculative_mode_;
5364 }
5365
 // CSE equality must also match the speculation mode.
5366 virtual bool AttributesEqual(const Instruction& other) const {
5367 return ComparisonInstr::AttributesEqual(other) &&
5368 (speculative_mode_ == other.AsRelationalOp()->speculative_mode_);
5369 }
5370
5372
5373#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
5374
5376 TemplateComparison,
5377 FIELD_LIST)
5378#undef FIELD_LIST
5379
5380 private:
5381 DISALLOW_COPY_AND_ASSIGN(RelationalOpInstr);
5382};
5383
5384// TODO(vegorov): ComparisonInstr should be switched to use IfTheElseInstr for
5385// materialization of true and false constants.
// Selects between two Smi constants (`if_true_` / `if_false_`) based on the
// outcome of an owned comparison instruction. The comparison's inputs become
// this instruction's inputs; most queries are forwarded to the comparison.
5386class IfThenElseInstr : public Definition {
5387 public:
5388 IfThenElseInstr(ComparisonInstr* comparison,
5389 Value* if_true,
5390 Value* if_false,
5391 intptr_t deopt_id)
5392 : Definition(deopt_id),
5393 comparison_(comparison),
 // Both branch values must be bound Smi constants; only their raw
 // integer values are stored.
5394 if_true_(Smi::Cast(if_true->BoundConstant()).Value()),
5395 if_false_(Smi::Cast(if_false->BoundConstant()).Value()) {
5396 // Adjust uses at the comparison.
5397 ASSERT(comparison->env() == nullptr);
 // Re-point the comparison's input uses at this instruction, since it
 // now owns the comparison.
5398 for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
5399 comparison->InputAt(i)->set_instruction(this);
5400 }
5401 }
5402
5403 // Returns true if this combination of comparison and values flowing on
5404 // the true and false paths is supported on the current platform.
5405 static bool Supports(ComparisonInstr* comparison, Value* v1, Value* v2);
5406
5407 DECLARE_INSTRUCTION(IfThenElse)
5408
 // Inputs are the owned comparison's inputs.
5409 intptr_t InputCount() const { return comparison()->InputCount(); }
5410
5411 Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }
5412
5413 virtual bool ComputeCanDeoptimize() const {
5414 return comparison()->ComputeCanDeoptimize();
5415 }
5416
5417 virtual bool CanBecomeDeoptimizationTarget() const {
5418 return comparison()->CanBecomeDeoptimizationTarget();
5419 }
5420
5421 virtual intptr_t DeoptimizationTarget() const {
5422 return comparison()->DeoptimizationTarget();
5423 }
5424
5425 virtual Representation RequiredInputRepresentation(intptr_t i) const {
5426 return comparison()->RequiredInputRepresentation(i);
5427 }
5428
5429 virtual CompileType ComputeType() const;
5430
5431 virtual void InferRange(RangeAnalysis* analysis, Range* range);
5432
5433 ComparisonInstr* comparison() const { return comparison_; }
5434 intptr_t if_true() const { return if_true_; }
5435 intptr_t if_false() const { return if_false_; }
5436
5437 virtual bool AllowsCSE() const { return comparison()->AllowsCSE(); }
5438 virtual bool HasUnknownSideEffects() const {
5439 return comparison()->HasUnknownSideEffects();
5440 }
5441 virtual bool CanCallDart() const { return comparison()->CanCallDart(); }
5442
 // Equal only when the comparisons match (by tag and attributes) and both
 // constant outcomes agree.
5443 virtual bool AttributesEqual(const Instruction& other) const {
5444 auto const other_if_then_else = other.AsIfThenElse();
5445 return (comparison()->tag() == other_if_then_else->comparison()->tag()) &&
5446 comparison()->AttributesEqual(*other_if_then_else->comparison()) &&
5447 (if_true_ == other_if_then_else->if_true_) &&
5448 (if_false_ == other_if_then_else->if_false_);
5449 }
5450
5451 virtual bool MayThrow() const { return comparison()->MayThrow(); }
5452
 // Keep the owned comparison's deopt id in sync with this instruction's.
5453 virtual void CopyDeoptIdFrom(const Instruction& instr) {
5454 Definition::CopyDeoptIdFrom(instr);
5455 comparison()->CopyDeoptIdFrom(instr);
5456 }
5457
5459
5460#define FIELD_LIST(F) \
5461 F(ComparisonInstr*, comparison_) \
5462 F(const intptr_t, if_true_) \
5463 F(const intptr_t, if_false_)
5464
5466 Definition,
5467 FIELD_LIST)
5468#undef FIELD_LIST
5470
5471 private:
 // Input mutation is delegated to the owned comparison as well.
5472 virtual void RawSetInputAt(intptr_t i, Value* value) {
5473 comparison()->RawSetInputAt(i, value);
5474 }
5475
5476 DISALLOW_COPY_AND_ASSIGN(IfThenElseInstr);
5477};
5478
// Call to a statically known target function. Carries optional ICData
// (for call counts), an optionally inferred/known result type, the code
// entry kind, and an alias identity for load/store optimizations.
5479class StaticCallInstr : public TemplateDartCall<0> {
5480 public:
 // Constructor that looks up ICData for this deopt id in `ic_data_array`.
5481 StaticCallInstr(const InstructionSource& source,
5482 const Function& function,
5483 intptr_t type_args_len,
5484 const Array& argument_names,
5485 InputsArray&& arguments,
5486 const ZoneGrowableArray<const ICData*>& ic_data_array,
5487 intptr_t deopt_id,
5488 ICData::RebindRule rebind_rule)
5489 : TemplateDartCall(deopt_id,
5490 type_args_len,
5491 argument_names,
5492 std::move(arguments),
5493 source),
5494 ic_data_(GetICData(ic_data_array, deopt_id, /*is_static_call=*/true)),
5495 call_count_(0),
5496 function_(function),
5497 rebind_rule_(rebind_rule),
5498 result_type_(nullptr),
5499 is_known_list_constructor_(false),
5500 entry_kind_(Code::EntryKind::kNormal),
5501 identity_(AliasIdentity::Unknown()) {
5502 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
5503 ASSERT(!function.IsNull());
5504 }
5505
 // Constructor without ICData; an explicit call count is supplied instead.
5506 StaticCallInstr(const InstructionSource& source,
5507 const Function& function,
5508 intptr_t type_args_len,
5509 const Array& argument_names,
5510 InputsArray&& arguments,
5511 intptr_t deopt_id,
5512 intptr_t call_count,
5513 ICData::RebindRule rebind_rule)
5514 : TemplateDartCall(deopt_id,
5515 type_args_len,
5516 argument_names,
5517 std::move(arguments),
5518 source),
5519 ic_data_(nullptr),
5520 call_count_(call_count),
5521 function_(function),
5522 rebind_rule_(rebind_rule),
5523 result_type_(nullptr),
5524 is_known_list_constructor_(false),
5525 entry_kind_(Code::EntryKind::kNormal),
5526 identity_(AliasIdentity::Unknown()) {
5527 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
5528 ASSERT(!function.IsNull());
5529 }
5530
5531 // Generate a replacement call instruction for an instance call which
5532 // has been found to have only one target.
5533 template <class C>
5534 static StaticCallInstr* FromCall(Zone* zone,
5535 const C* call,
5536 const Function& target,
5537 intptr_t call_count) {
5538 ASSERT(!call->HasMoveArguments());
 // Deep-copy argument values (with their types) into the new call.
5539 InputsArray args(zone, call->ArgumentCount());
5540 for (intptr_t i = 0; i < call->ArgumentCount(); i++) {
5541 args.Add(call->ArgumentValueAt(i)->CopyWithType());
5542 }
5543 StaticCallInstr* new_call = new (zone) StaticCallInstr(
5544 call->source(), target, call->type_args_len(), call->argument_names(),
5545 std::move(args), call->deopt_id(), call_count, ICData::kNoRebind);
5546 if (call->result_type() != nullptr) {
5547 new_call->result_type_ = call->result_type();
5548 }
5549 new_call->set_entry_kind(call->entry_kind());
5550 return new_call;
5551 }
5552
5553 // ICData for static calls carries call count.
5554 const ICData* ic_data() const { return ic_data_; }
5555 bool HasICData() const {
5556 return (ic_data() != nullptr) && !ic_data()->IsNull();
5557 }
5558
5559 void set_ic_data(const ICData* value) { ic_data_ = value; }
5560
5561 DECLARE_INSTRUCTION(StaticCall)
5562 DECLARE_ATTRIBUTE(&function())
5563
5564 virtual CompileType ComputeType() const;
5565 virtual Definition* Canonicalize(FlowGraph* flow_graph);
 // Constant-folding entry points: evaluate the call on one or two constant
 // arguments; returns true on success with *result filled in.
5566 bool Evaluate(FlowGraph* flow_graph, const Object& argument, Object* result);
5567 bool Evaluate(FlowGraph* flow_graph,
5568 const Object& argument1,
5569 const Object& argument2,
5570 Object* result);
5571
5572 // Accessors forwarded to the AST node.
5573 const Function& function() const { return function_; }
5574
 // Prefer the ICData aggregate count when available, else the explicit
 // count supplied at construction.
5575 virtual intptr_t CallCount() const {
5576 return ic_data() == nullptr ? call_count_ : ic_data()->AggregateCount();
5577 }
5578
5579 virtual bool ComputeCanDeoptimize() const { return false; }
5580 virtual bool ComputeCanDeoptimizeAfterCall() const {
5581 return !CompilerState::Current().is_aot();
5582 }
5583
5584 virtual bool CanBecomeDeoptimizationTarget() const {
5585 // Static calls that are specialized by the optimizer (e.g. sqrt) need a
5586 // deoptimization descriptor before the call.
5587 return true;
5588 }
5589
5590 virtual bool HasUnknownSideEffects() const { return true; }
5591 virtual bool CanCallDart() const { return true; }
5592
5593 // Initialize result type of this call instruction if target is a recognized
5594 // method or has pragma annotation.
5595 // Returns true on success, false if result type is still unknown.
5596 bool InitResultType(Zone* zone);
5597
5598 void SetResultType(Zone* zone, CompileType new_type) {
5599 result_type_ = new (zone) CompileType(new_type);
5600 }
5601
5602 CompileType* result_type() const { return result_type_; }
5603
5604 intptr_t result_cid() const {
5605 if (result_type_ == nullptr) {
5606 return kDynamicCid;
5607 }
5608 return result_type_->ToCid();
5609 }
5610
5611 bool is_known_list_constructor() const { return is_known_list_constructor_; }
5612 void set_is_known_list_constructor(bool value) {
5613 is_known_list_constructor_ = value;
5614 }
5615
5616 Code::EntryKind entry_kind() const { return entry_kind_; }
5617
5618 void set_entry_kind(Code::EntryKind value) { entry_kind_ = value; }
5619
5620 bool IsRecognizedFactory() const { return is_known_list_constructor(); }
5621
5622 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
 // Skip the leading type-arguments input (present for generic calls and
 // factories) before consulting the target's unboxed-parameter metadata.
5623 if (type_args_len() > 0 || function().IsFactory()) {
5624 if (idx == 0) {
5625 return kGuardInputs;
5626 }
5627 idx--;
5628 }
5629 return function_.is_unboxed_parameter_at(idx) ? kNotSpeculative
5630 : kGuardInputs;
5631 }
5632
5633 virtual intptr_t ArgumentsSize() const;
5634
5635 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
5636
5637 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
5638
5639 virtual Representation representation() const;
5640
5641 virtual AliasIdentity Identity() const { return identity_; }
5642 virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
5643
 // Lazily computed call-target / binary-operation feedback (cached in the
 // private fields below; not serialized).
5644 const CallTargets& Targets();
5645 const class BinaryFeedback& BinaryFeedback();
5646
5648
5649#define FIELD_LIST(F) \
5650 F(const ICData*, ic_data_) \
5651 F(const intptr_t, call_count_) \
5652 F(const Function&, function_) \
5653 F(const ICData::RebindRule, rebind_rule_) \
5654 /* Known or inferred result type. */ \
5655 F(CompileType*, result_type_) \
5656 /* 'True' for recognized list constructors. */ \
5657 F(bool, is_known_list_constructor_) \
5658 F(Code::EntryKind, entry_kind_) \
5659 F(AliasIdentity, identity_)
5660
5662 TemplateDartCall,
5663 FIELD_LIST)
5664#undef FIELD_LIST
5665
5666 private:
 // Caches for Targets() / BinaryFeedback(); excluded from FIELD_LIST.
5667 const CallTargets* targets_ = nullptr;
5668 const class BinaryFeedback* binary_ = nullptr;
5669
5670 DISALLOW_COPY_AND_ASSIGN(StaticCallInstr);
5671};
5672
5673// A call to a function which has no side effects and of which the result can
5674// be cached.
5675//
5676// The arguments flowing into this call must be const.
5677//
5678// The result is cached in the pool. Hence this instruction is not supported
5679// on IA32.
5680class CachableIdempotentCallInstr : public TemplateDartCall<0> {
5681 public:
5682 // Instead of inputs to this IL instruction we should pass a
5683 // `GrowableArray<const Object&>` and only push & pop them in the slow path.
5684 // (Right now the inputs are eagerly pushed and therefore have to be also
5685 // poped on the fast path.)
5686 CachableIdempotentCallInstr(const InstructionSource& source,
5687 Representation representation,
5688 const Function& function,
5689 intptr_t type_args_len,
5690 const Array& argument_names,
5691 InputsArray&& arguments,
5692 intptr_t deopt_id);
5693
5694 DECLARE_INSTRUCTION(CachableIdempotentCall)
5695
5696 const Function& function() const { return function_; }
5697
5698 virtual Definition* Canonicalize(FlowGraph* flow_graph);
5699
5700 virtual bool MayCreateUnsafeUntaggedPointer() const {
5701 // Either this is a pragma-annotated function, in which case the result
5702 // is not an untagged address, or it's a call to the FFI resolver, in
5703 // which case the returned value is not GC-movable.
5704 return false;
5705 }
5706
5707 virtual bool ComputeCanDeoptimize() const { return false; }
5708
5709 virtual bool ComputeCanDeoptimizeAfterCall() const { return false; }
5710
5711 virtual bool CanBecomeDeoptimizationTarget() const { return false; }
5712
5713 virtual bool HasUnknownSideEffects() const { return true; }
5714
5715 virtual bool CanCallDart() const { return true; }
5716
5717 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
 // Skip the leading type-arguments input (if any) before consulting the
 // target's unboxed-parameter metadata.
5718 if (type_args_len() > 0) {
5719 if (idx == 0) {
5720 return kGuardInputs;
5721 }
5722 idx--;
5723 }
5724 return function_.is_unboxed_parameter_at(idx) ? kNotSpeculative
5725 : kGuardInputs;
5726 }
5727
5728 virtual intptr_t ArgumentsSize() const;
5729
5730 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
5731
 // The result representation is fixed at construction time.
5732 virtual Representation representation() const { return representation_; }
5733
5734 virtual AliasIdentity Identity() const { return identity_; }
5735 virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
5736
5738
5739#define FIELD_LIST(F) \
5740 F(const Representation, representation_) \
5741 F(const Function&, function_) \
5742 F(AliasIdentity, identity_)
5743
5744 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CachableIdempotentCallInstr,
5745 TemplateDartCall,
5746 FIELD_LIST)
5747#undef FIELD_LIST
5748
5749 private:
5750 DISALLOW_COPY_AND_ASSIGN(CachableIdempotentCallInstr);
5751};
5752
// Load of a local variable. Exists only in unoptimized (pre-SSA) graphs —
// eliminated by SSA construction (see HasUnknownSideEffects below).
5753class LoadLocalInstr : public TemplateDefinition<0, NoThrow> {
5754 public:
5755 LoadLocalInstr(const LocalVariable& local, const InstructionSource& source)
5756 : TemplateDefinition(source),
5757 local_(local),
5758 is_last_(false),
5759 token_pos_(source.token_pos) {}
5760
5761 DECLARE_INSTRUCTION(LoadLocal)
5762 virtual CompileType ComputeType() const;
5763
5764 const LocalVariable& local() const { return local_; }
5765
5766 virtual bool ComputeCanDeoptimize() const { return false; }
5767
5768 virtual bool HasUnknownSideEffects() const {
5769 UNREACHABLE(); // Eliminated by SSA construction.
5770 return false;
5771 }
5772
 // Marks the last load of this local (set externally by the graph builder;
 // queried via is_last()).
5773 void mark_last() { is_last_ = true; }
5774 bool is_last() const { return is_last_; }
5775
5776 virtual TokenPosition token_pos() const { return token_pos_; }
5777
5779
5780#define FIELD_LIST(F) \
5781 F(const LocalVariable&, local_) \
5782 F(bool, is_last_) \
5783 F(const TokenPosition, token_pos_)
5784
5786 TemplateDefinition,
5787 FIELD_LIST)
5788#undef FIELD_LIST
5789
5790 private:
5791 DISALLOW_COPY_AND_ASSIGN(LoadLocalInstr);
5792};
5793
// Drops `num_temps_` entries from the expression stack, optionally
// forwarding a single value (the optional input). Pre-SSA only (see
// HasUnknownSideEffects below).
5794class DropTempsInstr : public Definition {
5795 public:
5796 DropTempsInstr(intptr_t num_temps, Value* value)
5797 : num_temps_(num_temps), has_input_(value != nullptr) {
5798 if (has_input_) {
5799 SetInputAt(0, value);
5800 }
5801 }
5802
5803 DECLARE_INSTRUCTION(DropTemps)
5804
 // Zero or one input, depending on whether a value is forwarded.
5805 virtual intptr_t InputCount() const { return has_input_ ? 1 : 0; }
5806 virtual Value* InputAt(intptr_t i) const {
5807 ASSERT(has_input_ && (i == 0));
5808 return value_;
5809 }
5810
5811 Value* value() const { return value_; }
5812
5813 intptr_t num_temps() const { return num_temps_; }
5814
5815 virtual CompileType ComputeType() const;
5816
5817 virtual bool ComputeCanDeoptimize() const { return false; }
5818
5819 virtual bool HasUnknownSideEffects() const {
5820 UNREACHABLE(); // Eliminated by SSA construction.
5821 return false;
5822 }
5823
5824 virtual bool MayThrow() const { return false; }
5825
5826 virtual TokenPosition token_pos() const { return TokenPosition::kTempMove; }
5827
5829
5830#define FIELD_LIST(F) \
5831 F(const intptr_t, num_temps_) \
5832 F(const bool, has_input_)
5833
5835 Definition,
5836 FIELD_LIST)
5837#undef FIELD_LIST
5838
5839 private:
 // The optional input is stored out-of-line (not in an inputs_ array),
 // hence the custom RawSetInputAt/InputAt.
5840 virtual void RawSetInputAt(intptr_t i, Value* value) {
5841 ASSERT(has_input_);
5842 value_ = value;
5843 }
5844
5845 Value* value_ = nullptr;
5846
5847 DISALLOW_COPY_AND_ASSIGN(DropTempsInstr);
5848};
5849
5850// This instruction is used to reserve a space on the expression stack
5851// that later would be filled with StoreLocal. Reserved space would be
5852// filled with a null value initially.
5853//
5854// Note: One must not use Constant(#null) to reserve expression stack space
5855// because it would lead to an incorrectly compiled unoptimized code. Graph
5856// builder would set Constant(#null) as an input definition to the instruction
5857// that consumes this value from the expression stack - not knowing that
5858// this value represents a placeholder - which might lead issues if instruction
5859// has specialization for constant inputs (see https://dartbug.com/33195).
5860class MakeTempInstr : public TemplateDefinition<0, NoThrow, Pure> {
5861 public:
5862 explicit MakeTempInstr(Zone* zone)
5863 : null_(new(zone) ConstantInstr(Object::ZoneHandle())) {
5864 // Note: We put ConstantInstr inside MakeTemp to simplify code generation:
5865 // having ConstantInstr allows us to use Location::Constant(null_) as an
5866 // output location for this instruction.
5867 }
5868
5869 DECLARE_INSTRUCTION(MakeTemp)
5870
5871 virtual bool ComputeCanDeoptimize() const { return false; }
5872
5873 virtual bool HasUnknownSideEffects() const {
5874 UNREACHABLE(); // Eliminated by SSA construction.
5875 return false;
5876 }
5877
5878 virtual bool MayThrow() const { return false; }
5879
5880 virtual TokenPosition token_pos() const { return TokenPosition::kTempMove; }
5881
5883
 // The embedded null constant used as this instruction's output location.
5884#define FIELD_LIST(F) F(ConstantInstr*, null_)
5885
5887 TemplateDefinition,
5888 FIELD_LIST)
5889#undef FIELD_LIST
5891
5892 private:
5893 DISALLOW_COPY_AND_ASSIGN(MakeTempInstr);
5894};
5895
// Store to a local variable. Exists only in unoptimized (pre-SSA) graphs —
// eliminated by SSA construction (see HasUnknownSideEffects below).
// is_dead_/is_last_ are flags mutated externally via mark_dead()/mark_last().
5896class StoreLocalInstr : public TemplateDefinition<1, NoThrow> {
5897 public:
5898 StoreLocalInstr(const LocalVariable& local,
5899 Value* value,
5900 const InstructionSource& source)
5901 : TemplateDefinition(source),
5902 local_(local),
5903 is_dead_(false),
5904 is_last_(false),
5905 token_pos_(source.token_pos) {
5906 SetInputAt(0, value);
5907 }
5908
5909 DECLARE_INSTRUCTION(StoreLocal)
5910 virtual CompileType ComputeType() const;
5911
5912 const LocalVariable& local() const { return local_; }
5913 Value* value() const { return inputs_[0]; }
5914
5915 virtual bool ComputeCanDeoptimize() const { return false; }
5916
5917 void mark_dead() { is_dead_ = true; }
5918 bool is_dead() const { return is_dead_; }
5919
5920 void mark_last() { is_last_ = true; }
5921 bool is_last() const { return is_last_; }
5922
5923 virtual bool HasUnknownSideEffects() const {
5924 UNREACHABLE(); // Eliminated by SSA construction.
5925 return false;
5926 }
5927
5928 virtual TokenPosition token_pos() const { return token_pos_; }
5929
5931
5932#define FIELD_LIST(F) \
5933 F(const LocalVariable&, local_) \
5934 F(bool, is_dead_) \
5935 F(bool, is_last_) \
5936 F(const TokenPosition, token_pos_)
5937
5939 TemplateDefinition,
5940 FIELD_LIST)
5941#undef FIELD_LIST
5942
5943 private:
5944 DISALLOW_COPY_AND_ASSIGN(StoreLocalInstr);
5945};
5946
// A call to a native (C) function through the Dart native-call mechanism.
// ArgumentCount() includes one extra slot used as the return-value
// placeholder (see the +1 in the constructor's assertion below).
5947class NativeCallInstr : public TemplateDartCall<0> {
5948 public:
5949 NativeCallInstr(const String& name,
5950 const Function& function,
5951 bool link_lazily,
5952 const InstructionSource& source,
5953 InputsArray&& args)
5954 : TemplateDartCall(DeoptId::kNone,
5955 0,
5956 Array::null_array(),
5957 std::move(args),
5958 source),
5959 native_name_(name),
5960 function_(function),
5961 token_pos_(source.token_pos),
5962 link_lazily_(link_lazily) {
5963 DEBUG_ASSERT(name.IsNotTemporaryScopedHandle());
5964 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
5965 // +1 for return value placeholder.
5966 ASSERT(ArgumentCount() ==
5967 function.NumParameters() + (function.IsGeneric() ? 1 : 0) + 1);
5968 }
5969
5970 DECLARE_INSTRUCTION(NativeCall)
5971
5972 const String& native_name() const { return native_name_; }
5973 const Function& function() const { return function_; }
5974 NativeFunction native_c_function() const { return native_c_function_; }
5975 bool is_bootstrap_native() const { return is_bootstrap_native_; }
5976 bool is_auto_scope() const { return is_auto_scope_; }
5977 bool link_lazily() const { return link_lazily_; }
5978 virtual TokenPosition token_pos() const { return token_pos_; }
5979
5980 virtual bool ComputeCanDeoptimize() const { return false; }
5981
5982 virtual bool HasUnknownSideEffects() const { return true; }
5983
5984 // Always creates an exit frame before more Dart code can be called.
5985 virtual bool CanCallDart() const { return false; }
5986
  // Presumably resolves the native entry and fills in the non-serialized
  // fields below via the private setters — confirm in il.cc.
5987 void SetupNative();
5988
5990
5991#define FIELD_LIST(F) \
5992 F(const String&, native_name_) \
5993 F(const Function&, function_) \
5994 F(const TokenPosition, token_pos_)
5995
  // NOTE(review): numbering gaps (5989, 5996) — lines dropped by this
  // listing; the lines below continue a serializable-fields macro invocation.
5997 TemplateDartCall,
5998 FIELD_LIST)
5999#undef FIELD_LIST
6000
6001 private:
6002 void set_native_c_function(NativeFunction value) {
6003 native_c_function_ = value;
6004 }
6005
6006 void set_is_bootstrap_native(bool value) { is_bootstrap_native_ = value; }
6007 void set_is_auto_scope(bool value) { is_auto_scope_ = value; }
6008
6009 // These fields are not serialized.
6010 // IL serialization only supports lazy linking of native functions.
6011 NativeFunction native_c_function_ = nullptr;
6012 bool is_bootstrap_native_ = false;
6013 bool is_auto_scope_ = true;
6014 bool link_lazily_ = true;
6015
6016 DISALLOW_COPY_AND_ASSIGN(NativeCallInstr);
6017};
6018
6019// Performs a call to native C code. In contrast to NativeCall, the arguments
6020// are unboxed and passed through the native calling convention. However, not
6021// all dart objects can be passed as arguments. Please see the FFI documentation
6022// for more details.
6023//
6024// Arguments to FfiCallInstr:
6025// - The arguments to the native call, marshalled in IL as far as possible.
6026// - The argument address.
6027// - A TypedData for the return value to populate in machine code (optional).
6028class FfiCallInstr : public VariadicDefinition {
6029 public:
6030 FfiCallInstr(intptr_t deopt_id,
6031 const compiler::ffi::CallMarshaller& marshaller,
6032 bool is_leaf,
6033 InputsArray&& inputs)
6034 : VariadicDefinition(std::move(inputs), deopt_id),
6035 marshaller_(marshaller),
6036 is_leaf_(is_leaf) {
6037#if defined(DEBUG)
6038 ASSERT_EQUAL(InputCount(), InputCountForMarshaller(marshaller));
6039 // No argument to an FfiCall should be an unsafe untagged pointer,
6040 // including the target address.
6041 for (intptr_t i = 0; i < InputCount(); i++) {
6042 ASSERT(!InputAt(i)->definition()->MayCreateUnsafeUntaggedPointer());
6043 }
6044#endif
6045 }
6046
6047 DECLARE_INSTRUCTION(FfiCall)
6048
6049 // Input index of the function pointer to invoke.
6050 intptr_t TargetAddressIndex() const {
6051 return marshaller_.NumArgumentDefinitions();
6052 }
6053
6054 // Input index of the typed data to populate if return value is struct.
6055 intptr_t CompoundReturnTypedDataIndex() const {
6056 ASSERT(marshaller_.ReturnsCompound());
6057 return marshaller_.NumArgumentDefinitions() + 1;
6058 }
6059
6060 virtual bool MayThrow() const {
6061 // By Dart_PropagateError.
6062 return true;
6063 }
6064
6065 virtual bool MayCreateUnsafeUntaggedPointer() const {
6066 // The only case where we have an untagged result is when the return
6067 // value is a pointer, which is then stored in a newly allocated FFI
6068 // Pointer object by the generated IL, so the C code must return an
6069 // external (not GC-movable) address to Dart.
6070 return false;
6071 }
6072
6073 // FfiCallInstr calls C code, which can call back into Dart.
6074 virtual bool ComputeCanDeoptimize() const { return false; }
6075 virtual bool ComputeCanDeoptimizeAfterCall() const {
6076 return !CompilerState::Current().is_aot();
6077 }
6078
6079 virtual bool HasUnknownSideEffects() const { return true; }
6080
6081 // Always creates an exit frame before more Dart code can be called.
6082 virtual bool CanCallDart() const { return false; }
6083
6084 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
6085 virtual Representation representation() const;
6086
6087 // Returns true if we can assume generated code will be executable during a
6088 // safepoint.
6089 //
6090 // TODO(#37739): This should be true when dual-mapping is enabled as well, but
6091 // there are some bugs where it still switches code protections currently.
6092 static bool CanExecuteGeneratedCodeInSafepoint() {
6093 return FLAG_precompiled_mode;
6094 }
6095
  // Total inputs = argument definitions + target address (+ the compound
  // return TypedData when the marshaller returns a struct by value).
6096 static intptr_t InputCountForMarshaller(
6097 const compiler::ffi::CallMarshaller& marshaller) {
6098 return marshaller.NumArgumentDefinitions() + 1 +
6099 (marshaller.ReturnsCompound() ? 1 : 0);
6100 }
6101
6103
6104#define FIELD_LIST(F) \
6105 F(const compiler::ffi::CallMarshaller&, marshaller_) \
6106 F(bool, is_leaf_)
6107
  // NOTE(review): numbering gaps (6102, 6108) — lines dropped by this
  // listing; the lines below continue a serializable-fields macro invocation.
6109 VariadicDefinition,
6110 FIELD_LIST)
6111#undef FIELD_LIST
6112
6113 private:
6114 LocationSummary* MakeLocationSummaryInternal(Zone* zone,
6115 bool is_optimizing,
6116 const RegList temps) const;
6117
6118 // Clobbers the first two given registers.
6119 // `saved_fp` is used as the frame base to rebase off of.
6120 // `temp1` is only used in case of PointerToMemoryLocation.
6121 void EmitParamMoves(FlowGraphCompiler* compiler,
6122 const Register saved_fp,
6123 const Register temp0,
6124 const Register temp1);
6125 // Clobbers both given temp registers.
6126 void EmitReturnMoves(FlowGraphCompiler* compiler,
6127 const Register temp0,
6128 const Register temp1);
6129
6130 DISALLOW_COPY_AND_ASSIGN(FfiCallInstr);
6131};
6132
6133// Has the target address in a register passed as the last input in IL.
6134class LeafRuntimeCallInstr : public VariadicDefinition {
6135 public:
  // Factory; the constructor (private, below) additionally takes the native
  // calling convention, which Make presumably computes — see il.cc.
6136 static LeafRuntimeCallInstr* Make(
6137 Zone* zone,
6138 Representation return_representation,
6139 const ZoneGrowableArray<Representation>& argument_representations,
6140 InputsArray&& inputs);
6141
6142 DECLARE_INSTRUCTION(LeafRuntimeCall)
6143
6144 LocationSummary* MakeLocationSummaryInternal(Zone* zone,
6145 const RegList temps) const;
6146
6147 // Input index of the function pointer to invoke.
6148 intptr_t TargetAddressIndex() const {
6149 return argument_representations_.length();
6150 }
6151
6152 virtual bool MayThrow() const { return false; }
6153
6154 virtual bool ComputeCanDeoptimize() const { return false; }
6155
6156 virtual bool HasUnknownSideEffects() const { return true; }
6157
6158 virtual bool CanCallDart() const { return false; }
6159
6160 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6161 if (idx < argument_representations_.length()) {
6162 return argument_representations_.At(idx);
6163 }
  // The only input past the arguments is the target address itself.
6164 ASSERT_EQUAL(idx, TargetAddressIndex());
6165 return kUntagged;
6166 }
6167
6168 virtual bool MayCreateUnsafeUntaggedPointer() const {
6169 if (representation() != kUntagged) return false;
6170 // Returns true iff any of the inputs to the target may be an unsafe
6171 // untagged pointer.
6172 //
6173 // This assumes that the inputs to the target function are only used during
6174 // the dynamic extent of the call and not cached/stored somehow.
6175 for (intptr_t i = 0; i < TargetAddressIndex(); i++) {
6176 if (InputAt(i)->definition()->MayCreateUnsafeUntaggedPointer()) {
6177 return true;
6178 }
6179 }
6180 return false;
6181 }
6182
6183 virtual Representation representation() const {
6184 return return_representation_;
6185 }
6186
6187 virtual CompileType ComputeType() const {
6188 return RepresentationUtils::IsUnboxed(representation())
6189 ? CompileType::FromUnboxedRepresentation(representation())
6190 : CompileType::Object();
6191 }
6192
6193 void EmitParamMoves(FlowGraphCompiler* compiler,
6194 Register saved_fp,
6195 Register temp0);
6196
6198
  // NOTE(review): numbering gap (6197) — a line dropped by this listing,
  // likely print support.
6199 DECLARE_CUSTOM_SERIALIZATION(LeafRuntimeCallInstr)
6200
6201 private:
6202 LeafRuntimeCallInstr(
6203 Representation return_representation,
6204 const ZoneGrowableArray<Representation>& argument_representations,
6205 const compiler::ffi::NativeCallingConvention& native_calling_convention,
6206 InputsArray&& inputs);
6207
6208 // Serialized in the custom serializer.
6209 const Representation return_representation_;
6210 const ZoneGrowableArray<Representation>& argument_representations_;
6211 // Not serialized.
6212 const compiler::ffi::NativeCallingConvention& native_calling_convention_;
6213 DISALLOW_COPY_AND_ASSIGN(LeafRuntimeCallInstr);
6214};
6215
// A check placed at [source] so execution can be intercepted there (e.g. for
// single-stepping); stub_kind_ selects the kind of PC descriptor recorded.
// NOTE(review): purpose inferred from the name and field types — confirm.
6216class DebugStepCheckInstr : public TemplateInstruction<0, NoThrow> {
6217 public:
6218 DebugStepCheckInstr(const InstructionSource& source,
6219 UntaggedPcDescriptors::Kind stub_kind,
6220 intptr_t deopt_id)
6221 : TemplateInstruction(source, deopt_id),
6222 token_pos_(source.token_pos),
6223 stub_kind_(stub_kind) {}
6224
6225 DECLARE_INSTRUCTION(DebugStepCheck)
6226
6227 virtual TokenPosition token_pos() const { return token_pos_; }
6228 virtual bool ComputeCanDeoptimize() const { return false; }
6229 virtual bool HasUnknownSideEffects() const { return true; }
6230 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
6231
6232#define FIELD_LIST(F) \
6233 F(const TokenPosition, token_pos_) \
6234 F(const UntaggedPcDescriptors::Kind, stub_kind_)
6235
6236 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DebugStepCheckInstr,
6237 TemplateInstruction,
6238 FIELD_LIST)
6239#undef FIELD_LIST
6240
6241 private:
6242 DISALLOW_COPY_AND_ASSIGN(DebugStepCheckInstr);
6243};
6244
// Classifies whether an access involves an untagged pointer that may be an
// inner pointer into a GC-managed object.
// NOTE(review): the enumerator lines were dropped by this listing (numbering
// gap 6246-6248); uses later in this file reference kNotUntagged,
// kMayBeInnerPointer, and kCannotBeInnerPointer.
6245enum class InnerPointerAccess {
6249};
6250
6252
6253// StoreField instruction represents a store of the given [value] into
6254// the specified [slot] on the [instance] object. [emit_store_barrier] allows to
6255// specify whether the store should omit the write barrier. [kind] specifies
6256// whether this store is an initializing store, i.e. the first store into a
6257// field after the allocation.
6258//
6259// In JIT mode a slot might be a subject to the field unboxing optimization:
6260// if field type profiling shows that this slot always contains a double or SIMD
6261// value then this field becomes "unboxed" - in this case when storing into
6262// such field we update the payload of the box referenced by the field, rather
6263// than updating the field itself.
6264//
6265// Note: even if [emit_store_barrier] is set to [kEmitStoreBarrier] the store
6266// can still omit the barrier if it establishes that it is not needed.
6267//
6268// Note: stores generated from the constructor initializer list and from
6269// field initializers *must* be marked as initializing. Initializing stores
6270// into unboxed fields are responsible for allocating the mutable box which
6271// would be mutated by subsequent stores.
6272//
6273// Note: If the value to store is an unboxed derived pointer (e.g. pointer to
6274// start of internal typed data array backing) then this instruction cannot be
6275// moved across instructions which can trigger GC, to ensure that
6276//
6277// LoadUntagged + Arithmetic + StoreField
6278//
6279// are performed as an effectively atomic set of instructions.
6280//
6281// See kernel_to_il.cc:BuildTypedDataViewFactoryConstructor.
6282class StoreFieldInstr : public TemplateInstruction<2, NoThrow> {
6283 public:
6284 enum class Kind {
6285 // Store is known to be the first store into a slot of an object after
6286 // object was allocated and before it escapes (e.g. stores in constructor
6287 // initializer list).
6288 kInitializing,
6289
6290 // All other stores.
6291 kOther,
6292 };
6293
6294 StoreFieldInstr(const Slot& slot,
6295 Value* instance,
6296 Value* value,
6297 StoreBarrierType emit_store_barrier,
6298 InnerPointerAccess stores_inner_pointer,
6299 const InstructionSource& source,
6300 Kind kind = Kind::kOther,
6301 compiler::Assembler::MemoryOrder memory_order =
6302 compiler::Assembler::kRelaxedNonAtomic)
6303 : TemplateInstruction(source),
6304 slot_(slot),
6305 emit_store_barrier_(emit_store_barrier),
6306 memory_order_(memory_order),
6307 token_pos_(source.token_pos),
6308 is_initialization_(kind == Kind::kInitializing),
6309 stores_inner_pointer_(stores_inner_pointer) {
  // Sanity-check that the inner-pointer classification is consistent with
  // the slot's representation.
6310 switch (stores_inner_pointer) {
6311 case InnerPointerAccess::kNotUntagged:
6312 ASSERT(slot.representation() != kUntagged);
6313 break;
6314 case InnerPointerAccess::kMayBeInnerPointer:
6315 ASSERT(slot.representation() == kUntagged);
6316 ASSERT(slot.may_contain_inner_pointer());
6317 break;
6318 case InnerPointerAccess::kCannotBeInnerPointer:
6319 ASSERT(slot.representation() == kUntagged);
6320 break;
6321 }
6322 SetInputAt(kInstancePos, instance);
6323 SetInputAt(kValuePos, value);
6324 }
6325
6326 // Convenience constructor for slots not containing an untagged address.
6327 StoreFieldInstr(const Slot& slot,
6328 Value* instance,
6329 Value* value,
6330 StoreBarrierType emit_store_barrier,
6331 const InstructionSource& source,
6332 Kind kind = Kind::kOther,
6333 compiler::Assembler::MemoryOrder memory_order =
6334 compiler::Assembler::kRelaxedNonAtomic)
6335 : StoreFieldInstr(slot,
6336 instance,
6337 value,
6338 emit_store_barrier,
  // NOTE(review): line 6339 was dropped by this listing; by parameter
  // position it is the InnerPointerAccess argument (kNotUntagged for this
  // non-untagged overload) — confirm against the original file.
6340 source,
6341 kind,
6342 memory_order) {}
6343
6344 // Convenience constructor that looks up an IL Slot for the given [field].
6345 StoreFieldInstr(const Field& field,
6346 Value* instance,
6347 Value* value,
6348 StoreBarrierType emit_store_barrier,
6349 const InstructionSource& source,
6350 const ParsedFunction* parsed_function,
6351 Kind kind = Kind::kOther)
6352 : StoreFieldInstr(Slot::Get(field, parsed_function),
6353 instance,
6354 value,
6355 emit_store_barrier,
6356 source,
6357 kind) {}
6358
6359 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
6360 // Slots are unboxed based on statically inferrable type information.
6361 // Either sound non-nullable static types (JIT) or global type flow analysis
6362 // results (AOT).
6363 return slot().representation() != kTagged ? kNotSpeculative : kGuardInputs;
6364 }
6365
6366 DECLARE_INSTRUCTION(StoreField)
6367 DECLARE_ATTRIBUTES_NAMED(("slot", "is_initialization"),
6368 (&slot(), is_initialization()))
6369
6370 enum { kInstancePos = 0, kValuePos = 1 };
6371
6372 Value* instance() const { return inputs_[kInstancePos]; }
6373 const Slot& slot() const { return slot_; }
6374 Value* value() const { return inputs_[kValuePos]; }
6375
6376 virtual TokenPosition token_pos() const { return token_pos_; }
6377 bool is_initialization() const { return is_initialization_; }
6378
  // Whether the backend must emit a GC write barrier for this store; each
  // early-out below names a case where the barrier is provably unnecessary.
6379 bool ShouldEmitStoreBarrier() const {
6380 if (slot().has_untagged_instance()) {
6381 // The instance is not a Dart object, so not traversed by the GC.
6382 return false;
6383 }
6384 if (slot().representation() != kTagged) {
6385 // The target field is native and unboxed, so not traversed by the GC.
6386 return false;
6387 }
6388 if (instance()->definition() == value()->definition()) {
6389 // `x.slot = x` cannot create an old->new or old&marked->old&unmarked
6390 // reference.
6391 return false;
6392 }
6393
  // NOTE(review): bool values are exempted — presumably because true/false
  // are canonical objects that need no barrier; confirm.
6394 if (value()->definition()->Type()->IsBool()) {
6395 return false;
6396 }
6397 return value()->NeedsWriteBarrier() &&
6398 (emit_store_barrier_ == kEmitStoreBarrier);
6399 }
6400
6401 void set_emit_store_barrier(StoreBarrierType value) {
6402 emit_store_barrier_ = value;
6403 }
6404
6405 InnerPointerAccess stores_inner_pointer() const {
6406 return stores_inner_pointer_;
6407 }
6408 void set_stores_inner_pointer(InnerPointerAccess value) {
6409 // We should never change this for a non-untagged field.
6410 ASSERT(stores_inner_pointer_ != InnerPointerAccess::kNotUntagged);
6411 // We only convert from may to cannot, never the other direction.
6412 ASSERT(value == InnerPointerAccess::kCannotBeInnerPointer);
6413 stores_inner_pointer_ = value;
6414 }
6415
6416 virtual bool CanTriggerGC() const { return false; }
6417
6418 virtual bool ComputeCanDeoptimize() const { return false; }
6419
6420 // May require a deoptimization target for input conversions.
6421 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
6422
6423 // Currently CSE/LICM don't operate on any instructions that can be affected
6424 // by stores/loads. LoadOptimizer handles loads separately. Hence stores
6425 // are marked as having no side-effects.
6426 virtual bool HasUnknownSideEffects() const { return false; }
6427
6428 virtual bool MayHaveVisibleEffect() const { return true; }
6429
6430 virtual Representation RequiredInputRepresentation(intptr_t index) const;
6431
6432 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
6433
6435
6436#define FIELD_LIST(F) \
6437 F(const Slot&, slot_) \
6438 F(StoreBarrierType, emit_store_barrier_) \
6439 F(compiler::Assembler::MemoryOrder, memory_order_) \
6440 F(const TokenPosition, token_pos_) \
6441 /* Marks initializing stores. E.g. in the constructor. */ \
6442 F(const bool, is_initialization_) \
6443 F(InnerPointerAccess, stores_inner_pointer_)
6444
  // NOTE(review): numbering gaps (6434, 6445) — lines dropped by this
  // listing; the lines below continue a serializable-fields macro invocation.
6446 TemplateInstruction,
6447 FIELD_LIST)
6448#undef FIELD_LIST
6449
6450 private:
6451 friend class JitCallSpecializer; // For ASSERT(initialization_).
6452
6453 intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }
6454
6455 compiler::Assembler::CanBeSmi CanValueBeSmi() const {
6456 // Write barrier is skipped for nullable and non-nullable smis.
6457 ASSERT(value()->Type()->ToNullableCid() != kSmiCid);
6458 return value()->Type()->CanBeSmi() ? compiler::Assembler::kValueCanBeSmi
6459 : compiler::Assembler::kValueIsNotSmi;
6460 }
6461
6462 DISALLOW_COPY_AND_ASSIGN(StoreFieldInstr);
6463};
6464
// Abstract base for the field-guard instructions below (class/length/type):
// checks the stored [value] against [field]'s guarded state and deoptimizes
// when the guard fails (ComputeCanDeoptimize() returns true). Subclasses
// supply DECLARE_INSTRUCTION and the AttributesEqual comparison.
6465class GuardFieldInstr : public TemplateInstruction<1, NoThrow, Pure> {
6466 public:
6467 GuardFieldInstr(Value* value, const Field& field, intptr_t deopt_id)
6468 : TemplateInstruction(deopt_id), field_(field) {
6469 SetInputAt(0, value);
6470 CheckField(field);
6471 }
6472
6473 Value* value() const { return inputs_[0]; }
6474
6475 const Field& field() const { return field_; }
6476
6477 virtual bool ComputeCanDeoptimize() const { return true; }
6478 virtual bool CanBecomeDeoptimizationTarget() const {
6479 // Ensure that we record kDeopt PC descriptor in unoptimized code.
6480 return true;
6481 }
6482
6484
6485#define FIELD_LIST(F) F(const Field&, field_)
6486
  // NOTE(review): numbering gaps (6483, 6487) — lines dropped by this
  // listing; the lines below continue a serializable-fields macro invocation.
6488 TemplateInstruction,
6489 FIELD_LIST)
6490#undef FIELD_LIST
6491
6492 private:
6493 DISALLOW_COPY_AND_ASSIGN(GuardFieldInstr);
6494};
6495
// Guards the class-id aspect of [field]'s guarded state for stored values
// (name-based; see Canonicalize/AttributesEqual in il.cc for the details).
6496class GuardFieldClassInstr : public GuardFieldInstr {
6497 public:
6498 GuardFieldClassInstr(Value* value, const Field& field, intptr_t deopt_id)
6499 : GuardFieldInstr(value, field, deopt_id) {
6500 CheckField(field);
6501 }
6502
6503 DECLARE_INSTRUCTION(GuardFieldClass)
6504
6505 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
6506
6507 virtual bool AttributesEqual(const Instruction& other) const;
6508
  // No extra state beyond GuardFieldInstr, hence empty serialization.
6509 DECLARE_EMPTY_SERIALIZATION(GuardFieldClassInstr, GuardFieldInstr)
6510
6511 private:
6512 DISALLOW_COPY_AND_ASSIGN(GuardFieldClassInstr);
6513};
6514
// Guards the length aspect of [field]'s guarded state for stored values
// (name-based; see Canonicalize/AttributesEqual in il.cc for the details).
6515class GuardFieldLengthInstr : public GuardFieldInstr {
6516 public:
6517 GuardFieldLengthInstr(Value* value, const Field& field, intptr_t deopt_id)
6518 : GuardFieldInstr(value, field, deopt_id) {
6519 CheckField(field);
6520 }
6521
6522 DECLARE_INSTRUCTION(GuardFieldLength)
6523
6524 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
6525
6526 virtual bool AttributesEqual(const Instruction& other) const;
6527
  // No extra state beyond GuardFieldInstr, hence empty serialization.
6528 DECLARE_EMPTY_SERIALIZATION(GuardFieldLengthInstr, GuardFieldInstr)
6529
6530 private:
6531 DISALLOW_COPY_AND_ASSIGN(GuardFieldLengthInstr);
6532};
6533
6534// For a field of static type G<T0, ..., Tn> and a stored value of runtime
6535// type T checks that type arguments of T at G exactly match <T0, ..., Tn>
6536// and updates guarded state (UntaggedField::static_type_exactness_state_)
6537// accordingly.
6538//
6539// See StaticTypeExactnessState for more information.
6540class GuardFieldTypeInstr : public GuardFieldInstr {
6541 public:
6542 GuardFieldTypeInstr(Value* value, const Field& field, intptr_t deopt_id)
6543 : GuardFieldInstr(value, field, deopt_id) {
6544 CheckField(field);
6545 }
6546
6547 DECLARE_INSTRUCTION(GuardFieldType)
6548
6549 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
6550
6551 virtual bool AttributesEqual(const Instruction& other) const;
6552
  // No extra state beyond GuardFieldInstr, hence empty serialization.
6553 DECLARE_EMPTY_SERIALIZATION(GuardFieldTypeInstr, GuardFieldInstr)
6554
6555 private:
6556 DISALLOW_COPY_AND_ASSIGN(GuardFieldTypeInstr);
6557};
6558
// Shared base for field loads with N inputs (e.g. LoadStaticField below)
// that may need to run a field initializer on the slow path, or throw when
// an initializer-less late field is read uninitialized.
6559template <intptr_t N>
6560class TemplateLoadField : public TemplateDefinition<N, Throws> {
6561 using Base = TemplateDefinition<N, Throws>;
6562
6563 public:
6564 TemplateLoadField(const InstructionSource& source,
6565 bool calls_initializer = false,
6566 intptr_t deopt_id = DeoptId::kNone,
6567 const Field* field = nullptr)
6568 : Base(source, deopt_id),
6569 token_pos_(source.token_pos),
  // A late field without an initializer has nothing to run — reading it
  // uninitialized throws instead (see throw_exception_on_initialization()).
6570 throw_exception_on_initialization_(
6571 field != nullptr && !field->has_initializer() && field->is_late()),
6572 calls_initializer_(calls_initializer) {
6573 ASSERT(!calls_initializer || field != nullptr);
6574 ASSERT(!calls_initializer || (deopt_id != DeoptId::kNone));
6575 }
6576
6577 virtual TokenPosition token_pos() const { return token_pos_; }
6578 bool calls_initializer() const { return calls_initializer_; }
6579 void set_calls_initializer(bool value) { calls_initializer_ = value; }
6580
6581 bool throw_exception_on_initialization() const {
6582 return throw_exception_on_initialization_;
6583 }
6584
6585 // Slow path is used if load throws exception on initialization.
6586 virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
6587 return Base::SlowPathSharingSupported(is_optimizing);
6588 }
6589
6590 virtual intptr_t DeoptimizationTarget() const { return Base::GetDeoptId(); }
6591 virtual bool ComputeCanDeoptimize() const { return false; }
6592 virtual bool ComputeCanDeoptimizeAfterCall() const {
6593 return calls_initializer() && !CompilerState::Current().is_aot();
6594 }
6595 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
6596 return Base::InputCount();
6597 }
6598
6599 virtual bool HasUnknownSideEffects() const {
6600 return calls_initializer() && !throw_exception_on_initialization();
6601 }
6602
6603 virtual bool CanCallDart() const {
6604 // The slow path (running the field initializer) always calls one of a
6605 // specific set of stubs. For those stubs that do not simply call the
6606 // runtime, the GC recognizes their frames and restores write barriers
6607 // automatically (see Thread::RestoreWriteBarrierInvariant).
6608 return false;
6609 }
6610 virtual bool CanTriggerGC() const { return calls_initializer(); }
6611 virtual bool MayThrow() const { return calls_initializer(); }
6612
6613#define FIELD_LIST(F) \
6614 F(const TokenPosition, token_pos_) \
6615 F(const bool, throw_exception_on_initialization_) \
6616 F(bool, calls_initializer_)
6617
6618 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TemplateLoadField, Base, FIELD_LIST)
6619#undef FIELD_LIST
6620
6621 private:
6622 DISALLOW_COPY_AND_ASSIGN(TemplateLoadField);
6623};
6624
// Loads the value of the given static [field], optionally running its
// initializer on the slow path — see TemplateLoadField for the shared
// late/initializer semantics.
6625class LoadStaticFieldInstr : public TemplateLoadField<0> {
6626 public:
6627 LoadStaticFieldInstr(const Field& field,
6628 const InstructionSource& source,
6629 bool calls_initializer = false,
6630 intptr_t deopt_id = DeoptId::kNone)
6631 : TemplateLoadField<0>(source, calls_initializer, deopt_id, &field),
6632 field_(field) {}
6633
6634 DECLARE_INSTRUCTION(LoadStaticField)
6635
6636 virtual CompileType ComputeType() const;
6637
6638 const Field& field() const { return field_; }
6639
6640 virtual bool AllowsCSE() const {
6641 // If two loads of a static-final-late field call the initializer and one
6642 // dominates another, we can remove the dominated load with the result of
6643 // the dominating load.
6644 //
6645 // Though if the field is final-late there can be stores into it via
6646 // load/compare-with-sentinel/store. Those loads have
6647 // `!field().has_initializer()` and we won't allow CSE for them.
6648 return field().is_final() &&
6649 (!field().is_late() || field().has_initializer());
6650 }
6651
6652 virtual bool AttributesEqual(const Instruction& other) const;
6653
6655
  // NOTE(review): numbering gap (6654) — a line dropped by this listing,
  // likely print support.
6656#define FIELD_LIST(F) F(const Field&, field_)
6657
6658 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadStaticFieldInstr,
6659 TemplateLoadField,
6660 FIELD_LIST)
6661#undef FIELD_LIST
6662
6663 private:
6664 DISALLOW_COPY_AND_ASSIGN(LoadStaticFieldInstr);
6665};
6666
// Stores [value] into the given static [field].
6667class StoreStaticFieldInstr : public TemplateDefinition<1, NoThrow> {
6668 public:
6669 StoreStaticFieldInstr(const Field& field,
6670 Value* value,
6671 const InstructionSource& source)
6672 : TemplateDefinition(source),
6673 field_(field),
6674 token_pos_(source.token_pos) {
6675 DEBUG_ASSERT(field.IsNotTemporaryScopedHandle());
6676 SetInputAt(kValuePos, value);
6677 CheckField(field);
6678 }
6679
6680 enum { kValuePos = 0 };
6681
6682 DECLARE_INSTRUCTION(StoreStaticField)
6683
6684 const Field& field() const { return field_; }
6685 Value* value() const { return inputs_[kValuePos]; }
6686
6687 virtual bool ComputeCanDeoptimize() const { return false; }
6688
6689 // Currently CSE/LICM don't operate on any instructions that can be affected
6690 // by stores/loads. LoadOptimizer handles loads separately. Hence stores
6691 // are marked as having no side-effects.
6692 virtual bool HasUnknownSideEffects() const { return false; }
6693
6694 virtual bool MayHaveVisibleEffect() const { return true; }
6695
6696 virtual TokenPosition token_pos() const { return token_pos_; }
6697
6699
6700#define FIELD_LIST(F) \
6701 F(const Field&, field_) \
6702 F(const TokenPosition, token_pos_)
6703
  // NOTE(review): numbering gap (6698) — a line dropped by this listing,
  // likely print support.
6704 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreStaticFieldInstr,
6705 TemplateDefinition,
6706 FIELD_LIST)
6707#undef FIELD_LIST
6708
6709 private:
  // Mirrors StoreFieldInstr::CanValueBeSmi(); the caller must have ruled
  // out known smi cids first (see the assert).
6710 compiler::Assembler::CanBeSmi CanValueBeSmi() const {
6711 ASSERT(value()->Type()->ToNullableCid() != kSmiCid);
6712 return value()->Type()->CanBeSmi() ? compiler::Assembler::kValueCanBeSmi
6713 : compiler::Assembler::kValueIsNotSmi;
6714 }
6715
6716 DISALLOW_COPY_AND_ASSIGN(StoreStaticFieldInstr);
6717};
6718
// Whether an indexed memory access is known to be aligned.
// NOTE(review): the enumerator lines were dropped by this listing (numbering
// gap 6720-6721); LoadIndexedInstr::aligned() below references
// kAlignedAccess.
6719enum AlignmentType {
6722};
6723
// Loads an element from an array-like object with cid [class_id] at
// [index], scaled by [index_scale]. The result representation depends on
// the array cid (see ReturnRepresentation below).
6724class LoadIndexedInstr : public TemplateDefinition<2, NoThrow> {
6725 public:
6726 LoadIndexedInstr(Value* array,
6727 Value* index,
6728 bool index_unboxed,
6729 intptr_t index_scale,
6730 intptr_t class_id,
6731 AlignmentType alignment,
6732 intptr_t deopt_id,
6733 const InstructionSource& source,
6734 CompileType* result_type = nullptr);
6735
6736 enum { kArrayPos = 0, kIndexPos = 1 };
6737
6738 TokenPosition token_pos() const { return token_pos_; }
6739
6740 DECLARE_INSTRUCTION(LoadIndexed)
6741 virtual CompileType ComputeType() const;
6742 virtual bool RecomputeType();
6743
6744 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6745 // The array may be tagged or untagged (for external arrays).
6746 if (idx == kArrayPos) return kNoRepresentation;
6747 ASSERT_EQUAL(idx, kIndexPos);
6748 return index_unboxed_ ? kUnboxedIntPtr : kTagged;
6749 }
6750
6751 bool IsUntagged() const {
6752 return array()->definition()->representation() == kUntagged;
6753 }
6754
6755 Value* array() const { return inputs_[kArrayPos]; }
6756 Value* index() const { return inputs_[kIndexPos]; }
6757 intptr_t index_scale() const { return index_scale_; }
6758 intptr_t class_id() const { return class_id_; }
6759 bool aligned() const { return alignment_ == kAlignedAccess; }
6760
6761 virtual intptr_t DeoptimizationTarget() const {
6762 // Direct access since this instruction cannot deoptimize, and the deopt-id
6763 // was inherited from another instruction that could deoptimize.
6764 return GetDeoptId();
6765 }
6766
6767 virtual bool ComputeCanDeoptimize() const { return false; }
6768
6769 // The representation returned by LoadIndexed for arrays with the given cid.
6770 // May not match the representation for the element returned by
6771 // RepresentationUtils::RepresentationOfArrayElement.
6772 static Representation ReturnRepresentation(intptr_t array_cid);
6773
6774 Representation representation() const {
6775 return ReturnRepresentation(class_id());
6776 }
6777
6778 virtual void InferRange(RangeAnalysis* analysis, Range* range);
6779
6780 virtual bool HasUnknownSideEffects() const { return false; }
6781
6782 virtual Definition* Canonicalize(FlowGraph* flow_graph);
6783
6785
6786#define FIELD_LIST(F) \
6787 F(const bool, index_unboxed_) \
6788 F(const intptr_t, index_scale_) \
6789 F(const intptr_t, class_id_) \
6790 F(const AlignmentType, alignment_) \
6791 F(const TokenPosition, token_pos_) \
6792 /* derived from call */ \
6793 F(CompileType*, result_type_)
6794
  // NOTE(review): numbering gaps (6784, 6795) — lines dropped by this
  // listing; the lines below continue a serializable-fields macro invocation.
6796 TemplateDefinition,
6797 FIELD_LIST)
6798#undef FIELD_LIST
6799
6800 private:
6801 DISALLOW_COPY_AND_ASSIGN(LoadIndexedInstr);
6802};
6803
6804// Loads the specified number of code units from the given string, packing
6805// multiple code units into a single datatype. In essence, this is a specialized
6806// version of LoadIndexedInstr which accepts only string targets and can load
6807// multiple elements at once. The result datatype differs depending on the
6808// string type, element count, and architecture; if possible, the result is
6809// packed into a Smi, falling back to a Mint otherwise.
6810// TODO(zerny): Add support for loading into UnboxedInt32x4.
6811class LoadCodeUnitsInstr : public TemplateDefinition<2, NoThrow> {
6812 public:
6813 LoadCodeUnitsInstr(Value* str,
6814 Value* index,
6815 intptr_t element_count,
6816 intptr_t class_id,
6817 const InstructionSource& source)
6818 : TemplateDefinition(source),
6819 class_id_(class_id),
6820 token_pos_(source.token_pos),
6821 element_count_(element_count),
6822 representation_(kTagged) {
6823 ASSERT(element_count == 1 || element_count == 2 || element_count == 4);
6824 ASSERT(IsStringClassId(class_id));
6825 SetInputAt(0, str);
6826 SetInputAt(1, index);
6827 }
6828
6829 TokenPosition token_pos() const { return token_pos_; }
6830
6831 DECLARE_INSTRUCTION(LoadCodeUnits)
6832 virtual CompileType ComputeType() const;
6833
6834 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6835 if (idx == 0) {
6836 // The string may be tagged or untagged (for external strings).
6837 return kNoRepresentation;
6838 }
6839 ASSERT(idx == 1);
6840 return kTagged;
6841 }
6842
6843 bool IsExternal() const {
6844 return array()->definition()->representation() == kUntagged;
6845 }
6846
6847 Value* array() const { return inputs_[0]; }
6848 Value* index() const { return inputs_[1]; }
6849
  // Bytes per code unit for the target string class.
6850 intptr_t index_scale() const {
6851 return compiler::target::Instance::ElementSizeFor(class_id_);
6852 }
6853
6854 intptr_t class_id() const { return class_id_; }
6855 intptr_t element_count() const { return element_count_; }
6856
  // True when all loaded code units fit into a Smi on the target.
6857 bool can_pack_into_smi() const {
6858 return element_count() <=
6859 compiler::target::kSmiBits / (index_scale() * kBitsPerByte);
6860 }
6861
6862 virtual bool ComputeCanDeoptimize() const { return false; }
6863
6864 virtual Representation representation() const { return representation_; }
6865 void set_representation(Representation repr) { representation_ = repr; }
6866 virtual void InferRange(RangeAnalysis* analysis, Range* range);
6867
6868 virtual bool HasUnknownSideEffects() const { return false; }
6869
  // May allocate (a Mint — see the class comment) when the result cannot be
  // packed into a Smi and stays tagged.
6870 virtual bool CanTriggerGC() const {
6871 return !can_pack_into_smi() && (representation() == kTagged);
6872 }
6873
6874#define FIELD_LIST(F) \
6875 F(const intptr_t, class_id_) \
6876 F(const TokenPosition, token_pos_) \
6877 F(const intptr_t, element_count_) \
6878 F(Representation, representation_)
6879
6880 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadCodeUnitsInstr,
6881 TemplateDefinition,
6882 FIELD_LIST)
6883#undef FIELD_LIST
6884
6885 private:
6886 DISALLOW_COPY_AND_ASSIGN(LoadCodeUnitsInstr);
6887};
6888
// Produces a one-byte string from a single character code. Pure: no side
// effects and cannot deoptimize, so instances are freely reorderable.
class OneByteStringFromCharCodeInstr
    : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  explicit OneByteStringFromCharCodeInstr(Value* char_code) {
    SetInputAt(0, char_code);
  }

  DECLARE_INSTRUCTION(OneByteStringFromCharCode)
  virtual CompileType ComputeType() const;

  Value* char_code() const { return inputs_[0]; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  // No auxiliary state: two instances with congruent inputs are congruent.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  DECLARE_EMPTY_SERIALIZATION(OneByteStringFromCharCodeInstr,
                              TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(OneByteStringFromCharCodeInstr);
};
6911
// Produces the character code of the given string input. `cid` is the class
// id of the string and is part of this instruction's identity for CSE.
class StringToCharCodeInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  StringToCharCodeInstr(Value* str, intptr_t cid) : cid_(cid) {
    ASSERT(str != nullptr);
    SetInputAt(0, str);
  }

  DECLARE_INSTRUCTION(StringToCharCode)
  virtual CompileType ComputeType() const;

  Value* str() const { return inputs_[0]; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  // Congruent only to other StringToCharCode instructions on the same cid.
  virtual bool AttributesEqual(const Instruction& other) const {
    return other.AsStringToCharCode()->cid_ == cid_;
  }

#define FIELD_LIST(F) F(const intptr_t, cid_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StringToCharCodeInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(StringToCharCodeInstr);
};
6940
6941// Scanning instruction to compute the result size and decoding parameters
6942// for the UTF-8 decoder. Equivalent to:
6943//
6944// int _scan(Uint8List bytes, int start, int end, _OneByteString table,
6945// _Utf8Decoder decoder) {
6946// int size = 0;
6947// int flags = 0;
6948// for (int i = start; i < end; i++) {
6949// int t = table.codeUnitAt(bytes[i]);
6950// size += t & sizeMask;
6951// flags |= t;
6952// }
6953// decoder._scanFlags |= flags & flagsMask;
6954// return size;
6955// }
6956//
6957// under these assumptions:
6958// - The difference between start and end must be less than 2^30, since the
6959// resulting length can be twice the input length (and the result has to be in
6960// Smi range). This is guaranteed by `_Utf8Decoder.chunkSize` which is set to
6961// `65536`.
6962// - The decoder._scanFlags field is unboxed or contains a smi.
6963// - The first 128 entries of the table have the value 1.
6964class Utf8ScanInstr : public TemplateDefinition<5, NoThrow> {
6965 public:
6966 Utf8ScanInstr(Value* decoder,
6967 Value* bytes,
6968 Value* start,
6969 Value* end,
6970 Value* table,
6971 const Slot& decoder_scan_flags_field)
6972 : scan_flags_field_(decoder_scan_flags_field) {
6973 SetInputAt(0, decoder);
6974 SetInputAt(1, bytes);
6975 SetInputAt(2, start);
6976 SetInputAt(3, end);
6977 SetInputAt(4, table);
6978 }
6979
6980 DECLARE_INSTRUCTION(Utf8Scan)
6981
6982 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
6983 ASSERT(idx >= 0 || idx <= 4);
6984 // The start and end inputs are unboxed, but in smi range.
6985 if (idx == 2 || idx == 3) return kUnboxedIntPtr;
6986 return kTagged;
6987 }
6988
6989 virtual Representation representation() const { return kUnboxedIntPtr; }
6990
6991 virtual bool HasUnknownSideEffects() const { return true; }
6992 virtual bool ComputeCanDeoptimize() const { return false; }
6993 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
6994 virtual void InferRange(RangeAnalysis* analysis, Range* range);
6995
6996 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
6997 return kNotSpeculative;
6998 }
6999
7000 virtual bool AttributesEqual(const Instruction& other) const {
7001 return scan_flags_field_.Equals(other.AsUtf8Scan()->scan_flags_field_);
7002 }
7003
7004 bool IsScanFlagsUnboxed() const;
7005
7007
7008#define FIELD_LIST(F) F(const Slot&, scan_flags_field_)
7009
7011 TemplateDefinition,
7012 FIELD_LIST)
7013#undef FIELD_LIST
7014
7015 private:
7016 DISALLOW_COPY_AND_ASSIGN(Utf8ScanInstr);
7017};
7018
7019class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
7020 public:
7021 StoreIndexedInstr(Value* array,
7022 Value* index,
7023 Value* value,
7024 StoreBarrierType emit_store_barrier,
7025 bool index_unboxed,
7026 intptr_t index_scale,
7027 intptr_t class_id,
7028 AlignmentType alignment,
7029 intptr_t deopt_id,
7030 const InstructionSource& source,
7031 SpeculativeMode speculative_mode = kGuardInputs);
7032 DECLARE_INSTRUCTION(StoreIndexed)
7033
7034 enum { kArrayPos = 0, kIndexPos = 1, kValuePos = 2 };
7035
7036 Value* array() const { return inputs_[kArrayPos]; }
7037 Value* index() const { return inputs_[kIndexPos]; }
7038 Value* value() const { return inputs_[kValuePos]; }
7039
7040 intptr_t index_scale() const { return index_scale_; }
7041 intptr_t class_id() const { return class_id_; }
7042 bool aligned() const { return alignment_ == kAlignedAccess; }
7043
7044 bool ShouldEmitStoreBarrier() const {
7045 if (array()->definition() == value()->definition()) {
7046 // `x[slot] = x` cannot create an old->new or old&marked->old&unmarked
7047 // reference.
7048 return false;
7049 }
7050
7051 if (value()->definition()->Type()->IsBool()) {
7052 return false;
7053 }
7054 return value()->NeedsWriteBarrier() &&
7055 (emit_store_barrier_ == kEmitStoreBarrier);
7056 }
7057
7058 void set_emit_store_barrier(StoreBarrierType value) {
7059 emit_store_barrier_ = value;
7060 }
7061
7062 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
7063 return speculative_mode_;
7064 }
7065
7066 virtual bool ComputeCanDeoptimize() const { return false; }
7067
7068 // The value representation expected by StoreIndexed for arrays with the
7069 // given cid. May not match the representation for the element returned by
7070 // RepresentationUtils::RepresentationOfArrayElement.
7071 static Representation ValueRepresentation(intptr_t array_cid);
7072
7073 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
7074
7075 bool IsUntagged() const {
7076 return array()->definition()->representation() == kUntagged;
7077 }
7078
7079 virtual intptr_t DeoptimizationTarget() const {
7080 // Direct access since this instruction cannot deoptimize, and the deopt-id
7081 // was inherited from another instruction that could deoptimize.
7082 return GetDeoptId();
7083 }
7084
7085 virtual bool HasUnknownSideEffects() const { return false; }
7086
7087 virtual bool MayHaveVisibleEffect() const { return true; }
7088
7089 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
7090
7092
7093#define FIELD_LIST(F) \
7094 F(StoreBarrierType, emit_store_barrier_) \
7095 F(const bool, index_unboxed_) \
7096 F(const intptr_t, index_scale_) \
7097 F(const intptr_t, class_id_) \
7098 F(const AlignmentType, alignment_) \
7099 F(const TokenPosition, token_pos_) \
7100 F(const SpeculativeMode, speculative_mode_)
7101
7103 TemplateInstruction,
7104 FIELD_LIST)
7105#undef FIELD_LIST
7106
7107 private:
7108 compiler::Assembler::CanBeSmi CanValueBeSmi() const {
7109 return compiler::Assembler::kValueCanBeSmi;
7110 }
7111
7112 DISALLOW_COPY_AND_ASSIGN(StoreIndexedInstr);
7113};
7114
// Coverage instrumentation point: carries the coverage array and the index
// within it that corresponds to this source position. The actual update is
// emitted by the backend implementation of this instruction.
class RecordCoverageInstr : public TemplateInstruction<0, NoThrow> {
 public:
  RecordCoverageInstr(const Array& coverage_array,
                      intptr_t coverage_index,
                      const InstructionSource& source)
      : TemplateInstruction(source),
        coverage_array_(coverage_array),
        coverage_index_(coverage_index),
        token_pos_(source.token_pos) {}

  DECLARE_INSTRUCTION(RecordCoverage)

  virtual TokenPosition token_pos() const { return token_pos_; }
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return false; }
  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

#define FIELD_LIST(F)                                                          \
  F(const Array&, coverage_array_)                                             \
  F(const intptr_t, coverage_index_)                                           \
  F(const TokenPosition, token_pos_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(RecordCoverageInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(RecordCoverageInstr);
};
7145
// Note overridable, built-in: value ? false : true.
class BooleanNegateInstr : public TemplateDefinition<1, NoThrow> {
 public:
  explicit BooleanNegateInstr(Value* value) { SetInputAt(0, value); }

  DECLARE_INSTRUCTION(BooleanNegate)
  virtual CompileType ComputeType() const;

  Value* value() const { return inputs_[0]; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  // May simplify this negation during canonicalization (out-of-line impl).
  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DECLARE_EMPTY_SERIALIZATION(BooleanNegateInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(BooleanNegateInstr);
};
7167
// bool ? -1 : 0
class BoolToIntInstr : public TemplateDefinition<1, NoThrow> {
 public:
  explicit BoolToIntInstr(Value* value) { SetInputAt(0, value); }

  DECLARE_INSTRUCTION(BoolToInt)

  Value* value() const { return inputs_[0]; }

  // Consumes a tagged bool ...
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    return kTagged;
  }
  // ... and produces an unboxed int32 (-1 for true, 0 for false, per above).
  virtual Representation representation() const { return kUnboxedInt32; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  DECLARE_EMPTY_SERIALIZATION(BoolToIntInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoolToIntInstr);
};
7191
// int == 0 ? false : true
class IntToBoolInstr : public TemplateDefinition<1, NoThrow> {
 public:
  explicit IntToBoolInstr(Value* value) {
    // The input must already be produced as an unboxed int32.
    ASSERT(value->definition()->representation() == kUnboxedInt32);
    SetInputAt(0, value);
  }

  DECLARE_INSTRUCTION(IntToBool)
  virtual CompileType ComputeType() const;

  Value* value() const { return inputs_[0]; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    return kUnboxedInt32;
  }
  // Produces a tagged bool.
  virtual Representation representation() const { return kTagged; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  DECLARE_EMPTY_SERIALIZATION(IntToBoolInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(IntToBoolInstr);
};
7219
7220class InstanceOfInstr : public TemplateDefinition<3, Throws> {
7221 public:
7222 InstanceOfInstr(const InstructionSource& source,
7223 Value* value,
7224 Value* instantiator_type_arguments,
7225 Value* function_type_arguments,
7226 const AbstractType& type,
7227 intptr_t deopt_id)
7228 : TemplateDefinition(source, deopt_id),
7229 token_pos_(source.token_pos),
7230 type_(type) {
7231 ASSERT(!type.IsNull());
7232 SetInputAt(0, value);
7233 SetInputAt(1, instantiator_type_arguments);
7234 SetInputAt(2, function_type_arguments);
7235 }
7236
7237 DECLARE_INSTRUCTION(InstanceOf)
7238 virtual CompileType ComputeType() const;
7239
7240 Value* value() const { return inputs_[0]; }
7241 Value* instantiator_type_arguments() const { return inputs_[1]; }
7242 Value* function_type_arguments() const { return inputs_[2]; }
7243
7244 const AbstractType& type() const { return type_; }
7245 virtual TokenPosition token_pos() const { return token_pos_; }
7246
7247 virtual bool ComputeCanDeoptimize() const { return false; }
7248 virtual bool ComputeCanDeoptimizeAfterCall() const {
7249 return !CompilerState::Current().is_aot();
7250 }
7251
7252 virtual bool HasUnknownSideEffects() const { return false; }
7253
7255
7256#define FIELD_LIST(F) \
7257 F(const TokenPosition, token_pos_) \
7258 F(const AbstractType&, type_)
7259
7261 TemplateDefinition,
7262 FIELD_LIST)
7263#undef FIELD_LIST
7264
7265 private:
7266 DISALLOW_COPY_AND_ASSIGN(InstanceOfInstr);
7267};
7268
7269// Subclasses of 'AllocationInstr' must maintain the invariant that if
7270// 'WillAllocateNewOrRemembered' is true, then the result of the allocation must
7271// either reside in new space or be in the store buffer.
7272class AllocationInstr : public Definition {
7273 public:
7274 explicit AllocationInstr(const InstructionSource& source,
7275 intptr_t deopt_id = DeoptId::kNone)
7276 : Definition(source, deopt_id),
7277 token_pos_(source.token_pos),
7278 identity_(AliasIdentity::Unknown()) {}
7279
7280 virtual TokenPosition token_pos() const { return token_pos_; }
7281
7282 virtual AliasIdentity Identity() const { return identity_; }
7283 virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
7284
7285 // TODO(sjindel): Update these conditions when the incremental write barrier
7286 // is added.
7287 virtual bool WillAllocateNewOrRemembered() const = 0;
7288
7289 virtual bool MayThrow() const {
7290 // Any allocation instruction may throw an OutOfMemory error.
7291 return true;
7292 }
7293 virtual bool ComputeCanDeoptimize() const { return false; }
7294 virtual bool ComputeCanDeoptimizeAfterCall() const {
7295 // We test that allocation instructions have correct deopt environment
7296 // (which is needed in case OOM is thrown) by actually deoptimizing
7297 // optimized code in allocation slow paths.
7298 return !CompilerState::Current().is_aot();
7299 }
7300 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
7301 return InputCount();
7302 }
7303
7304 // Returns the slot in the allocated object that contains the value at the
7305 // given input position. Returns nullptr if the input position is invalid
7306 // or if the input is not stored in the object.
7307 virtual const Slot* SlotForInput(intptr_t pos) { return nullptr; }
7308
7309 // Returns the input index that has a corresponding slot which is identical to
7310 // the given slot. Returns a negative index if no such input found.
7311 intptr_t InputForSlot(const Slot& slot) {
7312 for (intptr_t i = 0; i < InputCount(); i++) {
7313 auto* const input_slot = SlotForInput(i);
7314 if (input_slot != nullptr && input_slot->IsIdentical(slot)) {
7315 return i;
7316 }
7317 }
7318 return -1;
7319 }
7320
7321 // Returns whether the allocated object has initialized fields and/or payload
7322 // elements. Override for any subclass that returns an uninitialized object.
7323 virtual bool ObjectIsInitialized() { return true; }
7324
7326
7327 DECLARE_ABSTRACT_INSTRUCTION(Allocation);
7328
7329#define FIELD_LIST(F) \
7330 F(const TokenPosition, token_pos_) \
7331 F(AliasIdentity, identity_)
7332
7334 Definition,
7335 FIELD_LIST)
7336#undef FIELD_LIST
7337
7338 private:
7339 DISALLOW_COPY_AND_ASSIGN(AllocationInstr);
7340};
7341
// Helper base for allocation instructions with a statically-known input
// count N, stored inline in an embedded array.
template <intptr_t N>
class TemplateAllocation : public AllocationInstr {
 public:
  explicit TemplateAllocation(const InstructionSource& source,
                              intptr_t deopt_id)
      : AllocationInstr(source, deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  // Non-array allocation may throw, but it doesn't have any
  // visible effects: it can be eliminated and other
  // instructions can be hoisted over.
  virtual bool MayHaveVisibleEffect() const { return false; }

  DECLARE_EMPTY_SERIALIZATION(TemplateAllocation, AllocationInstr)

 protected:
  EmbeddedArray<Value*, N> inputs_;

 private:
  friend class BranchInstr;
  friend class IfThenElseInstr;
  friend class RecordCoverageInstr;

  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
};
7369
7370class AllocateObjectInstr : public AllocationInstr {
7371 public:
7372 enum { kTypeArgumentsPos = 0 };
7373 AllocateObjectInstr(const InstructionSource& source,
7374 const Class& cls,
7375 intptr_t deopt_id,
7376 Value* type_arguments = nullptr)
7377 : AllocationInstr(source, deopt_id),
7378 cls_(cls),
7379 has_type_arguments_(type_arguments != nullptr),
7380 type_arguments_slot_(nullptr),
7381 type_arguments_(type_arguments) {
7382 DEBUG_ASSERT(cls.IsNotTemporaryScopedHandle());
7383 ASSERT(!cls.IsNull());
7384 ASSERT((cls.NumTypeArguments() > 0) == has_type_arguments_);
7385 if (has_type_arguments_) {
7386 SetInputAt(kTypeArgumentsPos, type_arguments);
7387 type_arguments_slot_ =
7388 &Slot::GetTypeArgumentsSlotFor(Thread::Current(), cls);
7389 }
7390 }
7391
7392 DECLARE_INSTRUCTION(AllocateObject)
7393 virtual CompileType ComputeType() const;
7394
7395 const Class& cls() const { return cls_; }
7396 Value* type_arguments() const { return type_arguments_; }
7397
7398 virtual intptr_t InputCount() const { return has_type_arguments_ ? 1 : 0; }
7399 virtual Value* InputAt(intptr_t i) const {
7400 ASSERT(has_type_arguments_ && i == kTypeArgumentsPos);
7401 return type_arguments_;
7402 }
7403
7404 virtual bool HasUnknownSideEffects() const { return false; }
7405
7406 // Object allocation may throw, but it doesn't have any
7407 // visible effects: it can be eliminated and other
7408 // instructions can be hoisted over.
7409 virtual bool MayHaveVisibleEffect() const { return false; }
7410
7411 virtual bool WillAllocateNewOrRemembered() const {
7412 return WillAllocateNewOrRemembered(cls());
7413 }
7414
7415 static bool WillAllocateNewOrRemembered(const Class& cls) {
7416 return IsAllocatableInNewSpace(cls.target_instance_size());
7417 }
7418
7419 virtual const Slot* SlotForInput(intptr_t pos) {
7420 return pos == kTypeArgumentsPos ? type_arguments_slot_ : nullptr;
7421 }
7422
7424
7425#define FIELD_LIST(F) \
7426 F(const Class&, cls_) \
7427 F(const bool, has_type_arguments_) \
7428 F(const Slot*, type_arguments_slot_)
7429
7430 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateObjectInstr,
7431 AllocationInstr,
7432 FIELD_LIST)
7433#undef FIELD_LIST
7434
7435 private:
7436 virtual void RawSetInputAt(intptr_t i, Value* value) {
7437 ASSERT(has_type_arguments_ && (i == kTypeArgumentsPos));
7438 type_arguments_ = value;
7439 }
7440
7441 Value* type_arguments_ = nullptr;
7442
7443 DISALLOW_COPY_AND_ASSIGN(AllocateObjectInstr);
7444};
7445
// Allocates and null initializes a closure object, given the closure function
// and the context as values.
class AllocateClosureInstr : public TemplateAllocation<3> {
 public:
  enum Inputs {
    kFunctionPos = 0,
    kContextPos = 1,
    kInstantiatorTypeArgsPos = 2,
  };
  AllocateClosureInstr(const InstructionSource& source,
                       Value* closure_function,
                       Value* context,
                       Value* instantiator_type_args,  // Optional.
                       bool is_generic,
                       bool is_tear_off,
                       intptr_t deopt_id)
      : TemplateAllocation(source, deopt_id),
        has_instantiator_type_args_(instantiator_type_args != nullptr),
        is_generic_(is_generic),
        is_tear_off_(is_tear_off) {
    SetInputAt(kFunctionPos, closure_function);
    SetInputAt(kContextPos, context);
    if (has_instantiator_type_args_) {
      SetInputAt(kInstantiatorTypeArgsPos, instantiator_type_args);
    }
  }

  DECLARE_INSTRUCTION(AllocateClosure)
  virtual CompileType ComputeType() const;

  // The instantiator-type-args input is optional, so the input count varies.
  virtual intptr_t InputCount() const {
    return has_instantiator_type_args() ? 3 : 2;
  }

  Value* closure_function() const { return inputs_[kFunctionPos]; }
  Value* context() const { return inputs_[kContextPos]; }

  bool has_instantiator_type_args() const {
    return has_instantiator_type_args_;
  }
  bool is_generic() const { return is_generic_; }
  bool is_tear_off() const { return is_tear_off_; }

  // Returns the closed-over function when it is a compile-time constant,
  // otherwise the null function.
  const Function& known_function() const {
    Value* const value = closure_function();
    if (value->BindsToConstant()) {
      ASSERT(value->BoundConstant().IsFunction());
      return Function::Cast(value->BoundConstant());
    }
    return Object::null_function();
  }

  virtual const Slot* SlotForInput(intptr_t pos) {
    switch (pos) {
      case kFunctionPos:
        return &Slot::Closure_function();
      case kContextPos:
        return &Slot::Closure_context();
      case kInstantiatorTypeArgsPos:
        return has_instantiator_type_args()
                   ? &Slot::Closure_instantiator_type_arguments()
                   : nullptr;
      default:
        return TemplateAllocation::SlotForInput(pos);
    }
  }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  // Only tear-off allocations are eligible for CSE.
  virtual bool AllowsCSE() const { return is_tear_off(); }

  virtual bool HasUnknownSideEffects() const { return false; }

  // Congruence depends on the boolean configuration flags; inputs are
  // compared separately by the CSE machinery.
  virtual bool AttributesEqual(const Instruction& other) const {
    const auto other_ac = other.AsAllocateClosure();
    return (other_ac->has_instantiator_type_args() ==
            has_instantiator_type_args()) &&
           (other_ac->is_generic() == is_generic()) &&
           (other_ac->is_tear_off() == is_tear_off());
  }

  virtual bool WillAllocateNewOrRemembered() const {
    return IsAllocatableInNewSpace(compiler::target::Closure::InstanceSize());
  }

#define FIELD_LIST(F)                                                          \
  F(const bool, has_instantiator_type_args_)                                   \
  F(const bool, is_generic_)                                                   \
  F(const bool, is_tear_off_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateClosureInstr,
                                          TemplateAllocation,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocateClosureInstr);
};
7544
7545class AllocateUninitializedContextInstr : public TemplateAllocation<0> {
7546 public:
7547 AllocateUninitializedContextInstr(const InstructionSource& source,
7548 intptr_t num_context_variables,
7549 intptr_t deopt_id);
7550
7551 DECLARE_INSTRUCTION(AllocateUninitializedContext)
7552 virtual CompileType ComputeType() const;
7553
7554 intptr_t num_context_variables() const { return num_context_variables_; }
7555
7556 virtual bool HasUnknownSideEffects() const { return false; }
7557
7558 virtual bool WillAllocateNewOrRemembered() const {
7559 return compiler::target::WillAllocateNewOrRememberedContext(
7560 num_context_variables_);
7561 }
7562
7563 virtual bool ObjectIsInitialized() { return false; }
7564
7566
7567#define FIELD_LIST(F) F(const intptr_t, num_context_variables_)
7568
7569 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateUninitializedContextInstr,
7570 TemplateAllocation,
7571 FIELD_LIST)
7572#undef FIELD_LIST
7573
7574 private:
7575 DISALLOW_COPY_AND_ASSIGN(AllocateUninitializedContextInstr);
7576};
7577
7578// Allocates and null initializes a record object.
7579class AllocateRecordInstr : public TemplateAllocation<0> {
7580 public:
7581 AllocateRecordInstr(const InstructionSource& source,
7582 RecordShape shape,
7583 intptr_t deopt_id)
7584 : TemplateAllocation(source, deopt_id), shape_(shape) {}
7585
7586 DECLARE_INSTRUCTION(AllocateRecord)
7587 virtual CompileType ComputeType() const;
7588
7589 RecordShape shape() const { return shape_; }
7590 intptr_t num_fields() const { return shape_.num_fields(); }
7591
7592 virtual bool HasUnknownSideEffects() const { return false; }
7593
7594 virtual bool WillAllocateNewOrRemembered() const {
7596 compiler::target::Record::InstanceSize(num_fields()));
7597 }
7598
7599#define FIELD_LIST(F) F(const RecordShape, shape_)
7600
7601 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateRecordInstr,
7602 TemplateAllocation,
7603 FIELD_LIST)
7604#undef FIELD_LIST
7605
7606 private:
7607 DISALLOW_COPY_AND_ASSIGN(AllocateRecordInstr);
7608};
7609
7610// Allocates and initializes fields of a small record object
7611// (with 2 or 3 fields).
7612class AllocateSmallRecordInstr : public TemplateAllocation<3> {
7613 public:
7614 AllocateSmallRecordInstr(const InstructionSource& source,
7615 RecordShape shape, // 2 or 3 fields.
7616 Value* value0,
7617 Value* value1,
7618 Value* value2, // Optional.
7619 intptr_t deopt_id)
7620 : TemplateAllocation(source, deopt_id), shape_(shape) {
7621 const intptr_t num_fields = shape.num_fields();
7622 ASSERT(num_fields == 2 || num_fields == 3);
7623 ASSERT((num_fields > 2) == (value2 != nullptr));
7624 SetInputAt(0, value0);
7625 SetInputAt(1, value1);
7626 if (num_fields > 2) {
7627 SetInputAt(2, value2);
7628 }
7629 }
7630
7631 DECLARE_INSTRUCTION(AllocateSmallRecord)
7632 virtual CompileType ComputeType() const;
7633
7634 RecordShape shape() const { return shape_; }
7635 intptr_t num_fields() const { return shape().num_fields(); }
7636
7637 virtual intptr_t InputCount() const { return num_fields(); }
7638
7639 virtual const Slot* SlotForInput(intptr_t pos) {
7640 return &Slot::GetRecordFieldSlot(
7641 Thread::Current(), compiler::target::Record::field_offset(pos));
7642 }
7643
7644 virtual bool HasUnknownSideEffects() const { return false; }
7645
7646 virtual bool WillAllocateNewOrRemembered() const {
7648 compiler::target::Record::InstanceSize(num_fields()));
7649 }
7650
7651#define FIELD_LIST(F) F(const RecordShape, shape_)
7652
7653 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateSmallRecordInstr,
7654 TemplateAllocation,
7655 FIELD_LIST)
7656#undef FIELD_LIST
7657
7658 private:
7659 DISALLOW_COPY_AND_ASSIGN(AllocateSmallRecordInstr);
7660};
7661
7662// This instruction captures the state of the object which had its allocation
7663// removed during the AllocationSinking pass.
7664// It does not produce any real code only deoptimization information.
7665class MaterializeObjectInstr : public VariadicDefinition {
7666 public:
7667 MaterializeObjectInstr(AllocationInstr* allocation,
7668 const Class& cls,
7669 intptr_t length_or_shape,
7670 const ZoneGrowableArray<const Slot*>& slots,
7671 InputsArray&& values)
7672 : VariadicDefinition(std::move(values)),
7673 cls_(cls),
7674 length_or_shape_(length_or_shape),
7675 slots_(slots),
7676 registers_remapped_(false),
7677 allocation_(allocation) {
7678 ASSERT(slots_.length() == InputCount());
7679 }
7680
7681 AllocationInstr* allocation() const { return allocation_; }
7682 const Class& cls() const { return cls_; }
7683
7684 intptr_t length_or_shape() const { return length_or_shape_; }
7685
7686 intptr_t FieldOffsetAt(intptr_t i) const {
7687 return slots_[i]->offset_in_bytes();
7688 }
7689
7690 const Location& LocationAt(intptr_t i) {
7691 ASSERT(0 <= i && i < InputCount());
7692 return locations_[i];
7693 }
7694
7695 DECLARE_INSTRUCTION(MaterializeObject)
7696
7697 // SelectRepresentations pass is run once more while MaterializeObject
7698 // instructions are still in the graph. To avoid any redundant boxing
7699 // operations inserted by that pass we should indicate that this
7700 // instruction can cope with any representation as it is essentially
7701 // an environment use.
7702 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
7703 ASSERT(0 <= idx && idx < InputCount());
7704 return kNoRepresentation;
7705 }
7706
7707 virtual bool ComputeCanDeoptimize() const { return false; }
7708 virtual bool HasUnknownSideEffects() const { return false; }
7709 virtual bool CanReplaceWithConstant() const { return false; }
7710
7711 Location* locations() { return locations_; }
7712 void set_locations(Location* locations) { locations_ = locations; }
7713
7714 virtual bool MayThrow() const { return false; }
7715
7716 void RemapRegisters(intptr_t* cpu_reg_slots, intptr_t* fpu_reg_slots);
7717
7718 bool was_visited_for_liveness() const { return visited_for_liveness_; }
7719 void mark_visited_for_liveness() { visited_for_liveness_ = true; }
7720
7722
7723#define FIELD_LIST(F) \
7724 F(const Class&, cls_) \
7725 F(intptr_t, length_or_shape_) \
7726 F(const ZoneGrowableArray<const Slot*>&, slots_) \
7727 F(bool, registers_remapped_)
7728
7729 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MaterializeObjectInstr,
7730 VariadicDefinition,
7731 FIELD_LIST)
7732#undef FIELD_LIST
7734
7735 private:
7736 Location* locations_ = nullptr;
7737
7738 // Not serialized.
7739 AllocationInstr* allocation_ = nullptr;
7740 bool visited_for_liveness_ = false;
7741
7742 DISALLOW_COPY_AND_ASSIGN(MaterializeObjectInstr);
7743};
7744
// Common base for allocations of array-like objects, which all carry a
// number-of-elements input.
class ArrayAllocationInstr : public AllocationInstr {
 public:
  explicit ArrayAllocationInstr(const InstructionSource& source,
                                intptr_t deopt_id)
      : AllocationInstr(source, deopt_id) {}

  virtual Value* num_elements() const = 0;

  // True when the length input is a compile-time Smi constant; only then is
  // GetConstantNumElements() valid to call.
  bool HasConstantNumElements() const {
    return num_elements()->BindsToSmiConstant();
  }
  intptr_t GetConstantNumElements() const {
    return num_elements()->BoundSmiConstant();
  }

  DECLARE_ABSTRACT_INSTRUCTION(ArrayAllocation);

  DECLARE_EMPTY_SERIALIZATION(ArrayAllocationInstr, AllocationInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(ArrayAllocationInstr);
};
7767
// Helper base for array allocations with a statically-known input count N,
// stored inline in an embedded array.
template <intptr_t N>
class TemplateArrayAllocation : public ArrayAllocationInstr {
 public:
  explicit TemplateArrayAllocation(const InstructionSource& source,
                                   intptr_t deopt_id)
      : ArrayAllocationInstr(source, deopt_id), inputs_() {}

  virtual intptr_t InputCount() const { return N; }
  virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }

  DECLARE_EMPTY_SERIALIZATION(TemplateArrayAllocation, ArrayAllocationInstr)

 protected:
  EmbeddedArray<Value*, N> inputs_;

 private:
  virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }

  DISALLOW_COPY_AND_ASSIGN(TemplateArrayAllocation);
};
7788
// Allocates a growable-backing _List (Array) with the given type arguments
// and length.
class CreateArrayInstr : public TemplateArrayAllocation<2> {
 public:
  CreateArrayInstr(const InstructionSource& source,
                   Value* type_arguments,
                   Value* num_elements,
                   intptr_t deopt_id)
      : TemplateArrayAllocation(source, deopt_id) {
    SetInputAt(kTypeArgumentsPos, type_arguments);
    SetInputAt(kLengthPos, num_elements);
  }

  enum { kTypeArgumentsPos = 0, kLengthPos = 1 };

  DECLARE_INSTRUCTION(CreateArray)
  virtual CompileType ComputeType() const;

  Value* type_arguments() const { return inputs_[kTypeArgumentsPos]; }
  virtual Value* num_elements() const { return inputs_[kLengthPos]; }

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool WillAllocateNewOrRemembered() const {
    // Large arrays will use cards instead; cannot skip write barrier.
    if (!HasConstantNumElements()) return false;
    return compiler::target::WillAllocateNewOrRememberedArray(
        GetConstantNumElements());
  }

  virtual const Slot* SlotForInput(intptr_t pos) {
    switch (pos) {
      case kTypeArgumentsPos:
        return &Slot::Array_type_arguments();
      case kLengthPos:
        return &Slot::Array_length();
      default:
        return TemplateArrayAllocation::SlotForInput(pos);
    }
  }

  DECLARE_EMPTY_SERIALIZATION(CreateArrayInstr, TemplateArrayAllocation)

 private:
  DISALLOW_COPY_AND_ASSIGN(CreateArrayInstr);
};
7833
7834class AllocateTypedDataInstr : public TemplateArrayAllocation<1> {
7835 public:
7836 AllocateTypedDataInstr(const InstructionSource& source,
7837 classid_t class_id,
7838 Value* num_elements,
7839 intptr_t deopt_id)
7840 : TemplateArrayAllocation(source, deopt_id), class_id_(class_id) {
7841 SetInputAt(kLengthPos, num_elements);
7842 }
7843
7844 enum { kLengthPos = 0 };
7845
7846 DECLARE_INSTRUCTION(AllocateTypedData)
7847 virtual CompileType ComputeType() const;
7848
7849 classid_t class_id() const { return class_id_; }
7850 virtual Value* num_elements() const { return inputs_[kLengthPos]; }
7851
7852 virtual bool HasUnknownSideEffects() const { return false; }
7853
7854 virtual bool WillAllocateNewOrRemembered() const {
7855 // No write barriers are generated for typed data accesses.
7856 return false;
7857 }
7858
7859 virtual const Slot* SlotForInput(intptr_t pos) {
7860 switch (pos) {
7861 case kLengthPos:
7862 return &Slot::TypedDataBase_length();
7863 default:
7864 return TemplateArrayAllocation::SlotForInput(pos);
7865 }
7866 }
7867
7868#define FIELD_LIST(F) F(const classid_t, class_id_)
7869
7870 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateTypedDataInstr,
7871 TemplateArrayAllocation,
7872 FIELD_LIST)
7873#undef FIELD_LIST
7874
7875 private:
7876 DISALLOW_COPY_AND_ASSIGN(AllocateTypedDataInstr);
7877};
7878
7879// This instruction is used to access untagged fields in untagged pointers to
7880// non-Dart objects, such as Thread and IsolateGroup, which do not point to
7881// managed memory.
7882//
7883// To access untagged fields in Dart objects, use LoadField with an
7884// appropriately created Slot.
7885//
7886// To access tagged fields in non-Dart objects, see
7887// FlowGraphBuilder::RawLoadField in kernel_to_il.cc.
7888class LoadUntaggedInstr : public TemplateDefinition<1, NoThrow> {
7889 public:
7890 LoadUntaggedInstr(Value* object, intptr_t offset) : offset_(offset) {
7891 ASSERT(object->definition()->representation() == kUntagged);
7892 ASSERT(!object->definition()->MayCreateUnsafeUntaggedPointer());
7893 SetInputAt(0, object);
7894 }
7895
7896 virtual Representation representation() const { return kUntagged; }
7897 DECLARE_INSTRUCTION(LoadUntagged)
7898
7899 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
7900 ASSERT(idx == 0);
7901 // The object may be tagged or untagged (for external objects).
7902 return kNoRepresentation;
7903 }
7904
7905 Value* object() const { return inputs_[0]; }
7906 intptr_t offset() const { return offset_; }
7907
7908 virtual bool MayCreateUnsafeUntaggedPointer() const {
7909 // See the documentation for LoadUntaggedInstr.
7910 return false;
7911 }
7912
7913 virtual bool ComputeCanDeoptimize() const { return false; }
7914
7915 virtual bool HasUnknownSideEffects() const { return false; }
7916 virtual bool AttributesEqual(const Instruction& other) const {
7917 return other.AsLoadUntagged()->offset_ == offset_;
7918 }
7919
7921
7922#define FIELD_LIST(F) F(const intptr_t, offset_)
7923
7925 TemplateDefinition,
7926 FIELD_LIST)
7927#undef FIELD_LIST
7928
7929 private:
7930 DISALLOW_COPY_AND_ASSIGN(LoadUntaggedInstr);
7931};
7932
7933// This instruction is used to perform untagged address calculations instead of
7934// converting GC-movable untagged pointers to unboxed integers in IL. Given an
7935// untagged address [base] as well as an [index] and [offset], where [index]
7936// is scaled by [index_scale], returns the untagged address
7937//
7938// base + (index * index_scale) + offset
7939//
7940// This allows the flow graph checker to enforce that there are no live untagged
7941// addresses of GC-movable objects when GC can happen.
class CalculateElementAddressInstr : public TemplateDefinition<3, NoThrow> {
 public:
  // Input positions: untagged base address, element index, and byte offset.
  enum { kBasePos, kIndexPos, kOffsetPos };
  // Computes base + (index * index_scale) + offset. [index_scale] must be a
  // power of two in [1, 16].
  CalculateElementAddressInstr(Value* base,
                               Value* index,
                               intptr_t index_scale,
                               Value* offset)
      : index_scale_(index_scale) {
    ASSERT(base->definition()->representation() == kUntagged);
    ASSERT(Utils::IsPowerOfTwo(index_scale));
    ASSERT(1 <= index_scale && index_scale <= 16);
    SetInputAt(kBasePos, base);
    SetInputAt(kIndexPos, index);
    SetInputAt(kOffsetPos, offset);
  }

  DECLARE_INSTRUCTION(CalculateElementAddress)

  // The result is itself an untagged address.
  virtual Representation representation() const { return kUntagged; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    if (idx == kBasePos) return kUntagged;
    ASSERT(idx == kIndexPos || idx == kOffsetPos);
    return kUnboxedIntPtr;
  }

  Value* base() const { return inputs_[kBasePos]; }
  Value* index() const { return inputs_[kIndexPos]; }
  Value* offset() const { return inputs_[kOffsetPos]; }
  intptr_t index_scale() const { return index_scale_; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  // The result is only as safe as the base address it derives from.
  virtual bool MayCreateUnsafeUntaggedPointer() const {
    return base()->definition()->MayCreateUnsafeUntaggedPointer();
  }

  // CSE could extend the live range of an unsafe untagged pointer across a
  // GC-triggering instruction, so disallow it in that case.
  virtual bool AllowsCSE() const { return !MayCreateUnsafeUntaggedPointer(); }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }
  virtual bool AttributesEqual(const Instruction& other) const {
    return other.AsCalculateElementAddress()->index_scale_ == index_scale_;
  }

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

#define FIELD_LIST(F) F(const intptr_t, index_scale_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CalculateElementAddressInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(CalculateElementAddressInstr);
};
8000
8001class LoadClassIdInstr : public TemplateDefinition<1, NoThrow, Pure> {
8002 public:
8003 explicit LoadClassIdInstr(Value* object,
8004 Representation representation = kTagged,
8005 bool input_can_be_smi = true)
8006 : representation_(representation), input_can_be_smi_(input_can_be_smi) {
8007 ASSERT(representation == kTagged || representation == kUnboxedUword);
8008 SetInputAt(0, object);
8009 }
8010
8011 virtual Representation representation() const { return representation_; }
8012 DECLARE_INSTRUCTION(LoadClassId)
8013 virtual CompileType ComputeType() const;
8014
8015 virtual Definition* Canonicalize(FlowGraph* flow_graph);
8016
8017 Value* object() const { return inputs_[0]; }
8018
8019 virtual bool ComputeCanDeoptimize() const { return false; }
8020
8021 virtual bool AttributesEqual(const Instruction& other) const {
8022 auto const other_load = other.AsLoadClassId();
8023 return other_load->representation_ == representation_ &&
8024 other_load->input_can_be_smi_ == input_can_be_smi_;
8025 }
8026
8027 void InferRange(uword* lower, uword* upper);
8028 virtual void InferRange(RangeAnalysis* analysis, Range* range);
8029
8031
8032#define FIELD_LIST(F) \
8033 F(const Representation, representation_) \
8034 F(const bool, input_can_be_smi_)
8035
8037 TemplateDefinition,
8038 FIELD_LIST)
8039#undef FIELD_LIST
8040
8041 private:
8042 DISALLOW_COPY_AND_ASSIGN(LoadClassIdInstr);
8043};
8044
8045// LoadFieldInstr represents a load from the given [slot] in the given
8046// [instance]. If calls_initializer(), then LoadFieldInstr also calls field
8047// initializer if field is not initialized yet (contains sentinel value).
8048//
8049// Note: if slot was a subject of the field unboxing optimization then this load
8050// would both load the box stored in the field and then load the content of
8051// the box.
8052class LoadFieldInstr : public TemplateLoadField<1> {
8053 public:
8054 LoadFieldInstr(Value* instance,
8055 const Slot& slot,
8056 InnerPointerAccess loads_inner_pointer,
8057 const InstructionSource& source,
8058 bool calls_initializer = false,
8059 intptr_t deopt_id = DeoptId::kNone)
8060 : TemplateLoadField(source,
8061 calls_initializer,
8062 deopt_id,
8063 slot.IsDartField() ? &slot.field() : nullptr),
8064 slot_(slot),
8065 loads_inner_pointer_(loads_inner_pointer) {
8066 switch (loads_inner_pointer) {
8067 case InnerPointerAccess::kNotUntagged:
8068 ASSERT(slot.representation() != kUntagged);
8069 break;
8070 case InnerPointerAccess::kMayBeInnerPointer:
8071 ASSERT(slot.representation() == kUntagged);
8072 ASSERT(slot.may_contain_inner_pointer());
8073 break;
8074 case InnerPointerAccess::kCannotBeInnerPointer:
8075 ASSERT(slot.representation() == kUntagged);
8076 break;
8077 }
8078 SetInputAt(0, instance);
8079 }
8080
8081 // Convenience function for slots that cannot hold untagged addresses.
8082 LoadFieldInstr(Value* instance,
8083 const Slot& slot,
8084 const InstructionSource& source,
8085 bool calls_initializer = false,
8086 intptr_t deopt_id = DeoptId::kNone)
8087 : LoadFieldInstr(instance,
8088 slot,
8090 source,
8091 calls_initializer,
8092 deopt_id) {}
8093
8094 Value* instance() const { return inputs_[0]; }
8095 const Slot& slot() const { return slot_; }
8096
8097 InnerPointerAccess loads_inner_pointer() const {
8098 return loads_inner_pointer_;
8099 }
8100 void set_loads_inner_pointer(InnerPointerAccess value) {
8101 // We should never change this for a non-untagged field.
8102 ASSERT(loads_inner_pointer_ != InnerPointerAccess::kNotUntagged);
8103 // We only convert from may to cannot, never the other direction.
8104 ASSERT(value == InnerPointerAccess::kCannotBeInnerPointer);
8105 loads_inner_pointer_ = value;
8106 }
8107
8108 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8109 ASSERT_EQUAL(idx, 0);
8110 return slot_.has_untagged_instance() ? kUntagged : kTagged;
8111 }
8112
8113 virtual Representation representation() const;
8114
8115 DECLARE_INSTRUCTION(LoadField)
8116 DECLARE_ATTRIBUTE(&slot())
8117
8118 virtual CompileType ComputeType() const;
8119
8120 virtual void InferRange(RangeAnalysis* analysis, Range* range);
8121
8122 // Whether the load may return an untagged pointer that points to memory
8123 // within the instance.
8124 bool MayCreateUntaggedAlias() const;
8125
8126 virtual bool MayCreateUnsafeUntaggedPointer() const;
8127
8128 bool IsImmutableLoad() const {
8129 // The data() field in PointerBase is marked mutable, but is not actually
8130 // mutable if it doesn't contain an inner pointer (e.g., for external
8131 // typed data and Pointer objects).
8132 if (slot().IsIdentical(Slot::PointerBase_data())) {
8133 return loads_inner_pointer() != InnerPointerAccess::kMayBeInnerPointer;
8134 }
8135 return slot().is_immutable();
8136 }
8137
8138 bool IsImmutableLengthLoad() const { return slot().IsImmutableLengthSlot(); }
8139
8140 // Try evaluating this load against the given constant value of
8141 // the instance. Returns true if evaluation succeeded and
8142 // puts result into result.
8143 // Note: we only evaluate loads when we can ensure that
8144 // instance has the field.
8145 bool Evaluate(const Object& instance_value, Object* result);
8146
8147 static bool TryEvaluateLoad(const Object& instance,
8148 const Field& field,
8149 Object* result);
8150
8151 static bool TryEvaluateLoad(const Object& instance,
8152 const Slot& field,
8153 Object* result);
8154
8155 virtual Definition* Canonicalize(FlowGraph* flow_graph);
8156
8157 static bool IsFixedLengthArrayCid(intptr_t cid);
8158 static bool IsTypedDataViewFactory(const Function& function);
8159 static bool IsUnmodifiableTypedDataViewFactory(const Function& function);
8160
8161 virtual bool AllowsCSE() const { return slot_.is_immutable(); }
8162
8163 virtual bool CanTriggerGC() const { return calls_initializer(); }
8164
8165 virtual bool AttributesEqual(const Instruction& other) const;
8166
8168
8169#define FIELD_LIST(F) \
8170 F(const Slot&, slot_) \
8171 F(InnerPointerAccess, loads_inner_pointer_)
8172
8174 TemplateLoadField,
8175 FIELD_LIST)
8176#undef FIELD_LIST
8177
8178 private:
8179 intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }
8180
8181 // Generate code which checks if field is initialized and
8182 // calls initializer if it is not. Field value is already loaded.
8183 void EmitNativeCodeForInitializerCall(FlowGraphCompiler* compiler);
8184
8185 DISALLOW_COPY_AND_ASSIGN(LoadFieldInstr);
8186};
8187
// Instantiates [type] with the given instantiator and function type argument
// vectors at runtime.
class InstantiateTypeInstr : public TemplateDefinition<2, Throws> {
 public:
  InstantiateTypeInstr(const InstructionSource& source,
                       const AbstractType& type,
                       Value* instantiator_type_arguments,
                       Value* function_type_arguments,
                       intptr_t deopt_id)
      : TemplateDefinition(source, deopt_id),
        token_pos_(source.token_pos),
        type_(type) {
    DEBUG_ASSERT(type.IsNotTemporaryScopedHandle());
    SetInputAt(0, instantiator_type_arguments);
    SetInputAt(1, function_type_arguments);
  }

  DECLARE_INSTRUCTION(InstantiateType)

  Value* instantiator_type_arguments() const { return inputs_[0]; }
  Value* function_type_arguments() const { return inputs_[1]; }
  const AbstractType& type() const { return type_; }
  virtual TokenPosition token_pos() const { return token_pos_; }

  virtual bool ComputeCanDeoptimize() const { return false; }
  // Only JIT-compiled code may deoptimize after the runtime call; AOT has no
  // deoptimization.
  virtual bool ComputeCanDeoptimizeAfterCall() const {
    return !CompilerState::Current().is_aot();
  }
  virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
    return InputCount();
  }

  virtual bool HasUnknownSideEffects() const { return false; }

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

#define FIELD_LIST(F)                                                          \
  F(const TokenPosition, token_pos_)                                           \
  F(const AbstractType&, type_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstantiateTypeInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(InstantiateTypeInstr);
};
8234
// Instantiates a type arguments vector with the given instantiator and
// function type argument vectors at runtime.
class InstantiateTypeArgumentsInstr : public TemplateDefinition<3, Throws> {
 public:
  InstantiateTypeArgumentsInstr(const InstructionSource& source,
                                Value* instantiator_type_arguments,
                                Value* function_type_arguments,
                                Value* type_arguments,
                                const Class& instantiator_class,
                                const Function& function,
                                intptr_t deopt_id)
      : TemplateDefinition(source, deopt_id),
        token_pos_(source.token_pos),
        instantiator_class_(instantiator_class),
        function_(function) {
    DEBUG_ASSERT(instantiator_class.IsNotTemporaryScopedHandle());
    DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
    SetInputAt(0, instantiator_type_arguments);
    SetInputAt(1, function_type_arguments);
    SetInputAt(2, type_arguments);
  }

  DECLARE_INSTRUCTION(InstantiateTypeArguments)

  Value* instantiator_type_arguments() const { return inputs_[0]; }
  Value* function_type_arguments() const { return inputs_[1]; }
  // The (uninstantiated) type arguments vector being instantiated.
  Value* type_arguments() const { return inputs_[2]; }
  const Class& instantiator_class() const { return instantiator_class_; }
  const Function& function() const { return function_; }
  virtual TokenPosition token_pos() const { return token_pos_; }

  virtual bool ComputeCanDeoptimize() const { return false; }
  // Only JIT-compiled code may deoptimize after the runtime call; AOT has no
  // deoptimization.
  virtual bool ComputeCanDeoptimizeAfterCall() const {
    return !CompilerState::Current().is_aot();
  }
  virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
    return InputCount();
  }

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  // Whether the result may simply reuse the instantiator's type arguments
  // vector. Requires a constant type arguments input.
  bool CanShareInstantiatorTypeArguments(
      bool* with_runtime_check = nullptr) const {
    if (instantiator_class().IsNull() || !type_arguments()->BindsToConstant() ||
        !type_arguments()->BoundConstant().IsTypeArguments()) {
      return false;
    }
    const auto& type_args =
        TypeArguments::Cast(type_arguments()->BoundConstant());
    return type_args.CanShareInstantiatorTypeArguments(instantiator_class(),
                                                       with_runtime_check);
  }

  // Whether the result may simply reuse the function's type arguments vector.
  // Requires a constant type arguments input.
  bool CanShareFunctionTypeArguments(bool* with_runtime_check = nullptr) const {
    if (function().IsNull() || !type_arguments()->BindsToConstant() ||
        !type_arguments()->BoundConstant().IsTypeArguments()) {
      return false;
    }
    const auto& type_args =
        TypeArguments::Cast(type_arguments()->BoundConstant());
    return type_args.CanShareFunctionTypeArguments(function(),
                                                   with_runtime_check);
  }

  // Selects the instantiation stub specialized for the sharing opportunities
  // determined above.
  const Code& GetStub() const {
    bool with_runtime_check;
    if (CanShareInstantiatorTypeArguments(&with_runtime_check)) {
      ASSERT(with_runtime_check);
      return StubCode::InstantiateTypeArgumentsMayShareInstantiatorTA();
    } else if (CanShareFunctionTypeArguments(&with_runtime_check)) {
      ASSERT(with_runtime_check);
      return StubCode::InstantiateTypeArgumentsMayShareFunctionTA();
    }
    return StubCode::InstantiateTypeArguments();
  }

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

#define FIELD_LIST(F)                                                          \
  F(const TokenPosition, token_pos_)                                           \
  F(const Class&, instantiator_class_)                                         \
  F(const Function&, function_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstantiateTypeArgumentsInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(InstantiateTypeArgumentsInstr);
};
8326
8327// [AllocateContext] instruction allocates a new Context object with the space
8328// for the given [context_variables].
class AllocateContextInstr : public TemplateAllocation<0> {
 public:
  AllocateContextInstr(const InstructionSource& source,
                       const ZoneGrowableArray<const Slot*>& context_slots,
                       intptr_t deopt_id)
      : TemplateAllocation(source, deopt_id), context_slots_(context_slots) {}

  DECLARE_INSTRUCTION(AllocateContext)
  virtual CompileType ComputeType() const;

  // The slots (context variables) the new Context will contain.
  const ZoneGrowableArray<const Slot*>& context_slots() const {
    return context_slots_;
  }

  intptr_t num_context_variables() const { return context_slots().length(); }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool WillAllocateNewOrRemembered() const {
    return compiler::target::WillAllocateNewOrRememberedContext(
        context_slots().length());
  }

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

#define FIELD_LIST(F) F(const ZoneGrowableArray<const Slot*>&, context_slots_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateContextInstr,
                                          TemplateAllocation,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocateContextInstr);
};
8368
8369// [CloneContext] instruction clones the given Context object assuming that
8370// it contains exactly the provided [context_variables].
8371class CloneContextInstr : public TemplateDefinition<1, Throws> {
8372 public:
8373 CloneContextInstr(const InstructionSource& source,
8374 Value* context_value,
8375 const ZoneGrowableArray<const Slot*>& context_slots,
8376 intptr_t deopt_id)
8377 : TemplateDefinition(source, deopt_id),
8378 token_pos_(source.token_pos),
8379 context_slots_(context_slots) {
8380 SetInputAt(0, context_value);
8381 }
8382
8383 virtual TokenPosition token_pos() const { return token_pos_; }
8384 Value* context_value() const { return inputs_[0]; }
8385
8386 const ZoneGrowableArray<const Slot*>& context_slots() const {
8387 return context_slots_;
8388 }
8389
8390 DECLARE_INSTRUCTION(CloneContext)
8391 virtual CompileType ComputeType() const;
8392
8393 virtual bool ComputeCanDeoptimize() const { return false; }
8394 virtual bool ComputeCanDeoptimizeAfterCall() const {
8395 // We test that allocation instructions have correct deopt environment
8396 // (which is needed in case OOM is thrown) by actually deoptimizing
8397 // optimized code in allocation slow paths.
8398 return !CompilerState::Current().is_aot();
8399 }
8400 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
8401 return InputCount();
8402 }
8403
8404 virtual bool HasUnknownSideEffects() const { return false; }
8405
8406#define FIELD_LIST(F) \
8407 F(const TokenPosition, token_pos_) \
8408 F(const ZoneGrowableArray<const Slot*>&, context_slots_)
8409
8411 TemplateDefinition,
8412 FIELD_LIST)
8413#undef FIELD_LIST
8414
8415 private:
8416 DISALLOW_COPY_AND_ASSIGN(CloneContextInstr);
8417};
8418
8419class CheckEitherNonSmiInstr : public TemplateInstruction<2, NoThrow, Pure> {
8420 public:
8421 CheckEitherNonSmiInstr(Value* left, Value* right, intptr_t deopt_id)
8422 : TemplateInstruction(deopt_id) {
8423 SetInputAt(0, left);
8424 SetInputAt(1, right);
8425 }
8426
8427 Value* left() const { return inputs_[0]; }
8428 Value* right() const { return inputs_[1]; }
8429
8430 DECLARE_INSTRUCTION(CheckEitherNonSmi)
8431
8432 virtual bool ComputeCanDeoptimize() const { return true; }
8433
8434 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
8435
8436 virtual bool AttributesEqual(const Instruction& other) const { return true; }
8437
8438 DECLARE_EMPTY_SERIALIZATION(CheckEitherNonSmiInstr, TemplateInstruction)
8439
8440 private:
8441 DISALLOW_COPY_AND_ASSIGN(CheckEitherNonSmiInstr);
8442};
8443
8444struct Boxing : public AllStatic {
8445 // Whether the given representation can be boxed or unboxed.
8446 static bool Supports(Representation rep);
8447
8448 // The native representation that results from unboxing a value with the
8449 // representation [rep].
8450 //
8451 // The native representation can hold all values represented by [rep], but
8452 // may be larger than the value size of [rep]. For example, byte-sized
8453 // values are zero or sign-extended to word-sized values on x86 architectures
8454 // to avoid having to allocate byte registers.
8455 static constexpr Representation NativeRepresentation(Representation rep) {
8456 // Only change integer representations.
8457 if (!RepresentationUtils::IsUnboxedInteger(rep)) return rep;
8458 // Use signed word-sized integers for representations smaller than 4 bytes.
8459 return RepresentationUtils::ValueSize(rep) < 4 ? kUnboxedIntPtr : rep;
8460 }
8461
8462 // Whether boxing this value requires allocating a new object.
8463 static bool RequiresAllocation(Representation rep);
8464
8465 // The offset into the Layout object for the boxed value that can store
8466 // the full range of values in the representation.
8467 // Only defined for allocated boxes (i.e., RequiresAllocation must be true).
8468 static intptr_t ValueOffset(Representation rep);
8469
8470 // The class ID for the boxed value that can store the full range
8471 // of values in the representation.
8472 static intptr_t BoxCid(Representation rep);
8473};
8474
8475class BoxInstr : public TemplateDefinition<1, NoThrow, Pure> {
8476 public:
8477 static BoxInstr* Create(Representation from, Value* value);
8478
8479 Value* value() const { return inputs_[0]; }
8480 Representation from_representation() const { return from_representation_; }
8481
8483 virtual CompileType ComputeType() const;
8484
8485 virtual bool ComputeCanDeoptimize() const { return false; }
8486 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
8487
8488 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8489 ASSERT(idx == 0);
8490 return from_representation();
8491 }
8492
8493 virtual bool AttributesEqual(const Instruction& other) const {
8494 return other.AsBox()->from_representation() == from_representation();
8495 }
8496
8497 Definition* Canonicalize(FlowGraph* flow_graph);
8498
8499 virtual TokenPosition token_pos() const { return TokenPosition::kBox; }
8500
8501 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
8502 return kNotSpeculative;
8503 }
8504
8505#define FIELD_LIST(F) F(const Representation, from_representation_)
8506
8508 TemplateDefinition,
8509 FIELD_LIST)
8510#undef FIELD_LIST
8511
8512 protected:
8513 BoxInstr(Representation from_representation, Value* value)
8514 : from_representation_(from_representation) {
8515 SetInputAt(0, value);
8516 }
8517
8518 private:
8519 intptr_t ValueOffset() const {
8520 return Boxing::ValueOffset(from_representation());
8521 }
8522
8523 DISALLOW_COPY_AND_ASSIGN(BoxInstr);
8524};
8525
// Abstract base for boxing of unboxed integer representations.
class BoxIntegerInstr : public BoxInstr {
 public:
  BoxIntegerInstr(Representation representation, Value* value)
      : BoxInstr(representation, value) {}

  // Whether every possible value fits in a Smi (no allocation needed).
  virtual bool ValueFitsSmi() const;

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  // Allocating a box (non-Smi case) can trigger GC.
  virtual bool CanTriggerGC() const { return !ValueFitsSmi(); }

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

  DECLARE_EMPTY_SERIALIZATION(BoxIntegerInstr, BoxInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxIntegerInstr);
};
8549
8550class BoxSmallIntInstr : public BoxIntegerInstr {
8551 public:
8552 explicit BoxSmallIntInstr(Representation rep, Value* value)
8553 : BoxIntegerInstr(rep, value) {
8554 ASSERT(RepresentationUtils::ValueSize(rep) * kBitsPerByte <=
8555 compiler::target::kSmiBits);
8556 }
8557
8558 virtual bool ValueFitsSmi() const { return true; }
8559
8560 DECLARE_INSTRUCTION(BoxSmallInt)
8561
8562 DECLARE_EMPTY_SERIALIZATION(BoxSmallIntInstr, BoxIntegerInstr)
8563
8564 private:
8565 DISALLOW_COPY_AND_ASSIGN(BoxSmallIntInstr);
8566};
8567
// Abstract base for boxing 32-bit integer representations.
class BoxInteger32Instr : public BoxIntegerInstr {
 public:
  BoxInteger32Instr(Representation representation, Value* value)
      : BoxIntegerInstr(representation, value) {}

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

  DECLARE_EMPTY_SERIALIZATION(BoxInteger32Instr, BoxIntegerInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxInteger32Instr);
};
8580
// Boxes a signed 32-bit integer value.
class BoxInt32Instr : public BoxInteger32Instr {
 public:
  explicit BoxInt32Instr(Value* value)
      : BoxInteger32Instr(kUnboxedInt32, value) {}

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

  DECLARE_EMPTY_SERIALIZATION(BoxInt32Instr, BoxInteger32Instr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxInt32Instr);
};
8593
// Boxes an unsigned 32-bit integer value.
class BoxUint32Instr : public BoxInteger32Instr {
 public:
  explicit BoxUint32Instr(Value* value)
      : BoxInteger32Instr(kUnboxedUint32, value) {}

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

  DECLARE_EMPTY_SERIALIZATION(BoxUint32Instr, BoxInteger32Instr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BoxUint32Instr);
};
8606
8607class BoxInt64Instr : public BoxIntegerInstr {
8608 public:
8609 explicit BoxInt64Instr(Value* value)
8610 : BoxIntegerInstr(kUnboxedInt64, value) {}
8611
8612 virtual Definition* Canonicalize(FlowGraph* flow_graph);
8613
8614 DECLARE_INSTRUCTION(BoxInt64)
8615
8616 DECLARE_EMPTY_SERIALIZATION(BoxInt64Instr, BoxIntegerInstr)
8617
8618 private:
8619 DISALLOW_COPY_AND_ASSIGN(BoxInt64Instr);
8620};
8621
8622class UnboxInstr : public TemplateDefinition<1, NoThrow, Pure> {
8623 public:
8624 static UnboxInstr* Create(Representation to,
8625 Value* value,
8626 intptr_t deopt_id,
8627 SpeculativeMode speculative_mode = kGuardInputs);
8628
8629 Value* value() const { return inputs_[0]; }
8630
8631 virtual bool ComputeCanDeoptimize() const {
8632 if (SpeculativeModeOfInputs() == kNotSpeculative) {
8633 return false;
8634 }
8635
8636 const intptr_t value_cid = value()->Type()->ToCid();
8637 const intptr_t box_cid = BoxCid();
8638
8639 if (value_cid == box_cid) {
8640 return false;
8641 }
8642
8643 if (CanConvertSmi() && (value_cid == kSmiCid)) {
8644 return false;
8645 }
8646
8647 return true;
8648 }
8649
8650 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
8651 return speculative_mode_;
8652 }
8653
8654 virtual Representation representation() const { return representation_; }
8655
8656 DECLARE_INSTRUCTION(Unbox)
8657
8658 virtual bool AttributesEqual(const Instruction& other) const {
8659 auto const other_unbox = other.AsUnbox();
8660 return (representation() == other_unbox->representation()) &&
8661 (speculative_mode_ == other_unbox->speculative_mode_);
8662 }
8663
8664 Definition* Canonicalize(FlowGraph* flow_graph);
8665
8666 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
8667
8668 virtual TokenPosition token_pos() const { return TokenPosition::kBox; }
8669
8670#define FIELD_LIST(F) \
8671 F(const Representation, representation_) \
8672 F(SpeculativeMode, speculative_mode_)
8673
8675 TemplateDefinition,
8676 FIELD_LIST)
8677#undef FIELD_LIST
8678
8679 protected:
8680 UnboxInstr(Representation representation,
8681 Value* value,
8682 intptr_t deopt_id,
8683 SpeculativeMode speculative_mode)
8684 : TemplateDefinition(deopt_id),
8685 representation_(representation),
8686 speculative_mode_(speculative_mode) {
8687 // Unboxing doesn't currently handle non-native representations.
8688 ASSERT_EQUAL(Boxing::NativeRepresentation(representation), representation);
8689 SetInputAt(0, value);
8690 }
8691
8692 void set_speculative_mode(SpeculativeMode value) {
8693 speculative_mode_ = value;
8694 }
8695
8696 private:
8697 bool CanConvertSmi() const;
8698 void EmitLoadFromBox(FlowGraphCompiler* compiler);
8699 void EmitSmiConversion(FlowGraphCompiler* compiler);
8700 void EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler);
8701 void EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler);
8702 void EmitLoadFromBoxWithDeopt(FlowGraphCompiler* compiler);
8703
8704 intptr_t BoxCid() const { return Boxing::BoxCid(representation_); }
8705
8706 intptr_t ValueOffset() const { return Boxing::ValueOffset(representation_); }
8707
8708 DISALLOW_COPY_AND_ASSIGN(UnboxInstr);
8709};
8710
8711class UnboxIntegerInstr : public UnboxInstr {
8712 public:
8713 enum TruncationMode { kTruncate, kNoTruncation };
8714
8715 UnboxIntegerInstr(Representation representation,
8716 TruncationMode truncation_mode,
8717 Value* value,
8718 intptr_t deopt_id,
8719 SpeculativeMode speculative_mode)
8720 : UnboxInstr(representation, value, deopt_id, speculative_mode),
8721 is_truncating_(truncation_mode == kTruncate) {}
8722
8723 bool is_truncating() const { return is_truncating_; }
8724
8725 void mark_truncating() { is_truncating_ = true; }
8726
8727 virtual bool ComputeCanDeoptimize() const;
8728
8729 virtual bool AttributesEqual(const Instruction& other) const {
8730 auto const other_unbox = other.AsUnboxInteger();
8731 return UnboxInstr::AttributesEqual(other) &&
8732 (other_unbox->is_truncating_ == is_truncating_);
8733 }
8734
8735 virtual Definition* Canonicalize(FlowGraph* flow_graph);
8736
8737 virtual void InferRange(RangeAnalysis* analysis, Range* range);
8738
8739 DECLARE_ABSTRACT_INSTRUCTION(UnboxInteger)
8740
8742
8743#define FIELD_LIST(F) F(bool, is_truncating_)
8744
8746 UnboxInstr,
8747 FIELD_LIST)
8748#undef FIELD_LIST
8749
8750 private:
8751 DISALLOW_COPY_AND_ASSIGN(UnboxIntegerInstr);
8752};
8753
// Abstract base for unboxing to 32-bit integer representations.
class UnboxInteger32Instr : public UnboxIntegerInstr {
 public:
  UnboxInteger32Instr(Representation representation,
                      TruncationMode truncation_mode,
                      Value* value,
                      intptr_t deopt_id,
                      SpeculativeMode speculative_mode)
      : UnboxIntegerInstr(representation,
                          truncation_mode,
                          value,
                          deopt_id,
                          speculative_mode) {}

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

  DECLARE_EMPTY_SERIALIZATION(UnboxInteger32Instr, UnboxIntegerInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxInteger32Instr);
};
8774
// Unboxes to an unsigned 32-bit integer; always truncating.
class UnboxUint32Instr : public UnboxInteger32Instr {
 public:
  UnboxUint32Instr(Value* value,
                   intptr_t deopt_id,
                   SpeculativeMode speculative_mode = kGuardInputs)
      : UnboxInteger32Instr(kUnboxedUint32,
                            kTruncate,
                            value,
                            deopt_id,
                            speculative_mode) {
    ASSERT(is_truncating());
  }

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

  DECLARE_EMPTY_SERIALIZATION(UnboxUint32Instr, UnboxInteger32Instr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxUint32Instr);
};
8795
// Unboxes to a signed 32-bit integer with the given truncation mode.
class UnboxInt32Instr : public UnboxInteger32Instr {
 public:
  UnboxInt32Instr(TruncationMode truncation_mode,
                  Value* value,
                  intptr_t deopt_id,
                  SpeculativeMode speculative_mode = kGuardInputs)
      : UnboxInteger32Instr(kUnboxedInt32,
                            truncation_mode,
                            value,
                            deopt_id,
                            speculative_mode) {}

  // NOTE(review): a declaration line appears to be missing here in this
  // copy (extraction artifact); verify against upstream il.h.

  DECLARE_EMPTY_SERIALIZATION(UnboxInt32Instr, UnboxInteger32Instr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxInt32Instr);
};
8815
// Unboxes a Dart integer to kUnboxedInt64. Never truncating
// (kNoTruncation): an int64 can represent any Smi/Mint value.
class UnboxInt64Instr : public UnboxIntegerInstr {
 public:
  UnboxInt64Instr(Value* value,
                  intptr_t deopt_id,
                  SpeculativeMode speculative_mode)
      : UnboxIntegerInstr(kUnboxedInt64,
                          kNoTruncation,
                          value,
                          deopt_id,
                          speculative_mode) {}

  DECLARE_EMPTY_SERIALIZATION(UnboxInt64Instr, UnboxIntegerInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnboxInt64Instr);
};
8834
8835bool Definition::IsInt64Definition() {
8836 return (Type()->ToCid() == kMintCid) || IsBinaryInt64Op() ||
8837 IsUnaryInt64Op() || IsShiftInt64Op() || IsSpeculativeShiftInt64Op() ||
8838 IsBoxInt64() || IsUnboxInt64();
8839}
8840
8841// Calls into the runtime and performs a case-insensitive comparison of the
8842// UTF16 strings (i.e. TwoByteString) located at
8843// str[lhs_index:lhs_index + length] and str[rhs_index:rhs_index + length].
8844// Depending on [handle_surrogates], we will treat the strings as either
8845// UCS2 (no surrogate handling) or UTF16 (surrogates handled appropriately).
class CaseInsensitiveCompareInstr
    : public TemplateDefinition<4, NoThrow, Pure> {
 public:
  // Inputs: the string, both start indices, and the length of the compared
  // slices. Only two-byte strings are supported (asserted below).
  CaseInsensitiveCompareInstr(Value* str,
                              Value* lhs_index,
                              Value* rhs_index,
                              Value* length,
                              bool handle_surrogates,
                              intptr_t cid)
      : handle_surrogates_(handle_surrogates), cid_(cid) {
    ASSERT(cid == kTwoByteStringCid);
    ASSERT(index_scale() == 2);
    SetInputAt(0, str);
    SetInputAt(1, lhs_index);
    SetInputAt(2, rhs_index);
    SetInputAt(3, length);
  }

  Value* str() const { return inputs_[0]; }
  Value* lhs_index() const { return inputs_[1]; }
  Value* rhs_index() const { return inputs_[2]; }
  Value* length() const { return inputs_[3]; }

  // Runtime entry performing the actual comparison (see class comment).
  const RuntimeEntry& TargetFunction() const;
  intptr_t class_id() const { return cid_; }

  // Bytes per element of the string's backing store (2 for TwoByteString).
  intptr_t index_scale() const {
    return compiler::target::Instance::ElementSizeFor(cid_);
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kTagged; }

  DECLARE_INSTRUCTION(CaseInsensitiveCompare)
  virtual CompileType ComputeType() const;

  // Two compares can be commoned only if they agree on surrogate handling
  // and on the string class id.
  virtual bool AttributesEqual(const Instruction& other) const {
    const auto* other_compare = other.AsCaseInsensitiveCompare();
    return (other_compare->handle_surrogates_ == handle_surrogates_) &&
           (other_compare->cid_ == cid_);
  }

#define FIELD_LIST(F)                                                          \
  F(const bool, handle_surrogates_)                                            \
  F(const intptr_t, cid_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CaseInsensitiveCompareInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(CaseInsensitiveCompareInstr);
};
8901
8902// Represents Math's static min and max functions.
8903class MathMinMaxInstr : public TemplateDefinition<2, NoThrow, Pure> {
8904 public:
8905 MathMinMaxInstr(MethodRecognizer::Kind op_kind,
8906 Value* left_value,
8907 Value* right_value,
8908 intptr_t deopt_id,
8909 intptr_t result_cid)
8910 : TemplateDefinition(deopt_id),
8911 op_kind_(op_kind),
8912 result_cid_(result_cid) {
8913 ASSERT((result_cid == kSmiCid) || (result_cid == kDoubleCid));
8914 SetInputAt(0, left_value);
8915 SetInputAt(1, right_value);
8916 }
8917
8918 MethodRecognizer::Kind op_kind() const { return op_kind_; }
8919
8920 Value* left() const { return inputs_[0]; }
8921 Value* right() const { return inputs_[1]; }
8922
8923 intptr_t result_cid() const { return result_cid_; }
8924
8925 virtual bool ComputeCanDeoptimize() const { return false; }
8926
8927 virtual Representation representation() const {
8928 if (result_cid() == kSmiCid) {
8929 return kTagged;
8930 }
8931 ASSERT(result_cid() == kDoubleCid);
8932 return kUnboxedDouble;
8933 }
8934
8935 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
8936 if (result_cid() == kSmiCid) {
8937 return kTagged;
8938 }
8939 ASSERT(result_cid() == kDoubleCid);
8940 return kUnboxedDouble;
8941 }
8942
8943 virtual intptr_t DeoptimizationTarget() const {
8944 // Direct access since this instruction cannot deoptimize, and the deopt-id
8945 // was inherited from another instruction that could deoptimize.
8946 return GetDeoptId();
8947 }
8948
8949 DECLARE_INSTRUCTION(MathMinMax)
8950 virtual CompileType ComputeType() const;
8951 virtual bool AttributesEqual(const Instruction& other) const;
8952
8953#define FIELD_LIST(F) \
8954 F(const MethodRecognizer::Kind, op_kind_) \
8955 F(const intptr_t, result_cid_)
8956
8958 TemplateDefinition,
8959 FIELD_LIST)
8960#undef FIELD_LIST
8961
8962 private:
8963 DISALLOW_COPY_AND_ASSIGN(MathMinMaxInstr);
8964};
8965
// Binary floating-point arithmetic on two unboxed float/double inputs.
// The working representation is configurable (kUnboxedFloat or
// kUnboxedDouble, asserted in the constructor).
class BinaryDoubleOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  BinaryDoubleOpInstr(Token::Kind op_kind,
                      Value* left,
                      Value* right,
                      intptr_t deopt_id,
                      const InstructionSource& source,
                      SpeculativeMode speculative_mode = kGuardInputs,
                      Representation representation = kUnboxedDouble)
      : TemplateDefinition(source, deopt_id),
        op_kind_(op_kind),
        token_pos_(source.token_pos),
        speculative_mode_(speculative_mode),
        representation_(representation) {
    ASSERT((representation == kUnboxedFloat) ||
           (representation == kUnboxedDouble));
    SetInputAt(0, left);
    SetInputAt(1, right);
  }

  Value* left() const { return inputs_[0]; }
  Value* right() const { return inputs_[1]; }

  Token::Kind op_kind() const { return op_kind_; }

  virtual TokenPosition token_pos() const { return token_pos_; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return representation_; }

  // Both inputs must match the result representation.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return representation_;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  DECLARE_ATTRIBUTE(op_kind())

  // NOTE(review): a macro invocation appears to have been elided from this
  // view here — confirm against the upstream header.

  DECLARE_INSTRUCTION(BinaryDoubleOp)

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  // Equal if operation kind, speculative mode and representation all match.
  virtual bool AttributesEqual(const Instruction& other) const {
    auto const other_bin_op = other.AsBinaryDoubleOp();
    return (op_kind() == other_bin_op->op_kind()) &&
           (speculative_mode_ == other_bin_op->speculative_mode_) &&
           (representation_ == other_bin_op->representation_);
  }

#define FIELD_LIST(F)                                                          \
  F(const Token::Kind, op_kind_)                                               \
  F(const TokenPosition, token_pos_)                                           \
  F(const SpeculativeMode, speculative_mode_)                                  \
  F(const Representation, representation_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinaryDoubleOpInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(BinaryDoubleOpInstr);
};
9041
9042class DoubleTestOpInstr : public TemplateComparison<1, NoThrow, Pure> {
9043 public:
9044 DoubleTestOpInstr(MethodRecognizer::Kind op_kind,
9045 Value* value,
9046 intptr_t deopt_id,
9047 const InstructionSource& source)
9048 : TemplateComparison(source, Token::kEQ, deopt_id), op_kind_(op_kind) {
9049 SetInputAt(0, value);
9050 }
9051
9052 Value* value() const { return InputAt(0); }
9053
9054 MethodRecognizer::Kind op_kind() const { return op_kind_; }
9055
9056 virtual bool ComputeCanDeoptimize() const { return false; }
9057
9058 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9059 ASSERT(idx == 0);
9060 return kUnboxedDouble;
9061 }
9062
9064
9065 DECLARE_COMPARISON_INSTRUCTION(DoubleTestOp)
9066
9067 virtual CompileType ComputeType() const;
9068
9069 virtual Definition* Canonicalize(FlowGraph* flow_graph);
9070
9071 virtual bool AttributesEqual(const Instruction& other) const {
9072 return op_kind_ == other.AsDoubleTestOp()->op_kind() &&
9073 ComparisonInstr::AttributesEqual(other);
9074 }
9075
9076 virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
9077
9078#define FIELD_LIST(F) F(const MethodRecognizer::Kind, op_kind_)
9079
9081 TemplateComparison,
9082 FIELD_LIST)
9083#undef FIELD_LIST
9084
9085 private:
9086 DISALLOW_COPY_AND_ASSIGN(DoubleTestOpInstr);
9087};
9088
// Computes the hash code of an unboxed double input, producing an unboxed
// int64 result whose compile type is Smi.
class HashDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  HashDoubleOpInstr(Value* value, intptr_t deopt_id)
      : TemplateDefinition(deopt_id) {
    SetInputAt(0, value);
  }

  // Zone-allocating factory mirroring the constructor.
  static HashDoubleOpInstr* Create(Value* value, intptr_t deopt_id) {
    return new HashDoubleOpInstr(value, deopt_id);
  }

  Value* value() const { return inputs_[0]; }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  DECLARE_INSTRUCTION(HashDoubleOp)

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual CompileType ComputeType() const { return CompileType::Smi(); }

  // No configurable state beyond the input, so any two are interchangeable.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  DECLARE_EMPTY_SERIALIZATION(HashDoubleOpInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(HashDoubleOpInstr);
};
9128
// Computes the hash code of a tagged integer input. [smi] records whether
// the input is known to be a Smi; the result's compile type is Smi.
class HashIntegerOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  HashIntegerOpInstr(Value* value, bool smi, intptr_t deopt_id)
      : TemplateDefinition(deopt_id), smi_(smi) {
    SetInputAt(0, value);
  }

  // Zone-allocating factory mirroring the constructor.
  static HashIntegerOpInstr* Create(Value* value, bool smi, intptr_t deopt_id) {
    return new HashIntegerOpInstr(value, smi, deopt_id);
  }

  Value* value() const { return inputs_[0]; }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  virtual Representation representation() const { return kTagged; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kTagged;
  }

  DECLARE_INSTRUCTION(HashIntegerOp)

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual CompileType ComputeType() const { return CompileType::Smi(); }

  virtual bool AttributesEqual(const Instruction& other) const { return true; }

#define FIELD_LIST(F) F(const bool, smi_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(HashIntegerOpInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(HashIntegerOpInstr);
};
9175
// Abstract base for unary integer operations (only Token::kNEGATE and
// Token::kBIT_NOT, asserted in the constructor). Concrete subclasses pin
// down the representation (Smi, uint32, int64).
class UnaryIntegerOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  UnaryIntegerOpInstr(Token::Kind op_kind, Value* value, intptr_t deopt_id)
      : TemplateDefinition(deopt_id), op_kind_(op_kind) {
    ASSERT((op_kind == Token::kNEGATE) || (op_kind == Token::kBIT_NOT));
    SetInputAt(0, value);
  }

  // Factory selecting the concrete subclass for [representation].
  static UnaryIntegerOpInstr* Make(Representation representation,
                                   Token::Kind op_kind,
                                   Value* value,
                                   intptr_t deopt_id,
                                   SpeculativeMode speculative_mode,
                                   Range* range);

  Value* value() const { return inputs_[0]; }
  Token::Kind op_kind() const { return op_kind_; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(const Instruction& other) const {
    return other.AsUnaryIntegerOp()->op_kind() == op_kind();
  }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  DECLARE_ABSTRACT_INSTRUCTION(UnaryIntegerOp)

  DECLARE_ATTRIBUTE(op_kind())

#define FIELD_LIST(F) F(const Token::Kind, op_kind_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnaryIntegerOpInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(UnaryIntegerOpInstr);
};
9222
// Handles both Smi operations: BIT_NOT and NEGATE (the only kinds the
// UnaryIntegerOpInstr base constructor accepts). Only NEGATE can deoptimize,
// since negating the most negative Smi overflows the Smi range.
class UnarySmiOpInstr : public UnaryIntegerOpInstr {
 public:
  UnarySmiOpInstr(Token::Kind op_kind, Value* value, intptr_t deopt_id)
      : UnaryIntegerOpInstr(op_kind, value, deopt_id) {}

  virtual bool ComputeCanDeoptimize() const {
    return op_kind() == Token::kNEGATE;
  }

  virtual CompileType ComputeType() const;

  DECLARE_INSTRUCTION(UnarySmiOp)

  DECLARE_EMPTY_SERIALIZATION(UnarySmiOpInstr, UnaryIntegerOpInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnarySmiOpInstr);
};
9242
// Unary operation on an unboxed uint32 value. Only BIT_NOT is supported
// (see IsSupported); it cannot overflow, so no deoptimization is needed.
class UnaryUint32OpInstr : public UnaryIntegerOpInstr {
 public:
  UnaryUint32OpInstr(Token::Kind op_kind, Value* value, intptr_t deopt_id)
      : UnaryIntegerOpInstr(op_kind, value, deopt_id) {
    ASSERT(IsSupported(op_kind));
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedUint32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedUint32;
  }

  static bool IsSupported(Token::Kind op_kind) {
    return op_kind == Token::kBIT_NOT;
  }

  DECLARE_INSTRUCTION(UnaryUint32Op)

  DECLARE_EMPTY_SERIALIZATION(UnaryUint32OpInstr, UnaryIntegerOpInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(UnaryUint32OpInstr);
};
9270
9271class UnaryInt64OpInstr : public UnaryIntegerOpInstr {
9272 public:
9273 UnaryInt64OpInstr(Token::Kind op_kind,
9274 Value* value,
9275 intptr_t deopt_id,
9276 SpeculativeMode speculative_mode = kGuardInputs)
9277 : UnaryIntegerOpInstr(op_kind, value, deopt_id),
9278 speculative_mode_(speculative_mode) {
9279 ASSERT(op_kind == Token::kBIT_NOT || op_kind == Token::kNEGATE);
9280 }
9281
9282 virtual bool ComputeCanDeoptimize() const { return false; }
9283
9284 virtual Representation representation() const { return kUnboxedInt64; }
9285
9286 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9287 ASSERT(idx == 0);
9288 return kUnboxedInt64;
9289 }
9290
9291 virtual bool AttributesEqual(const Instruction& other) const {
9292 auto const unary_op_other = other.AsUnaryInt64Op();
9293 return UnaryIntegerOpInstr::AttributesEqual(other) &&
9294 (speculative_mode_ == unary_op_other->speculative_mode_);
9295 }
9296
9297 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
9298 return speculative_mode_;
9299 }
9300
9301 DECLARE_INSTRUCTION(UnaryInt64Op)
9302
9303#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
9304
9306 UnaryIntegerOpInstr,
9307 FIELD_LIST)
9308#undef FIELD_LIST
9309
9310 private:
9311 DISALLOW_COPY_AND_ASSIGN(UnaryInt64OpInstr);
9312};
9313
// Abstract base for binary integer operations. Tracks whether the operation
// may overflow and whether the result is truncated to the representation's
// width; truncating operations can never overflow (see set_can_overflow).
class BinaryIntegerOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  BinaryIntegerOpInstr(Token::Kind op_kind,
                       Value* left,
                       Value* right,
                       intptr_t deopt_id)
      : TemplateDefinition(deopt_id),
        op_kind_(op_kind),
        can_overflow_(true),
        is_truncating_(false) {
    SetInputAt(0, left);
    SetInputAt(1, right);
  }

  // Factory selecting the concrete subclass for [representation], with
  // default overflow/truncation state.
  static BinaryIntegerOpInstr* Make(
      Representation representation,
      Token::Kind op_kind,
      Value* left,
      Value* right,
      intptr_t deopt_id,
      SpeculativeMode speculative_mode = kGuardInputs);

  // Factory variant that also sets overflow/truncation flags and a
  // precomputed range.
  static BinaryIntegerOpInstr* Make(
      Representation representation,
      Token::Kind op_kind,
      Value* left,
      Value* right,
      intptr_t deopt_id,
      bool can_overflow,
      bool is_truncating,
      Range* range,
      SpeculativeMode speculative_mode = kGuardInputs);

  Token::Kind op_kind() const { return op_kind_; }
  Value* left() const { return inputs_[0]; }
  Value* right() const { return inputs_[1]; }

  bool can_overflow() const { return can_overflow_; }
  void set_can_overflow(bool overflow) {
    // A truncating operation wraps instead of overflowing.
    ASSERT(!is_truncating_ || !overflow);
    can_overflow_ = overflow;
  }

  bool is_truncating() const { return is_truncating_; }
  void mark_truncating() {
    is_truncating_ = true;
    set_can_overflow(false);
  }

  // Returns true if right is either a non-zero Integer constant or has a range
  // that does not include the possibility of being zero.
  bool RightIsNonZero() const;

  // Returns true if right is a non-zero Smi constant which absolute value is
  // a power of two.
  bool RightIsPowerOfTwoConstant() const;

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  virtual bool AttributesEqual(const Instruction& other) const;

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  DECLARE_ABSTRACT_INSTRUCTION(BinaryIntegerOp)

  DECLARE_ATTRIBUTE(op_kind())

#define FIELD_LIST(F)                                                          \
  F(const Token::Kind, op_kind_)                                               \
  F(bool, can_overflow_)                                                       \
  F(bool, is_truncating_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinaryIntegerOpInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 protected:
  void InferRangeHelper(const Range* left_range,
                        const Range* right_range,
                        Range* range);

 private:
  Definition* CreateConstantResult(FlowGraph* graph, const Integer& result);

  DISALLOW_COPY_AND_ASSIGN(BinaryIntegerOpInstr);
};
9405
9406class BinarySmiOpInstr : public BinaryIntegerOpInstr {
9407 public:
9408 BinarySmiOpInstr(Token::Kind op_kind,
9409 Value* left,
9410 Value* right,
9411 intptr_t deopt_id,
9412 // Provided by BinaryIntegerOpInstr::Make for constant RHS.
9413 Range* right_range = nullptr)
9414 : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
9415 right_range_(right_range) {}
9416
9417 virtual bool ComputeCanDeoptimize() const;
9418
9419 virtual void InferRange(RangeAnalysis* analysis, Range* range);
9420 virtual CompileType ComputeType() const;
9421
9422 DECLARE_INSTRUCTION(BinarySmiOp)
9423
9424 Range* right_range() const { return right_range_; }
9425
9426#define FIELD_LIST(F) F(Range*, right_range_)
9427
9429 BinaryIntegerOpInstr,
9430 FIELD_LIST)
9431#undef FIELD_LIST
9432
9433 private:
9434 DISALLOW_COPY_AND_ASSIGN(BinarySmiOpInstr);
9435};
9436
9437class BinaryInt32OpInstr : public BinaryIntegerOpInstr {
9438 public:
9439 BinaryInt32OpInstr(Token::Kind op_kind,
9440 Value* left,
9441 Value* right,
9442 intptr_t deopt_id)
9443 : BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {
9444 SetInputAt(0, left);
9445 SetInputAt(1, right);
9446 }
9447
9448 static bool IsSupported(Token::Kind op_kind, Value* left, Value* right) {
9449#if defined(TARGET_ARCH_IS_32_BIT)
9450 switch (op_kind) {
9451 case Token::kADD:
9452 case Token::kSUB:
9453 case Token::kMUL:
9454 case Token::kBIT_AND:
9455 case Token::kBIT_OR:
9456 case Token::kBIT_XOR:
9457 return true;
9458
9459 case Token::kSHL:
9460 case Token::kSHR:
9461 case Token::kUSHR:
9462 if (right->BindsToConstant() && right->BoundConstant().IsSmi()) {
9463 const intptr_t value = Smi::Cast(right->BoundConstant()).Value();
9464 return 0 <= value && value < kBitsPerWord;
9465 }
9466 return false;
9467
9468 default:
9469 return false;
9470 }
9471#else
9472 return false;
9473#endif
9474 }
9475
9476 virtual bool ComputeCanDeoptimize() const;
9477
9478 virtual Representation representation() const { return kUnboxedInt32; }
9479
9480 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
9481 ASSERT((idx == 0) || (idx == 1));
9482 return kUnboxedInt32;
9483 }
9484
9485 DECLARE_INSTRUCTION(BinaryInt32Op)
9486
9487 DECLARE_EMPTY_SERIALIZATION(BinaryInt32OpInstr, BinaryIntegerOpInstr)
9488
9489 private:
9490 DISALLOW_COPY_AND_ASSIGN(BinaryInt32OpInstr);
9491};
9492
// Binary operation on two unboxed uint32 inputs. Always truncating (marked
// in the constructor), so it wraps modulo 2^32 and never deoptimizes.
class BinaryUint32OpInstr : public BinaryIntegerOpInstr {
 public:
  BinaryUint32OpInstr(Token::Kind op_kind,
                      Value* left,
                      Value* right,
                      intptr_t deopt_id)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {
    mark_truncating();
    ASSERT(IsSupported(op_kind));
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedUint32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedUint32;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }

  // Shifts are handled by the dedicated shift instructions below.
  static bool IsSupported(Token::Kind op_kind) {
    switch (op_kind) {
      case Token::kADD:
      case Token::kSUB:
      case Token::kMUL:
      case Token::kBIT_AND:
      case Token::kBIT_OR:
      case Token::kBIT_XOR:
        return true;
      default:
        return false;
    }
  }

  DECLARE_INSTRUCTION(BinaryUint32Op)

  DECLARE_EMPTY_SERIALIZATION(BinaryUint32OpInstr, BinaryIntegerOpInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(BinaryUint32OpInstr);
};
9538
// Binary operation on two unboxed int64 inputs. Always truncating, so it
// never overflows or deoptimizes; kMOD/kTRUNCDIV may throw when the right
// operand can be zero (see MayThrow).
class BinaryInt64OpInstr : public BinaryIntegerOpInstr {
 public:
  BinaryInt64OpInstr(Token::Kind op_kind,
                     Value* left,
                     Value* right,
                     intptr_t deopt_id,
                     SpeculativeMode speculative_mode = kGuardInputs)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
        speculative_mode_(speculative_mode) {
    mark_truncating();
  }

  virtual bool ComputeCanDeoptimize() const {
    ASSERT(!can_overflow());
    return false;
  }

  // Integer division/modulus by zero throws.
  virtual bool MayThrow() const {
    return (op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) &&
           !RightIsNonZero();
  }

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedInt64;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual bool AttributesEqual(const Instruction& other) const {
    return BinaryIntegerOpInstr::AttributesEqual(other) &&
           (speculative_mode_ == other.AsBinaryInt64Op()->speculative_mode_);
  }

  DECLARE_INSTRUCTION(BinaryInt64Op)

#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinaryInt64OpInstr,
                                          BinaryIntegerOpInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(BinaryInt64OpInstr);
};
9589
// Base class for integer shift operations (kSHL, kSHR, kUSHR). Shifts are
// always truncating; an optional range for the shift amount lets subclasses
// prove the count is in bounds (see IsShiftCountInRange).
class ShiftIntegerOpInstr : public BinaryIntegerOpInstr {
 public:
  ShiftIntegerOpInstr(Token::Kind op_kind,
                      Value* left,
                      Value* right,
                      intptr_t deopt_id,
                      // Provided by BinaryIntegerOpInstr::Make for constant RHS
                      Range* right_range = nullptr)
      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
        shift_range_(right_range) {
    ASSERT((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
           (op_kind == Token::kUSHR));
    mark_truncating();
  }

  Range* shift_range() const { return shift_range_; }

  // Set the range directly (takes ownership).
  void set_shift_range(Range* shift_range) { shift_range_ = shift_range; }

  virtual void InferRange(RangeAnalysis* analysis, Range* range);

  DECLARE_ABSTRACT_INSTRUCTION(ShiftIntegerOp)

#define FIELD_LIST(F) F(Range*, shift_range_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ShiftIntegerOpInstr,
                                          BinaryIntegerOpInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 protected:
  // Largest legal int64 shift amount.
  static constexpr intptr_t kShiftCountLimit = 63;

  // Returns true if the shift amount is guaranteed to be in
  // [0..max] range.
  bool IsShiftCountInRange(int64_t max = kShiftCountLimit) const;

 private:
  DISALLOW_COPY_AND_ASSIGN(ShiftIntegerOpInstr);
};
9632
// Non-speculative int64 shift. Takes 2 unboxed int64.
// Throws if right operand is negative.
class ShiftInt64OpInstr : public ShiftIntegerOpInstr {
 public:
  ShiftInt64OpInstr(Token::Kind op_kind,
                    Value* left,
                    Value* right,
                    intptr_t deopt_id,
                    Range* right_range = nullptr)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  // Throws unless the shift count is provably in [0..kShiftCountLimit].
  virtual bool MayThrow() const { return !IsShiftCountInRange(); }

  virtual Representation representation() const { return kUnboxedInt64; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return kUnboxedInt64;
  }

  DECLARE_INSTRUCTION(ShiftInt64Op)

  DECLARE_EMPTY_SERIALIZATION(ShiftInt64OpInstr, ShiftIntegerOpInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(ShiftInt64OpInstr);
};
9664
// Speculative int64 shift. Takes unboxed int64 and smi.
// Deoptimizes if right operand is negative or greater than kShiftCountLimit.
class SpeculativeShiftInt64OpInstr : public ShiftIntegerOpInstr {
 public:
  SpeculativeShiftInt64OpInstr(Token::Kind op_kind,
                               Value* left,
                               Value* right,
                               intptr_t deopt_id,
                               Range* right_range = nullptr)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

  virtual bool ComputeCanDeoptimize() const {
    ASSERT(!can_overflow());
    return !IsShiftCountInRange();
  }

  virtual Representation representation() const { return kUnboxedInt64; }

  // Left operand is unboxed int64; the shift amount stays tagged (Smi).
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return (idx == 0) ? kUnboxedInt64 : kTagged;
  }

  DECLARE_INSTRUCTION(SpeculativeShiftInt64Op)

  DECLARE_EMPTY_SERIALIZATION(SpeculativeShiftInt64OpInstr, ShiftIntegerOpInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftInt64OpInstr);
};
9695
// Non-speculative uint32 shift. Takes unboxed uint32 and unboxed int64.
// Throws if right operand is negative.
class ShiftUint32OpInstr : public ShiftIntegerOpInstr {
 public:
  ShiftUint32OpInstr(Token::Kind op_kind,
                     Value* left,
                     Value* right,
                     intptr_t deopt_id,
                     Range* right_range = nullptr)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }
  virtual bool ComputeCanDeoptimize() const { return false; }
  // Throws unless the shift count is provably in [0..31].
  virtual bool MayThrow() const {
    return !IsShiftCountInRange(kUint32ShiftCountLimit);
  }

  virtual Representation representation() const { return kUnboxedUint32; }

  // Left operand is unboxed uint32; the shift amount is unboxed int64.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return (idx == 0) ? kUnboxedUint32 : kUnboxedInt64;
  }

  DECLARE_INSTRUCTION(ShiftUint32Op)

  DECLARE_EMPTY_SERIALIZATION(ShiftUint32OpInstr, ShiftIntegerOpInstr)

 private:
  // Largest legal uint32 shift amount.
  static constexpr intptr_t kUint32ShiftCountLimit = 31;

  DISALLOW_COPY_AND_ASSIGN(ShiftUint32OpInstr);
};
9731
// Speculative uint32 shift. Takes unboxed uint32 and smi.
// Deoptimizes if right operand is negative.
class SpeculativeShiftUint32OpInstr : public ShiftIntegerOpInstr {
 public:
  SpeculativeShiftUint32OpInstr(Token::Kind op_kind,
                                Value* left,
                                Value* right,
                                intptr_t deopt_id,
                                Range* right_range = nullptr)
      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id, right_range) {}

  // Note: uses the int64 limit (kShiftCountLimit default) here; counts in
  // (31..63] are handled by the backend rather than causing a deopt.
  virtual bool ComputeCanDeoptimize() const { return !IsShiftCountInRange(); }

  virtual Representation representation() const { return kUnboxedUint32; }

  // Left operand is unboxed uint32; the shift amount stays tagged (Smi).
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((idx == 0) || (idx == 1));
    return (idx == 0) ? kUnboxedUint32 : kTagged;
  }

  DECLARE_INSTRUCTION(SpeculativeShiftUint32Op)

  DECLARE_EMPTY_SERIALIZATION(SpeculativeShiftUint32OpInstr,
                              ShiftIntegerOpInstr)

 private:
  // Largest legal uint32 shift amount. (Unused in this view — TODO confirm.)
  static constexpr intptr_t kUint32ShiftCountLimit = 31;

  DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftUint32OpInstr);
};
9762
// Unary floating-point operation on a single unboxed float/double input.
// The working representation is configurable (kUnboxedFloat or
// kUnboxedDouble, asserted in the constructor).
class UnaryDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  UnaryDoubleOpInstr(Token::Kind op_kind,
                     Value* value,
                     intptr_t deopt_id,
                     SpeculativeMode speculative_mode = kGuardInputs,
                     Representation representation = kUnboxedDouble)
      : TemplateDefinition(deopt_id),
        op_kind_(op_kind),
        speculative_mode_(speculative_mode),
        representation_(representation) {
    ASSERT((representation == kUnboxedFloat) ||
           (representation == kUnboxedDouble));
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }
  Token::Kind op_kind() const { return op_kind_; }

  DECLARE_INSTRUCTION(UnaryDoubleOp)

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  virtual Representation representation() const { return representation_; }

  // The input uses the same representation as the result.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return representation_;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  // Equal if operation kind, speculative mode and representation all match.
  virtual bool AttributesEqual(const Instruction& other) const {
    auto other_op = other.Cast<UnaryDoubleOpInstr>();
    return (op_kind_ == other_op->op_kind_) &&
           (speculative_mode_ == other_op->speculative_mode_) &&
           (representation_ == other_op->representation_);
  }

  DECLARE_ATTRIBUTE(op_kind())

#define FIELD_LIST(F)                                                          \
  F(const Token::Kind, op_kind_)                                               \
  F(const SpeculativeMode, speculative_mode_)                                  \
  F(const Representation, representation_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnaryDoubleOpInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(UnaryDoubleOpInstr);
};
9827
// Stack-overflow check, which doubles as the interrupt/preemption point and
// (in unoptimized code) the on-stack-replacement entry point.
class CheckStackOverflowInstr : public TemplateInstruction<0, NoThrow> {
 public:
  enum Kind {
    // kOsrAndPreemption stack overflow checks are emitted in both unoptimized
    // and optimized versions of the code and they serve as both preemption and
    // OSR entry points.
    kOsrAndPreemption,

    // kOsrOnly stack overflow checks are only needed in the unoptimized code
    // because we can't OSR optimized code.
    kOsrOnly,
  };

  CheckStackOverflowInstr(const InstructionSource& source,
                          intptr_t stack_depth,
                          intptr_t loop_depth,
                          intptr_t deopt_id,
                          Kind kind)
      : TemplateInstruction(source, deopt_id),
        token_pos_(source.token_pos),
        stack_depth_(stack_depth),
        loop_depth_(loop_depth),
        kind_(kind) {
    // An OSR-only check makes sense only inside a loop.
    ASSERT(kind != kOsrOnly || loop_depth > 0);
  }

  virtual TokenPosition token_pos() const { return token_pos_; }
  bool in_loop() const { return loop_depth_ > 0; }
  intptr_t stack_depth() const { return stack_depth_; }
  intptr_t loop_depth() const { return loop_depth_; }

  DECLARE_INSTRUCTION(CheckStackOverflow)

  virtual bool ComputeCanDeoptimize() const { return false; }
  // In JIT mode the runtime call behind this check can trigger lazy
  // deoptimization; in AOT there is no deoptimization.
  virtual bool ComputeCanDeoptimizeAfterCall() const {
    return !CompilerState::Current().is_aot();
  }

  virtual Instruction* Canonicalize(FlowGraph* flow_graph);

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool CanEliminate(const BlockEntryInstr* block) const {
    return false;
  }

  virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
    return SlowPathSharingSupported(is_optimizing);
  }

#define FIELD_LIST(F)                                                          \
  F(const TokenPosition, token_pos_)                                           \
  F(const intptr_t, stack_depth_)                                              \
  F(const intptr_t, loop_depth_)                                               \
  F(const Kind, kind_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckStackOverflowInstr,
                                          TemplateInstruction,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(CheckStackOverflowInstr);
};
9894
9895// TODO(vegorov): remove this instruction in favor of Int32ToDouble.
9896class SmiToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
9897 public:
9898 SmiToDoubleInstr(Value* value, const InstructionSource& source)
9899 : TemplateDefinition(source), token_pos_(source.token_pos) {
9900 SetInputAt(0, value);
9901 }
9902
9903 Value* value() const { return inputs_[0]; }
9904 virtual TokenPosition token_pos() const { return token_pos_; }
9905
9906 DECLARE_INSTRUCTION(SmiToDouble)
9907
9908 virtual Representation representation() const { return kUnboxedDouble; }
9909
9910 virtual bool ComputeCanDeoptimize() const { return false; }
9911
9912 virtual bool AttributesEqual(const Instruction& other) const { return true; }
9913
9914#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
9915
9917 TemplateDefinition,
9918 FIELD_LIST)
9919#undef FIELD_LIST
9920
9921 private:
9922 DISALLOW_COPY_AND_ASSIGN(SmiToDoubleInstr);
9923};
9924
// Converts an unboxed int32 input to an unboxed double. Never deoptimizes.
class Int32ToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  explicit Int32ToDoubleInstr(Value* value) { SetInputAt(0, value); }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(Int32ToDouble)

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == 0);
    return kUnboxedInt32;
  }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  // Carries no state beyond its input; always CSE-equal.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  DECLARE_EMPTY_SERIALIZATION(Int32ToDoubleInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(Int32ToDoubleInstr);
};
9949
// Converts an unboxed int64 input to an unboxed double. Never deoptimizes.
class Int64ToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  Int64ToDoubleInstr(Value* value,
                     intptr_t deopt_id,
                     SpeculativeMode speculative_mode = kGuardInputs)
      : TemplateDefinition(deopt_id), speculative_mode_(speculative_mode) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(Int64ToDouble)

  virtual Representation RequiredInputRepresentation(intptr_t index) const {
    ASSERT(index == 0);
    return kUnboxedInt64;
  }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual intptr_t DeoptimizationTarget() const {
    // Direct access since this instruction cannot deoptimize, and the deopt-id
    // was inherited from another instruction that could deoptimize.
    return GetDeoptId();
  }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  // CSE-equal only when the speculative modes match.
  virtual bool AttributesEqual(const Instruction& other) const {
    return speculative_mode_ == other.AsInt64ToDouble()->speculative_mode_;
  }

#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Int64ToDoubleInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(Int64ToDoubleInstr);
};
9996
// Converts an unboxed double input to an integer. The rounding behavior is
// selected by `recognized_kind` — toInt, floorToInt or ceilToInt (enforced
// by the constructor ASSERT).
class DoubleToIntegerInstr : public TemplateDefinition<1, Throws, Pure> {
 public:
  DoubleToIntegerInstr(Value* value,
                       MethodRecognizer::Kind recognized_kind,
                       intptr_t deopt_id)
      : TemplateDefinition(deopt_id), recognized_kind_(recognized_kind) {
    ASSERT((recognized_kind == MethodRecognizer::kDoubleToInteger) ||
           (recognized_kind == MethodRecognizer::kDoubleFloorToInt) ||
           (recognized_kind == MethodRecognizer::kDoubleCeilToInt));
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  MethodRecognizer::Kind recognized_kind() const { return recognized_kind_; }

  DECLARE_INSTRUCTION(DoubleToInteger)
  virtual CompileType ComputeType() const;

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
    ASSERT(idx == 0);
    return kNotSpeculative;
  }

  // Can deoptimize in JIT mode only; AOT code never deoptimizes.
  virtual bool ComputeCanDeoptimize() const {
    return !CompilerState::Current().is_aot();
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  virtual bool HasUnknownSideEffects() const { return false; }

  // CSE-equal only when the rounding kinds match.
  virtual bool AttributesEqual(const Instruction& other) const {
    return other.AsDoubleToInteger()->recognized_kind() == recognized_kind();
  }

#define FIELD_LIST(F) F(const MethodRecognizer::Kind, recognized_kind_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleToIntegerInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(DoubleToIntegerInstr);
};
10048
// Similar to 'DoubleToIntegerInstr' but expects unboxed double as input
// and creates a Smi.
class DoubleToSmiInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  DoubleToSmiInstr(Value* value, intptr_t deopt_id)
      : TemplateDefinition(deopt_id) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(DoubleToSmi)
  virtual CompileType ComputeType() const;

  // Speculative conversion: may deoptimize.
  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  // Carries no state beyond its input; always CSE-equal.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  DECLARE_EMPTY_SERIALIZATION(DoubleToSmiInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(DoubleToSmiInstr);
};
10079
// Narrows an unboxed double to an unboxed float. Never deoptimizes.
class DoubleToFloatInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  DoubleToFloatInstr(Value* value,
                     intptr_t deopt_id,
                     SpeculativeMode speculative_mode = kGuardInputs)
      : TemplateDefinition(deopt_id), speculative_mode_(speculative_mode) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(DoubleToFloat)

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedFloat; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedDouble;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  // NOTE: unlike Int64ToDouble, equality ignores speculative_mode_.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleToFloatInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(DoubleToFloatInstr);
};
10122
// Widens an unboxed float to an unboxed double. Never deoptimizes.
class FloatToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  FloatToDoubleInstr(Value* value, intptr_t deopt_id)
      : TemplateDefinition(deopt_id) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(FloatToDouble)

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kUnboxedFloat;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  // Carries no state beyond its input; always CSE-equal.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DECLARE_EMPTY_SERIALIZATION(FloatToDoubleInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(FloatToDoubleInstr);
};
10154
10155// left op right ? -1 : 0
10156class FloatCompareInstr : public TemplateDefinition<2, NoThrow, Pure> {
10157 public:
10158 FloatCompareInstr(Token::Kind op_kind, Value* left, Value* right)
10159 : op_kind_(op_kind) {
10160 SetInputAt(0, left);
10161 SetInputAt(1, right);
10162 }
10163
10164 Value* left() const { return inputs_[0]; }
10165 Value* right() const { return inputs_[1]; }
10166
10167 Token::Kind op_kind() const { return op_kind_; }
10168
10169 DECLARE_INSTRUCTION(FloatCompare)
10170
10171 DECLARE_ATTRIBUTE(op_kind())
10172
10173 virtual bool ComputeCanDeoptimize() const { return false; }
10174
10175 virtual Representation representation() const { return kUnboxedInt32; }
10176
10177 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
10178 return kUnboxedFloat;
10179 }
10180
10181 virtual bool AttributesEqual(const Instruction& other) const {
10182 return other.AsFloatCompare()->op_kind() == op_kind();
10183 }
10184
10185#define FIELD_LIST(F) F(const Token::Kind, op_kind_)
10186
10188 TemplateDefinition,
10189 FIELD_LIST)
10190#undef FIELD_LIST
10191
10192 private:
10193 DISALLOW_COPY_AND_ASSIGN(FloatCompareInstr);
10194};
10195
// TODO(sjindel): Replace with FFICallInstr.
//
// Invokes a C runtime math function (selected by `recognized_kind`) with
// unboxed-double arguments and an unboxed-double result.
class InvokeMathCFunctionInstr : public VariadicDefinition {
 public:
  InvokeMathCFunctionInstr(InputsArray&& inputs,
                           intptr_t deopt_id,
                           MethodRecognizer::Kind recognized_kind,
                           const InstructionSource& source);

  // Number of arguments taken by the C function for `recognized_kind_`.
  static intptr_t ArgumentCountFor(MethodRecognizer::Kind recognized_kind_);

  // Runtime entry of the C function being invoked.
  const RuntimeEntry& TargetFunction() const;

  MethodRecognizer::Kind recognized_kind() const { return recognized_kind_; }

  virtual TokenPosition token_pos() const { return token_pos_; }

  DECLARE_INSTRUCTION(InvokeMathCFunction)

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedDouble; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((0 <= idx) && (idx < InputCount()));
    return kUnboxedDouble;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
    ASSERT((0 <= idx) && (idx < InputCount()));
    return kNotSpeculative;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  // CSE-eligible: same kind + same inputs produce the same result.
  virtual bool AllowsCSE() const { return true; }
  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool AttributesEqual(const Instruction& other) const {
    auto const other_invoke = other.AsInvokeMathCFunction();
    return other_invoke->recognized_kind() == recognized_kind();
  }

  virtual bool MayThrow() const { return false; }

  // Named indices for the temporaries this instruction uses.
  static constexpr intptr_t kSavedSpTempIndex = 0;
  static constexpr intptr_t kObjectTempIndex = 1;
  static constexpr intptr_t kDoubleTempIndex = 2;

#define FIELD_LIST(F)                                                          \
  F(const MethodRecognizer::Kind, recognized_kind_)                            \
  F(const TokenPosition, token_pos_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InvokeMathCFunctionInstr,
                                          VariadicDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeMathCFunctionInstr);
};
10258
// Extracts the Nth output of an instruction that produces multiple outputs.
// Currently only extraction of a tagged element out of a kPairOfTagged
// input is supported (see RequiredInputRepresentation).
class ExtractNthOutputInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  // Extract the Nth output register from value.
  ExtractNthOutputInstr(Value* value,
                        intptr_t n,
                        Representation definition_rep,
                        intptr_t definition_cid)
      : index_(n),
        definition_rep_(definition_rep),
        definition_cid_(definition_cid) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(ExtractNthOutput)
  DECLARE_ATTRIBUTE(index())

  virtual CompileType ComputeType() const;
  virtual bool ComputeCanDeoptimize() const { return false; }

  intptr_t index() const { return index_; }

  virtual Representation representation() const { return definition_rep_; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    // Only tagged pair extraction is implemented; other representations
    // would hit UNREACHABLE below.
    if (representation() == kTagged) {
      return kPairOfTagged;
    }
    UNREACHABLE();
    return definition_rep_;
  }

  // CSE-equal only when both representation and extracted index match.
  virtual bool AttributesEqual(const Instruction& other) const {
    auto const other_extract = other.AsExtractNthOutput();
    return (other_extract->representation() == representation()) &&
           (other_extract->index() == index());
  }

#define FIELD_LIST(F)                                                          \
  F(const intptr_t, index_)                                                    \
  F(const Representation, definition_rep_)                                     \
  F(const intptr_t, definition_cid_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ExtractNthOutputInstr,
                                          TemplateDefinition,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(ExtractNthOutputInstr);
};
10314
// Combines 2 values into a pair with kPairOfTagged representation.
class MakePairInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  MakePairInstr(Value* x, Value* y) {
    SetInputAt(0, x);
    SetInputAt(1, y);
  }

  DECLARE_INSTRUCTION(MakePair)

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kPairOfTagged; }

  // Both inputs must be tagged.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((0 <= idx) && (idx < InputCount()));
    return kTagged;
  }

  // Carries no state beyond its inputs; always CSE-equal.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  DECLARE_EMPTY_SERIALIZATION(MakePairInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(MakePairInstr);
};
10341
10342class UnboxLaneInstr : public TemplateDefinition<1, NoThrow, Pure> {
10343 public:
10344 UnboxLaneInstr(Value* value,
10345 intptr_t n,
10346 Representation definition_rep,
10347 intptr_t definition_cid)
10348 : lane_(n),
10349 definition_rep_(definition_rep),
10350 definition_cid_(definition_cid) {
10351 SetInputAt(0, value);
10352 }
10353
10354 Value* value() const { return inputs_[0]; }
10355
10356 DECLARE_INSTRUCTION(UnboxLane)
10357
10358 virtual CompileType ComputeType() const;
10359 virtual bool ComputeCanDeoptimize() const { return false; }
10360
10361 intptr_t lane() const { return lane_; }
10362
10363 virtual Representation representation() const { return definition_rep_; }
10364
10365 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
10366 ASSERT(idx == 0);
10367 return kTagged;
10368 }
10369
10370 virtual bool AttributesEqual(const Instruction& other) const {
10371 auto const other_split = other.AsUnboxLane();
10372 return (other_split->representation() == representation()) &&
10373 (other_split->lane() == lane());
10374 }
10375
10376 Definition* Canonicalize(FlowGraph* flow_graph);
10377
10379
10380#define FIELD_LIST(F) \
10381 F(const intptr_t, lane_) \
10382 F(const Representation, definition_rep_) \
10383 F(const intptr_t, definition_cid_)
10384
10386 TemplateDefinition,
10387 FIELD_LIST)
10388#undef FIELD_LIST
10389
10390 private:
10391 DISALLOW_COPY_AND_ASSIGN(UnboxLaneInstr);
10392};
10393
10394class BoxLanesInstr : public TemplateDefinition<4, NoThrow, Pure> {
10395 public:
10396 BoxLanesInstr(Representation from_representation, Value* x, Value* y)
10397 : from_representation_(from_representation) {
10398 ASSERT(from_representation == kUnboxedDouble);
10399 ASSERT(x->definition()->representation() == from_representation);
10400 ASSERT(y->definition()->representation() == from_representation);
10401 SetInputAt(0, x);
10402 SetInputAt(1, y);
10403 }
10404 BoxLanesInstr(Representation from_representation,
10405 Value* x,
10406 Value* y,
10407 Value* z,
10408 Value* w)
10409 : from_representation_(from_representation) {
10410 ASSERT((from_representation == kUnboxedInt32) ||
10411 (from_representation == kUnboxedFloat));
10412 ASSERT(x->definition()->representation() == from_representation);
10413 ASSERT(y->definition()->representation() == from_representation);
10414 ASSERT(z->definition()->representation() == from_representation);
10415 ASSERT(w->definition()->representation() == from_representation);
10416 SetInputAt(0, x);
10417 SetInputAt(1, y);
10418 SetInputAt(2, z);
10419 SetInputAt(3, w);
10420 }
10421
10422 intptr_t InputCount() const {
10423 switch (from_representation_) {
10424 case kUnboxedDouble:
10425 return 2;
10426 case kUnboxedFloat:
10427 return 4;
10428 case kUnboxedInt32:
10429 return 4;
10430 default:
10431 UNREACHABLE();
10432 return 0;
10433 }
10434 }
10435 Value* x() const { return inputs_[0]; }
10436 Value* y() const { return inputs_[1]; }
10437 Value* z() const {
10438 ASSERT((from_representation() == kUnboxedInt32) ||
10439 (from_representation() == kUnboxedFloat));
10440 return inputs_[2];
10441 }
10442 Value* w() const {
10443 ASSERT((from_representation() == kUnboxedInt32) ||
10444 (from_representation() == kUnboxedFloat));
10445 return inputs_[3];
10446 }
10447 Representation from_representation() const { return from_representation_; }
10448
10449 DECLARE_INSTRUCTION(BoxLanes)
10450 virtual CompileType ComputeType() const;
10451
10452 virtual bool ComputeCanDeoptimize() const { return false; }
10453 virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }
10454
10455 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
10456 ASSERT(idx == 0 || idx == 1 || idx == 2 || idx == 3);
10457 return from_representation();
10458 }
10459
10460 virtual bool AttributesEqual(const Instruction& other) const {
10461 return other.AsBoxLanes()->from_representation() == from_representation();
10462 }
10463
10464 Definition* Canonicalize(FlowGraph* flow_graph);
10465
10466 virtual TokenPosition token_pos() const { return TokenPosition::kBox; }
10467
10468 virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
10469 return kNotSpeculative;
10470 }
10471
10473
10474#define FIELD_LIST(F) F(const Representation, from_representation_)
10475
10477 TemplateDefinition,
10478 FIELD_LIST)
10479#undef FIELD_LIST
10480
10481 private:
10482 DISALLOW_COPY_AND_ASSIGN(BoxLanesInstr);
10483};
10484
// Computes both the truncating division and the remainder of its two tagged
// inputs in one instruction, producing a kPairOfTagged result. Use
// OutputIndexOf() to find which pair slot holds the result for a given
// operation token.
class TruncDivModInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  TruncDivModInstr(Value* lhs, Value* rhs, intptr_t deopt_id);

  // Maps an operation token to the index of its result within the pair.
  static intptr_t OutputIndexOf(Token::Kind token);

  // Speculative: may deoptimize.
  virtual bool ComputeCanDeoptimize() const { return true; }

  virtual Representation representation() const { return kPairOfTagged; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT((0 <= idx) && (idx < InputCount()));
    return kTagged;
  }

  virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }

  DECLARE_INSTRUCTION(TruncDivMod)

  // Carries no state beyond its inputs; always CSE-equal.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  DECLARE_EMPTY_SERIALIZATION(TruncDivModInstr, TemplateDefinition)

 private:
  Range* divisor_range() const {
    // Note: this range is only used to remove check for zero divisor from
    // the emitted pattern. It is not used for deciding whether instruction
    // will deoptimize or not - that is why it is ok to access range of
    // the definition directly. Otherwise range analysis or another pass
    // needs to cache range of the divisor in the operation to prevent
    // bugs when range information gets out of sync with the final decision
    // whether some instruction can deoptimize or not made in
    // EliminateEnvironments().
    return InputAt(1)->definition()->range();
  }

  DISALLOW_COPY_AND_ASSIGN(TruncDivModInstr);
};
10525
10526class CheckClassInstr : public TemplateInstruction<1, NoThrow> {
10527 public:
10528 CheckClassInstr(Value* value,
10529 intptr_t deopt_id,
10530 const Cids& cids,
10531 const InstructionSource& source);
10532
10533 DECLARE_INSTRUCTION(CheckClass)
10534
10535 virtual bool ComputeCanDeoptimize() const { return true; }
10536
10537 virtual TokenPosition token_pos() const { return token_pos_; }
10538
10539 Value* value() const { return inputs_[0]; }
10540
10541 const Cids& cids() const { return cids_; }
10542
10543 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
10544
10545 bool IsNullCheck() const { return IsDeoptIfNull() || IsDeoptIfNotNull(); }
10546
10547 bool IsDeoptIfNull() const;
10548 bool IsDeoptIfNotNull() const;
10549
10550 bool IsBitTest() const;
10551 static bool IsCompactCidRange(const Cids& cids);
10552 intptr_t ComputeCidMask() const;
10553
10554 virtual bool AllowsCSE() const { return true; }
10555 virtual bool HasUnknownSideEffects() const { return false; }
10556
10557 virtual bool AttributesEqual(const Instruction& other) const;
10558
10560
10561#define FIELD_LIST(F) \
10562 F(const Cids&, cids_) \
10563 F(bool, is_bit_test_) \
10564 F(const TokenPosition, token_pos_)
10565
10567 TemplateInstruction,
10568 FIELD_LIST)
10569#undef FIELD_LIST
10570
10571 private:
10572 int EmitCheckCid(FlowGraphCompiler* compiler,
10573 int bias,
10574 intptr_t cid_start,
10575 intptr_t cid_end,
10576 bool is_last,
10577 compiler::Label* is_ok,
10578 compiler::Label* deopt,
10579 bool use_near_jump);
10580 void EmitBitTest(FlowGraphCompiler* compiler,
10581 intptr_t min,
10582 intptr_t max,
10583 intptr_t mask,
10584 compiler::Label* deopt);
10585 void EmitNullCheck(FlowGraphCompiler* compiler, compiler::Label* deopt);
10586
10587 DISALLOW_COPY_AND_ASSIGN(CheckClassInstr);
10588};
10589
10590class CheckSmiInstr : public TemplateInstruction<1, NoThrow, Pure> {
10591 public:
10592 CheckSmiInstr(Value* value,
10593 intptr_t deopt_id,
10594 const InstructionSource& source)
10595 : TemplateInstruction(source, deopt_id), token_pos_(source.token_pos) {
10596 SetInputAt(0, value);
10597 }
10598
10599 Value* value() const { return inputs_[0]; }
10600 virtual TokenPosition token_pos() const { return token_pos_; }
10601
10602 DECLARE_INSTRUCTION(CheckSmi)
10603
10604 virtual bool ComputeCanDeoptimize() const { return true; }
10605
10606 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
10607
10608 virtual bool AttributesEqual(const Instruction& other) const { return true; }
10609
10610#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
10611
10613 TemplateInstruction,
10614 FIELD_LIST)
10615#undef FIELD_LIST
10616
10617 private:
10618 DISALLOW_COPY_AND_ASSIGN(CheckSmiInstr);
10619};
10620
10621// CheckNull instruction takes one input (`value`) and tests it for `null`.
10622// If `value` is `null`, then an exception is thrown according to
10623// `exception_type`. Otherwise, execution proceeds to the next instruction.
10624class CheckNullInstr : public TemplateDefinition<1, Throws, Pure> {
10625 public:
10626 enum ExceptionType {
10627 kNoSuchMethod,
10628 kArgumentError,
10629 kCastError,
10630 };
10631
10632 CheckNullInstr(Value* value,
10633 const String& function_name,
10634 intptr_t deopt_id,
10635 const InstructionSource& source,
10636 ExceptionType exception_type = kNoSuchMethod)
10637 : TemplateDefinition(source, deopt_id),
10638 token_pos_(source.token_pos),
10639 function_name_(function_name),
10640 exception_type_(exception_type) {
10641 DEBUG_ASSERT(function_name.IsNotTemporaryScopedHandle());
10642 ASSERT(function_name.IsSymbol());
10643 SetInputAt(0, value);
10644 }
10645
10646 Value* value() const { return inputs_[0]; }
10647 virtual TokenPosition token_pos() const { return token_pos_; }
10648 const String& function_name() const { return function_name_; }
10649 ExceptionType exception_type() const { return exception_type_; }
10650
10651 virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
10652 return SlowPathSharingSupported(is_optimizing);
10653 }
10654
10655 DECLARE_INSTRUCTION(CheckNull)
10656
10657 virtual CompileType ComputeType() const;
10658 virtual bool RecomputeType();
10659
10660 // CheckNull can implicitly call Dart code (NoSuchMethodError constructor),
10661 // so it needs a deopt ID in optimized and unoptimized code.
10662 virtual bool ComputeCanDeoptimize() const { return false; }
10663 virtual bool ComputeCanDeoptimizeAfterCall() const {
10664 return !CompilerState::Current().is_aot();
10665 }
10666 virtual bool CanBecomeDeoptimizationTarget() const { return true; }
10667
10668 virtual Definition* Canonicalize(FlowGraph* flow_graph);
10669
10670 virtual bool AttributesEqual(const Instruction& other) const;
10671
10672 static void AddMetadataForRuntimeCall(CheckNullInstr* check_null,
10673 FlowGraphCompiler* compiler);
10674
10675 virtual Value* RedefinedValue() const;
10676
10678
10679#define FIELD_LIST(F) \
10680 F(const TokenPosition, token_pos_) \
10681 F(const String&, function_name_) \
10682 F(const ExceptionType, exception_type_)
10683
10685 TemplateDefinition,
10686 FIELD_LIST)
10687#undef FIELD_LIST
10688
10689 private:
10690 DISALLOW_COPY_AND_ASSIGN(CheckNullInstr);
10691};
10692
10693class CheckClassIdInstr : public TemplateInstruction<1, NoThrow> {
10694 public:
10695 CheckClassIdInstr(Value* value, CidRangeValue cids, intptr_t deopt_id)
10696 : TemplateInstruction(deopt_id), cids_(cids) {
10697 SetInputAt(0, value);
10698 }
10699
10700 Value* value() const { return inputs_[0]; }
10701 const CidRangeValue& cids() const { return cids_; }
10702
10703 DECLARE_INSTRUCTION(CheckClassId)
10704
10705 virtual bool ComputeCanDeoptimize() const { return true; }
10706
10707 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
10708
10709 virtual bool AllowsCSE() const { return true; }
10710 virtual bool HasUnknownSideEffects() const { return false; }
10711
10712 virtual bool AttributesEqual(const Instruction& other) const {
10713 return other.Cast<CheckClassIdInstr>()->cids().Equals(cids_);
10714 }
10715
10717
10718#define FIELD_LIST(F) F(CidRangeValue, cids_)
10719
10721 TemplateInstruction,
10722 FIELD_LIST)
10723#undef FIELD_LIST
10724
10725 private:
10726 bool Contains(intptr_t cid) const;
10727
10728 DISALLOW_COPY_AND_ASSIGN(CheckClassIdInstr);
10729};
10730
// Base class for speculative [CheckArrayBoundInstr] and
// non-speculative [GenericCheckBoundInstr] bounds checking.
class CheckBoundBaseInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  CheckBoundBaseInstr(Value* length, Value* index, intptr_t deopt_id)
      : TemplateDefinition(deopt_id) {
    SetInputAt(kLengthPos, length);
    SetInputAt(kIndexPos, index);
  }

  Value* length() const { return inputs_[kLengthPos]; }
  Value* index() const { return inputs_[kIndexPos]; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

  DECLARE_ABSTRACT_INSTRUCTION(CheckBoundBase);

  // The "safe" index value produced by a successful check (see the
  // subclass comments).
  virtual Value* RedefinedValue() const;

  // Returns true if the bounds check can be eliminated without
  // changing the semantics (viz. 0 <= index < length).
  bool IsRedundant(bool use_loops = false);

  // Give a name to the location/input indices.
  enum { kLengthPos = 0, kIndexPos = 1 };

  DECLARE_EMPTY_SERIALIZATION(CheckBoundBaseInstr, TemplateDefinition)

 private:
  DISALLOW_COPY_AND_ASSIGN(CheckBoundBaseInstr);
};
10762
// Performs an array bounds check, where
//   safe_index := CheckArrayBound(length, index)
// returns the "safe" index when
//   0 <= index < length
// or otherwise deoptimizes (viz. speculative).
class CheckArrayBoundInstr : public CheckBoundBaseInstr {
 public:
  CheckArrayBoundInstr(Value* length, Value* index, intptr_t deopt_id)
      : CheckBoundBaseInstr(length, index, deopt_id), generalized_(false) {}

  DECLARE_INSTRUCTION(CheckArrayBound)

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual bool ComputeCanDeoptimize() const { return true; }

  // NOTE(review): presumably set when an optimization pass generalizes
  // this check to cover a wider range — confirm at call sites.
  void mark_generalized() { generalized_ = true; }

  // Returns the length offset for array and string types.
  static intptr_t LengthOffsetFor(intptr_t class_id);

  static bool IsFixedLengthArrayType(intptr_t class_id);

  // Carries no CSE-relevant state beyond its inputs.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

#define FIELD_LIST(F) F(bool, generalized_)

  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckArrayBoundInstr,
                                          CheckBoundBaseInstr,
                                          FIELD_LIST)
#undef FIELD_LIST

 private:
  DISALLOW_COPY_AND_ASSIGN(CheckArrayBoundInstr);
};
10799
// Performs an array bounds check, where
//   safe_index := GenericCheckBound(length, index)
// returns the "safe" index when
//   0 <= index < length
// or otherwise throws an out-of-bounds exception (viz. non-speculative).
class GenericCheckBoundInstr : public CheckBoundBaseInstr {
 public:
  // We prefer to have unboxed inputs on 64-bit where values can fit into a
  // register.
  static bool UseUnboxedRepresentation() {
    return compiler::target::kWordSize == 8;
  }

  GenericCheckBoundInstr(Value* length, Value* index, intptr_t deopt_id)
      : CheckBoundBaseInstr(length, index, deopt_id) {}

  // Carries no state beyond its inputs; always CSE-equal.
  virtual bool AttributesEqual(const Instruction& other) const { return true; }

  DECLARE_INSTRUCTION(GenericCheckBound)

  virtual CompileType ComputeType() const;
  virtual bool RecomputeType();

  virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }

  virtual Representation representation() const {
    return UseUnboxedRepresentation() ? kUnboxedInt64 : kTagged;
  }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == kIndexPos || idx == kLengthPos);
    return UseUnboxedRepresentation() ? kUnboxedInt64 : kTagged;
  }

  // GenericCheckBound can implicitly call Dart code (RangeError or
  // ArgumentError constructor), so it can lazily deopt.
  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool ComputeCanDeoptimizeAfterCall() const {
    return !CompilerState::Current().is_aot();
  }

  virtual bool MayThrow() const { return true; }

  virtual bool UseSharedSlowPathStub(bool is_optimizing) const {
    return SlowPathSharingSupported(is_optimizing);
  }

  DECLARE_EMPTY_SERIALIZATION(GenericCheckBoundInstr, CheckBoundBaseInstr)

 private:
  DISALLOW_COPY_AND_ASSIGN(GenericCheckBoundInstr);
};
10856
// Checks that the receiver may be mutated, throwing otherwise. The concrete
// check performed is selected by [Kind]: a write to unmodifiable typed data,
// or attaching a native finalizer to a deeply-immutable object.
10857class CheckWritableInstr : public TemplateDefinition<1, Throws, Pure> {
10858 public:
10859 enum Kind {
10860 kWriteUnmodifiableTypedData = 0,
10861 kDeeplyImmutableAttachNativeFinalizer = 1,
10862 };
10863
10864 CheckWritableInstr(Value* receiver,
10865 intptr_t deopt_id,
10866 const InstructionSource& source,
10867 Kind kind = Kind::kWriteUnmodifiableTypedData)
10868 : TemplateDefinition(source, deopt_id), kind_(kind) {
10869 SetInputAt(kReceiver, receiver);
10870 }
10871
 // NOTE(review): kind_ is deliberately not compared here, so two checks with
 // different kinds are treated as equal attributes — confirm intended.
10872 virtual bool AttributesEqual(const Instruction& other) const { return true; }
10873
10874 DECLARE_INSTRUCTION(CheckWritable)
10875
10876 Value* value() const { return inputs_[kReceiver]; }
10877
10878 virtual Definition* Canonicalize(FlowGraph* flow_graph);
10879
 // The receiver flows through the check (the check redefines its input).
10880 virtual Value* RedefinedValue() const;
10881
10882 virtual bool ComputeCanDeoptimize() const { return false; }
10883 virtual bool ComputeCanDeoptimizeAfterCall() const {
10884 return !CompilerState::Current().is_aot();
10885 }
10886
10887 Kind kind() const { return kind_; }
10888
10889 // Give a name to the location/input indices.
10890 enum {
10891 kReceiver = 0,
10892 };
10893
10894#define FIELD_LIST(F) F(const Kind, kind_)
10895
10896 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckWritableInstr,
10897 TemplateDefinition,
10898 FIELD_LIST)
10899
10900#undef FIELD_LIST
10901
10902 private:
10903 DISALLOW_COPY_AND_ASSIGN(CheckWritableInstr);
10904};
10905
10906// Instruction evaluates the given comparison and deoptimizes if it evaluates
10907// to false.
10908class CheckConditionInstr : public Instruction {
10909 public:
 // Wraps [comparison], taking over its inputs: each input's owning
 // instruction is re-pointed at this check.
10910 CheckConditionInstr(ComparisonInstr* comparison, intptr_t deopt_id)
10911 : Instruction(deopt_id), comparison_(comparison) {
10912 ASSERT(comparison->ArgumentCount() == 0);
10913 ASSERT(comparison->env() == nullptr);
10914 for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
10915 comparison->InputAt(i)->set_instruction(this);
10916 }
10917 }
10918
10919 ComparisonInstr* comparison() const { return comparison_; }
10920
10921 DECLARE_INSTRUCTION(CheckCondition)
10922
10923 virtual bool ComputeCanDeoptimize() const { return true; }
10924
10925 virtual Instruction* Canonicalize(FlowGraph* flow_graph);
10926
10927 virtual bool AllowsCSE() const { return true; }
10928 virtual bool HasUnknownSideEffects() const { return false; }
10929
 // Two checks are equal iff their embedded comparisons are equal.
10930 virtual bool AttributesEqual(const Instruction& other) const {
10931 return other.AsCheckCondition()->comparison()->AttributesEqual(
10932 *comparison());
10933 }
10934
 // Inputs are delegated to the wrapped comparison.
10935 virtual intptr_t InputCount() const { return comparison()->InputCount(); }
10936 virtual Value* InputAt(intptr_t i) const { return comparison()->InputAt(i); }
10937
10938 virtual bool MayThrow() const { return false; }
10939
 // Keep the comparison's deopt id in sync with this instruction's.
10940 virtual void CopyDeoptIdFrom(const Instruction& instr) {
10941 Instruction::CopyDeoptIdFrom(instr);
10942 comparison()->CopyDeoptIdFrom(instr);
10943 }
10944
10946
10947#define FIELD_LIST(F) F(ComparisonInstr*, comparison_)
10948
10949 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckConditionInstr,
10950 Instruction,
10951 FIELD_LIST)
10952#undef FIELD_LIST
10954
10955 private:
 // Input storage also lives in the comparison; forward raw writes to it.
10956 virtual void RawSetInputAt(intptr_t i, Value* value) {
10957 comparison()->RawSetInputAt(i, value);
10958 }
10959
10960 DISALLOW_COPY_AND_ASSIGN(CheckConditionInstr);
10961};
10962
// Converts an integer value between native unboxed integer representations
// (kUnboxedInt32/kUnboxedUint32/kUnboxedInt64) and untagged pointers.
// Conversions to kUnboxedUint32 are truncating by construction; other
// conversions can be marked truncating via mark_truncating().
10963class IntConverterInstr : public TemplateDefinition<1, NoThrow, Pure> {
10964 public:
10965 IntConverterInstr(Representation from,
10966 Representation to,
10967 Value* value,
10968 intptr_t deopt_id)
10969 : TemplateDefinition(deopt_id),
10970 from_representation_(from),
10971 to_representation_(to),
10972 is_truncating_(to == kUnboxedUint32) {
10973 ASSERT(from != to);
10974 // Integer conversion doesn't currently handle non-native representations.
10975 ASSERT_EQUAL(Boxing::NativeRepresentation(from), from);
10976 ASSERT_EQUAL(Boxing::NativeRepresentation(to), to);
10977 ASSERT(from == kUnboxedInt64 || from == kUnboxedUint32 ||
10978 from == kUnboxedInt32 || from == kUntagged);
10979 ASSERT(to == kUnboxedInt64 || to == kUnboxedUint32 || to == kUnboxedInt32 ||
10980 to == kUntagged);
 // Untagged conversions are only permitted to/from word-sized integers
 // and addresses.
10981 ASSERT(from != kUntagged || to == kUnboxedIntPtr || to == kUnboxedAddress);
10982 ASSERT(to != kUntagged || from == kUnboxedIntPtr ||
10983 from == kUnboxedAddress);
10984 // Don't allow conversions from unsafe untagged addresses.
10985 ASSERT(!value->definition()->MayCreateUnsafeUntaggedPointer());
10986 SetInputAt(0, value);
10987 }
10988
10989 Value* value() const { return inputs_[0]; }
10990
10991 Representation from() const { return from_representation_; }
10992 Representation to() const { return to_representation_; }
10993 bool is_truncating() const { return is_truncating_; }
10994
 // Marks the conversion as not needing an overflow/range check.
10995 void mark_truncating() { is_truncating_ = true; }
10996
10997 Definition* Canonicalize(FlowGraph* flow_graph);
10998
10999 virtual bool ComputeCanDeoptimize() const;
11000
11001 virtual Representation representation() const { return to(); }
11002
11003 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
11004 ASSERT(idx == 0);
11005 return from();
11006 }
11007
 // Equal iff source/target representations and truncation behavior match.
11008 virtual bool AttributesEqual(const Instruction& other) const {
11009 ASSERT(other.IsIntConverter());
11010 auto const converter = other.AsIntConverter();
11011 return (converter->from() == from()) && (converter->to() == to()) &&
11012 (converter->is_truncating() == is_truncating());
11013 }
11014
11015 virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
11016
11017 virtual void InferRange(RangeAnalysis* analysis, Range* range);
11018
11019 virtual bool MayCreateUnsafeUntaggedPointer() const {
11020 // The compiler no longer converts between unsafe untagged pointers and
11021 // unboxed integers.
11022 return false;
11023 }
11024
11025 DECLARE_INSTRUCTION(IntConverter);
11026
11027 DECLARE_ATTRIBUTES_NAMED(("from", "to", "is_truncating"),
11028 (from(), to(), is_truncating()))
11029
11031
11032#define FIELD_LIST(F) \
11033 F(const Representation, from_representation_) \
11034 F(const Representation, to_representation_) \
11035 F(bool, is_truncating_)
11036
11038 TemplateDefinition,
11039 FIELD_LIST)
11040#undef FIELD_LIST
11041
11042 private:
11043 DISALLOW_COPY_AND_ASSIGN(IntConverterInstr);
11044};
11045
11046// Moves a floating-point value between CPU and FPU registers. Used to implement
11047// "softfp" calling conventions, where FPU arguments/return values are passed in
11048// normal CPU registers.
11049class BitCastInstr : public TemplateDefinition<1, NoThrow, Pure> {
11050 public:
 // Pure reinterpretation of bits; never deoptimizes (DeoptId::kNone).
 // Only same-width pairs are supported: float<->int32 and double<->int64.
11051 BitCastInstr(Representation from, Representation to, Value* value)
11052 : TemplateDefinition(DeoptId::kNone),
11053 from_representation_(from),
11054 to_representation_(to) {
11055 ASSERT(from != to);
11056 ASSERT((to == kUnboxedInt32 && from == kUnboxedFloat) ||
11057 (to == kUnboxedFloat && from == kUnboxedInt32) ||
11058 (to == kUnboxedInt64 && from == kUnboxedDouble) ||
11059 (to == kUnboxedDouble && from == kUnboxedInt64));
11060 SetInputAt(0, value);
11061 }
11062
11063 Value* value() const { return inputs_[0]; }
11064
11065 Representation from() const { return from_representation_; }
11066 Representation to() const { return to_representation_; }
11067
11068 virtual bool ComputeCanDeoptimize() const { return false; }
11069
11070 virtual Representation representation() const { return to(); }
11071
11072 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
11073 ASSERT(idx == 0);
11074 return from();
11075 }
11076
 // Equal iff both representations match.
11077 virtual bool AttributesEqual(const Instruction& other) const {
11078 ASSERT(other.IsBitCast());
11079 auto const converter = other.AsBitCast();
11080 return converter->from() == from() && converter->to() == to();
11081 }
11082
11083 DECLARE_INSTRUCTION(BitCast);
11084
11086
11087#define FIELD_LIST(F) \
11088 F(const Representation, from_representation_) \
11089 F(const Representation, to_representation_)
11090
11092 TemplateDefinition,
11093 FIELD_LIST)
11094#undef FIELD_LIST
11095
11096 private:
11097 DISALLOW_COPY_AND_ASSIGN(BitCastInstr);
11098};
11099
// Materializes the current thread as an untagged pointer value.
11100class LoadThreadInstr : public TemplateDefinition<0, NoThrow, Pure> {
11101 public:
11102 LoadThreadInstr() : TemplateDefinition(DeoptId::kNone) {}
11103
11104 virtual bool ComputeCanDeoptimize() const { return false; }
11105
11106 virtual Representation representation() const { return kUntagged; }
11107
 // Has no inputs, so asking for an input representation is a bug.
11108 virtual Representation RequiredInputRepresentation(intptr_t idx) const {
11109 UNREACHABLE();
11110 }
11111
11112 virtual bool MayCreateUnsafeUntaggedPointer() const {
11113 // Threads are not GC-movable objects.
11114 return false;
11115 }
11116
11117 // CSE is allowed. The thread should always be the same value.
11118 virtual bool AttributesEqual(const Instruction& other) const {
11119 ASSERT(other.IsLoadThread());
11120 return true;
11121 }
11122
11123 DECLARE_INSTRUCTION(LoadThread);
11124
11125 DECLARE_EMPTY_SERIALIZATION(LoadThreadInstr, TemplateDefinition)
11126
11127 private:
11128 DISALLOW_COPY_AND_ASSIGN(LoadThreadInstr);
11129};
11130
11131// SimdOpInstr
11132//
11133// All SIMD intrinsics and recognized methods are represented via instances
11134// of SimdOpInstr, a particular type of SimdOp is selected by SimdOpInstr::Kind.
11135//
11136// Defines below are used to construct SIMD_OP_LIST - a list of all SIMD
11137// operations. SIMD_OP_LIST contains information such as arity, input types and
11138// output type for each SIMD op and is used to derive things like input
11139// and output representations, type of return value, etc.
11140//
11141// Lists of SIMD ops are defined using macro M, OP and BINARY_OP which are
11142// expected to have the following signature:
11143//
11144// (Arity, HasMask, Name, (In_0, ..., In_Arity), Out)
11145//
11146// where:
11147//
11148// HasMask is either _ or MASK and determines if the operation has a
11149// constant mask attribute
11150// In_0, ..., In_Arity are input types
11151// Out is output type
11152//
11153
11154// A binary SIMD op with the given name that has signature T x T -> T.
11155#define SIMD_BINARY_OP(M, T, Name) M(2, _, T##Name, (T, T), T)
11156
11157// List of SIMD_BINARY_OPs common for Float32x4 or Float64x2.
11158// Note: M for recognized methods and OP for operators.
11159#define SIMD_BINARY_FLOAT_OP_LIST(M, OP, T) \
11160 SIMD_BINARY_OP(OP, T, Add) \
11161 SIMD_BINARY_OP(OP, T, Sub) \
11162 SIMD_BINARY_OP(OP, T, Mul) \
11163 SIMD_BINARY_OP(OP, T, Div) \
11164 SIMD_BINARY_OP(M, T, Min) \
11165 SIMD_BINARY_OP(M, T, Max)
11166
11167// List of SIMD_BINARY_OP for Int32x4.
11168// Note: M for recognized methods and OP for operators.
11169#define SIMD_BINARY_INTEGER_OP_LIST(M, OP, T) \
11170 SIMD_BINARY_OP(OP, T, Add) \
11171 SIMD_BINARY_OP(OP, T, Sub) \
11172 SIMD_BINARY_OP(OP, T, BitAnd) \
11173 SIMD_BINARY_OP(OP, T, BitOr) \
11174 SIMD_BINARY_OP(OP, T, BitXor)
11175
11176// Given a signature of a given SIMD op construct its per component variations.
11177#define SIMD_PER_COMPONENT_XYZW(M, Arity, Name, Inputs, Output) \
11178 M(Arity, _, Name##X, Inputs, Output) \
11179 M(Arity, _, Name##Y, Inputs, Output) \
11180 M(Arity, _, Name##Z, Inputs, Output) \
11181 M(Arity, _, Name##W, Inputs, Output)
11182
11183// Define conversion between two SIMD types.
11184#define SIMD_CONVERSION(M, FromType, ToType) \
11185 M(1, _, FromType##To##ToType, (FromType), ToType)
11186
11187// List of all recognized SIMD operations.
11188// Note: except for operations that map to operators (Add, Mul, Sub, Div,
11189// BitXor, BitOr) all other operations must match names used by
11190// MethodRecognizer. This makes it possible to autogenerate conversion from
11191// MethodRecognizer::Kind into SimdOpInstr::Kind (see KindForMethod helper).
11192// Note: M is for those SimdOp that are recognized methods and BINARY_OP
11193// is for operators.
11194#define SIMD_OP_LIST(M, BINARY_OP) \
11195 SIMD_BINARY_FLOAT_OP_LIST(M, BINARY_OP, Float32x4) \
11196 SIMD_BINARY_FLOAT_OP_LIST(M, BINARY_OP, Float64x2) \
11197 SIMD_BINARY_INTEGER_OP_LIST(M, BINARY_OP, Int32x4) \
11198 SIMD_PER_COMPONENT_XYZW(M, 1, Float32x4Get, (Float32x4), Double) \
11199 SIMD_PER_COMPONENT_XYZW(M, 2, Float32x4With, (Double, Float32x4), Float32x4) \
11200 SIMD_PER_COMPONENT_XYZW(M, 1, Int32x4GetFlag, (Int32x4), Bool) \
11201 SIMD_PER_COMPONENT_XYZW(M, 2, Int32x4WithFlag, (Int32x4, Bool), Int32x4) \
11202 M(1, MASK, Float32x4Shuffle, (Float32x4), Float32x4) \
11203 M(1, MASK, Int32x4Shuffle, (Int32x4), Int32x4) \
11204 M(2, MASK, Float32x4ShuffleMix, (Float32x4, Float32x4), Float32x4) \
11205 M(2, MASK, Int32x4ShuffleMix, (Int32x4, Int32x4), Int32x4) \
11206 M(2, _, Float32x4Equal, (Float32x4, Float32x4), Int32x4) \
11207 M(2, _, Float32x4GreaterThan, (Float32x4, Float32x4), Int32x4) \
11208 M(2, _, Float32x4GreaterThanOrEqual, (Float32x4, Float32x4), Int32x4) \
11209 M(2, _, Float32x4LessThan, (Float32x4, Float32x4), Int32x4) \
11210 M(2, _, Float32x4LessThanOrEqual, (Float32x4, Float32x4), Int32x4) \
11211 M(2, _, Float32x4NotEqual, (Float32x4, Float32x4), Int32x4) \
11212 M(4, _, Int32x4FromInts, (Int32, Int32, Int32, Int32), Int32x4) \
11213 M(4, _, Int32x4FromBools, (Bool, Bool, Bool, Bool), Int32x4) \
11214 M(4, _, Float32x4FromDoubles, (Double, Double, Double, Double), Float32x4) \
11215 M(2, _, Float64x2FromDoubles, (Double, Double), Float64x2) \
11216 M(0, _, Float32x4Zero, (), Float32x4) \
11217 M(0, _, Float64x2Zero, (), Float64x2) \
11218 M(1, _, Float32x4Splat, (Double), Float32x4) \
11219 M(1, _, Float64x2Splat, (Double), Float64x2) \
11220 M(1, _, Int32x4GetSignMask, (Int32x4), Int8) \
11221 M(1, _, Float32x4GetSignMask, (Float32x4), Int8) \
11222 M(1, _, Float64x2GetSignMask, (Float64x2), Int8) \
11223 M(2, _, Float32x4Scale, (Double, Float32x4), Float32x4) \
11224 M(2, _, Float64x2Scale, (Float64x2, Double), Float64x2) \
11225 M(1, _, Float32x4Sqrt, (Float32x4), Float32x4) \
11226 M(1, _, Float64x2Sqrt, (Float64x2), Float64x2) \
11227 M(1, _, Float32x4Reciprocal, (Float32x4), Float32x4) \
11228 M(1, _, Float32x4ReciprocalSqrt, (Float32x4), Float32x4) \
11229 M(1, _, Float32x4Negate, (Float32x4), Float32x4) \
11230 M(1, _, Float64x2Negate, (Float64x2), Float64x2) \
11231 M(1, _, Float32x4Abs, (Float32x4), Float32x4) \
11232 M(1, _, Float64x2Abs, (Float64x2), Float64x2) \
11233 M(3, _, Float32x4Clamp, (Float32x4, Float32x4, Float32x4), Float32x4) \
11234 M(3, _, Float64x2Clamp, (Float64x2, Float64x2, Float64x2), Float64x2) \
11235 M(1, _, Float64x2GetX, (Float64x2), Double) \
11236 M(1, _, Float64x2GetY, (Float64x2), Double) \
11237 M(2, _, Float64x2WithX, (Float64x2, Double), Float64x2) \
11238 M(2, _, Float64x2WithY, (Float64x2, Double), Float64x2) \
11239 M(3, _, Int32x4Select, (Int32x4, Float32x4, Float32x4), Float32x4) \
11240 SIMD_CONVERSION(M, Float32x4, Int32x4) \
11241 SIMD_CONVERSION(M, Int32x4, Float32x4) \
11242 SIMD_CONVERSION(M, Float32x4, Float64x2) \
11243 SIMD_CONVERSION(M, Float64x2, Float32x4)
11244
11245class SimdOpInstr : public Definition {
11246 public:
 // One k<Name> constant per operation (autogenerated from SIMD_OP_LIST),
 // plus a kIllegalSimdOp sentinel.
11247 enum Kind {
11248#define DECLARE_ENUM(Arity, Mask, Name, ...) k##Name,
11250#undef DECLARE_ENUM
11251 kIllegalSimdOp,
11252 };
11253
11254 // Create SimdOp from the arguments of the given call and the given receiver.
11255 static SimdOpInstr* CreateFromCall(Zone* zone,
11256 MethodRecognizer::Kind kind,
11257 Definition* receiver,
11258 Instruction* call,
11259 intptr_t mask = 0);
11260
11261 // Create SimdOp from the arguments of the given factory call.
11262 static SimdOpInstr* CreateFromFactoryCall(Zone* zone,
11263 MethodRecognizer::Kind kind,
11264 Instruction* call);
11265
11266 // Create a binary SimdOp instr.
11267 static SimdOpInstr* Create(Kind kind,
11268 Value* left,
11269 Value* right,
11270 intptr_t deopt_id) {
11271 return new SimdOpInstr(kind, left, right, deopt_id);
11272 }
11273
11274 // Create a binary SimdOp instr.
11275 static SimdOpInstr* Create(MethodRecognizer::Kind kind,
11276 Value* left,
11277 Value* right,
11278 intptr_t deopt_id) {
11279 return new SimdOpInstr(KindForMethod(kind), left, right, deopt_id);
11280 }
11281
11282 // Create a unary SimdOp.
11283 static SimdOpInstr* Create(MethodRecognizer::Kind kind,
11284 Value* left,
11285 intptr_t deopt_id) {
11286 return new SimdOpInstr(KindForMethod(kind), left, deopt_id);
11287 }
11288
11289 static Kind KindForOperator(MethodRecognizer::Kind kind);
11290
11291 static Kind KindForMethod(MethodRecognizer::Kind method_kind);
11292
11293 // Convert a combination of SIMD cid and an arithmetic token into Kind, e.g.
11294 // Float32x4 and Token::kADD becomes Float32x4Add.
11295 static Kind KindForOperator(intptr_t cid, Token::Kind op);
11296
 // Input count depends on the arity recorded for kind_ in SIMD_OP_LIST.
11297 virtual intptr_t InputCount() const;
11298 virtual Value* InputAt(intptr_t i) const {
11299 ASSERT(0 <= i && i < InputCount());
11300 return inputs_[i];
11301 }
11302
11303 Kind kind() const { return kind_; }
 // Only valid for operations declared with MASK in SIMD_OP_LIST.
11304 intptr_t mask() const {
11305 ASSERT(HasMask());
11306 return mask_;
11307 }
11308
11309 virtual Representation representation() const;
11310 virtual Representation RequiredInputRepresentation(intptr_t idx) const;
11311
11312 virtual CompileType ComputeType() const;
11313
11314 virtual bool MayThrow() const { return false; }
11315 virtual bool ComputeCanDeoptimize() const { return false; }
11316
11317 virtual intptr_t DeoptimizationTarget() const {
11318 // Direct access since this instruction cannot deoptimize, and the deopt-id
11319 // was inherited from another instruction that could deoptimize.
11320 return GetDeoptId();
11321 }
11322
11323 virtual bool HasUnknownSideEffects() const { return false; }
11324 virtual bool AllowsCSE() const { return true; }
11325
 // Equal iff kinds match and, for masked operations, masks match too.
11326 virtual bool AttributesEqual(const Instruction& other) const {
11327 auto const other_op = other.AsSimdOp();
11328 return kind() == other_op->kind() &&
11329 (!HasMask() || mask() == other_op->mask());
11330 }
11331
11332 virtual Definition* Canonicalize(FlowGraph* flow_graph);
11333
11334 DECLARE_INSTRUCTION(SimdOp)
11336
11337#define FIELD_LIST(F) \
11338 F(const Kind, kind_) \
11339 F(intptr_t, mask_)
11340
11341 DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(SimdOpInstr, Definition, FIELD_LIST)
11342#undef FIELD_LIST
11343
11344 private:
11345 SimdOpInstr(Kind kind, intptr_t deopt_id)
11346 : Definition(deopt_id), kind_(kind) {}
11347
11348 SimdOpInstr(Kind kind, Value* left, intptr_t deopt_id)
11349 : Definition(deopt_id), kind_(kind) {
11350 SetInputAt(0, left);
11351 }
11352
11353 SimdOpInstr(Kind kind, Value* left, Value* right, intptr_t deopt_id)
11354 : Definition(deopt_id), kind_(kind) {
11355 SetInputAt(0, left);
11356 SetInputAt(1, right);
11357 }
11358
11359 bool HasMask() const;
11360 void set_mask(intptr_t mask) { mask_ = mask; }
11361
11362 virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
11363
11364 // We consider SimdOpInstr to be very uncommon so we don't optimize them for
11365 // size. Any instance of SimdOpInstr has enough space to fit any variation.
11366 // TODO(dartbug.com/30949) optimize this for size.
11367 Value* inputs_[4];
11368
11369 DISALLOW_COPY_AND_ASSIGN(SimdOpInstr);
11370};
11371
11372// Generic instruction to call 1-argument stubs specified using [StubId].
11373class Call1ArgStubInstr : public TemplateDefinition<1, Throws> {
11374 public:
 // The set of runtime stubs this instruction can invoke.
11375 enum class StubId {
11376 kCloneSuspendState,
11377 kInitAsync,
11378 kInitAsyncStar,
11379 kInitSyncStar,
11380 kFfiAsyncCallbackSend,
11381 };
11382
11383 Call1ArgStubInstr(const InstructionSource& source,
11384 StubId stub_id,
11385 Value* operand,
11386 intptr_t deopt_id)
11387 : TemplateDefinition(source, deopt_id),
11388 stub_id_(stub_id),
11389 token_pos_(source.token_pos) {
11390 SetInputAt(0, operand);
11391 }
11392
11393 Value* operand() const { return inputs_[0]; }
11394 StubId stub_id() const { return stub_id_; }
11395 virtual TokenPosition token_pos() const { return token_pos_; }
11396
 // Treated as an opaque call: may run Dart code, may lazily deopt after the
 // call, and has unknown side effects.
11397 virtual bool CanCallDart() const { return true; }
11398 virtual bool ComputeCanDeoptimize() const { return false; }
11399 virtual bool ComputeCanDeoptimizeAfterCall() const { return true; }
11400 virtual bool HasUnknownSideEffects() const { return true; }
11401 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
11402 return InputCount();
11403 }
11404
11405 DECLARE_INSTRUCTION(Call1ArgStub);
11407
11408#define FIELD_LIST(F) \
11409 F(const StubId, stub_id_) \
11410 F(const TokenPosition, token_pos_)
11411
11413 TemplateDefinition,
11414 FIELD_LIST)
11415#undef FIELD_LIST
11416
11417 private:
11418 DISALLOW_COPY_AND_ASSIGN(Call1ArgStubInstr);
11419};
11420
11421// Suspends execution using the suspend stub specified using [StubId].
11422class SuspendInstr : public TemplateDefinition<2, Throws> {
11423 public:
11424 enum class StubId {
11425 kAwait,
11426 kAwaitWithTypeCheck,
11427 kYieldAsyncStar,
11428 kSuspendSyncStarAtStart,
11429 kSuspendSyncStarAtYield,
11430 };
11431
 // [type_args] must be non-null exactly when stub_id is
 // kAwaitWithTypeCheck; [resume_deopt_id] identifies the resumption point.
11432 SuspendInstr(const InstructionSource& source,
11433 StubId stub_id,
11434 Value* operand,
11435 Value* type_args,
11436 intptr_t deopt_id,
11437 intptr_t resume_deopt_id)
11438 : TemplateDefinition(source, deopt_id),
11439 stub_id_(stub_id),
11440 resume_deopt_id_(resume_deopt_id),
11441 token_pos_(source.token_pos) {
11442 SetInputAt(0, operand);
11443 if (has_type_args()) {
11444 SetInputAt(1, type_args);
11445 } else {
11446 ASSERT(type_args == nullptr);
11447 }
11448 }
11449
11450 bool has_type_args() const { return stub_id_ == StubId::kAwaitWithTypeCheck; }
 // 1 or 2 inputs depending on whether a type-argument check is performed.
11451 virtual intptr_t InputCount() const { return has_type_args() ? 2 : 1; }
11452
11453 Value* operand() const { return inputs_[0]; }
11454 Value* type_args() const {
11455 ASSERT(has_type_args());
11456 return inputs_[1];
11457 }
11458
11459 StubId stub_id() const { return stub_id_; }
11460 intptr_t resume_deopt_id() const { return resume_deopt_id_; }
11461 virtual TokenPosition token_pos() const { return token_pos_; }
11462
 // Treated as an opaque call into Dart code with unknown side effects.
11463 virtual bool CanCallDart() const { return true; }
11464 virtual bool ComputeCanDeoptimize() const { return false; }
11465 virtual bool ComputeCanDeoptimizeAfterCall() const { return true; }
11466 virtual bool HasUnknownSideEffects() const { return true; }
11467 virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
11468 return InputCount();
11469 }
11470
11471 DECLARE_INSTRUCTION(Suspend);
11473
11474 virtual Definition* Canonicalize(FlowGraph* flow_graph);
11475
11476#define FIELD_LIST(F) \
11477 F(StubId, stub_id_) \
11478 F(const intptr_t, resume_deopt_id_) \
11479 F(const TokenPosition, token_pos_)
11480
11482 TemplateDefinition,
11483 FIELD_LIST)
11484#undef FIELD_LIST
11485
11486 private:
11487 DISALLOW_COPY_AND_ASSIGN(SuspendInstr);
11488};
11489
11490#undef DECLARE_INSTRUCTION
11491
// An Environment records the live values (and, once the register allocator
// has run, their locations) that must be reconstructed at a deoptimization
// point. Environments chain through outer_ — presumably one level per
// inlined frame (NOTE(review): confirm; see DeepCopyToOuter/Outermost).
11492class Environment : public ZoneAllocated {
11493 public:
11494 // Iterate the non-null values in the innermost level of an environment.
11495 class ShallowIterator : public ValueObject {
11496 public:
11497 explicit ShallowIterator(Environment* environment)
11498 : environment_(environment), index_(0) {}
11499
11500 ShallowIterator(const ShallowIterator& other)
11501 : ValueObject(),
11502 environment_(other.environment_),
11503 index_(other.index_) {}
11504
11505 ShallowIterator& operator=(const ShallowIterator& other) {
11506 environment_ = other.environment_;
11507 index_ = other.index_;
11508 return *this;
11509 }
11510
11511 Environment* environment() const { return environment_; }
11512
11513 void Advance() {
11514 ASSERT(!Done());
11515 ++index_;
11516 }
11517
 // Done for a null environment or once the level is exhausted.
11518 bool Done() const {
11519 return (environment_ == nullptr) || (index_ >= environment_->Length());
11520 }
11521
11522 Value* CurrentValue() const {
11523 ASSERT(!Done());
11524 ASSERT(environment_->values_[index_] != nullptr);
11525 return environment_->values_[index_];
11526 }
11527
11528 void SetCurrentValue(Value* value) {
11529 ASSERT(!Done());
11530 ASSERT(value != nullptr);
11531 environment_->values_[index_] = value;
11532 }
11533
11534 Location CurrentLocation() const {
11535 ASSERT(!Done());
11536 return environment_->locations_[index_];
11537 }
11538
11539 void SetCurrentLocation(Location loc) {
11540 ASSERT(!Done());
11541 environment_->locations_[index_] = loc;
11542 }
11543
11544 private:
11545 Environment* environment_;
11546 intptr_t index_;
11547 };
11548
11549 // Iterate all non-null values in an environment, including outer
11550 // environments. Note that the iterator skips empty environments.
11551 class DeepIterator : public ValueObject {
11552 public:
11553 explicit DeepIterator(Environment* environment) : iterator_(environment) {
11554 SkipDone();
11555 }
11556
11557 void Advance() {
11558 ASSERT(!Done());
11559 iterator_.Advance();
11560 SkipDone();
11561 }
11562
11563 bool Done() const { return iterator_.environment() == nullptr; }
11564
11565 Value* CurrentValue() const {
11566 ASSERT(!Done());
11567 return iterator_.CurrentValue();
11568 }
11569
11570 void SetCurrentValue(Value* value) {
11571 ASSERT(!Done());
11572 iterator_.SetCurrentValue(value);
11573 }
11574
11575 Location CurrentLocation() const {
11576 ASSERT(!Done());
11577 return iterator_.CurrentLocation();
11578 }
11579
11580 void SetCurrentLocation(Location loc) {
11581 ASSERT(!Done());
11582 iterator_.SetCurrentLocation(loc);
11583 }
11584
11585 private:
 // Hop to the next outer level whenever the current level is exhausted.
11586 void SkipDone() {
11587 while (!Done() && iterator_.Done()) {
11588 iterator_ = ShallowIterator(iterator_.environment()->outer());
11589 }
11590 }
11591
11592 ShallowIterator iterator_;
11593 };
11594
11595 // Construct an environment by constructing uses from an array of definitions.
11596 static Environment* From(Zone* zone,
11597 const GrowableArray<Definition*>& definitions,
11598 intptr_t fixed_parameter_count,
11599 intptr_t lazy_deopt_pruning_count,
11600 const ParsedFunction& parsed_function);
11601
 // Locations may be attached only once (by the register allocator).
11602 void set_locations(Location* locations) {
11603 ASSERT(locations_ == nullptr);
11604 locations_ = locations;
11605 }
11606
11607 // Get deopt_id associated with this environment.
11608 // Note that only outer environments have deopt id associated with
11609 // them (set by DeepCopyToOuter).
11610 intptr_t GetDeoptId() const {
11611 ASSERT(DeoptIdBits::decode(bitfield_) != DeoptId::kNone);
11612 return DeoptIdBits::decode(bitfield_);
11613 }
11614
11615 intptr_t LazyDeoptPruneCount() const {
11616 return LazyDeoptPruningBits::decode(bitfield_);
11617 }
11618
11619 bool LazyDeoptToBeforeDeoptId() const {
11620 return LazyDeoptToBeforeDeoptId::decode(bitfield_);
11621 }
11622
11623 void MarkAsLazyDeoptToBeforeDeoptId() {
11624 bitfield_ = LazyDeoptToBeforeDeoptId::update(true, bitfield_);
11625 // As eager and lazy deopts will target the before environment, we do not
11626 // want to prune inputs on lazy deopts.
11627 bitfield_ = LazyDeoptPruningBits::update(0, bitfield_);
11628 }
11629
11630 // This environment belongs to an optimistically hoisted instruction.
11631 bool IsHoisted() const { return Hoisted::decode(bitfield_); }
11632
11633 void MarkAsHoisted() { bitfield_ = Hoisted::update(true, bitfield_); }
11634
 // Returns this environment, or a copy with the trailing pruned values
 // dropped, for use at a lazy deoptimization point.
11635 Environment* GetLazyDeoptEnv(Zone* zone) {
11636 if (LazyDeoptToBeforeDeoptId()) {
11637 ASSERT(LazyDeoptPruneCount() == 0);
11638 }
11639 const intptr_t num_args_to_prune = LazyDeoptPruneCount();
11640 if (num_args_to_prune == 0) return this;
11641 return DeepCopy(zone, Length() - num_args_to_prune);
11642 }
11643
11644 Environment* outer() const { return outer_; }
11645
 // Walks the outer() chain to its root.
11646 Environment* Outermost() {
11647 Environment* result = this;
11648 while (result->outer() != nullptr)
11649 result = result->outer();
11650 return result;
11651 }
11652
11653 Value* ValueAt(intptr_t ix) const { return values_[ix]; }
11654
11655 void PushValue(Value* value);
11656
 // Number of values in this (innermost) level only.
11657 intptr_t Length() const { return values_.length(); }
11658
11659 Location LocationAt(intptr_t index) const {
11660 ASSERT((index >= 0) && (index < values_.length()));
11661 return locations_[index];
11662 }
11663
11664 // The use index is the index in the flattened environment.
11665 Value* ValueAtUseIndex(intptr_t index) const {
11666 const Environment* env = this;
11667 while (index >= env->Length()) {
11668 ASSERT(env->outer_ != nullptr);
11669 index -= env->Length();
11670 env = env->outer_;
11671 }
11672 return env->ValueAt(index);
11673 }
11674
11675 intptr_t fixed_parameter_count() const { return fixed_parameter_count_; }
11676
 // Counts MoveArgument definitions across all levels of the environment.
11677 intptr_t CountArgsPushed() {
11678 intptr_t count = 0;
11679 for (Environment::DeepIterator it(this); !it.Done(); it.Advance()) {
11680 if (it.CurrentValue()->definition()->IsMoveArgument()) {
11681 count++;
11682 }
11683 }
11684 return count;
11685 }
11686
11687 const Function& function() const { return function_; }
11688
11689 Environment* DeepCopy(Zone* zone) const { return DeepCopy(zone, Length()); }
11690
11691 void DeepCopyTo(Zone* zone, Instruction* instr) const;
11692 void DeepCopyToOuter(Zone* zone,
11693 Instruction* instr,
11694 intptr_t outer_deopt_id) const;
11695
11696 void DeepCopyAfterTo(Zone* zone,
11697 Instruction* instr,
11698 intptr_t argc,
11699 Definition* dead,
11700 Definition* result) const;
11701
11702 void PrintTo(BaseTextBuffer* f) const;
11703 const char* ToCString() const;
11704
11705 // Deep copy an environment. The 'length' parameter may be less than the
11706 // environment's length in order to drop values (e.g., passed arguments)
11707 // from the copy.
11708 Environment* DeepCopy(Zone* zone, intptr_t length) const;
11709
11710 void Write(FlowGraphSerializer* s) const;
11711 explicit Environment(FlowGraphDeserializer* d);
11712
11713 private:
11714 friend class ShallowIterator;
11715 friend class compiler::BlockBuilder; // For Environment constructor.
11716
 // Bit layout of bitfield_: [0..7] prune count, [8] lazy-deopt-to-before
 // flag, [9] hoisted flag, [10..] sign-extended deopt id.
11717 class LazyDeoptPruningBits : public BitField<uintptr_t, uintptr_t, 0, 8> {};
11718 class LazyDeoptToBeforeDeoptId
11719 : public BitField<uintptr_t, bool, LazyDeoptPruningBits::kNextBit, 1> {};
11720 class Hoisted : public BitField<uintptr_t,
11721 bool,
11722 LazyDeoptToBeforeDeoptId::kNextBit,
11723 1> {};
11724 class DeoptIdBits : public BitField<uintptr_t,
11725 intptr_t,
11726 Hoisted::kNextBit,
11727 kBitsPerWord - Hoisted::kNextBit,
11728 /*sign_extend=*/true> {};
11729
11730 Environment(intptr_t length,
11731 intptr_t fixed_parameter_count,
11732 intptr_t lazy_deopt_pruning_count,
11733 const Function& function,
11734 Environment* outer)
11735 : values_(length),
11736 fixed_parameter_count_(fixed_parameter_count),
11737 bitfield_(DeoptIdBits::encode(DeoptId::kNone) |
11738 LazyDeoptToBeforeDeoptId::encode(false) |
11739 LazyDeoptPruningBits::encode(lazy_deopt_pruning_count)),
11740 function_(function),
11741 outer_(outer) {}
11742
11743 void SetDeoptId(intptr_t deopt_id) {
11744 bitfield_ = DeoptIdBits::update(deopt_id, bitfield_);
11745 }
11746 void SetLazyDeoptPruneCount(intptr_t value) {
11747 bitfield_ = LazyDeoptPruningBits::update(value, bitfield_);
11748 }
11749 void SetLazyDeoptToBeforeDeoptId(bool value) {
11750 bitfield_ = LazyDeoptToBeforeDeoptId::update(value, bitfield_);
11751 }
11752
11753 GrowableArray<Value*> values_;
11754 Location* locations_ = nullptr;
11755 const intptr_t fixed_parameter_count_;
11756 // Deoptimization id associated with this environment. Only set for
11757 // outer environments.
11758 uintptr_t bitfield_;
11759 const Function& function_;
11760 Environment* outer_;
11761
11762 DISALLOW_COPY_AND_ASSIGN(Environment);
11763};
11764
// Base visitor over IL instructions: one empty Visit<Name> hook is declared
// per concrete instruction class, so subclasses override only the cases they
// care about.
11765class InstructionVisitor : public ValueObject {
11766 public:
11767 InstructionVisitor() {}
11768 virtual ~InstructionVisitor() {}
11769
11770// Visit functions for instruction classes, with an empty default
11771// implementation.
11772#define DECLARE_VISIT_INSTRUCTION(ShortName, Attrs) \
11773 virtual void Visit##ShortName(ShortName##Instr* instr) {}
11774
11776
11777#undef DECLARE_VISIT_INSTRUCTION
11778
11779 private:
11780 DISALLOW_COPY_AND_ASSIGN(InstructionVisitor);
11781};
11782
11783// Visitor base class to visit each instruction and computation in a flow
11784// graph as defined by a reversed list of basic blocks.
11785class FlowGraphVisitor : public InstructionVisitor {
11786 public:
 // Keeps a non-owning pointer to the caller's block order.
11787 explicit FlowGraphVisitor(const GrowableArray<BlockEntryInstr*>& block_order)
11788 : current_iterator_(nullptr), block_order_(&block_order) {}
11789 virtual ~FlowGraphVisitor() {}
11790
 // The iterator over the block currently being visited; null outside of
 // VisitBlocks().
11791 ForwardInstructionIterator* current_iterator() const {
11792 return current_iterator_;
11793 }
11794
11795 // Visit each block in the block order, and for each block its
11796 // instructions in order from the block entry to exit.
11797 virtual void VisitBlocks();
11798
11799 protected:
11800 void set_block_order(const GrowableArray<BlockEntryInstr*>& block_order) {
11801 block_order_ = &block_order;
11802 }
11803
11804 ForwardInstructionIterator* current_iterator_;
11805
11806 private:
11807 const GrowableArray<BlockEntryInstr*>* block_order_;
11808 DISALLOW_COPY_AND_ASSIGN(FlowGraphVisitor);
11809};
11810
11811// Helper macros for platform ports.
// Emits MakeLocationSummary/EmitNativeCode bodies for [Name] that simply hit
// UNIMPLEMENTED(), for architectures that have not implemented it yet.
11812#define DEFINE_UNIMPLEMENTED_INSTRUCTION(Name) \
11813 LocationSummary* Name::MakeLocationSummary(Zone* zone, bool opt) const { \
11814 UNIMPLEMENTED(); \
11815 return nullptr; \
11816 } \
11817 void Name::EmitNativeCode(FlowGraphCompiler* compiler) { \
11818 UNIMPLEMENTED(); \
11819 }
11820
11821template <intptr_t kExtraInputs>
11822StringPtr TemplateDartCall<kExtraInputs>::Selector() {
11823 if (auto static_call = this->AsStaticCall()) {
11824 return static_call->function().name();
11825 } else if (auto instance_call = this->AsInstanceCall()) {
11826 return instance_call->function_name().ptr();
11827 } else {
11828 UNREACHABLE();
11829 }
11830}
11831
11832inline bool Value::CanBe(const Object& value) {
11833 ConstantInstr* constant = definition()->AsConstant();
11834 return (constant == nullptr) || constant->value().ptr() == value.ptr();
11835}
11836#undef DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS
11837#undef DECLARE_CUSTOM_SERIALIZATION
11838#undef DECLARE_EMPTY_SERIALIZATION
11839
11840} // namespace dart
11841
11842#endif // RUNTIME_VM_COMPILER_BACKEND_IL_H_
static bool compare(const SkBitmap &ref, const SkIRect &iref, const SkBitmap &test, const SkIRect &itest)
Definition BlurTest.cpp:100
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
Definition DM.cpp:263
int count
SkPoint pos
static void is_empty(skiatest::Reporter *reporter, const SkPath &p)
static float next(float f)
static float prev(float f)
static sk_sp< Effect > Create()
static std::unique_ptr< SkEncoder > Make(SkWStream *dst, const SkPixmap *src, const SkYUVAPixmaps *srcYUVA, const SkColorSpace *srcYUVAColorSpace, const SkJpegEncoder::Options &options)
static void encode(uint8_t output[16], const uint32_t input[4])
Definition SkMD5.cpp:240
static SkSize operator*(SkISize u, SkScalar s)
static bool left(const SkPoint &p0, const SkPoint &p1)
static bool right(const SkPoint &p0, const SkPoint &p1)
bool operator!=(const sk_sp< T > &a, const sk_sp< U > &b)
Definition SkRefCnt.h:355
SI F table(const skcms_Curve *curve, F v)
static size_t element_size(Layout layout, SkSLType type)
Vec2Value v2
#define UNREACHABLE()
Definition assert.h:248
#define DEBUG_ASSERT(cond)
Definition assert.h:321
#define ASSERT_EQUAL(expected, actual)
Definition assert.h:309
#define COMPILE_ASSERT(expr)
Definition assert.h:339
#define N
Definition beziers.cpp:19
bool Equals(const SkPath &a, const SkPath &b)
#define UNIMPLEMENTED
static const char * begin(const StringSlice &s)
Definition editor.cpp:252
@ kNormal
Default priority level.
Definition embedder.h:260
bool operator==(const FlutterPoint &a, const FlutterPoint &b)
#define ASSERT(E)
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE auto & d
Definition main.cc:19
VkInstance instance
Definition main.cc:48
SkBitmap source
Definition examples.cpp:28
static bool b
struct MyStruct s
struct MyStruct a[10]
glong glong end
G_BEGIN_DECLS G_MODULE_EXPORT FlValue * args
uint8_t value
void PrintTo(FlValue *v, std::ostream *os)
Definition fl_test.cc:78
GAsyncResult * result
uint32_t * target
Dart_NativeFunction function
Definition fuchsia.cc:51
const char * name
Definition fuchsia.cc:50
static float max(float r, float g, float b)
Definition hsl.cpp:49
static float min(float r, float g, float b)
Definition hsl.cpp:48
#define VALUE_DEFN(name, val)
Definition il.h:2453
#define DECLARE_INSTRUCTION_BACKEND()
Definition il.h:591
#define DECLARE_COMPARISON_INSTRUCTION(type)
Definition il.h:614
#define DECLARE_VISIT_INSTRUCTION(ShortName, Attrs)
Definition il.h:11773
#define DECLARE_EMPTY_SERIALIZATION(Instr, BaseClass)
Definition il.h:666
#define DECLARE_INSTRUCTION_TYPE_CHECK(Name, Type)
Definition il.h:1155
#define DECLARE_TAG(type, attrs)
Definition il.h:958
#define FOR_EACH_INSTRUCTION(M)
Definition il.h:405
#define DECLARE_ATTRIBUTE(Attribute)
Definition il.h:701
#define DECLARE_ABSTRACT_INSTRUCTION(type)
Definition il.h:602
#define DECLARE_ATTRIBUTES_NAMED(names, values)
Definition il.h:702
#define FIELD_LIST(F)
Definition il.h:2070
#define DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Instr, BaseClass, FieldList)
Definition il.h:649
#define VALUE_CASE(name, val)
#define PRINT_OPERANDS_TO_SUPPORT
Definition il.h:700
#define FOR_EACH_ASSERT_ASSIGNABLE_KIND(V)
Definition il.h:4358
#define INSTRUCTION_TYPE_CHECK(Name, Attrs)
Definition il.h:1162
#define DECLARE_INSTRUCTION_NO_BACKEND(type)
Definition il.h:584
#define FOR_EACH_ALIAS_IDENTITY_VALUE(V)
Definition il.h:2413
#define KIND_DEFN(name)
Definition il.h:4364
#define PRINT_TO_SUPPORT
Definition il.h:699
#define DECLARE_INSTRUCTION(type)
Definition il.h:597
#define DECLARE_ENUM(Arity, Mask, Name,...)
Definition il.h:11249
#define DECLARE_CUSTOM_SERIALIZATION(Instr)
Definition il.h:661
#define SIMD_OP_LIST(M, BINARY_OP)
Definition il.h:11195
#define DECLARE_EXTRA_SERIALIZATION
Definition il.h:670
#define FOR_EACH_ABSTRACT_INSTRUCTION(M)
Definition il.h:553
size_t length
Win32Message message
double y
double x
const GrXPFactory * Get(SkBlendMode mode)
bool IsSupported(const SkMaskFilter *maskfilter)
SK_API bool Encode(SkWStream *dst, const SkPixmap &src, const Options &options)
bool Contains(const Container &container, const Value &value)
Definition copy.py:1
bool IsSmi(int64_t v)
bool IsTypedDataBaseClassId(intptr_t index)
Definition class_id.h:429
constexpr intptr_t kBitsPerWord
Definition globals.h:514
InnerPointerAccess
Definition il.h:6246
DART_EXPORT bool IsNull(Dart_Handle object)
int32_t classid_t
Definition globals.h:524
StoreBarrierType
Definition il.h:6252
@ kNoStoreBarrier
Definition il.h:6252
@ kEmitStoreBarrier
Definition il.h:6252
@ kIllegalCid
Definition class_id.h:214
@ kDynamicCid
Definition class_id.h:253
Representation
Definition locations.h:66
constexpr intptr_t kBitsPerByte
Definition globals.h:463
GrowableArray< Value * > InputsArray
Definition il.h:895
unibrow::Mapping< unibrow::Ecma262Canonicalize > Canonicalize
intptr_t LocationCount(Representation rep)
Definition locations.h:75
uintptr_t uword
Definition globals.h:501
bool IsClampedTypedDataBaseClassId(intptr_t index)
Definition class_id.h:461
bool IsAllocatableInNewSpace(intptr_t size)
Definition spaces.h:57
const Register FPREG
const intptr_t cid
ZoneGrowableArray< MoveArgumentInstr * > MoveArgumentsArray
Definition il.h:896
typename unwrap_enum< std::remove_cv_t< T >, std::is_enum< T >::value >::type serializable_type_t
Definition il.h:633
const char *const function_name
void(* NativeFunction)(NativeArguments *arguments)
const Register SPREG
AlignmentType
Definition il.h:6720
@ kUnalignedAccess
Definition il.h:6721
@ kAlignedAccess
Definition il.h:6722
bool IsStringClassId(intptr_t index)
Definition class_id.h:350
Definition dom.py:1
call(args)
Definition dom.py:159
Definition __init__.py:1
SINT Vec< 2 *N, T > join(const Vec< N, T > &lo, const Vec< N, T > &hi)
Definition SkVx.h:242
Definition ref_ptr.h:256
dest
Definition zip.py:79
SkScalar w
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName)
Definition globals.h:593
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition globals.h:581
#define T
Point offset

◆ DECLARE_ATTRIBUTE

#define DECLARE_ATTRIBUTE (   Attribute)

Definition at line 701 of file il.h.

◆ DECLARE_ATTRIBUTES_NAMED

#define DECLARE_ATTRIBUTES_NAMED (   names,
  values 
)

Definition at line 702 of file il.h.

◆ DECLARE_COMPARISON_INSTRUCTION

#define DECLARE_COMPARISON_INSTRUCTION (   type)
Value:

Definition at line 614 of file il.h.

◆ DECLARE_COMPARISON_METHODS

#define DECLARE_COMPARISON_METHODS
Value:
virtual LocationSummary* MakeLocationSummary(Zone* zone, bool optimizing) \
const; \
virtual Condition EmitComparisonCode(FlowGraphCompiler* compiler, \
BranchLabels labels);

Definition at line 608 of file il.h.

◆ DECLARE_CUSTOM_SERIALIZATION

#define DECLARE_CUSTOM_SERIALIZATION (   Instr)
Value:
public: \
virtual void WriteTo(FlowGraphSerializer* s); \
explicit Instr(FlowGraphDeserializer* d);

Definition at line 661 of file il.h.

662 : \
663 virtual void WriteTo(FlowGraphSerializer* s); \
664 explicit Instr(FlowGraphDeserializer* d);

◆ DECLARE_EMPTY_SERIALIZATION

#define DECLARE_EMPTY_SERIALIZATION (   Instr,
  BaseClass 
)
Value:
public: \
explicit Instr(FlowGraphDeserializer* d) : BaseClass(d) {}

Definition at line 666 of file il.h.

667 : \
668 explicit Instr(FlowGraphDeserializer* d) : BaseClass(d) {}

◆ DECLARE_ENUM

#define DECLARE_ENUM (   Arity,
  Mask,
  Name,
  ... 
)    k##Name,

Definition at line 11249 of file il.h.

◆ DECLARE_EXTRA_SERIALIZATION

#define DECLARE_EXTRA_SERIALIZATION
Value:
public: \
virtual void WriteExtra(FlowGraphSerializer* s); \
virtual void ReadExtra(FlowGraphDeserializer* d);

Definition at line 670 of file il.h.

671 : \
672 virtual void WriteExtra(FlowGraphSerializer* s); \
673 virtual void ReadExtra(FlowGraphDeserializer* d);

◆ DECLARE_INSTRUCTION

#define DECLARE_INSTRUCTION (   type)
Value:
DECLARE_INSTRUCTION_BACKEND()

Definition at line 597 of file il.h.

◆ DECLARE_INSTRUCTION_BACKEND

#define DECLARE_INSTRUCTION_BACKEND ( )
Value:
virtual LocationSummary* MakeLocationSummary(Zone* zone, bool optimizing) \
const; \
virtual void EmitNativeCode(FlowGraphCompiler* compiler);

Definition at line 591 of file il.h.

◆ DECLARE_INSTRUCTION_FIELD

#define DECLARE_INSTRUCTION_FIELD (   type,
  name 
)    type name;

Definition at line 641 of file il.h.

◆ DECLARE_INSTRUCTION_NO_BACKEND

#define DECLARE_INSTRUCTION_NO_BACKEND (   type)
Value:
virtual Tag tag() const { \
return k##type; \
} \
virtual void Accept(InstructionVisitor* visitor); \
DEFINE_INSTRUCTION_TYPE_CHECK(type)

Definition at line 584 of file il.h.

585 { \
586 return k##type; \
587 } \
588 virtual void Accept(InstructionVisitor* visitor); \
589 DEFINE_INSTRUCTION_TYPE_CHECK(type)

◆ DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS

#define DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS (   Instr,
  BaseClass,
  FieldList 
)
Value:
public: \
virtual void WriteTo(FlowGraphSerializer* s) { \
BaseClass::WriteTo(s); \
} \
explicit Instr(FlowGraphDeserializer* d) \
: BaseClass(d) FieldList(READ_INSTRUCTION_FIELD) {} \
\
private: \
#define READ_INSTRUCTION_FIELD(type, name)
Definition il.h:638
#define DECLARE_INSTRUCTION_FIELD(type, name)
Definition il.h:641
#define WRITE_INSTRUCTION_FIELD(type, name)
Definition il.h:635

Definition at line 649 of file il.h.

650 : \
651 virtual void WriteTo(FlowGraphSerializer* s) { \
652 BaseClass::WriteTo(s); \
653 FieldList(WRITE_INSTRUCTION_FIELD) \
654 } \
655 explicit Instr(FlowGraphDeserializer* d) \
656 : BaseClass(d) FieldList(READ_INSTRUCTION_FIELD) {} \
657 \
658 private: \

◆ DECLARE_INSTRUCTION_TYPE_CHECK

#define DECLARE_INSTRUCTION_TYPE_CHECK (   Name,
  Type 
)
Value:
bool Is##Name() const { return (As##Name() != nullptr); } \
Type* As##Name() { \
auto const_this = static_cast<const Instruction*>(this); \
return const_cast<Type*>(const_this->As##Name()); \
} \
virtual const Type* As##Name() const { return nullptr; }
ImplicitString Name
Definition DMSrcSink.h:38

Definition at line 1155 of file il.h.

1156 { return (As##Name() != nullptr); } \
1157 Type* As##Name() { \
1158 auto const_this = static_cast<const Instruction*>(this); \
1159 return const_cast<Type*>(const_this->As##Name()); \
1160 } \
1161 virtual const Type* As##Name() const { return nullptr; }

◆ DECLARE_TAG

#define DECLARE_TAG (   type,
  attrs 
)    k##type,

Definition at line 958 of file il.h.

◆ DECLARE_VISIT_INSTRUCTION

#define DECLARE_VISIT_INSTRUCTION (   ShortName,
  Attrs 
)     virtual void Visit##ShortName(ShortName##Instr* instr) {}

Definition at line 11773 of file il.h.

11774 {}

◆ DEFINE_INSTRUCTION_TYPE_CHECK

#define DEFINE_INSTRUCTION_TYPE_CHECK (   type)
Value:
virtual type##Instr* As##type() { \
return this; \
} \
virtual const type##Instr* As##type() const { \
return this; \
} \
virtual const char* DebugName() const { \
return #type; \
}

Definition at line 572 of file il.h.

573 { \
574 return this; \
575 } \
576 virtual const type##Instr* As##type() const { \
577 return this; \
578 } \
579 virtual const char* DebugName() const { \
580 return #type; \
581 }

◆ DEFINE_UNIMPLEMENTED_INSTRUCTION

#define DEFINE_UNIMPLEMENTED_INSTRUCTION (   Name)
Value:
LocationSummary* Name::MakeLocationSummary(Zone* zone, bool opt) const { \
UNIMPLEMENTED(); \
return nullptr; \
} \
void Name::EmitNativeCode(FlowGraphCompiler* compiler) { \
UNIMPLEMENTED(); \
}

Definition at line 11813 of file il.h.

11814 { \
11815 UNIMPLEMENTED(); \
11816 return nullptr; \
11817 } \
11818 void Name::EmitNativeCode(FlowGraphCompiler* compiler) { \
11819 UNIMPLEMENTED(); \
11820 }

◆ FIELD_LIST [1/111]

#define FIELD_LIST (   F)    F(ZoneGrowableArray<PhiInstr*>*, phis_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [2/111]

#define FIELD_LIST (   F)    F(double, edge_weight_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [3/111]

#define FIELD_LIST (   F)    F(const compiler::ffi::CallbackMarshaller&, marshaller_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [4/111]

#define FIELD_LIST (   F)    F(const intptr_t, indirect_id_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [5/111]

#define FIELD_LIST (   F)
Value:
F(const intptr_t, env_index_) \
F(const intptr_t, param_index_) \
F(const Representation, representation_)
#define F(x)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [6/111]

#define FIELD_LIST (   F)
Value:
F(const compiler::ffi::CallbackMarshaller&, marshaller_) \
F(const intptr_t, def_index_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [7/111]

#define FIELD_LIST (   F)    F(const intptr_t, offset_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [8/111]

#define FIELD_LIST (   F)
Value:
F(const intptr_t, offset_) \
F(const Representation, representation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [9/111]

#define FIELD_LIST (   F)
Value:
F(const classid_t, src_cid_) \
F(const classid_t, dest_cid_) \
F(intptr_t, element_size_) \
F(bool, unboxed_inputs_) \
F(const bool, can_overlap_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [10/111]

#define FIELD_LIST (   F)    F(const Code&, code_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [11/111]

#define FIELD_LIST (   F)
Value:
F(const Representation, representation_) \
F(const bool, is_register_move_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [12/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const Representation, representation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [13/111]

#define FIELD_LIST (   F)    F(const compiler::ffi::CallbackMarshaller&, marshaller_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [14/111]

#define FIELD_LIST (   F)    F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [15/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const intptr_t, catch_try_index_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [16/111]

#define FIELD_LIST (   F)    F(const char*, message_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [17/111]

#define FIELD_LIST (   F)
Value:
F(double, edge_weight_) \
/* Parallel move that will be used by linear scan register allocator to */ \
/* connect live ranges at the end of the block and resolve phis. */ \
F(ParallelMoveInstr*, parallel_move_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [18/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(Token::Kind, kind_) \
/* Set by optimizer. */ \
F(intptr_t, operation_cid_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [19/111]

#define FIELD_LIST (   F)    F(ComparisonInstr*, comparison_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [20/111]

#define FIELD_LIST (   F)    F(const ICData::DeoptReasonId, deopt_reason_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [21/111]

#define FIELD_LIST (   F)
Value:
F(CompileType*, constrained_type_) \
F(bool, inserted_by_constant_propagation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [22/111]

#define FIELD_LIST (   F)    F(Range*, constraint_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [23/111]

#define FIELD_LIST (   F)
Value:
F(const Object&, value_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [24/111]

#define FIELD_LIST (   F)    F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [25/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const String&, dst_name_) \
F(const Kind, kind_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [26/111]

#define FIELD_LIST (   F)    F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [27/111]

#define FIELD_LIST (   F)    F(const Function&, target_function_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [28/111]

#define FIELD_LIST (   F)
Value:
F(const ICData*, ic_data_) \
F(const String&, function_name_) \
/* Binary op, unary op, kGET or kILLEGAL. */ \
F(const Token::Kind, token_kind_) \
F(const Function&, interface_target_) \
F(const Function&, tearoff_interface_target_) \
/* Inferred result type. */ \
F(CompileType*, result_type_) \
F(bool, has_unique_selector_) \
F(Code::EntryKind, entry_kind_) \
F(bool, receiver_is_not_smi_) \
F(bool, is_call_on_this_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [29/111]

#define FIELD_LIST (   F)
Value:
F(const intptr_t, checked_argument_count_) \
F(const AbstractType*, receivers_static_type_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [30/111]

#define FIELD_LIST (   F)
Value:
F(const CallTargets&, targets_) \
F(const bool, complete_) \
F(intptr_t, total_call_count_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [31/111]

#define FIELD_LIST (   F)
Value:
F(const Function&, interface_target_) \
F(const compiler::TableSelector*, selector_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [32/111]

#define FIELD_LIST (   F)
Value:
/* True if the comparison must check for double or Mint and */ \
/* use value comparison instead. */ \
F(bool, needs_number_check_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [33/111]

#define FIELD_LIST (   F)    F(const ZoneGrowableArray<intptr_t>&, cid_results_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [34/111]

#define FIELD_LIST (   F)
Value:
F(const uword, lower_) \
F(const uword, upper_) \
F(const Representation, value_representation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [35/111]

#define FIELD_LIST (   F)
Value:
F(bool, null_aware_) \
F(const SpeculativeMode, speculative_mode_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [36/111]

#define FIELD_LIST (   F)    F(const SpeculativeMode, speculative_mode_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [37/111]

#define FIELD_LIST (   F)
Value:
F(ComparisonInstr*, comparison_) \
F(const intptr_t, if_true_) \
F(const intptr_t, if_false_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [38/111]

#define FIELD_LIST (   F)
Value:
F(const ICData*, ic_data_) \
F(const intptr_t, call_count_) \
F(const Function&, function_) \
F(const ICData::RebindRule, rebind_rule_) \
/* Known or inferred result type. */ \
F(CompileType*, result_type_) \
/* 'True' for recognized list constructors. */ \
F(bool, is_known_list_constructor_) \
F(Code::EntryKind, entry_kind_) \
F(AliasIdentity, identity_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [39/111]

#define FIELD_LIST (   F)
Value:
F(const Representation, representation_) \
F(const Function&, function_) \
F(AliasIdentity, identity_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [40/111]

#define FIELD_LIST (   F)
Value:
F(const LocalVariable&, local_) \
F(bool, is_last_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [41/111]

#define FIELD_LIST (   F)
Value:
F(const intptr_t, num_temps_) \
F(const bool, has_input_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [42/111]

#define FIELD_LIST (   F)    F(ConstantInstr*, null_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [43/111]

#define FIELD_LIST (   F)
Value:
F(const LocalVariable&, local_) \
F(bool, is_dead_) \
F(bool, is_last_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [44/111]

#define FIELD_LIST (   F)
Value:
F(const String&, native_name_) \
F(const Function&, function_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [45/111]

#define FIELD_LIST (   F)
Value:
F(const compiler::ffi::CallMarshaller&, marshaller_) \
F(bool, is_leaf_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [46/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const UntaggedPcDescriptors::Kind, stub_kind_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [47/111]

#define FIELD_LIST (   F)
Value:
F(const Slot&, slot_) \
F(StoreBarrierType, emit_store_barrier_) \
F(compiler::Assembler::MemoryOrder, memory_order_) \
F(const TokenPosition, token_pos_) \
/* Marks initializing stores. E.g. in the constructor. */ \
F(const bool, is_initialization_) \
F(InnerPointerAccess, stores_inner_pointer_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [48/111]

#define FIELD_LIST (   F)    F(const Field&, field_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [49/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const bool, throw_exception_on_initialization_) \
F(bool, calls_initializer_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [50/111]

#define FIELD_LIST (   F)    F(const Field&, field_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [51/111]

#define FIELD_LIST (   F)
Value:
F(const Field&, field_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [52/111]

#define FIELD_LIST (   F)
Value:
F(const bool, index_unboxed_) \
F(const intptr_t, index_scale_) \
F(const intptr_t, class_id_) \
F(const AlignmentType, alignment_) \
F(const TokenPosition, token_pos_) \
/* derived from call */ \
F(CompileType*, result_type_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [53/111]

#define FIELD_LIST (   F)
Value:
F(const intptr_t, class_id_) \
F(const TokenPosition, token_pos_) \
F(const intptr_t, element_count_) \
F(Representation, representation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [54/111]

#define FIELD_LIST (   F)    F(const intptr_t, cid_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [55/111]

#define FIELD_LIST (   F)    F(const Slot&, scan_flags_field_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [56/111]

#define FIELD_LIST (   F)
Value:
F(StoreBarrierType, emit_store_barrier_) \
F(const bool, index_unboxed_) \
F(const intptr_t, index_scale_) \
F(const intptr_t, class_id_) \
F(const AlignmentType, alignment_) \
F(const TokenPosition, token_pos_) \
F(const SpeculativeMode, speculative_mode_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [57/111]

#define FIELD_LIST (   F)
Value:
F(const Array&, coverage_array_) \
F(const intptr_t, coverage_index_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [58/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const AbstractType&, type_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [59/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(AliasIdentity, identity_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [60/111]

#define FIELD_LIST (   F)
Value:
F(const Class&, cls_) \
F(const bool, has_type_arguments_) \
F(const Slot*, type_arguments_slot_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [61/111]

#define FIELD_LIST (   F)
Value:
F(const bool, has_instantiator_type_args_) \
F(const bool, is_generic_) \
F(const bool, is_tear_off_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [62/111]

#define FIELD_LIST (   F)    F(const intptr_t, num_context_variables_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [63/111]

#define FIELD_LIST (   F)    F(const RecordShape, shape_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [64/111]

#define FIELD_LIST (   F)    F(const RecordShape, shape_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [65/111]

#define FIELD_LIST (   F)
Value:
F(const Class&, cls_) \
F(intptr_t, length_or_shape_) \
F(const ZoneGrowableArray<const Slot*>&, slots_) \
F(bool, registers_remapped_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [66/111]

#define FIELD_LIST (   F)    F(const classid_t, class_id_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [67/111]

#define FIELD_LIST (   F)    F(const intptr_t, offset_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [68/111]

#define FIELD_LIST (   F)    F(const intptr_t, index_scale_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [69/111]

#define FIELD_LIST (   F)
Value:
F(const Representation, representation_) \
F(const bool, input_can_be_smi_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [70/111]

#define FIELD_LIST (   F)
Value:
F(const Slot&, slot_) \
F(InnerPointerAccess, loads_inner_pointer_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [71/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const AbstractType&, type_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [72/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const Class&, instantiator_class_) \
F(const Function&, function_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [73/111]

#define FIELD_LIST (   F)    F(const ZoneGrowableArray<const Slot*>&, context_slots_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [74/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const ZoneGrowableArray<const Slot*>&, context_slots_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [75/111]

#define FIELD_LIST (   F)    F(const Representation, from_representation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [76/111]

#define FIELD_LIST (   F)
Value:
F(const Representation, representation_) \
F(SpeculativeMode, speculative_mode_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [77/111]

#define FIELD_LIST (   F)    F(bool, is_truncating_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [78/111]

#define FIELD_LIST (   F)
Value:
F(const bool, handle_surrogates_) \
F(const intptr_t, cid_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [79/111]

#define FIELD_LIST (   F)
Value:
F(const MethodRecognizer::Kind, op_kind_) \
F(const intptr_t, result_cid_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [80/111]

#define FIELD_LIST (   F)
Value:
F(const Token::Kind, op_kind_) \
F(const TokenPosition, token_pos_) \
F(const SpeculativeMode, speculative_mode_) \
F(const Representation, representation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [81/111]

#define FIELD_LIST (   F)    F(const MethodRecognizer::Kind, op_kind_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [82/111]

#define FIELD_LIST (   F)    F(const bool, smi_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [83/111]

#define FIELD_LIST (   F)    F(const Token::Kind, op_kind_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [84/111]

#define FIELD_LIST (   F)    F(const SpeculativeMode, speculative_mode_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [85/111]

#define FIELD_LIST (   F)
Value:
F(const Token::Kind, op_kind_) \
F(bool, can_overflow_) \
F(bool, is_truncating_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [86/111]

#define FIELD_LIST (   F)    F(Range*, right_range_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [87/111]

#define FIELD_LIST (   F)    F(const SpeculativeMode, speculative_mode_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [88/111]

#define FIELD_LIST (   F)    F(Range*, shift_range_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [89/111]

#define FIELD_LIST (   F)
Value:
F(const Token::Kind, op_kind_) \
F(const SpeculativeMode, speculative_mode_) \
F(const Representation, representation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [90/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const intptr_t, stack_depth_) \
F(const intptr_t, loop_depth_) \
F(const Kind, kind_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [91/111]

#define FIELD_LIST (   F)    F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [92/111]

#define FIELD_LIST (   F)    F(const SpeculativeMode, speculative_mode_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [93/111]

#define FIELD_LIST (   F)    F(const MethodRecognizer::Kind, recognized_kind_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [94/111]

#define FIELD_LIST (   F)    F(const SpeculativeMode, speculative_mode_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [95/111]

#define FIELD_LIST (   F)    F(const Token::Kind, op_kind_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [96/111]

#define FIELD_LIST (   F)
Value:
F(const MethodRecognizer::Kind, recognized_kind_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [97/111]

#define FIELD_LIST (   F)
Value:
F(const intptr_t, index_) \
F(const Representation, definition_rep_) \
F(const intptr_t, definition_cid_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [98/111]

#define FIELD_LIST (   F)
Value:
F(const intptr_t, lane_) \
F(const Representation, definition_rep_) \
F(const intptr_t, definition_cid_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [99/111]

#define FIELD_LIST (   F)    F(const Representation, from_representation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [100/111]

#define FIELD_LIST (   F)
Value:
F(const Cids&, cids_) \
F(bool, is_bit_test_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [101/111]

#define FIELD_LIST (   F)    F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [102/111]

#define FIELD_LIST (   F)
Value:
F(const TokenPosition, token_pos_) \
F(const String&, function_name_) \
F(const ExceptionType, exception_type_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [103/111]

#define FIELD_LIST (   F)    F(CidRangeValue, cids_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [104/111]

#define FIELD_LIST (   F)    F(bool, generalized_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [105/111]

#define FIELD_LIST (   F)    F(const Kind, kind_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [106/111]

#define FIELD_LIST (   F)    F(ComparisonInstr*, comparison_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [107/111]

#define FIELD_LIST (   F)
Value:
F(const Representation, from_representation_) \
F(const Representation, to_representation_) \
F(bool, is_truncating_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [108/111]

#define FIELD_LIST (   F)
Value:
F(const Representation, from_representation_) \
F(const Representation, to_representation_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [109/111]

#define FIELD_LIST (   F)
Value:
F(const Kind, kind_) \
F(intptr_t, mask_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [110/111]

#define FIELD_LIST (   F)
Value:
F(const StubId, stub_id_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FIELD_LIST [111/111]

#define FIELD_LIST (   F)
Value:
F(StubId, stub_id_) \
F(const intptr_t, resume_deopt_id_) \
F(const TokenPosition, token_pos_)

Definition at line 2070 of file il.h.

◆ FOR_EACH_ABSTRACT_INSTRUCTION

#define FOR_EACH_ABSTRACT_INSTRUCTION (   M)
Value:
M(Allocation, _) \
M(ArrayAllocation, _) \
M(BinaryIntegerOp, _) \
M(BlockEntry, _) \
M(BoxInteger, _) \
M(CheckBoundBase, _) \
M(Comparison, _) \
M(InstanceCallBase, _) \
M(ReturnBase, _) \
M(ShiftIntegerOp, _) \
M(UnaryIntegerOp, _) \
M(UnboxInteger, _)
#define M(PROC, DITHER)

Definition at line 553 of file il.h.

◆ FOR_EACH_ALIAS_IDENTITY_VALUE

#define FOR_EACH_ALIAS_IDENTITY_VALUE (   V)
Value:
V(Unknown, 0) \
V(NotAliased, 1) \
V(Aliased, 2) \
V(AllocationSinkingCandidate, 3)
#define V(name)
Definition raw_object.h:124

Definition at line 2413 of file il.h.

◆ FOR_EACH_ASSERT_ASSIGNABLE_KIND

#define FOR_EACH_ASSERT_ASSIGNABLE_KIND (   V)
Value:
V(ParameterCheck) \
V(InsertedByFrontend) \
V(FromSource) \
V(Unknown)

Definition at line 4358 of file il.h.

◆ FOR_EACH_INSTRUCTION

#define FOR_EACH_INSTRUCTION (   M)

Definition at line 405 of file il.h.

◆ FORWARD_DECLARATION

#define FORWARD_DECLARATION (   type,
  attrs 
)    class type##Instr;

Definition at line 567 of file il.h.

◆ INSTRUCTION_TYPE_CHECK

#define INSTRUCTION_TYPE_CHECK (   Name,
  Attrs 
)     DECLARE_INSTRUCTION_TYPE_CHECK(Name, Name##Instr)

Definition at line 1162 of file il.h.

◆ KIND_DEFN

#define KIND_DEFN (   name)    k##name,

Definition at line 4364 of file il.h.

◆ PRINT_OPERANDS_TO_SUPPORT

#define PRINT_OPERANDS_TO_SUPPORT

Definition at line 700 of file il.h.

◆ PRINT_TO_SUPPORT

#define PRINT_TO_SUPPORT

Definition at line 699 of file il.h.

◆ READ_INSTRUCTION_FIELD

#define READ_INSTRUCTION_FIELD (   type,
  name 
)
Value:
, name(static_cast<std::remove_cv_t<type>>( \
d->Read<serializable_type_t<type>>()))

Definition at line 638 of file il.h.

◆ SIMD_BINARY_FLOAT_OP_LIST

#define SIMD_BINARY_FLOAT_OP_LIST (   M,
  OP,
  T 
)
Value:
SIMD_BINARY_OP(OP, T, Add) \
SIMD_BINARY_OP(OP, T, Sub) \
SIMD_BINARY_OP(OP, T, Mul) \
SIMD_BINARY_OP(OP, T, Div) \
SIMD_BINARY_OP(M, T, Min) \
SIMD_BINARY_OP(M, T, Max)
(See also: #define SIMD_BINARY_OP(M, T, Name), defined at line 11156 of il.h — cross-reference auto-linked by Doxygen; not part of this macro's value)

Definition at line 11160 of file il.h.

◆ SIMD_BINARY_INTEGER_OP_LIST

#define SIMD_BINARY_INTEGER_OP_LIST (   M,
  OP,
  T 
)
Value:
SIMD_BINARY_OP(OP, T, Add) \
SIMD_BINARY_OP(OP, T, Sub) \
SIMD_BINARY_OP(OP, T, BitAnd) \
SIMD_BINARY_OP(OP, T, BitOr) \
SIMD_BINARY_OP(OP, T, BitXor)

Definition at line 11170 of file il.h.

◆ SIMD_BINARY_OP

#define SIMD_BINARY_OP (   M,
  T,
  Name 
)    M(2, _, T##Name, (T, T), T)

Definition at line 11156 of file il.h.

◆ SIMD_CONVERSION

#define SIMD_CONVERSION (   M,
  FromType,
  ToType 
)     M(1, _, FromType##To##ToType, (FromType), ToType)

Definition at line 11185 of file il.h.

◆ SIMD_OP_LIST

#define SIMD_OP_LIST (   M,
  BINARY_OP 
)

Definition at line 11195 of file il.h.

◆ SIMD_PER_COMPONENT_XYZW

#define SIMD_PER_COMPONENT_XYZW (   M,
  Arity,
  Name,
  Inputs,
  Output 
)
Value:
M(Arity, _, Name##X, Inputs, Output) \
M(Arity, _, Name##Y, Inputs, Output) \
M(Arity, _, Name##Z, Inputs, Output) \
M(Arity, _, Name##W, Inputs, Output)
(Note: the tokens X, Y, Z, W, and Output in the macro body above are generated name suffixes and parameter names of SIMD_PER_COMPONENT_XYZW itself. The cross-references Doxygen attached to them — "Output, SkSLBench.cpp:61", "static const SkScalar Y", "static const SkScalar X", "#define W", "aaa.cpp:17", "#define Z" — point to unrelated symbols elsewhere in the Flutter Engine tree that happen to share those one-letter names; they are spurious auto-links and not part of this macro's definition.)

Definition at line 11178 of file il.h.

◆ VALUE_CASE

#define VALUE_CASE (   name,
  val 
)
Value:
case k##name: \
return #name;

◆ VALUE_DEFN

#define VALUE_DEFN (   name,
  val 
)    k##name = val,

Definition at line 2453 of file il.h.

◆ WRITE_INSTRUCTION_FIELD

#define WRITE_INSTRUCTION_FIELD (   type,
  name 
)
Value:
s->Write<serializable_type_t<type>>( \
static_cast<serializable_type_t<type>>(name));

Definition at line 635 of file il.h.