// NOTE(review): this fragment is the tail of a slow-path code emitter for a
// runtime-entry call (Dart VM compiler backend style). The function signature
// and several initializer/argument expressions were lost in extraction; the
// comments below describe only what the visible code shows and flag the gaps.
3080 {
// Emit a disassembly comment naming this slow path (this brace block opened
// before the visible region; it closes at "3083 }").
3082 __ Comment(
"slow path %s operation",
name());
3083 }
// NOTE(review): the initializers of the next three locals were elided by
// extraction — presumably UseSharedSlowPathStub(...), a live-FPU-register
// query, and GetNumberOfArgumentsForRuntimeCall() (all declared in the
// trailing outline) — confirm against the original source.
3084 const bool use_shared_stub =
3087 const bool live_fpu_registers =
3089 const intptr_t num_args =
// Whether the enclosing graph entry already establishes a frame.
3094 const bool has_frame =
compiler->flow_graph().graph_entry()->NeedsFrame();
3095 if (use_shared_stub) {
// No frame yet: on non-IA32 targets, assert the constant pool was allowed
// and then disallow it, then synthesize a minimal Dart frame for the call.
3096 if (!has_frame) {
3097#if !defined(TARGET_ARCH_IA32)
3098 ASSERT(
__ constant_pool_allowed());
3099 __ set_constant_pool_allowed(
false);
3100#endif
3101 __ EnterDartFrame(0);
3102 }
// ARM/ARM64 only: when we created the frame ourselves, mark that LR is
// restored from the frame here (code between the braces was elided).
3104#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
3105 if (!has_frame) {
3106
3107 RESTORES_LR_FROM_FRAME({});
3108 }
3109#endif
3110 } else {
// Non-shared-stub path: argument pushing (elided — see
// PushArgumentsForRuntimeCall in the trailing outline) followed by a
// direct runtime call.
3112
3115 __ CallRuntime(runtime_entry_, num_args);
3116 }
// Record a PC descriptor and a safepoint for the call just emitted
// (trailing arguments of AddCurrentDescriptor elided by extraction).
3118 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id,
3121 compiler->RecordSafepoint(locs, num_args);
// NOTE(review): this condition and its first branch body are incomplete;
// the visible structure distinguishes non-precompiled mode, optimizing
// compilation, and an unoptimized fallback — confirm against the original.
3122 if (!FLAG_precompiled_mode ||
3126
3129 }
else if (
compiler->is_optimizing()) {
3132 } else {
// Unoptimized code: attach a deopt descriptor after the call
// (remaining arguments elided by extraction).
3135
3136 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt,
3138 }
3139 }
// Tail work for the non-shared-stub path (body elided by extraction —
// presumably tearing down the frame entered above; verify).
3140 if (!use_shared_stub) {
3142 }
3143}
static CompilerState & Current()
static intptr_t ToDeoptAfter(intptr_t deopt_id)
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
intptr_t deopt_id() const
RegisterSet * live_registers()
intptr_t FpuRegisterCount() const
compiler::Label * entry_label()
virtual void EmitCodeAtSlowPathEntry(FlowGraphCompiler *compiler)
virtual intptr_t GetNumberOfArgumentsForRuntimeCall()
virtual void EmitSharedStubCall(FlowGraphCompiler *compiler, bool save_fpu_registers)
virtual void AddMetadataForRuntimeCall(FlowGraphCompiler *compiler)
virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler *compiler)
virtual const char * name()=0
void static bool EmittingComments()
static constexpr intptr_t kInvalidTryIndex