stub_code_compiler_ia32.cc
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"

// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
#include "vm/compiler/backend/il.h"

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/compiler/stub_code_compiler.h"

#if defined(TARGET_ARCH_IA32)

#include "vm/class_id.h"
#include "vm/code_entry_kind.h"
#include "vm/compiler/api/type_check_mode.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/constants.h"
#include "vm/dart_entry.h"
#include "vm/instructions.h"
#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"

#define __ assembler->

namespace dart {
namespace compiler {

// Ensures that [EAX] is a new object; if not, it will be added to the
// remembered set via a leaf runtime call.
//
// WARNING: This might clobber all registers except for [EAX], [THR] and [FP].
// The caller should simply call LeaveFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered() {
  // If the object is not in an active TLAB, we call a leaf-runtime to add it to
  // the remembered set and/or deferred marking worklist. This test assumes a
  // Page's TLAB use is always ascending.
  Label done;
  __ AndImmediate(ECX, EAX, target::kPageMask);
  __ LoadFromOffset(ECX, ECX, target::Page::original_top_offset());
  __ CompareRegisters(EAX, ECX);
  __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);

  {
    LeafRuntimeScope rt(assembler,
                        /*frame_size=*/2 * target::kWordSize,
                        /*preserve_registers=*/false);
    __ movl(Address(ESP, 1 * target::kWordSize), THR);
    __ movl(Address(ESP, 0 * target::kWordSize), EAX);
    rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
  }

  __ Bind(&done);
}
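
// Illustrative sketch (not part of the original file): the fast-path check
// above, written as C. PageOriginalTop() is a hypothetical accessor; the
// point is that an object needs no remembered-set work iff it was allocated
// inside the active TLAB, i.e. at or above the Page's original_top.
#if 0
static bool NeedsNoRememberedSetWork(uword object_addr) {
  uword page = object_addr & target::kPageMask;  // AndImmediate(ECX, ...)
  uword original_top = PageOriginalTop(page);    // LoadFromOffset(ECX, ...)
  // BranchIf(UNSIGNED_GREATER_EQUAL, &done): TLAB use only grows upward, so
  // anything at or above original_top is new since the TLAB became active.
  return object_addr >= original_top;
}
#endif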

// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : address of last argument in argument array.
//   ESP + 4*EDX : address of first argument in argument array.
//   ESP + 4*EDX + 4 : address of return value.
//   ECX : address of the runtime function to call.
//   EDX : number of arguments to the call.
// Must preserve callee-saved registers EDI and EBX.
void StubCodeCompiler::GenerateCallToRuntimeStub() {
  const intptr_t thread_offset = target::NativeArguments::thread_offset();
  const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
  const intptr_t argv_offset = target::NativeArguments::argv_offset();
  const intptr_t retval_offset = target::NativeArguments::retval_offset();

  __ movl(CODE_REG,
          Address(THR, target::Thread::call_to_runtime_stub_offset()));
  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to Dart VM C++ code.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);

  // Mark that the thread exited generated code through a runtime call.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(target::Thread::exit_through_runtime_call()));

#if defined(DEBUG)
  {
    Label ok;
    // Check that we are always entering from Dart code.
    __ cmpl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
    __ j(EQUAL, &ok, Assembler::kNearJump);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing VM code.
  __ movl(Assembler::VMTagAddress(), ECX);

  // Reserve space for arguments and align frame before entering C++ world.
  __ AddImmediate(
      ESP,
      Immediate(-static_cast<int32_t>(target::NativeArguments::StructSize())));
  if (OS::ActivationFrameAlignment() > 1) {
    __ andl(ESP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
  }

  // Pass NativeArguments structure by value and call runtime.
  __ movl(Address(ESP, thread_offset), THR);    // Set thread in NativeArgs.
  __ movl(Address(ESP, argc_tag_offset), EDX);  // Set argc in NativeArguments.
  // Compute argv.
  __ leal(EAX,
          Address(EBP, EDX, TIMES_4,
                  target::frame_layout.param_end_from_fp * target::kWordSize));
  __ movl(Address(ESP, argv_offset), EAX);  // Set argv in NativeArguments.
  __ addl(EAX,
          Immediate(1 * target::kWordSize));  // Retval is next to 1st argument.
  __ movl(Address(ESP, retval_offset), EAX);  // Set retval in NativeArguments.
  __ call(ECX);

  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));

  // Mark that the thread has not exited generated Dart code.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));

  // Reset exit frame information in Isolate's mutator thread structure.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  __ LeaveFrame();

  // The following return can jump to a lazy-deopt stub, which assumes EAX
  // contains a return value and will save it in a GC-visible way. We therefore
  // have to ensure EAX does not contain any garbage value left from the C
  // function we called (which has return type "void").
  // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
  __ xorl(EAX, EAX);
  __ ret();
}
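
// Illustrative sketch (not part of the original file): the four stores above
// fill in a by-value NativeArguments record at the bottom of the aligned
// frame. In C++ terms, with hypothetical field names mirroring the offsets:
#if 0
struct NativeArgumentsSketch {
  Thread* thread;     // <- THR              (Address(ESP, thread_offset))
  intptr_t argc_tag;  // <- EDX              (Address(ESP, argc_tag_offset))
  ObjectPtr* argv;    // <- &first argument  (Address(ESP, argv_offset))
  ObjectPtr* retval;  // <- argv + 1 word    (Address(ESP, retval_offset))
};
#endif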

void StubCodeCompiler::GenerateEnterSafepointStub() {
  __ pushal();
  __ subl(SPREG, Immediate(8));
  __ movsd(Address(SPREG, 0), XMM0);

  __ EnterFrame(0);
  __ ReserveAlignedFrameSpace(0);
  __ movl(EAX, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
  __ call(EAX);
  __ LeaveFrame();

  __ movsd(XMM0, Address(SPREG, 0));
  __ addl(SPREG, Immediate(8));
  __ popal();
  __ ret();
}

static void GenerateExitSafepointStubCommon(Assembler* assembler,
                                            uword runtime_entry_offset) {
  __ pushal();
  __ subl(SPREG, Immediate(8));
  __ movsd(Address(SPREG, 0), XMM0);

  __ EnterFrame(0);
  __ ReserveAlignedFrameSpace(0);

  // Set the execution state to VM while waiting for the safepoint to end.
  // This isn't strictly necessary but enables tests to check that we're not
  // in native code anymore. See tests/ffi/function_gc_test.dart for example.
  __ movl(Address(THR, target::Thread::execution_state_offset()),
          Immediate(target::Thread::vm_execution_state()));

  __ movl(EAX, Address(THR, runtime_entry_offset));
  __ call(EAX);
  __ LeaveFrame();

  __ movsd(XMM0, Address(SPREG, 0));
  __ addl(SPREG, Immediate(8));
  __ popal();
  __ ret();
}

void StubCodeCompiler::GenerateExitSafepointStub() {
  GenerateExitSafepointStubCommon(
      assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}

void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
  GenerateExitSafepointStubCommon(
      assembler,
      kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
}

void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
                                            Register dst,
                                            Register tmp) {
  // Only used in AOT.
  __ Breakpoint();
}

// Calls a native function inside a safepoint.
//
// On entry:
//   Stack: set up for native call
//   EAX: target to call
//
// On exit:
//   Stack: preserved
//   EBX: clobbered (even though it's normally callee-saved)
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
  __ popl(EBX);

  __ movl(ECX, compiler::Immediate(target::Thread::exit_through_ffi()));
  __ TransitionGeneratedToNative(EAX, FPREG, ECX /*volatile*/,
                                 /*enter_safepoint=*/true);
  __ call(EAX);
  __ TransitionNativeToGenerated(ECX /*volatile*/, /*leave_safepoint=*/true);

  __ jmp(EBX);
}
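
// Illustrative sketch (not part of the original file): a hypothetical caller
// of this stub. The stub pops its own return address into EBX up front, which
// is why EBX is documented as clobbered above:
//
//   movl  EAX, <address of C function>      ; target to call
//   ...set up outgoing native arguments...
//   call  CallNativeThroughSafepointStub    ; stub pops return address -> EBX
//   ; inside the stub: transition to native, call EAX, transition back,
//   ; then "jmp EBX" resumes here.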

void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
  Label ret_4;

  // EAX is volatile and doesn't hold any arguments.

  Label body, load_tramp_addr;
  const intptr_t kCallLength = 5;
  for (intptr_t i = 0; i < FfiCallbackMetadata::NumCallbackTrampolinesPerPage();
       ++i) {
    // The FfiCallbackMetadata table is keyed by the trampoline entry point. So
    // look up the current PC, then jump to the shared section. There's no easy
    // way to get the PC in ia32 so we have to do a call, grab the return
    // address from the stack, then return here (mismatched call/ret causes
    // problems), then jump to the shared section.
    const intptr_t size_before = __ CodeSize();
    __ call(&load_tramp_addr);
    const intptr_t size_after = __ CodeSize();
    ASSERT_EQUAL(size_after - size_before, kCallLength);
    __ jmp(&body);
  }

  ASSERT_EQUAL(__ CodeSize(),
               FfiCallbackMetadata::kNativeCallbackTrampolineSize *
                   FfiCallbackMetadata::NumCallbackTrampolinesPerPage());

  const intptr_t shared_stub_start = __ CodeSize();

  __ Bind(&load_tramp_addr);
  // Load the return address into EAX, and subtract the size of the call
  // instruction. This is our original trampoline address.
  __ movl(EAX, Address(SPREG, 0));
  __ subl(EAX, Immediate(kCallLength));
  __ ret();

  __ Bind(&body);

  // Save THR and EBX which are callee-saved.
  __ pushl(THR);
  __ pushl(EBX);

  // THR & return address
  COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 4);

  // Load the thread, verify the callback ID and exit the safepoint.
  //
  // We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
  // code size on this shared stub.
  {
    __ EnterFrame(0);
    // entry_point, trampoline_type, &trampoline_type, &entry_point, trampoline
    // ^------ GetFfiCallbackMetadata args ------^
    __ ReserveAlignedFrameSpace(5 * target::kWordSize);

    // Trampoline arg.
    __ movl(Address(SPREG, 0 * target::kWordSize), EAX);

    // Pointer to trampoline type stack slot.
    __ movl(EAX, SPREG);
    __ addl(EAX, Immediate(3 * target::kWordSize));
    __ movl(Address(SPREG, 2 * target::kWordSize), EAX);

    // Pointer to entry point stack slot.
    __ addl(EAX, Immediate(target::kWordSize));
    __ movl(Address(SPREG, 1 * target::kWordSize), EAX);

    __ movl(EAX,
            Immediate(reinterpret_cast<int64_t>(DLRT_GetFfiCallbackMetadata)));
    __ call(EAX);
    __ movl(THR, EAX);

    // Save the trampoline type in EBX, and the entry point in ECX.
    __ movl(EBX, Address(SPREG, 3 * target::kWordSize));
    __ movl(ECX, Address(SPREG, 4 * target::kWordSize));

    __ LeaveFrame();

    // Save the trampoline type to the stack, because we'll need it after the
    // call to decide whether to ret() or ret(4).
    __ pushl(EBX);
  }

  Label async_callback;
  Label done;

  // If GetFfiCallbackMetadata returned a null thread, it means that the async
  // callback was invoked after it was deleted. In this case, do nothing.
  __ cmpl(THR, Immediate(0));
  __ j(EQUAL, &done);

  // Check the trampoline type to see how the callback should be invoked.
  __ cmpl(EBX, Immediate(static_cast<uword>(
                   FfiCallbackMetadata::TrampolineType::kAsync)));
  __ j(EQUAL, &async_callback);

  // Sync callback. The entry point contains the target function, so just call
  // it. DLRT_GetThreadForNativeCallbackTrampoline exited the safepoint, so
  // re-enter it afterwards.

  // On entry to the function, there will be two extra slots on the stack:
  // the saved THR and the return address. The target will know to skip them.
  __ call(ECX);

  // Takes care to not clobber *any* registers (besides scratch).
  __ EnterFullSafepoint(/*scratch=*/ECX);

  // Pop the trampoline type into ECX.
  __ popl(ECX);

  // Restore callee-saved registers.
  __ popl(EBX);
  __ popl(THR);

  __ cmpl(ECX, Immediate(static_cast<uword>(
                   FfiCallbackMetadata::TrampolineType::kSyncStackDelta4)));
  __ j(EQUAL, &ret_4, Assembler::kNearJump);
  __ ret();

  __ Bind(&ret_4);
  __ ret(Immediate(4));

  __ Bind(&async_callback);

  // Async callback. The entrypoint marshals the arguments into a message and
  // sends it over the send port. DLRT_GetThreadForNativeCallbackTrampoline
  // entered a temporary isolate, so exit it afterwards.

  // On entry to the function, there will be two extra slots on the stack:
  // the saved THR and the return address. The target will know to skip them.
  __ call(ECX);

  // Exit the temporary isolate.
  {
    __ EnterFrame(0);
    __ ReserveAlignedFrameSpace(0);

    __ movl(EAX,
            Immediate(reinterpret_cast<int64_t>(DLRT_ExitTemporaryIsolate)));
    __ CallCFunction(EAX);

    __ LeaveFrame();
  }

  __ Bind(&done);

  // Pop the trampoline type into ECX.
  __ popl(ECX);

  // Restore callee-saved registers.
  __ popl(EBX);
  __ popl(THR);

  // Stack delta is always 0 for async callbacks.
  __ ret();

  // 'kNativeCallbackSharedStubSize' is an upper bound because the exact
  // instruction size can vary slightly based on OS calling conventions.
  ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
                       FfiCallbackMetadata::kNativeCallbackSharedStubSize);

#if defined(DEBUG)
  while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
    __ Breakpoint();
  }
#endif
}
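
// Illustrative sketch (not part of the original file): the call/pop trick the
// trampolines use to find their own entry point, since ia32 cannot read EIP
// directly. kCallLength is the encoded size of the near call (5 bytes).
#if 0
uword CurrentTrampolineEntryPoint(const uword* stack_pointer) {
  // "call load_tramp_addr" pushed the address of the instruction *after* the
  // call; subtracting the call's own length yields the trampoline entry.
  uword return_address = stack_pointer[0];
  return return_address - /*kCallLength=*/5;
}
#endif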

void StubCodeCompiler::GenerateSharedStubGeneric(
    bool save_fpu_registers,
    intptr_t self_code_stub_offset_from_thread,
    bool allow_return,
    std::function<void()> perform_runtime_call) {
  // Only used in AOT.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateSharedStub(
    bool save_fpu_registers,
    const RuntimeEntry* target,
    intptr_t self_code_stub_offset_from_thread,
    bool allow_return,
    bool store_runtime_result_in_result_register) {
  // Only used in AOT.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
  // Only used in AOT.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
  // Only used in AOT.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
  // Only used in AOT.
  __ Breakpoint();
}

// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : address of return value.
//   EAX : address of first argument in argument array.
//   ECX : address of the native function to call.
//   EDX : argc_tag including number of arguments and function kind.
static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
                                              Address wrapper_address) {
  const intptr_t native_args_struct_offset =
      target::NativeEntry::kNumCallWrapperArguments * target::kWordSize;
  const intptr_t thread_offset =
      target::NativeArguments::thread_offset() + native_args_struct_offset;
  const intptr_t argc_tag_offset =
      target::NativeArguments::argc_tag_offset() + native_args_struct_offset;
  const intptr_t argv_offset =
      target::NativeArguments::argv_offset() + native_args_struct_offset;
  const intptr_t retval_offset =
      target::NativeArguments::retval_offset() + native_args_struct_offset;

  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to Dart VM code.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);

  // Mark that the thread exited generated code through a runtime call.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(target::Thread::exit_through_runtime_call()));

#if defined(DEBUG)
  {
    Label ok;
    // Check that we are always entering from Dart code.
    __ cmpl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
    __ j(EQUAL, &ok, Assembler::kNearJump);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing native code.
  __ movl(Assembler::VMTagAddress(), ECX);

  // Reserve space for the native arguments structure, the outgoing parameters
  // (pointer to the native arguments structure, the C function entry point)
  // and align frame before entering the C++ world.
  __ AddImmediate(
      ESP,
      Immediate(-static_cast<int32_t>(target::NativeArguments::StructSize()) -
                (2 * target::kWordSize)));
  if (OS::ActivationFrameAlignment() > 1) {
    __ andl(ESP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
  }

  // Pass NativeArguments structure by value and call native function.
  // Set thread in NativeArgs.
  __ movl(Address(ESP, thread_offset), THR);
  // Set argc in NativeArguments.
  __ movl(Address(ESP, argc_tag_offset), EDX);
  // Set argv in NativeArguments.
  __ movl(Address(ESP, argv_offset), EAX);
  // Compute return value addr.
  __ leal(EAX, Address(EBP, (target::frame_layout.param_end_from_fp + 1) *
                                target::kWordSize));
  // Set retval in NativeArguments.
  __ movl(Address(ESP, retval_offset), EAX);
  // Pointer to the NativeArguments.
  __ leal(EAX, Address(ESP, 2 * target::kWordSize));
  // Pass the pointer to the NativeArguments.
  __ movl(Address(ESP, 0), EAX);

  __ movl(Address(ESP, target::kWordSize), ECX);  // Function to call.
  __ call(wrapper_address);

  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));

  // Mark that the thread has not exited generated Dart code.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));

  // Reset exit frame information in Isolate's mutator thread structure.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  __ LeaveFrame();
  __ ret();
}

void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR,
              target::Thread::no_scope_native_wrapper_entry_point_offset()));
}

void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR,
              target::Thread::auto_scope_native_wrapper_entry_point_offset()));
}

// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : address of return value.
//   EAX : address of first argument in argument array.
//   ECX : address of the native function to call.
//   EDX : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR,
              target::Thread::bootstrap_native_wrapper_entry_point_offset()));
}

// Input parameters:
//   ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
  __ EnterStubFrame();
  __ pushl(ARGS_DESC_REG);  // Preserve arguments descriptor array.
  __ pushl(Immediate(0));   // Setup space on stack for return value.
  __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
  __ popl(EAX);            // Get Code object result.
  __ popl(ARGS_DESC_REG);  // Restore arguments descriptor array.
  // Remove the stub frame as we are about to jump to the dart function.
  __ LeaveFrame();

  __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
}

// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub() {
  Label monomorphic;
  __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);

  // This was a static call.
  __ EnterStubFrame();
  __ pushl(ARGS_DESC_REG);  // Preserve arguments descriptor array.
  __ pushl(Immediate(0));   // Setup space on stack for return value.
  __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
  __ popl(EAX);            // Get Code object.
  __ popl(ARGS_DESC_REG);  // Restore arguments descriptor array.
  __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();

  __ Bind(&monomorphic);
  // This was a switchable call.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Result slot.
  __ pushl(EBX);           // Preserve receiver.
  __ pushl(ECX);           // Old cache value (also 2nd return value).
  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
  __ popl(ECX);       // Get target cache object.
  __ popl(EBX);       // Restore receiver.
  __ popl(CODE_REG);  // Get target Code object.
  __ movl(EAX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
                                          CodeEntryKind::kMonomorphic)));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();
}

// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
  __ popl(EAX);  // Get Code object.
  __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();
}

// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
  __ EnterStubFrame();
  // Preserve type arguments register.
  __ pushl(AllocateObjectABI::kTypeArgumentsReg);
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
  __ popl(EAX);  // Get Code object.
  // Restore type arguments register.
  __ popl(AllocateObjectABI::kTypeArgumentsReg);
  __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();
}
622
623// Input parameters:
624// EDX: smi-tagged argument count, may be zero.
625// EBP[target::frame_layout.param_end_from_fp + 1]: last argument.
626// Uses EAX, EBX, ECX, EDX, EDI.
627static void PushArrayOfArguments(Assembler* assembler) {
628 // Allocate array to store arguments of caller.
629 const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
630 __ movl(ECX, raw_null); // Null element type for raw Array.
632 __ SmiUntag(EDX);
633 // EAX: newly allocated array.
634 // EDX: length of the array (was preserved by the stub).
635 __ pushl(EAX); // Array is in EAX and on top of stack.
636 __ leal(EBX,
637 Address(EBP, EDX, TIMES_4,
638 target::frame_layout.param_end_from_fp * target::kWordSize));
639 __ leal(ECX, FieldAddress(EAX, target::Array::data_offset()));
640 // EBX: address of first argument on stack.
641 // ECX: address of first argument in array.
642 Label loop, loop_condition;
643 __ jmp(&loop_condition, Assembler::kNearJump);
644 __ Bind(&loop);
645 __ movl(EDI, Address(EBX, 0));
646 // Generational barrier is needed, array is not necessarily in new space.
647 __ StoreIntoObject(EAX, Address(ECX, 0), EDI);
648 __ AddImmediate(ECX, Immediate(target::kWordSize));
649 __ AddImmediate(EBX, Immediate(-target::kWordSize));
650 __ Bind(&loop_condition);
651 __ decl(EDX);
653}
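
// Illustrative sketch (not part of the original file): the copy loop above in
// C form. Stack arguments sit at descending addresses while the array fills
// upward, hence the opposite-direction pointer updates.
#if 0
void CopyArgumentsSketch(ObjectPtr* first_arg_on_stack,  // Highest address.
                         ObjectPtr* array_data,
                         intptr_t count) {
  for (intptr_t i = 0; i < count; i++) {
    // The real code uses StoreIntoObject, which applies the generational
    // write barrier because the array may be in old space.
    array_data[i] = first_arg_on_stack[-i];
  }
}
#endif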
654
655// Used by eager and lazy deoptimization. Preserve result in EAX if necessary.
656// This stub translates optimized frame into unoptimized frame. The optimized
657// frame can contain values in registers and on stack, the unoptimized
658// frame contains all values on stack.
659// Deoptimization occurs in following steps:
660// - Push all registers that can contain values.
661// - Call C routine to copy the stack and saved registers into temporary buffer.
662// - Adjust caller's frame to correct unoptimized frame size.
663// - Fill the unoptimized frame.
664// - Materialize objects that require allocation (e.g. Double instances).
665// GC can occur only after frame is fully rewritten.
666// Stack after EnterDartFrame(0) below:
667// +------------------+
668// | PC marker | <- TOS
669// +------------------+
670// | Saved FP | <- FP of stub
671// +------------------+
672// | return-address | (deoptimization point)
673// +------------------+
674// | ... | <- SP of optimized frame
675//
676// Parts of the code cannot GC, part of the code can GC.
677static void GenerateDeoptimizationSequence(Assembler* assembler,
678 DeoptStubKind kind) {
679 // Leaf runtime function DeoptimizeCopyFrame expects a Dart frame.
680 __ EnterDartFrame(0);
681 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
682 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
683 const intptr_t saved_result_slot_from_fp =
686 const intptr_t saved_exception_slot_from_fp =
689 const intptr_t saved_stacktrace_slot_from_fp =
692 // Result in EAX is preserved as part of pushing all registers below.
693
694 // Push registers in their enumeration order: lowest register number at
695 // lowest address.
696 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
697 if (i == CODE_REG) {
698 // Save the original value of CODE_REG pushed before invoking this stub
699 // instead of the value used to call this stub.
700 __ pushl(Address(EBP, 2 * target::kWordSize));
701 } else {
702 __ pushl(static_cast<Register>(i));
703 }
704 }
705 __ subl(ESP, Immediate(kNumberOfXmmRegisters * kFpuRegisterSize));
706 intptr_t offset = 0;
707 for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
708 XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
709 __ movups(Address(ESP, offset), xmm_reg);
711 }
712
713 {
714 __ movl(ECX, ESP); // Preserve saved registers block.
715 LeafRuntimeScope rt(assembler,
716 /*frame_size=*/2 * target::kWordSize,
717 /*preserve_registers=*/false);
718 bool is_lazy =
719 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
720 __ movl(Address(ESP, 0 * target::kWordSize),
721 ECX); // Start of register block.
722 __ movl(Address(ESP, 1 * target::kWordSize), Immediate(is_lazy ? 1 : 0));
723 rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
724 // Result (EAX) is stack-size (FP - SP) in bytes.
725 }
726
727 if (kind == kLazyDeoptFromReturn) {
728 // Restore result into EBX temporarily.
729 __ movl(EBX, Address(EBP, saved_result_slot_from_fp * target::kWordSize));
730 } else if (kind == kLazyDeoptFromThrow) {
731 // Restore result into EBX temporarily.
732 __ movl(EBX,
733 Address(EBP, saved_exception_slot_from_fp * target::kWordSize));
734 __ movl(ECX,
735 Address(EBP, saved_stacktrace_slot_from_fp * target::kWordSize));
736 }
737
738 __ LeaveDartFrame();
739 __ popl(EDX); // Preserve return address.
740 __ movl(ESP, EBP); // Discard optimized frame.
741 __ subl(ESP, EAX); // Reserve space for deoptimized frame.
742 __ pushl(EDX); // Restore return address.
743
744 // Leaf runtime function DeoptimizeFillFrame expects a Dart frame.
745 __ EnterDartFrame(0);
746 if (kind == kLazyDeoptFromReturn) {
747 __ pushl(EBX); // Preserve result as first local.
748 } else if (kind == kLazyDeoptFromThrow) {
749 __ pushl(EBX); // Preserve exception as first local.
750 __ pushl(ECX); // Preserve stacktrace as first local.
751 }
752 {
753 LeafRuntimeScope rt(assembler,
754 /*frame_size=*/1 * target::kWordSize,
755 /*preserve_registers=*/false);
756 __ movl(Address(ESP, 0), EBP); // Pass last FP as parameter on stack.
757 rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
758 }
759 if (kind == kLazyDeoptFromReturn) {
760 // Restore result into EBX.
761 __ movl(EBX, Address(EBP, target::frame_layout.first_local_from_fp *
763 } else if (kind == kLazyDeoptFromThrow) {
764 // Restore result into EBX.
765 __ movl(EBX, Address(EBP, target::frame_layout.first_local_from_fp *
767 __ movl(ECX, Address(EBP, (target::frame_layout.first_local_from_fp - 1) *
769 }
770 // Code above cannot cause GC.
771 __ LeaveDartFrame();
772
773 // Frame is fully rewritten at this point and it is safe to perform a GC.
774 // Materialize any objects that were deferred by FillFrame because they
775 // require allocation.
776 __ EnterStubFrame();
777 if (kind == kLazyDeoptFromReturn) {
778 __ pushl(EBX); // Preserve result, it will be GC-d here.
779 } else if (kind == kLazyDeoptFromThrow) {
780 // Preserve CODE_REG for one more runtime call.
781 __ pushl(CODE_REG);
782 __ pushl(EBX); // Preserve exception, it will be GC-d here.
783 __ pushl(ECX); // Preserve stacktrace, it will be GC-d here.
784 }
785 __ pushl(Immediate(target::ToRawSmi(0))); // Space for the result.
786 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
787 // Result tells stub how many bytes to remove from the expression stack
788 // of the bottom-most frame. They were used as materialization arguments.
789 __ popl(EBX);
790 __ SmiUntag(EBX);
791 if (kind == kLazyDeoptFromReturn) {
792 __ popl(EAX); // Restore result.
793 } else if (kind == kLazyDeoptFromThrow) {
794 __ popl(EDX); // Restore stacktrace.
795 __ popl(EAX); // Restore exception.
796 __ popl(CODE_REG);
797 }
798 __ LeaveStubFrame();
799
800 __ popl(ECX); // Pop return address.
801 __ addl(ESP, EBX); // Remove materialization arguments.
802 __ pushl(ECX); // Push return address.
803 // The caller is responsible for emitting the return instruction.
804
805 if (kind == kLazyDeoptFromThrow) {
806 // Unoptimized frame is now ready to accept the exception. Rethrow it to
807 // find the right handler. Ask rethrow machinery to bypass debugger it
808 // was already notified about this exception.
809 __ EnterStubFrame();
810 __ pushl(Immediate(target::ToRawSmi(0))); // Space for the result.
811 __ pushl(EAX); // Exception
812 __ pushl(EDX); // Stacktrace
813 __ pushl(Immediate(target::ToRawSmi(1))); // Bypass debugger.
814 __ CallRuntime(kReThrowRuntimeEntry, 3);
815 __ LeaveStubFrame();
816 }
817}
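
// Illustrative sketch (not part of the original file): how the saved-register
// slot constants at the top of GenerateDeoptimizationSequence follow from the
// push order. All kNumberOfCpuRegisters registers are pushed from the highest
// register index down, so a register r lands at this FP-relative slot:
#if 0
intptr_t SavedRegisterSlotFromFp(intptr_t r) {
  // first_local_from_fp is the first slot below the fixed frame; the register
  // block starts there, with higher-numbered registers closer to FP.
  return target::frame_layout.first_local_from_fp + 1 -
         (kNumberOfCpuRegisters - r);
}
#endif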

// EAX: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
  // Return address for "call" to deopt stub.
  __ pushl(Immediate(kZapReturnAddress));
  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
  __ ret();
}

// EAX: exception, must be preserved
// EDX: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
  // Return address for "call" to deopt stub.
  __ pushl(Immediate(kZapReturnAddress));
  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
  __ ret();
}

void StubCodeCompiler::GenerateDeoptimizeStub() {
  GenerateDeoptimizationSequence(assembler, kEagerDeopt);
  __ ret();
}

static void GenerateNoSuchMethodDispatcherCode(Assembler* assembler) {
  __ EnterStubFrame();
  __ movl(EDX, FieldAddress(
                   ECX, target::CallSiteData::arguments_descriptor_offset()));

  // Load the receiver.
  __ movl(EDI, FieldAddress(EDX, target::ArgumentsDescriptor::size_offset()));
  __ movl(EAX,
          Address(EBP, EDI, TIMES_HALF_WORD_SIZE,
                  target::frame_layout.param_end_from_fp * target::kWordSize));
  __ pushl(Immediate(0));  // Setup space on stack for result.
  __ pushl(EAX);           // Receiver.
  __ pushl(ECX);           // ICData/MegamorphicCache.
  __ pushl(EDX);           // Arguments descriptor array.

  // Adjust arguments count.
  __ cmpl(
      FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
      Immediate(0));
  __ movl(EDX, EDI);
  Label args_count_ok;
  __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
  __ addl(EDX, Immediate(target::ToRawSmi(1)));  // Include the type arguments.
  __ Bind(&args_count_ok);

  // EDX: Smi-tagged arguments array length.
  PushArrayOfArguments(assembler);
  const intptr_t kNumArgs = 4;
  __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
  __ Drop(4);
  __ popl(EAX);  // Return value.
  __ LeaveFrame();
  __ ret();
}

void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
  GenerateNoSuchMethodDispatcherCode(assembler);
}

// Called for inline allocation of arrays.
// Input registers (preserved):
//   AllocateArrayABI::kLengthReg: array length as Smi.
//   AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
// Output registers:
//   AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
//   EBX, EDI
void StubCodeCompiler::GenerateAllocateArrayStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    Label slow_case;
    // Compute the size to be allocated, it is based on the array length
    // and is computed as:
    // RoundedAllocationSize(
    //     (array_length * kWordSize) + target::Array::header_size()).
    // Assert that length is a Smi.
    __ testl(AllocateArrayABI::kLengthReg, Immediate(kSmiTagMask));
    __ j(NOT_ZERO, &slow_case);

    // Check for maximum allowed length.
    const Immediate& max_len =
        Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
    __ cmpl(AllocateArrayABI::kLengthReg, max_len);
    __ j(ABOVE, &slow_case);

    NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, &slow_case, EAX));

    const intptr_t fixed_size_plus_alignment_padding =
        target::Array::header_size() +
        target::ObjectAlignment::kObjectAlignment - 1;
    // AllocateArrayABI::kLengthReg is Smi.
    __ leal(EBX, Address(AllocateArrayABI::kLengthReg, TIMES_2,
                         fixed_size_plus_alignment_padding));
    ASSERT(kSmiTagShift == 1);
    __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));

    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: array length as Smi.
    // EBX: allocation size.

    const intptr_t cid = kArrayCid;
    __ movl(AllocateArrayABI::kResultReg,
            Address(THR, target::Thread::top_offset()));
    __ addl(EBX, AllocateArrayABI::kResultReg);
    __ j(CARRY, &slow_case);

    // Check if the allocation fits into the remaining space.
    // AllocateArrayABI::kResultReg: potential new object start.
    // EBX: potential next object start.
    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: array length as Smi.
    __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
    __ j(ABOVE_EQUAL, &slow_case);
    __ CheckAllocationCanary(AllocateArrayABI::kResultReg);

    // Successfully allocated the object(s), now update top to point to
    // next object start and initialize the object.
    __ movl(Address(THR, target::Thread::top_offset()), EBX);
    __ addl(AllocateArrayABI::kResultReg, Immediate(kHeapObjectTag));

    // Initialize the tags.
    // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
    // EBX: allocation size.
    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: array length as Smi.
    {
      Label size_tag_overflow, done;
      __ movl(EDI, EBX);
      __ cmpl(EDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
      __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
      __ shll(EDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
                             target::ObjectAlignment::kObjectAlignmentLog2));
      __ jmp(&done, Assembler::kNearJump);

      __ Bind(&size_tag_overflow);
      __ movl(EDI, Immediate(0));
      __ Bind(&done);

      // Get the class index and insert it into the tags.
      const uword tags = target::MakeTagWordForNewSpaceObject(cid, 0);
      __ orl(EDI, Immediate(tags));
      __ movl(FieldAddress(AllocateArrayABI::kResultReg,
                           target::Object::tags_offset()),
              EDI);  // Tags.
    }
    // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
    // EBX: allocation size.
    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: Array length as Smi (preserved).
    // Store the type argument field.
    // No generational barrier needed, since we store into a new object.
    __ StoreIntoObjectNoBarrier(
        AllocateArrayABI::kResultReg,
        FieldAddress(AllocateArrayABI::kResultReg,
                     target::Array::type_arguments_offset()),
        AllocateArrayABI::kTypeArgumentsReg);

    // Set the length field.
    __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
                                FieldAddress(AllocateArrayABI::kResultReg,
                                             target::Array::length_offset()),
                                AllocateArrayABI::kLengthReg);

    // Initialize all array elements to raw_null.
    // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
    // EBX: allocation size.
    // EDI: iterator which initially points to the start of the variable
    // data area to be initialized.
    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: array length as Smi.
    __ leal(EBX, FieldAddress(AllocateArrayABI::kResultReg, EBX, TIMES_1, 0));
    __ leal(EDI, FieldAddress(AllocateArrayABI::kResultReg,
                              target::Array::header_size()));
    Label loop;
    __ Bind(&loop);
    for (intptr_t offset = 0; offset < target::kObjectAlignment;
         offset += target::kWordSize) {
      // No generational barrier needed, since we are storing null.
      __ StoreObjectIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
                                        Address(EDI, offset), NullObject());
    }
    // Safe to only check every kObjectAlignment bytes instead of each word.
    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
    __ addl(EDI, Immediate(target::kObjectAlignment));
    __ cmpl(EDI, EBX);
    __ j(UNSIGNED_LESS, &loop);
    __ WriteAllocationCanary(EBX);  // Fix overshoot.
    __ ret();

    // Unable to allocate the array using the fast inline code, just call
    // into the runtime.
    __ Bind(&slow_case);
  }
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ pushl(AllocateArrayABI::kLengthReg);         // Array length as Smi.
  __ pushl(AllocateArrayABI::kTypeArgumentsReg);  // Type arguments.
  __ CallRuntime(kAllocateArrayRuntimeEntry, 2);

  // Write-barrier elimination might be enabled for this array (depending on
  // the array length). To be sure we will check if the allocated object is in
  // old space and if so call a leaf runtime to add it to the remembered set.
  __ movl(EAX, Address(ESP, 2 * target::kWordSize));
  EnsureIsNewOrRemembered();

  __ popl(AllocateArrayABI::kTypeArgumentsReg);  // Pop type arguments.
  __ popl(AllocateArrayABI::kLengthReg);         // Pop array length argument.
  __ popl(AllocateArrayABI::kResultReg);  // Pop return value from return slot.
  __ LeaveFrame();
  __ ret();
}
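
// Illustrative sketch (not part of the original file): the size computation in
// the fast path above, in C form. The leal with TIMES_2 exploits the fact that
// the Smi tag shift is 1, so a Smi length scaled by 2 equals length * 4 bytes.
#if 0
intptr_t ArrayAllocationSizeSketch(intptr_t smi_length) {
  intptr_t length = smi_length >> kSmiTagShift;  // Untag.
  intptr_t size = target::Array::header_size() + length * target::kWordSize;
  // Round up to the next object-alignment boundary: add alignment - 1 (the
  // padding folded into fixed_size_plus_alignment_padding), then mask.
  return (size + target::ObjectAlignment::kObjectAlignment - 1) &
         -target::ObjectAlignment::kObjectAlignment;
}
#endif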

// Called when invoking dart code from C++ (VM code).
// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : code object of the dart function to call.
//   ESP + 8 : arguments descriptor array.
//   ESP + 12 : arguments array.
//   ESP + 16 : current thread.
// Uses EAX, EDX, ECX, EDI as temporary registers.
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
  const intptr_t kTargetCodeOffset = 2 * target::kWordSize;
  const intptr_t kArgumentsDescOffset = 3 * target::kWordSize;
  const intptr_t kArgumentsOffset = 4 * target::kWordSize;
  const intptr_t kThreadOffset = 5 * target::kWordSize;
  __ EnterFrame(0);

  // Push code object to PC marker slot.
  __ movl(EAX, Address(EBP, kThreadOffset));
  __ pushl(Address(EAX, target::Thread::invoke_dart_code_stub_offset()));

  // Save C++ ABI callee-saved registers.
  __ pushl(EBX);
  __ pushl(ESI);
  __ pushl(EDI);

  // Set up THR, which caches the current thread in Dart code.
  __ movl(THR, EAX);

#if defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Save the current VMTag on the stack.
  __ movl(ECX, Assembler::VMTagAddress());
  __ pushl(ECX);

  // Save top resource and top exit frame info. Use EDX as a temporary
  // register. StackFrameIterator reads the top exit frame info saved in this
  // frame.
  __ movl(EDX, Address(THR, target::Thread::top_resource_offset()));
  __ pushl(EDX);
  __ movl(Address(THR, target::Thread::top_resource_offset()), Immediate(0));
  __ movl(EAX, Address(THR, target::Thread::exit_through_ffi_offset()));
  __ pushl(EAX);
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));
  // The constant target::frame_layout.exit_link_slot_from_entry_fp must be
  // kept in sync with the code below.
  ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -8);
  __ movl(EDX, Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ pushl(EDX);
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  // In debug mode, verify that we've pushed the top exit frame info at the
  // correct offset from FP.
  __ EmitEntryFrameVerification();

  // Mark that the thread is executing Dart code. Do this after initializing
  // the exit link for the profiler.
  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));

  // Load arguments descriptor array into EDX.
  __ movl(EDX, Address(EBP, kArgumentsDescOffset));

  // Load number of arguments into EBX and adjust count for type arguments.
  __ movl(EBX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
  __ cmpl(
      FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
      Immediate(0));
  Label args_count_ok;
  __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
  __ addl(EBX, Immediate(target::ToRawSmi(1)));  // Include the type arguments.
  __ Bind(&args_count_ok);
  // Save number of arguments as Smi on stack, replacing ArgumentsDesc.
  __ movl(Address(EBP, kArgumentsDescOffset), EBX);
  __ SmiUntag(EBX);

  // Set up arguments for the dart call.
  Label push_arguments;
  Label done_push_arguments;
  __ testl(EBX, EBX);  // Check if there are arguments.
  __ j(ZERO, &done_push_arguments, Assembler::kNearJump);
  __ movl(EAX, Immediate(0));

  // Compute address of 'arguments array' data area into EDI.
  __ movl(EDI, Address(EBP, kArgumentsOffset));
  __ leal(EDI, FieldAddress(EDI, target::Array::data_offset()));

  __ Bind(&push_arguments);
  __ movl(ECX, Address(EDI, EAX, TIMES_4, 0));
  __ pushl(ECX);
  __ incl(EAX);
  __ cmpl(EAX, EBX);
  __ j(LESS, &push_arguments, Assembler::kNearJump);
  __ Bind(&done_push_arguments);

  // Call the dart code entrypoint.
  __ movl(EAX, Address(EBP, kTargetCodeOffset));
  __ call(FieldAddress(EAX, target::Code::entry_point_offset()));

  // Read the saved number of passed arguments as Smi.
  __ movl(EDX, Address(EBP, kArgumentsDescOffset));
  // Get rid of arguments pushed on the stack.
  __ leal(ESP, Address(ESP, EDX, TIMES_2, 0));  // EDX is a Smi.

  // Restore the saved top exit frame info and top resource back into the
  // Isolate structure.
  __ popl(Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ popl(Address(THR, target::Thread::exit_through_ffi_offset()));
  __ popl(Address(THR, target::Thread::top_resource_offset()));

  // Restore the current VMTag from the stack.
  __ popl(Assembler::VMTagAddress());

#if defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Restore C++ ABI callee-saved registers.
  __ popl(EDI);
  __ popl(ESI);
  __ popl(EBX);

  // Restore the frame pointer.
  __ LeaveFrame();

  __ ret();
}
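
// Illustrative sketch (not part of the original file): the VM side invokes
// this stub through a plain C function pointer of roughly this shape
// (hypothetical typedef; argument order matches the ESP offsets above).
#if 0
typedef uword (*InvokeDartCodeSketch)(uword target_code,        // ESP + 4
                                      uword arguments_desc,     // ESP + 8
                                      uword arguments_array,    // ESP + 12
                                      Thread* current_thread);  // ESP + 16
#endif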

// Helper to generate the space allocation for the context stubs.
// This does not initialize the fields of the context.
// Input:
//   EDX: number of context variables.
// Output:
//   EAX: new allocated Context object.
// Clobbered:
//   EBX
static void GenerateAllocateContextSpaceStub(Assembler* assembler,
                                             Label* slow_case) {
  // First compute the rounded instance size.
  // EDX: number of context variables.
  intptr_t fixed_size_plus_alignment_padding =
      target::Context::header_size() +
      target::ObjectAlignment::kObjectAlignment - 1;
  __ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
  __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));

  NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, EAX));

  // Now allocate the object.
  // EDX: number of context variables.
  __ movl(EAX, Address(THR, target::Thread::top_offset()));
  __ addl(EBX, EAX);
  // Check if the allocation fits into the remaining space.
  // EAX: potential new object.
  // EBX: potential next object start.
  // EDX: number of context variables.
  __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
#if defined(DEBUG)
  static auto const kJumpLength = Assembler::kFarJump;
#else
  static auto const kJumpLength = Assembler::kNearJump;
#endif  // DEBUG
  __ j(ABOVE_EQUAL, slow_case, kJumpLength);
  __ CheckAllocationCanary(EAX);

  // Successfully allocated the object, now update top to point to
  // next object start and initialize the object.
  // EAX: new object.
  // EBX: next object start.
  // EDX: number of context variables.
  __ movl(Address(THR, target::Thread::top_offset()), EBX);
  // EBX: Size of allocation in bytes.
  __ subl(EBX, EAX);
  __ addl(EAX, Immediate(kHeapObjectTag));
  // Generate isolate-independent code to allow sharing between isolates.

  // Calculate the size tag.
  // EAX: new object.
  // EDX: number of context variables.
  {
    Label size_tag_overflow, done;
    __ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
    __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
    __ cmpl(EBX, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
    __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
    __ shll(EBX, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
                           target::ObjectAlignment::kObjectAlignmentLog2));
    __ jmp(&done);

    __ Bind(&size_tag_overflow);
    // Set overflow size tag value.
    __ movl(EBX, Immediate(0));

    __ Bind(&done);
    // EAX: new object.
    // EDX: number of context variables.
    // EBX: size and bit tags.
    uword tags = target::MakeTagWordForNewSpaceObject(kContextCid, 0);
    __ orl(EBX, Immediate(tags));
    __ movl(FieldAddress(EAX, target::Object::tags_offset()), EBX);  // Tags.
  }

  // Set up the number of context variables field.
  // EAX: new object.
  // EDX: number of context variables as integer value (not object).
  __ movl(FieldAddress(EAX, target::Context::num_variables_offset()), EDX);
}
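
// Illustrative sketch (not part of the original file): the rounded context
// size computed by the leal/andl pair above, in C form.
#if 0
intptr_t ContextAllocationSizeSketch(intptr_t num_variables) {
  intptr_t size = target::Context::header_size() +
                  num_variables * target::kWordSize;  // EDX scaled by TIMES_4.
  return (size + target::ObjectAlignment::kObjectAlignment - 1) &
         -target::ObjectAlignment::kObjectAlignment;
}
#endif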

// Called for inline allocation of contexts.
// Input:
//   EDX: number of context variables.
// Output:
//   EAX: new allocated Context object.
// Clobbered:
//   EBX, EDX
void StubCodeCompiler::GenerateAllocateContextStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    Label slow_case;

    GenerateAllocateContextSpaceStub(assembler, &slow_case);

    // Setup the parent field.
    // EAX: new object.
    // EDX: number of context variables.
    // No generational barrier needed, since we are storing null.
    __ StoreObjectIntoObjectNoBarrier(
        EAX, FieldAddress(EAX, target::Context::parent_offset()), NullObject());

    // Initialize the context variables.
    // EAX: new object.
    // EDX: number of context variables.
    {
      Label loop, entry;
      __ leal(EBX, FieldAddress(EAX, target::Context::variable_offset(0)));

      __ jmp(&entry, Assembler::kNearJump);
      __ Bind(&loop);
      __ decl(EDX);
      // No generational barrier needed, since we are storing null.
      __ StoreObjectIntoObjectNoBarrier(EAX, Address(EBX, EDX, TIMES_4, 0),
                                        NullObject());
      __ Bind(&entry);
      __ cmpl(EDX, Immediate(0));
      __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
    }

    // Done allocating and initializing the context.
    // EAX: new object.
    __ ret();

    __ Bind(&slow_case);
  }
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ SmiTag(EDX);
  __ pushl(EDX);
  __ CallRuntime(kAllocateContextRuntimeEntry, 1);  // Allocate context.
  __ popl(EAX);  // Pop number of context variables argument.
  __ popl(EAX);  // Pop the new context object.

  // Write-barrier elimination might be enabled for this context (depending on
  // the size). To be sure we will check if the allocated object is in old
  // space and if so call a leaf runtime to add it to the remembered set.
  EnsureIsNewOrRemembered();

  // EAX: new object
  // Restore the frame pointer.
  __ LeaveFrame();

  __ ret();
}

// Called for clone of contexts.
// Input:
//   ECX: context variable.
// Output:
//   EAX: new allocated Context object.
// Clobbered:
//   EBX, ECX, EDX
void StubCodeCompiler::GenerateCloneContextStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    Label slow_case;

    // Load the number of variables in the existing context.
    __ movl(EDX, FieldAddress(ECX, target::Context::num_variables_offset()));

    GenerateAllocateContextSpaceStub(assembler, &slow_case);

    // Setup the parent field.
    // EAX: new object.
    // ECX: old object to clone.
    __ movl(EBX, FieldAddress(ECX, target::Context::parent_offset()));
    __ StoreIntoObjectNoBarrier(
        EAX, FieldAddress(EAX, target::Context::parent_offset()), EBX);

    // Initialize the context variables.
    // EAX: new context.
    // ECX: context to clone.
    // EDX: number of context variables.
    {
      Label loop, entry;
      __ jmp(&entry, Assembler::kNearJump);

      __ Bind(&loop);
      __ decl(EDX);

      __ movl(EBX, FieldAddress(ECX, EDX, TIMES_4,
                                target::Context::variable_offset(0)));
      __ StoreIntoObjectNoBarrier(
          EAX,
          FieldAddress(EAX, EDX, TIMES_4, target::Context::variable_offset(0)),
          EBX);

      __ Bind(&entry);
      __ cmpl(EDX, Immediate(0));
      __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
    }

    // Done allocating and initializing the context.
    // EAX: new object.
    __ ret();

    __ Bind(&slow_case);
  }

  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ pushl(ECX);
  __ CallRuntime(kCloneContextRuntimeEntry, 1);  // Clone context.
  __ popl(EAX);  // Pop the context argument.
  __ popl(EAX);  // Pop the new context object.

  // Write-barrier elimination might be enabled for this context (depending on
  // the size). To be sure we will check if the allocated object is in old
  // space and if so call a leaf runtime to add it to the remembered set.
  EnsureIsNewOrRemembered();

  // EAX: new object
  // Restore the frame pointer.
  __ LeaveFrame();
  __ ret();
}

void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
  for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
    if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;

    Register reg = static_cast<Register>(i);
    intptr_t start = __ CodeSize();
    __ pushl(kWriteBarrierObjectReg);
    __ movl(kWriteBarrierObjectReg, reg);
    __ call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
    __ popl(kWriteBarrierObjectReg);
    __ ret();
    intptr_t end = __ CodeSize();

    RELEASE_ASSERT(end - start == kStoreBufferWrapperSize);
  }
}

// Helper stub to implement Assembler::StoreIntoObject/Array.
// Input parameters:
//   EDX: Object (old)
//   EBX: Value (old or new)
//   EDI: Slot
// If EBX is new, add EDX to the store buffer. Otherwise EBX is old, mark EBX
// and add it to the mark list.
COMPILE_ASSERT(kWriteBarrierObjectReg == EDX);
COMPILE_ASSERT(kWriteBarrierValueReg == EBX);
COMPILE_ASSERT(kWriteBarrierSlotReg == EDI);
static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
  // Save values being destroyed.
  __ pushl(EAX);
  __ pushl(ECX);

  Label skip_marking;
  __ movl(EAX, FieldAddress(EBX, target::Object::tags_offset()));
  __ andl(EAX, Address(THR, target::Thread::write_barrier_mask_offset()));
  __ testl(EAX, Immediate(target::UntaggedObject::kIncrementalBarrierMask));
  __ j(ZERO, &skip_marking);

  {
    // Atomically clear kNotMarkedBit.
    Label retry, is_new, done;
    __ movl(EAX, FieldAddress(EBX, target::Object::tags_offset()));
    __ Bind(&retry);
    __ movl(ECX, EAX);
    __ testl(ECX, Immediate(1 << target::UntaggedObject::kNotMarkedBit));
    __ j(ZERO, &done);  // Marked by another thread.
    __ andl(ECX, Immediate(~(1 << target::UntaggedObject::kNotMarkedBit)));
    // Cmpxchgl: compare value = implicit operand EAX, new value = ECX.
    // On failure, EAX is updated with the current value.
    __ LockCmpxchgl(FieldAddress(EBX, target::Object::tags_offset()), ECX);
    __ j(NOT_EQUAL, &retry, Assembler::kNearJump);

    __ testl(EBX,
             Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
    __ j(NOT_ZERO, &is_new);

    auto mark_stack_push = [&](intptr_t offset, const RuntimeEntry& entry) {
      __ movl(EAX, Address(THR, offset));
      __ movl(ECX, Address(EAX, target::MarkingStackBlock::top_offset()));
      __ movl(Address(EAX, ECX, TIMES_4,
                      target::MarkingStackBlock::pointers_offset()),
              EBX);
      __ incl(ECX);
      __ movl(Address(EAX, target::MarkingStackBlock::top_offset()), ECX);
      __ cmpl(ECX, Immediate(target::MarkingStackBlock::kSize));
      __ j(NOT_EQUAL, &done);

      {
        LeafRuntimeScope rt(assembler,
                            /*frame_size=*/1 * target::kWordSize,
                            /*preserve_registers=*/true);
        __ movl(Address(ESP, 0), THR);  // Push the thread as the only argument.
        rt.Call(entry, 1);
      }
    };

    mark_stack_push(target::Thread::old_marking_stack_block_offset(),
                    kOldMarkingStackBlockProcessRuntimeEntry);
    __ jmp(&done);

    __ Bind(&is_new);
    mark_stack_push(target::Thread::new_marking_stack_block_offset(),
                    kNewMarkingStackBlockProcessRuntimeEntry);

    __ Bind(&done);
  }

  Label add_to_remembered_set, remember_card;
  __ Bind(&skip_marking);
  __ movl(EAX, FieldAddress(EDX, target::Object::tags_offset()));
  __ shrl(EAX, Immediate(target::UntaggedObject::kBarrierOverlapShift));
  __ andl(EAX, FieldAddress(EBX, target::Object::tags_offset()));
  __ testl(EAX, Address(THR, target::Thread::write_barrier_mask_offset()));
  __ j(NOT_ZERO, &add_to_remembered_set, Assembler::kNearJump);
  __ popl(ECX);  // Unspill.
  __ popl(EAX);  // Unspill.
  __ ret();

  __ Bind(&add_to_remembered_set);
  if (cards) {
    __ testl(FieldAddress(EDX, target::Object::tags_offset()),
             Immediate(1 << target::UntaggedObject::kCardRememberedBit));
    __ j(NOT_ZERO, &remember_card, Assembler::kFarJump);  // Unlikely.
  } else {
#if defined(DEBUG)
    Label ok;
    __ testl(FieldAddress(EDX, target::Object::tags_offset()),
             Immediate(1 << target::UntaggedObject::kCardRememberedBit));
    __ j(ZERO, &ok, Assembler::kNearJump);
    __ Stop("Wrong barrier");
    __ Bind(&ok);
#endif
  }

  {
    // Atomically clear kOldAndNotRememberedBit.
    Label retry, done;
    __ movl(EAX, FieldAddress(EDX, target::Object::tags_offset()));
    __ Bind(&retry);
    __ movl(ECX, EAX);
    __ testl(ECX,
             Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
    __ j(ZERO, &done);  // Remembered by another thread.
    __ andl(ECX,
            Immediate(~(1 << target::UntaggedObject::kOldAndNotRememberedBit)));
    // Cmpxchgl: compare value = implicit operand EAX, new value = ECX.
    // On failure, EAX is updated with the current value.
    __ LockCmpxchgl(FieldAddress(EDX, target::Object::tags_offset()), ECX);
    __ j(NOT_EQUAL, &retry, Assembler::kNearJump);

    // Load the StoreBuffer block out of the thread. Then load top_ out of the
    // StoreBufferBlock and add the address to the pointers_.
    // Spilled: EAX, ECX
    // EDX: Address being stored
    __ movl(EAX, Address(THR, target::Thread::store_buffer_block_offset()));
    __ movl(ECX, Address(EAX, target::StoreBufferBlock::top_offset()));
    __ movl(
        Address(EAX, ECX, TIMES_4, target::StoreBufferBlock::pointers_offset()),
        EDX);

    // Increment top_ and check for overflow.
    // Spilled: EAX, ECX
    // ECX: top_
    // EAX: StoreBufferBlock
    __ incl(ECX);
    __ movl(Address(EAX, target::StoreBufferBlock::top_offset()), ECX);
    __ cmpl(ECX, Immediate(target::StoreBufferBlock::kSize));
    __ j(NOT_EQUAL, &done);

    {
      LeafRuntimeScope rt(assembler,
                          /*frame_size=*/1 * target::kWordSize,
                          /*preserve_registers=*/true);
      __ movl(Address(ESP, 0), THR);  // Push the thread as the only argument.
      rt.Call(kStoreBufferBlockProcessRuntimeEntry, 1);
    }

    __ Bind(&done);
    __ popl(ECX);
    __ popl(EAX);
    __ ret();
  }
  if (cards) {
    Label remember_card_slow;

    // Get card table.
    __ Bind(&remember_card);
    __ movl(EAX, EDX);                           // Object.
    __ andl(EAX, Immediate(target::kPageMask));  // Page.
    __ cmpl(Address(EAX, target::Page::card_table_offset()), Immediate(0));
    __ j(EQUAL, &remember_card_slow, Assembler::kNearJump);

    // Atomically dirty the card.
    __ pushl(EBX);
    __ subl(EDI, EAX);  // Offset in page.
    __ movl(EAX,
            Address(EAX, target::Page::card_table_offset()));  // Card table.
    __ movl(ECX, EDI);
    __ shrl(EDI,
            Immediate(target::Page::kBytesPerCardLog2 +
                      target::kBitsPerWordLog2));  // Word offset.
    __ shrl(ECX, Immediate(target::Page::kBytesPerCardLog2));
    __ movl(EBX, Immediate(1));
    __ shll(EBX, ECX);  // Bit mask. (Shift amount is mod 32.)
    __ lock();
    __ orl(Address(EAX, EDI, TIMES_4, 0), EBX);
    __ popl(EBX);
    __ popl(ECX);
    __ popl(EAX);
    __ ret();

    // Card table not yet allocated.
    __ Bind(&remember_card_slow);

    {
      LeafRuntimeScope rt(assembler,
                          /*frame_size=*/2 * target::kWordSize,
                          /*preserve_registers=*/true);
      __ movl(Address(ESP, 0 * target::kWordSize), EDX);  // Object
      __ movl(Address(ESP, 1 * target::kWordSize), EDI);  // Slot
      rt.Call(kRememberCardRuntimeEntry, 2);
    }
    __ popl(ECX);
    __ popl(EAX);
    __ ret();
  }
}
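
// Illustrative sketch (not part of the original file): the card-dirtying bit
// math in the remember_card path, in C form. CardTable() is a hypothetical
// accessor for the page's card table; the real store uses a locked orl.
#if 0
void DirtyCardSketch(uword object_addr, uword slot_addr) {
  uword page = object_addr & target::kPageMask;
  uword offset = slot_addr - page;  // Offset of the slot in its page.
  uword card = offset >> target::Page::kBytesPerCardLog2;
  uword word_index = card >> target::kBitsPerWordLog2;
  uword bit_mask = (uword)1 << (card & (target::kBitsPerWord - 1));
  CardTable(page)[word_index] |= bit_mask;  // Atomic in the real stub.
}
#endif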

void StubCodeCompiler::GenerateWriteBarrierStub() {
  GenerateWriteBarrierStubHelper(assembler, false);
}

void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
  GenerateWriteBarrierStubHelper(assembler, true);
}

void StubCodeCompiler::GenerateAllocateObjectStub() {
  __ int3();
}

void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
  __ int3();
}

void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
  __ int3();
}
1612// Called for inline allocation of objects.
1613// Input parameters:
1614// ESP : points to return address.
1615// AllocateObjectABI::kTypeArgumentsPos : type arguments object
1616// (only if class is parameterized).
1617// Uses AllocateObjectABI::kResultReg, EBX, ECX, EDI as temporary registers.
1618// Returns patch_code_pc offset where patching code for disabling the stub
1619// has been generated (similar to regularly generated Dart code).
1621 UnresolvedPcRelativeCalls* unresolved_calls,
1622 const Class& cls,
1623 const Code& allocate_object,
1624 const Code& allocat_object_parametrized) {
1625 const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
1626 // The generated code is different if the class is parameterized.
1627 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
1628 ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
1630 // kInlineInstanceSize is a constant used as a threshold for determining
1631 // when the object initialization should be done as a loop or as
1632 // straight line code.
1633 const int kInlineInstanceSize = 12; // In words.
1634 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
1635 ASSERT(instance_size > 0);
1636
1637 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1638 // (if is_cls_parameterized).
1639 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
1642 Label slow_case;
1643 // Allocate the object and update top to point to
1644 // next object start and initialize the allocated object.
1645 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1646 // (if is_cls_parameterized).
1648 Address(THR, target::Thread::top_offset()));
1649 __ leal(EBX, Address(AllocateObjectABI::kResultReg, instance_size));
1650 // Check if the allocation fits into the remaining space.
1651 // AllocateObjectABI::kResultReg: potential new object start.
1652 // EBX: potential next object start.
1653 __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
1654 __ j(ABOVE_EQUAL, &slow_case);
1655 __ CheckAllocationCanary(AllocateObjectABI::kResultReg);
1656 __ movl(Address(THR, target::Thread::top_offset()), EBX);
1657
1658 // AllocateObjectABI::kResultReg: new object start (untagged).
1659 // EBX: next object start.
1660 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1661 // (if is_cls_parameterized).
1662 // Set the tags.
1665 instance_size);
1666 __ movl(
1668 Immediate(tags));
1670
1671 // Initialize the remaining words of the object.
1672
1673 // AllocateObjectABI::kResultReg: new object (tagged).
1674 // EBX: next object start.
1675 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1676 // (if is_cls_parameterized).
1677 // First try inlining the initialization without a loop.
1678 if (instance_size < (kInlineInstanceSize * target::kWordSize)) {
1679 // Check if the object contains any non-header fields.
1680 // Small objects are initialized using a consecutive set of writes.
1681 for (intptr_t current_offset = target::Instance::first_field_offset();
1682 current_offset < instance_size;
1683 current_offset += target::kWordSize) {
1684 __ StoreObjectIntoObjectNoBarrier(
1686 FieldAddress(AllocateObjectABI::kResultReg, current_offset),
1687 NullObject());
1688 }
1689 } else {
1690 __ leal(ECX, FieldAddress(AllocateObjectABI::kResultReg,
1692 // Loop until the whole object is initialized.
1693 // AllocateObjectABI::kResultReg: new object (tagged).
1694 // EBX: next object start.
1695 // ECX: next word to be initialized.
1696 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1697 // (if is_cls_parameterized).
1698 Label loop;
1699 __ Bind(&loop);
1700 for (intptr_t offset = 0; offset < target::kObjectAlignment;
1702 __ StoreObjectIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
1703 Address(ECX, offset), NullObject());
1704 }
1705 // Safe to only check every kObjectAlignment bytes instead of each word.
1707 __ addl(ECX, Immediate(target::kObjectAlignment));
1708 __ cmpl(ECX, EBX);
1709 __ j(UNSIGNED_LESS, &loop);
1710 __ WriteAllocationCanary(EBX); // Fix overshoot.
1711 }
1712 if (is_cls_parameterized) {
1713 // AllocateObjectABI::kResultReg: new object (tagged).
1714 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments.
1715 // Set the type arguments in the new object.
1716 const intptr_t offset = target::Class::TypeArgumentsFieldOffset(cls);
1717 __ StoreIntoObjectNoBarrier(
1718 AllocateObjectABI::kResultReg,
1719 FieldAddress(AllocateObjectABI::kResultReg, offset),
1720 AllocateObjectABI::kTypeArgumentsReg);
1721 }
1722 // Done allocating and initializing the instance.
1723 // AllocateObjectABI::kResultReg: new object (tagged).
1724 __ ret();
1725
1726 __ Bind(&slow_case);
1727 }
1728 // If is_cls_parameterized:
1729 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments.
1730 // Create a stub frame as we are pushing some objects on the stack before
1731 // calling into the runtime.
1732 __ EnterStubFrame();
1733 __ pushl(raw_null); // Setup space on stack for return value.
1734 __ PushObject(
1735 CastHandle<Object>(cls)); // Push class of object to be allocated.
1736 if (is_cls_parameterized) {
1737 // Push type arguments of object to be allocated.
1738 __ pushl(AllocateObjectABI::kTypeArgumentsReg);
1739 } else {
1740 __ pushl(raw_null); // Push null type arguments.
1741 }
1742 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
1743 __ popl(AllocateObjectABI::kResultReg); // Drop type arguments.
1744 __ popl(AllocateObjectABI::kResultReg); // Drop class.
1745 __ popl(AllocateObjectABI::kResultReg); // Pop allocated object.
1746
1747 if (AllocateObjectInstr::WillAllocateNewOrRemembered(cls)) {
1748 // Write-barrier elimination is enabled for [cls] and we therefore need to
1749 // ensure that the object is in new-space or has its remembered bit set.
1750 EnsureIsNewOrRemembered();
1751 }
1752
1753 // AllocateObjectABI::kResultReg: new object
1754 // Restore the frame pointer.
1755 __ LeaveFrame();
1756 __ ret();
1757}
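// For illustration only (not part of the stub): a host-side C++ sketch of the
// bump-pointer fast path emitted above. The helper and its parameters are
// hypothetical; `top` and `end` mirror the Thread fields the stub loads.
#if 0
// Returns a tagged pointer, or 0 when the caller must take the slow path.
uintptr_t TryAllocate(uintptr_t* top, uintptr_t* end, intptr_t instance_size,
                      uintptr_t tags) {
  uintptr_t result = *top;
  uintptr_t next = result + instance_size;
  if (next >= *end) return 0;                    // cmpl/j(ABOVE_EQUAL) above.
  *top = next;                                   // Bump the allocation top.
  *reinterpret_cast<uintptr_t*>(result) = tags;  // First word is the tag word.
  return result + kHeapObjectTag;                // Heap pointers are tagged.
}
#endif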
1758
1759 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
1760 // from the entry code of a Dart function after an error in the passed
1761 // argument name or number is detected.
1762// Input parameters:
1763// ESP : points to return address.
1764// ESP + 4 : address of last argument.
1765// EDX : arguments descriptor array.
1766// Uses EAX, EBX, EDI as temporary registers.
1767void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
1768 __ EnterStubFrame();
1769
1770 // Load the receiver.
1771 __ movl(EDI, FieldAddress(EDX, target::ArgumentsDescriptor::size_offset()));
1772 __ movl(EAX,
1773 Address(EBP, EDI, TIMES_2,
1774 target::frame_layout.param_end_from_fp * target::kWordSize));
1775
1776 // Load the function.
1777 __ movl(EBX, FieldAddress(EAX, target::Closure::function_offset()));
1778
1779 __ pushl(Immediate(0)); // Setup space on stack for result from noSuchMethod.
1780 __ pushl(EAX); // Receiver.
1781 __ pushl(EBX); // Function.
1782 __ pushl(EDX); // Arguments descriptor array.
1783
1784 // Adjust arguments count.
1785 __ cmpl(
1786 FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
1787 Immediate(0));
1788 __ movl(EDX, EDI);
1789 Label args_count_ok;
1790 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
1791 __ addl(EDX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
1792 __ Bind(&args_count_ok);
1793
1794 // EDX: Smi-tagged arguments array length.
1795 PushArrayOfArguments(assembler);
1796
1797 const intptr_t kNumArgs = 4;
1798 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
1799 // noSuchMethod on closures always throws an error, so it will never return.
1800 __ int3();
1801}
1802
1803 // Cannot use the function object from the ICData, as it may be the inlined
1804 // function and not the top-scope function.
1805 void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
1806 Register ic_reg = ECX;
1807 Register func_reg = EAX;
1808 if (FLAG_trace_optimized_ic_calls) {
1809 __ EnterStubFrame();
1810 __ pushl(func_reg); // Preserve.
1811 __ pushl(ic_reg); // Preserve.
1812 __ pushl(ic_reg); // Argument.
1813 __ pushl(func_reg); // Argument.
1814 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
1815 __ popl(EAX); // Discard argument.
1816 __ popl(EAX); // Discard argument.
1817 __ popl(ic_reg); // Restore.
1818 __ popl(func_reg); // Restore.
1819 __ LeaveFrame();
1820 }
1821 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
1822}
1823
1824 // Loads function into 'temp_reg'.
1825 void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
1826 if (FLAG_optimization_counter_threshold >= 0) {
1827 Register func_reg = temp_reg;
1828 ASSERT(func_reg != IC_DATA_REG);
1829 __ Comment("Increment function counter");
1830 __ movl(func_reg,
1831 FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
1832 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
1833 }
1834}
1835
1836// Note: ECX must be preserved.
1837// Attempt a quick Smi operation for known operations ('kind'). The ICData
1838// must have been primed with a Smi/Smi check that will be used for counting
1839// the invocations.
1840static void EmitFastSmiOp(Assembler* assembler,
1841 Token::Kind kind,
1842 intptr_t num_args,
1843 Label* not_smi_or_overflow) {
1844 __ Comment("Fast Smi op");
1845 ASSERT(num_args == 2);
1846 __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // Left
1847 __ movl(EDI, Address(ESP, +1 * target::kWordSize)); // Right
1848 __ movl(EBX, EDI);
1849 __ orl(EBX, EAX);
1850 __ testl(EBX, Immediate(kSmiTagMask));
1851 __ j(NOT_ZERO, not_smi_or_overflow, Assembler::kNearJump);
1852 switch (kind) {
1853 case Token::kADD: {
1854 __ addl(EAX, EDI);
1855 __ j(OVERFLOW, not_smi_or_overflow, Assembler::kNearJump);
1856 break;
1857 }
1858 case Token::kLT: {
1859 Label done, is_true;
1860 __ cmpl(EAX, EDI);
1861 __ setcc(GREATER_EQUAL, AL);
1862 __ movzxb(EAX, AL); // EAX := EAX < EDI ? 0 : 1
1863 __ movl(EAX,
1864 Address(THR, EAX, TIMES_4, target::Thread::bool_true_offset()));
1865 ASSERT(target::Thread::bool_true_offset() + 4 ==
1866 target::Thread::bool_false_offset());
1867 break;
1868 }
1869 case Token::kEQ: {
1870 Label done, is_true;
1871 __ cmpl(EAX, EDI);
1872 __ setcc(NOT_EQUAL, AL);
1873 __ movzxb(EAX, AL); // EAX := EAX == EDI ? 0 : 1
1874 __ movl(EAX,
1875 Address(THR, EAX, TIMES_4, target::Thread::bool_true_offset()));
1876 ASSERT(target::Thread::bool_true_offset() + 4 ==
1877 target::Thread::bool_false_offset());
1878 break;
1879 }
1880 default:
1881 UNIMPLEMENTED();
1882 }
1883
1884 // ECX: IC data object.
1885 __ movl(EBX, FieldAddress(ECX, target::ICData::entries_offset()));
1886 // EBX: ic_data_array with check entries: classes and target functions.
1887 __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
1888#if defined(DEBUG)
1889 // Check that first entry is for Smi/Smi.
1890 Label error, ok;
1891 const Immediate& imm_smi_cid = Immediate(target::ToRawSmi(kSmiCid));
1892 __ cmpl(Address(EBX, 0 * target::kWordSize), imm_smi_cid);
1893 __ j(NOT_EQUAL, &error, Assembler::kNearJump);
1894 __ cmpl(Address(EBX, 1 * target::kWordSize), imm_smi_cid);
1895 __ j(EQUAL, &ok, Assembler::kNearJump);
1896 __ Bind(&error);
1897 __ Stop("Incorrect IC data");
1898 __ Bind(&ok);
1899#endif
1900 if (FLAG_optimization_counter_threshold >= 0) {
1901 const intptr_t count_offset =
1902 target::ICData::CountIndexFor(num_args) * target::kWordSize;
1903 // Update counter, ignore overflow.
1904 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
1905 }
1906 __ ret();
1907}
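// For illustration only (not part of the stub): the fast path above tests
// both operands at once, since a value is a Smi exactly when its low tag bit
// is clear. A portable C++ sketch (kSmiTagMask == 1):
#if 0
bool BothSmis(intptr_t left, intptr_t right) {
  return ((left | right) & kSmiTagMask) == 0;  // The orl/testl pair above.
}

// Tagged Smi addition: adding two tagged values adds the untagged payloads
// (the tag bit is 0), but the stub must still bail out on signed overflow.
bool TryTaggedSmiAdd(int32_t left, int32_t right, int32_t* out) {
  return !__builtin_add_overflow(left, right, out);  // GCC/Clang builtin.
}
#endif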
1908
1909// Generate inline cache check for 'num_args'.
1910// EBX: receiver (if instance call)
1911// ECX: ICData
1912// ESP[0]: return address
1913// Control flow:
1914// - If receiver is null -> jump to IC miss.
1915// - If receiver is Smi -> load Smi class.
1916// - If receiver is not-Smi -> load receiver's class.
1917// - Check if 'num_args' (including receiver) match any IC data group.
1918// - Match found -> jump to target.
1919// - Match not found -> jump to IC miss.
1920 void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
1921 intptr_t num_args,
1922 const RuntimeEntry& handle_ic_miss,
1923 Token::Kind kind,
1924 Optimized optimized,
1925 CallType type,
1926 Exactness exactness) {
1927 GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
1928 optimized, type, exactness,
1929 CodeEntryKind::kNormal);
1930 __ BindUncheckedEntryPoint();
1931 GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
1932 optimized, type, exactness,
1933 CodeEntryKind::kUnchecked);
1934}
1935
1936 void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
1937 intptr_t num_args,
1938 const RuntimeEntry& handle_ic_miss,
1939 Token::Kind kind,
1940 Optimized optimized,
1941 CallType type,
1942 Exactness exactness,
1943 CodeEntryKind entry_kind) {
1944 if (optimized == kOptimized) {
1945 GenerateOptimizedUsageCounterIncrement();
1946 } else {
1947 GenerateUsageCounterIncrement(/* scratch */ EAX);
1948 }
1949
1950 ASSERT(num_args == 1 || num_args == 2);
1951#if defined(DEBUG)
1952 {
1953 Label ok;
1954 // Check that the IC data array has NumArgsTested() == num_args.
1955 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
1956 __ movl(EAX, FieldAddress(ECX, target::ICData::state_bits_offset()));
1957 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
1958 __ andl(EAX, Immediate(target::ICData::NumArgsTestedMask()));
1959 __ cmpl(EAX, Immediate(num_args));
1960 __ j(EQUAL, &ok, Assembler::kNearJump);
1961 __ Stop("Incorrect stub for IC data");
1962 __ Bind(&ok);
1963 }
1964#endif // DEBUG
1965
1966#if !defined(PRODUCT)
1967 Label stepping, done_stepping;
1968 if (optimized == kUnoptimized) {
1969 __ Comment("Check single stepping");
1970 __ LoadIsolate(EAX);
1971 __ cmpb(Address(EAX, target::Isolate::single_step_offset()), Immediate(0));
1972 __ j(NOT_EQUAL, &stepping);
1973 __ Bind(&done_stepping);
1974 }
1975#endif
1976 Label not_smi_or_overflow;
1977 if (kind != Token::kILLEGAL) {
1978 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
1979 }
1980 __ Bind(&not_smi_or_overflow);
1981
1982 __ Comment("Extract ICData initial values and receiver cid");
1983 // ECX: IC data object (preserved).
1984 // Load arguments descriptor into EDX.
1985 __ movl(
1986 ARGS_DESC_REG,
1987 FieldAddress(ECX, target::CallSiteData::arguments_descriptor_offset()));
1988 // Loop that checks if there is an IC data match.
1989 Label loop, found, miss;
1990 // ECX: IC data object (preserved).
1991 __ movl(EBX, FieldAddress(ECX, target::ICData::entries_offset()));
1992 // EBX: ic_data_array with check entries: classes and target functions.
1993 __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
1994 // EBX: points directly to the first ic data array element.
1995
1996 // Get argument descriptor into EAX. In the 1-argument case this is the
1997 // last time we need the argument descriptor, and we reuse EAX for the
1998 // class IDs from the IC descriptor. In the 2-argument case we preserve
1999 // the argument descriptor in EAX.
2000 __ movl(EAX, FieldAddress(ARGS_DESC_REG,
2001 target::ArgumentsDescriptor::count_offset()));
2002 if (num_args == 1) {
2003 // Load receiver into EDI.
2004 __ movl(EDI,
2005 Address(ESP, EAX, TIMES_2, 0)); // EAX (argument count) is Smi.
2006 __ LoadTaggedClassIdMayBeSmi(EAX, EDI);
2007 // EAX: receiver class ID as Smi.
2008 }
2009
2010 __ Comment("ICData loop");
2011
2012 // Unroll the generic variant (kind == Token::kILLEGAL), which is only
2012 // generated once, more than the others.
2013 bool optimize = kind == Token::kILLEGAL;
2014 const intptr_t target_offset =
2015 target::ICData::TargetIndexFor(num_args) * target::kWordSize;
2016 const intptr_t count_offset =
2017 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2018 const intptr_t exactness_offset =
2019 target::ICData::ExactnessIndexFor(num_args) * target::kWordSize;
2020 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
2021 num_args, exactness == kCheckExactness) *
2022 target::kWordSize;
2023
2024 __ Bind(&loop);
2025 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
2026 Label update;
2027 if (num_args == 1) {
2028 __ movl(EDI, Address(EBX, 0));
2029 __ cmpl(EDI, EAX); // Class id match?
2030 __ j(EQUAL, &found); // Break.
2031 __ addl(EBX, Immediate(entry_size)); // Next entry.
2032 __ cmpl(EDI, Immediate(target::ToRawSmi(kIllegalCid))); // Done?
2033 } else {
2034 ASSERT(num_args == 2);
2035 // Load receiver into EDI.
2036 __ movl(EDI, Address(ESP, EAX, TIMES_2, 0));
2037 __ LoadTaggedClassIdMayBeSmi(EDI, EDI);
2038 __ cmpl(EDI, Address(EBX, 0)); // Class id match?
2039 __ j(NOT_EQUAL, &update); // Continue.
2040
2041 // Load second argument into EDI.
2042 __ movl(EDI, Address(ESP, EAX, TIMES_2, -target::kWordSize));
2043 __ LoadTaggedClassIdMayBeSmi(EDI, EDI);
2044 __ cmpl(EDI, Address(EBX, target::kWordSize)); // Class id match?
2045 __ j(EQUAL, &found); // Break.
2046
2047 __ Bind(&update);
2048 __ addl(EBX, Immediate(entry_size)); // Next entry.
2049 __ cmpl(Address(EBX, -entry_size),
2050 Immediate(target::ToRawSmi(kIllegalCid))); // Done?
2051 }
2052
2053 if (unroll == 0) {
2054 __ j(NOT_EQUAL, &loop);
2055 } else {
2056 __ j(EQUAL, &miss);
2057 }
2058 }
2059
2060 __ Bind(&miss);
2061 __ Comment("IC miss");
2062 // Compute address of arguments (first read number of arguments from
2063 // arguments descriptor array and then compute address on the stack).
2064 __ movl(EAX, FieldAddress(ARGS_DESC_REG,
2065 target::ArgumentsDescriptor::count_offset()));
2066 __ leal(EAX, Address(ESP, EAX, TIMES_2, 0)); // EAX is Smi.
2067 // Create a stub frame as we are pushing some objects on the stack before
2068 // calling into the runtime.
2069 __ EnterStubFrame();
2070 __ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
2071 __ pushl(ECX); // Preserve IC data object.
2072 __ pushl(Immediate(0)); // Result slot.
2073 // Push call arguments.
2074 for (intptr_t i = 0; i < num_args; i++) {
2075 __ movl(EBX, Address(EAX, -target::kWordSize * i));
2076 __ pushl(EBX);
2077 }
2078 __ pushl(ECX); // Pass IC data object.
2079 __ CallRuntime(handle_ic_miss, num_args + 1);
2080 // Remove the call arguments pushed earlier, including the IC data object.
2081 for (intptr_t i = 0; i < num_args + 1; i++) {
2082 __ popl(EAX);
2083 }
2084 __ popl(FUNCTION_REG); // Pop returned function object into EAX.
2085 __ popl(ECX); // Restore IC data array.
2086 __ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
2087 __ LeaveFrame();
2088 Label call_target_function;
2089 ASSERT(!FLAG_precompiled_mode);
2090 __ jmp(&call_target_function);
2091
2092 __ Bind(&found);
2093 // EBX: Pointer to an IC data check group.
2094 Label call_target_function_through_unchecked_entry;
2095 if (exactness == kCheckExactness) {
2096 Label exactness_ok;
2097 ASSERT(num_args == 1);
2098 __ movl(EDI, Address(EBX, exactness_offset));
2099 __ cmpl(EDI, Immediate(target::ToRawSmi(
2100 StaticTypeExactnessState::HasExactSuperType().Encode())));
2101 __ j(LESS, &exactness_ok);
2102 __ j(EQUAL, &call_target_function_through_unchecked_entry);
2103
2104 // Check trivial exactness.
2105 // Note: UntaggedICData::receivers_static_type_ is guaranteed to be not null
2106 // because we only emit calls to this stub when it is not null.
2107 __ movl(EAX, FieldAddress(ARGS_DESC_REG,
2108 target::ArgumentsDescriptor::count_offset()));
2109 __ movl(EAX, Address(ESP, EAX, TIMES_2, 0)); // Receiver
2110 // EDI contains an offset to type arguments in words as a smi,
2111 // hence TIMES_2. EAX is guaranteed to be non-smi because it is expected
2112 // to have type arguments.
2113 __ movl(EDI,
2114 FieldAddress(EAX, EDI, TIMES_2, 0)); // Receiver's type arguments
2115 __ movl(EAX,
2116 FieldAddress(ECX, target::ICData::receivers_static_type_offset()));
2117 __ cmpl(EDI, FieldAddress(EAX, target::Type::arguments_offset()));
2118 __ j(EQUAL, &call_target_function_through_unchecked_entry);
2119
2120 // Update exactness state (not-exact anymore).
2121 __ movl(Address(EBX, exactness_offset),
2122 Immediate(target::ToRawSmi(
2123 StaticTypeExactnessState::NotExact().Encode())));
2124 __ Bind(&exactness_ok);
2125 }
2126
2127 if (FLAG_optimization_counter_threshold >= 0) {
2128 __ Comment("Update caller's counter");
2129 // Ignore overflow.
2130 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
2131 }
2132
2133 __ movl(FUNCTION_REG, Address(EBX, target_offset));
2134 __ Bind(&call_target_function);
2135 __ Comment("Call target");
2136 // EAX: Target function.
2137 __ jmp(FieldAddress(FUNCTION_REG,
2138 target::Function::entry_point_offset(entry_kind)));
2139
2140 if (exactness == kCheckExactness) {
2141 __ Bind(&call_target_function_through_unchecked_entry);
2142 if (FLAG_optimization_counter_threshold >= 0) {
2143 __ Comment("Update ICData counter");
2144 // Ignore overflow.
2145 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
2146 }
2147 __ Comment("Call target (via unchecked entry point)");
2148 __ LoadCompressed(FUNCTION_REG, Address(EBX, target_offset));
2149 __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset(
2150 CodeEntryKind::kUnchecked)));
2151 }
2152
2153#if !defined(PRODUCT)
2154 if (optimized == kUnoptimized) {
2155 __ Bind(&stepping);
2156 __ EnterStubFrame();
2157 __ pushl(EBX); // Preserve receiver.
2158 __ pushl(ECX); // Preserve ICData.
2159 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2160 __ popl(ECX); // Restore ICData.
2161 __ popl(EBX); // Restore receiver.
2162 __ LeaveFrame();
2163 __ jmp(&done_stepping);
2164 }
2165#endif
2166}
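// For illustration only (not part of the stub): the ICData entry array
// scanned above is a flat array of fixed-length entries - num_args class ids,
// the target function, a call count and (optionally) an exactness slot -
// terminated by a kIllegalCid sentinel in the first slot. A sketch of the
// one-argument scan (Smi tagging elided):
#if 0
intptr_t* FindICEntry(intptr_t* entries, intptr_t entry_length, intptr_t cid) {
  for (intptr_t* entry = entries; entry[0] != kIllegalCid;
       entry += entry_length) {
    if (entry[0] == cid) return entry;  // Hit: jump to this entry's target.
  }
  return nullptr;  // Miss: ask the runtime to extend the ICData.
}
#endif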
2167
2168// EBX: receiver
2169// ECX: ICData
2170// ESP[0]: return address
2171void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
2172 GenerateNArgsCheckInlineCacheStub(
2173 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2174 kUnoptimized, kInstanceCall, kIgnoreExactness);
2175}
2176
2177// EBX: receiver
2178// ECX: ICData
2179// ESP[0]: return address
2180void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
2181 GenerateNArgsCheckInlineCacheStub(
2182 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2183 kUnoptimized, kInstanceCall, kCheckExactness);
2184}
2185
2186void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
2187 __ Stop("Unimplemented");
2188}
2189
2190void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
2191 __ Stop("Unimplemented");
2192}
2193
2194// EBX: receiver
2195// ECX: ICData
2196// ESP[0]: return address
2197void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
2198 GenerateNArgsCheckInlineCacheStub(
2199 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2200 kUnoptimized, kInstanceCall, kIgnoreExactness);
2201}
2202
2203// EBX: receiver
2204// ECX: ICData
2205// ESP[0]: return address
2206void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
2207 GenerateNArgsCheckInlineCacheStub(
2208 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
2209 kInstanceCall, kIgnoreExactness);
2210}
2211
2212// EBX: receiver
2213// ECX: ICData
2214// ESP[0]: return address
2215void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
2216 GenerateNArgsCheckInlineCacheStub(
2217 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
2218 kInstanceCall, kIgnoreExactness);
2219}
2220
2221// EBX: receiver
2222// ECX: ICData
2223// ESP[0]: return address
2224void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
2225 GenerateNArgsCheckInlineCacheStub(
2226 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
2227 kInstanceCall, kIgnoreExactness);
2228}
2229
2230// EBX: receiver
2231// ECX: ICData
2232// EAX: Function
2233// ESP[0]: return address
2234void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
2235 GenerateNArgsCheckInlineCacheStub(
2236 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2237 kInstanceCall, kIgnoreExactness);
2238}
2239
2240// EBX: receiver
2241// ECX: ICData
2242// EAX: Function
2243// ESP[0]: return address
2244void StubCodeCompiler::
2245 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
2246 GenerateNArgsCheckInlineCacheStub(
2247 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2248 kInstanceCall, kCheckExactness);
2249}
2250
2251// EBX: receiver
2252// ECX: ICData
2253// EAX: Function
2254// ESP[0]: return address
2255void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
2256 GenerateNArgsCheckInlineCacheStub(
2257 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2258 kOptimized, kInstanceCall, kIgnoreExactness);
2259}
2260
2261// ECX: ICData
2262// ESP[0]: return address
2263static void GenerateZeroArgsUnoptimizedStaticCallForEntryKind(
2264 StubCodeCompiler* stub_code_compiler,
2265 CodeEntryKind entry_kind) {
2266 stub_code_compiler->GenerateUsageCounterIncrement(/* scratch */ EAX);
2267 auto* const assembler = stub_code_compiler->assembler;
2268
2269#if defined(DEBUG)
2270 {
2271 Label ok;
2272 // Check that the IC data array has NumArgsTested() == num_args.
2273 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2274 __ movl(EBX, FieldAddress(ECX, target::ICData::state_bits_offset()));
2275 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2276 __ andl(EBX, Immediate(target::ICData::NumArgsTestedMask()));
2277 __ cmpl(EBX, Immediate(0));
2278 __ j(EQUAL, &ok, Assembler::kNearJump);
2279 __ Stop("Incorrect IC data for unoptimized static call");
2280 __ Bind(&ok);
2281 }
2282#endif // DEBUG
2283
2284#if !defined(PRODUCT)
2285 // Check single stepping.
2286 Label stepping, done_stepping;
2287 __ LoadIsolate(EAX);
2288 __ cmpb(Address(EAX, target::Isolate::single_step_offset()), Immediate(0));
2289 __ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
2290 __ Bind(&done_stepping);
2291#endif
2292
2293 // ECX: IC data object (preserved).
2294 __ movl(EBX, FieldAddress(ECX, target::ICData::entries_offset()));
2295 // EBX: ic_data_array with entries: target functions and count.
2296 __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
2297 // EBX: points directly to the first ic data array element.
2298 const intptr_t target_offset =
2299 target::ICData::TargetIndexFor(0) * target::kWordSize;
2300 const intptr_t count_offset =
2301 target::ICData::CountIndexFor(0) * target::kWordSize;
2302
2303 if (FLAG_optimization_counter_threshold >= 0) {
2304 // Increment count for this call, ignore overflow.
2305 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
2306 }
2307
2308 // Load arguments descriptor into EDX.
2309 __ movl(
2310 ARGS_DESC_REG,
2311 FieldAddress(ECX, target::CallSiteData::arguments_descriptor_offset()));
2312
2313 // Get function and call it, if possible.
2314 __ movl(FUNCTION_REG, Address(EBX, target_offset));
2315 __ jmp(FieldAddress(FUNCTION_REG,
2316 target::Function::entry_point_offset(entry_kind)));
2317
2318#if !defined(PRODUCT)
2319 __ Bind(&stepping);
2320 __ EnterStubFrame();
2321 __ pushl(ECX);
2322 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2323 __ popl(ECX);
2324 __ LeaveFrame();
2325 __ jmp(&done_stepping, Assembler::kNearJump);
2326#endif
2327}
2328
2329void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
2330 GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
2331 CodeEntryKind::kNormal);
2332 __ BindUncheckedEntryPoint();
2333 GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
2334 CodeEntryKind::kUnchecked);
2335}
2336
2337// ECX: ICData
2338// ESP[0]: return address
2339void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
2340 GenerateNArgsCheckInlineCacheStub(
2341 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2342 kUnoptimized, kStaticCall, kIgnoreExactness);
2343}
2344
2345// ECX: ICData
2346// ESP[0]: return address
2347void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
2348 GenerateNArgsCheckInlineCacheStub(
2349 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2350 kUnoptimized, kStaticCall, kIgnoreExactness);
2351}
2352
2353// Stub for compiling a function and jumping to the compiled code.
2354// ARGS_DESC_REG: Arguments descriptor.
2355// FUNCTION_REG: Function.
2356void StubCodeCompiler::GenerateLazyCompileStub() {
2357 __ EnterStubFrame();
2358 __ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
2359 __ pushl(FUNCTION_REG); // Pass function.
2360 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
2361 __ popl(FUNCTION_REG); // Restore function.
2362 __ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
2363 __ LeaveFrame();
2364
2365 __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2366}
2367
2368// ECX: Contains an ICData.
2369void StubCodeCompiler::GenerateICCallBreakpointStub() {
2370#if defined(PRODUCT)
2371 __ Stop("No debugging in PRODUCT mode");
2372#else
2373 __ EnterStubFrame();
2374 __ pushl(EBX); // Preserve receiver.
2375 __ pushl(ECX); // Preserve ICData.
2376 __ pushl(Immediate(0)); // Room for result.
2377 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2378 __ popl(EAX); // Code of original stub.
2379 __ popl(ECX); // Restore ICData.
2380 __ popl(EBX); // Restore receiver.
2381 __ LeaveFrame();
2382 // Jump to original stub.
2383 __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
2384#endif // defined(PRODUCT)
2385}
2386
2387void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
2388#if defined(PRODUCT)
2389 __ Stop("No debugging in PRODUCT mode");
2390#else
2391 __ EnterStubFrame();
2392 __ pushl(ECX); // Preserve ICData.
2393 __ pushl(Immediate(0)); // Room for result.
2394 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2395 __ popl(EAX); // Code of original stub.
2396 __ popl(ECX); // Restore ICData.
2397 __ LeaveFrame();
2398 // Jump to original stub.
2399 __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
2400#endif // defined(PRODUCT)
2401}
2402
2403void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
2404#if defined(PRODUCT)
2405 __ Stop("No debugging in PRODUCT mode");
2406#else
2407 __ EnterStubFrame();
2408 // Room for result. Debugger stub returns address of the
2409 // unpatched runtime stub.
2410 __ pushl(Immediate(0)); // Room for result.
2411 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2412 __ popl(EAX); // Code of the original stub
2413 __ LeaveFrame();
2414 // Jump to original stub.
2415 __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
2416#endif // defined(PRODUCT)
2417}
2418
2419// Called only from unoptimized code.
2420void StubCodeCompiler::GenerateDebugStepCheckStub() {
2421#if defined(PRODUCT)
2422 __ Stop("No debugging in PRODUCT mode");
2423#else
2424 // Check single stepping.
2425 Label stepping, done_stepping;
2426 __ LoadIsolate(EAX);
2427 __ movzxb(EAX, Address(EAX, target::Isolate::single_step_offset()));
2428 __ cmpl(EAX, Immediate(0));
2429 __ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
2430 __ Bind(&done_stepping);
2431 __ ret();
2432
2433 __ Bind(&stepping);
2434 __ EnterStubFrame();
2435 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2436 __ LeaveFrame();
2437 __ jmp(&done_stepping, Assembler::kNearJump);
2438#endif // defined(PRODUCT)
2439}
2440
2441 // Constants used for generating subtype test cache lookup stubs.
2442 // We represent the depth of a value as its distance from the top of the
2443 // stack at the start of the stub. That is, depths for input values are
2444 // non-negative and depths for values pushed during the stub are negative.
2445
2446struct STCInternal : AllStatic {
2447 // Used to initialize depths for conditionally-pushed values.
2448 static constexpr intptr_t kNoDepth = kIntptrMin;
2449
2450 // These inputs are always on the stack when the SubtypeNTestCacheStub is
2451 // called. These absolute depths will be converted to relative depths within
2452 // the stub to compensate for additional pushed values.
2453 static constexpr intptr_t kFunctionTypeArgumentsDepth = 1;
2454 static constexpr intptr_t kInstantiatorTypeArgumentsDepth = 2;
2455 static constexpr intptr_t kDestinationTypeDepth = 3;
2456 static constexpr intptr_t kInstanceDepth = 4;
2457 static constexpr intptr_t kCacheDepth = 5;
2458
2459 // Non-stack values are stored in non-kInstanceReg registers from TypeTestABI.
2460 static constexpr Register kCacheArrayReg =
2461 TypeTestABI::kInstantiatorTypeArgumentsReg;
2462 static constexpr Register kScratchReg = TypeTestABI::kSubtypeTestCacheReg;
2463 static constexpr Register kInstanceCidOrSignatureReg =
2464 TypeTestABI::kFunctionTypeArgumentsReg;
2465 static constexpr Register kInstanceInstantiatorTypeArgumentsReg =
2466 TypeTestABI::kDstTypeReg;
2467};
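// For illustration only (not part of the stub): a depth d addresses the word
// at ESP + d * kWordSize on entry (the return address sits at depth 0). Once
// the stub has pushed k extra words, the same value is reached by adding
// k (== original_tos_offset below) to its depth, which is exactly what the
// compare_to_stack() helper in the loop generator does. A sketch:
#if 0
uintptr_t AddressForDepth(uintptr_t esp, intptr_t depth,
                          intptr_t pushed_words) {
  return esp + (depth + pushed_words) * target::kWordSize;  // 4 bytes on IA32.
}
#endif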
2468
2469static void GenerateSubtypeTestCacheLoop(
2470 Assembler* assembler,
2471 int n,
2472 intptr_t original_tos_offset,
2473 intptr_t parent_function_type_args_depth,
2474 intptr_t delayed_type_args_depth,
2475 Label* found,
2476 Label* not_found,
2477 Label* next_iteration) {
2478 const auto& raw_null = Immediate(target::ToRawPointer(NullObject()));
2479
2480 // Compares a value at the given depth from the stack to the value in src.
2481 auto compare_to_stack = [&](Register src, intptr_t depth) {
2482 ASSERT(original_tos_offset + depth >= 0);
2483 __ CompareToStack(src, original_tos_offset + depth);
2484 };
2485
2486 __ LoadAcquireCompressedFromOffset(
2487 STCInternal::kScratchReg, STCInternal::kCacheArrayReg,
2488 target::kCompressedWordSize *
2489 target::SubtypeTestCache::kInstanceCidOrSignature);
2490 __ cmpl(STCInternal::kScratchReg, raw_null);
2491 __ j(EQUAL, not_found, Assembler::kNearJump);
2492 __ cmpl(STCInternal::kScratchReg, STCInternal::kInstanceCidOrSignatureReg);
2493 if (n == 1) {
2494 __ j(EQUAL, found, Assembler::kNearJump);
2495 return;
2496 }
2497 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2498 __ cmpl(STCInternal::kInstanceInstantiatorTypeArgumentsReg,
2499 Address(STCInternal::kCacheArrayReg,
2500 target::kCompressedWordSize *
2501 target::SubtypeTestCache::kInstanceTypeArguments));
2502 if (n == 2) {
2503 __ j(EQUAL, found, Assembler::kNearJump);
2504 return;
2505 }
2506 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2507 __ movl(STCInternal::kScratchReg,
2508 Address(STCInternal::kCacheArrayReg,
2509 target::kCompressedWordSize *
2510 target::SubtypeTestCache::kInstantiatorTypeArguments));
2511 compare_to_stack(STCInternal::kScratchReg,
2512 STCInternal::kInstantiatorTypeArgumentsDepth);
2513 if (n == 3) {
2514 __ j(EQUAL, found, Assembler::kNearJump);
2515 return;
2516 }
2517 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2518 __ movl(STCInternal::kScratchReg,
2519 Address(STCInternal::kCacheArrayReg,
2520 target::kCompressedWordSize *
2521 target::SubtypeTestCache::kFunctionTypeArguments));
2522 compare_to_stack(STCInternal::kScratchReg,
2523 STCInternal::kFunctionTypeArgumentsDepth);
2524 if (n == 4) {
2525 __ j(EQUAL, found, Assembler::kNearJump);
2526 return;
2527 }
2528 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2529 __ movl(
2530 STCInternal::kScratchReg,
2531 Address(
2532 STCInternal::kCacheArrayReg,
2533 target::kCompressedWordSize *
2534 target::SubtypeTestCache::kInstanceParentFunctionTypeArguments));
2535 compare_to_stack(STCInternal::kScratchReg, parent_function_type_args_depth);
2536 if (n == 5) {
2537 __ j(EQUAL, found, Assembler::kNearJump);
2538 return;
2539 }
2540 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2541 __ movl(
2542 STCInternal::kScratchReg,
2543 Address(
2544 STCInternal::kCacheArrayReg,
2545 target::kCompressedWordSize *
2546 target::SubtypeTestCache::kInstanceDelayedFunctionTypeArguments));
2547 compare_to_stack(STCInternal::kScratchReg, delayed_type_args_depth);
2548 if (n == 6) {
2549 __ j(EQUAL, found, Assembler::kNearJump);
2550 return;
2551 }
2552 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2553 __ movl(
2554 STCInternal::kScratchReg,
2555 Address(STCInternal::kCacheArrayReg,
2556 target::kCompressedWordSize * target::SubtypeTestCache::kDestinationType));
2557 compare_to_stack(STCInternal::kScratchReg,
2558 STCInternal::kDestinationTypeDepth);
2559 __ j(EQUAL, found, Assembler::kNearJump);
2560}
2561
2562// Used to check class and type arguments. Arguments passed on stack:
2563// TOS + 0: return address.
2564// TOS + 1: function type arguments (only used if n >= 4, can be raw_null).
2565// TOS + 2: instantiator type arguments (only used if n >= 3, can be raw_null).
2566// TOS + 3: destination_type (only used if n >= 7).
2567// TOS + 4: instance.
2568// TOS + 5: SubtypeTestCache.
2569//
2570// No registers are preserved by this stub.
2571//
2572// Result in SubtypeTestCacheReg::kResultReg: null -> not found, otherwise
2573// result (true or false).
2574void StubCodeCompiler::GenerateSubtypeNTestCacheStub(Assembler* assembler,
2575 int n) {
2576 ASSERT(n >= 1);
2577 ASSERT(n <= SubtypeTestCache::kMaxInputs);
2578 // If we need the parent function type arguments for a closure, we also need
2579 // the delayed type arguments, so this case will never happen.
2580 ASSERT(n != 5);
2581
2582 const auto& raw_null = Immediate(target::ToRawPointer(NullObject()));
2583
2584 __ LoadFromStack(TypeTestABI::kInstanceReg, STCInternal::kInstanceDepth);
2585
2586 // Loop initialization (moved up here to avoid having all dependent loads
2587 // after each other)
2588 __ LoadFromStack(STCInternal::kCacheArrayReg, STCInternal::kCacheDepth);
2589#if defined(DEBUG)
2590 // Verify the STC we received has exactly as many inputs as this stub expects.
2591 Label search_stc;
2592 __ LoadFromSlot(STCInternal::kScratchReg, STCInternal::kCacheArrayReg,
2593 Slot::SubtypeTestCache_num_inputs());
2594 __ CompareImmediate(STCInternal::kScratchReg, n);
2595 __ BranchIf(EQUAL, &search_stc, Assembler::kNearJump);
2596 __ Breakpoint();
2597 __ Bind(&search_stc);
2598#endif
2599 // We avoid a load-acquire barrier here by relying on the fact that all other
2600 // loads from the array are data-dependent loads.
2601 __ movl(STCInternal::kCacheArrayReg,
2602 FieldAddress(STCInternal::kCacheArrayReg,
2603 target::SubtypeTestCache::cache_offset()));
2604
2605 // There is a maximum size for linear caches that is smaller than the size
2606 // of any hash-based cache, so we check the size of the backing array to
2607 // determine if this is a linear or hash-based cache.
2608 __ LoadFromSlot(STCInternal::kScratchReg, STCInternal::kCacheArrayReg,
2609 Slot::Array_length());
2610 __ CompareImmediate(STCInternal::kScratchReg,
2611 target::ToRawSmi(SubtypeTestCache::kMaxLinearCacheSize));
2612 // For IA32, we never handle hash caches in the stub, as there's too much
2613 // register pressure.
2614 Label is_linear;
2615 __ BranchIf(LESS_EQUAL, &is_linear, Assembler::kNearJump);
2616 // Return null so that we'll continue to the runtime for hash-based caches.
2617 __ movl(TypeTestABI::kSubtypeTestCacheResultReg, raw_null);
2618 __ ret();
2619 __ Bind(&is_linear);
2620 __ AddImmediate(STCInternal::kCacheArrayReg,
2621 Immediate(target::Array::data_offset() - kHeapObjectTag));
2622
2623 Label loop, not_closure;
2624 if (n >= 3) {
2625 __ LoadClassIdMayBeSmi(STCInternal::kInstanceCidOrSignatureReg,
2626 TypeTestABI::kInstanceReg);
2627 } else {
2628 __ LoadClassId(STCInternal::kInstanceCidOrSignatureReg,
2629 TypeTestABI::kInstanceReg);
2630 }
2631 __ cmpl(STCInternal::kInstanceCidOrSignatureReg, Immediate(kClosureCid));
2632 __ j(NOT_EQUAL, &not_closure, Assembler::kNearJump);
2633
2634 // Closure handling.
2635 {
2636 __ movl(STCInternal::kInstanceCidOrSignatureReg,
2637 FieldAddress(TypeTestABI::kInstanceReg,
2638 target::Closure::function_offset()));
2639 __ movl(STCInternal::kInstanceCidOrSignatureReg,
2640 FieldAddress(STCInternal::kInstanceCidOrSignatureReg,
2641 target::Function::signature_offset()));
2642 if (n >= 2) {
2643 __ movl(
2644 STCInternal::kInstanceInstantiatorTypeArgumentsReg,
2645 FieldAddress(TypeTestABI::kInstanceReg,
2646 target::Closure::instantiator_type_arguments_offset()));
2647 }
2648 if (n >= 5) {
2649 __ pushl(FieldAddress(TypeTestABI::kInstanceReg,
2650 target::Closure::function_type_arguments_offset()));
2651 }
2652 if (n >= 6) {
2653 __ pushl(FieldAddress(TypeTestABI::kInstanceReg,
2654 target::Closure::delayed_type_arguments_offset()));
2655 }
2656 __ jmp(&loop, Assembler::kNearJump);
2657 }
2658
2659 // Non-Closure handling.
2660 {
2661 __ Bind(&not_closure);
2662 if (n >= 2) {
2663 Label has_no_type_arguments;
2664 __ LoadClassById(STCInternal::kScratchReg,
2665 STCInternal::kInstanceCidOrSignatureReg);
2666 __ movl(STCInternal::kInstanceInstantiatorTypeArgumentsReg, raw_null);
2667 __ movl(
2668 STCInternal::kScratchReg,
2669 FieldAddress(STCInternal::kScratchReg,
2670 target::Class::
2671 host_type_arguments_field_offset_in_words_offset()));
2672 __ cmpl(STCInternal::kScratchReg,
2673 Immediate(target::Class::kNoTypeArguments));
2674 __ j(EQUAL, &has_no_type_arguments, Assembler::kNearJump);
2675 __ movl(STCInternal::kInstanceInstantiatorTypeArgumentsReg,
2676 FieldAddress(TypeTestABI::kInstanceReg, STCInternal::kScratchReg,
2677 TIMES_4, 0));
2678 __ Bind(&has_no_type_arguments);
2679 }
2680 __ SmiTag(STCInternal::kInstanceCidOrSignatureReg);
2681 if (n >= 5) {
2682 __ pushl(raw_null); // parent function.
2683 }
2684 if (n >= 6) {
2685 __ pushl(raw_null); // delayed.
2686 }
2687 }
2688
2689 // Offset of the original top of the stack from the current top of stack.
2690 intptr_t original_tos_offset = 0;
2691
2692 // Additional data conditionally stored on the stack uses negative depths
2693 // that will be non-negative when adjusted for original_tos_offset. We
2694 // initialize conditionally pushed values to kNoDepth for extra checking.
2695 intptr_t kInstanceParentFunctionTypeArgumentsDepth = STCInternal::kNoDepth;
2696 intptr_t kInstanceDelayedFunctionTypeArgumentsDepth = STCInternal::kNoDepth;
2697
2698 // Now that instance handling is done, both the delayed and parent function
2699 // type arguments stack slots have been set, so any input uses must be
2700 // offset by the new values and the new values can now be accessed in
2701 // the following code without issue when n >= 6.
2702 if (n >= 5) {
2703 original_tos_offset++;
2704 kInstanceParentFunctionTypeArgumentsDepth = -original_tos_offset;
2705 }
2706 if (n >= 6) {
2707 original_tos_offset++;
2708 kInstanceDelayedFunctionTypeArgumentsDepth = -original_tos_offset;
2709 }
2710
2711 Label found, not_found, done, next_iteration;
2712
2713 // Loop header.
2714 __ Bind(&loop);
2715 GenerateSubtypeTestCacheLoop(assembler, n, original_tos_offset,
2716 kInstanceParentFunctionTypeArgumentsDepth,
2717 kInstanceDelayedFunctionTypeArgumentsDepth,
2718 &found, &not_found, &next_iteration);
2719 __ Bind(&next_iteration);
2720 __ addl(STCInternal::kCacheArrayReg,
2721 Immediate(target::kWordSize *
2722 target::SubtypeTestCache::kTestEntryLength));
2723 __ jmp(&loop, Assembler::kNearJump);
2724
2725 __ Bind(&found);
2726 if (n >= 5) {
2727 __ Drop(original_tos_offset);
2728 }
2729 __ movl(TypeTestABI::kSubtypeTestCacheResultReg,
2730 Address(STCInternal::kCacheArrayReg,
2731 target::kCompressedWordSize * target::SubtypeTestCache::kTestResult));
2732 __ ret();
2733
2734 __ Bind(&not_found);
2735 if (n >= 5) {
2736 __ Drop(original_tos_offset);
2737 }
2738 // In the not found case, even though the field that determines occupancy was
2739 // null, another thread might be updating the cache and in the middle of
2740 // filling in the entry. Thus, we load the null object explicitly instead of
2741 // just using the (possibly mid-update) test result field.
2742 __ movl(TypeTestABI::kSubtypeTestCacheResultReg, raw_null);
2743 __ ret();
2744}
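// For illustration only (not part of the stub): a sketch of the linear probe
// generated above. Each entry holds the n inputs followed by the cached test
// result, and a null in the first slot terminates the cache. The helper and
// its layout parameters are illustrative, not the VM's own API.
#if 0
const void* ProbeLinearCache(const void** entries, intptr_t entry_length,
                             intptr_t result_slot, const void** inputs, int n,
                             const void* null_obj) {
  for (const void** entry = entries;; entry += entry_length) {
    if (entry[0] == null_obj) return null_obj;  // Unoccupied: not found.
    bool match = true;
    for (int i = 0; i < n; i++) match = match && (entry[i] == inputs[i]);
    if (match) return entry[result_slot];  // Cached true/false result.
  }
}
#endif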
2745
2746// Return the current stack pointer address, used to do stack alignment checks.
2747// TOS + 0: return address
2748// Result in EAX.
2749void StubCodeCompiler::GenerateGetCStackPointerStub() {
2750 __ leal(EAX, Address(ESP, target::kWordSize));
2751 __ ret();
2752}
2753
2754// Jump to a frame on the call stack.
2755// TOS + 0: return address
2756// TOS + 1: program_counter
2757// TOS + 2: stack_pointer
2758// TOS + 3: frame_pointer
2759// TOS + 4: thread
2760// No Result.
2761void StubCodeCompiler::GenerateJumpToFrameStub() {
2762 __ movl(THR, Address(ESP, 4 * target::kWordSize)); // Load target thread.
2763 __ movl(EBP,
2764 Address(ESP, 3 * target::kWordSize)); // Load target frame_pointer.
2765 __ movl(EBX,
2766 Address(ESP, 1 * target::kWordSize)); // Load target PC into EBX.
2767 __ movl(ESP,
2768 Address(ESP, 2 * target::kWordSize)); // Load target stack_pointer.
2769#if defined(USING_SHADOW_CALL_STACK)
2770#error Unimplemented
2771#endif
2772
2773 Label exit_through_non_ffi;
2774 // Check if we exited generated code through an FFI call. If so, do the
2775 // transition - this is needed because normally runtime calls transition back
2776 // to generated code via the destructor of TransitionGeneratedToVM/Native,
2777 // which is part of the runtime boilerplate code (see DEFINE_RUNTIME_ENTRY_IMPL
2778 // in runtime_entry.h). FFI calls don't have this boilerplate or this stack
2779 // resource, so they have to transition explicitly.
2780 __ cmpl(compiler::Address(
2781 THR, compiler::target::Thread::exit_through_ffi_offset()),
2782 compiler::Immediate(target::Thread::exit_through_ffi()));
2783 __ j(NOT_EQUAL, &exit_through_non_ffi, compiler::Assembler::kNearJump);
2784 __ TransitionNativeToGenerated(ECX, /*leave_safepoint=*/true,
2785 /*ignore_unwind_in_progress=*/true);
2786 __ Bind(&exit_through_non_ffi);
2787
2788 // Set tag.
2789 __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
2790 // Clear top exit frame.
2791 __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
2792 Immediate(0));
2793 __ jmp(EBX); // Jump to the exception handler code.
2794}
2795
2796// Run an exception handler. Execution comes from JumpToFrame stub.
2797//
2798// The arguments are stored in the Thread object.
2799// No result.
2800 void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
2801 ASSERT(kExceptionObjectReg == EAX);
2802 ASSERT(kStackTraceObjectReg == EDX);
2803 __ movl(EBX, Address(THR, target::Thread::resume_pc_offset()));
2804
2805 // Load null into ECX; it is used below to clear the exception and stacktrace.
2806 __ movl(ECX, Immediate(target::ToRawPointer(NullObject())));
2807
2808 // Load the exception from the current thread.
2809 Address exception_addr(THR, target::Thread::active_exception_offset());
2810 __ movl(kExceptionObjectReg, exception_addr);
2811 __ movl(exception_addr, ECX);
2812
2813 // Load the stacktrace from the current thread.
2814 Address stacktrace_addr(THR, target::Thread::active_stacktrace_offset());
2815 __ movl(kStackTraceObjectReg, stacktrace_addr);
2816 __ movl(stacktrace_addr, ECX);
2817
2818 __ jmp(EBX); // Jump to continuation point.
2819}
2820
2821// Deoptimize a frame on the call stack before rewinding.
2822// The arguments are stored in the Thread object.
2823// No result.
2824void StubCodeCompiler::GenerateDeoptForRewindStub() {
2825 // Push the deopt pc.
2826 __ pushl(Address(THR, target::Thread::resume_pc_offset()));
2827 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
2828
2829 // After we have deoptimized, jump to the correct frame.
2830 __ EnterStubFrame();
2831 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
2832 __ LeaveFrame();
2833 __ int3();
2834}
2835
2836// Calls to the runtime to optimize the given function.
2837// EBX: function to be reoptimized.
2838// ARGS_DESC_REG: argument descriptor (preserved).
2839 void StubCodeCompiler::GenerateOptimizeFunctionStub() {
2840 __ movl(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
2841 __ EnterStubFrame();
2842 __ pushl(ARGS_DESC_REG);
2843 __ pushl(Immediate(0)); // Setup space on stack for return value.
2844 __ pushl(EBX);
2845 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
2846 __ popl(EAX); // Discard argument.
2847 __ popl(FUNCTION_REG); // Get Function object
2848 __ popl(ARGS_DESC_REG); // Restore argument descriptor.
2849 __ LeaveFrame();
2850 __ movl(CODE_REG,
2851 FieldAddress(FUNCTION_REG, target::Function::code_offset()));
2852 __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2853 __ int3();
2854}
2855
2856 // Does an identity check (object references equal or not equal) with
2857 // special value comparisons for boxed numbers (doubles and mints).
2858 // Returns with ZF set iff the values are identical.
2859 // Note: A Mint cannot contain a value that would fit in a Smi.
2860static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
2861 const Register left,
2862 const Register right,
2863 const Register temp) {
2864 Label reference_compare, done, check_mint;
2865 // If any of the arguments is Smi do reference compare.
2866 __ testl(left, Immediate(kSmiTagMask));
2867 __ j(ZERO, &reference_compare, Assembler::kNearJump);
2868 __ testl(right, Immediate(kSmiTagMask));
2869 __ j(ZERO, &reference_compare, Assembler::kNearJump);
2870
2871 // Value compare for two doubles.
2872 __ CompareClassId(left, kDoubleCid, temp);
2873 __ j(NOT_EQUAL, &check_mint, Assembler::kNearJump);
2874 __ CompareClassId(right, kDoubleCid, temp);
2875 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2876
2877 // Double values bitwise compare.
2878 __ movl(temp, FieldAddress(left, target::Double::value_offset() +
2879 0 * target::kWordSize));
2880 __ cmpl(temp, FieldAddress(right, target::Double::value_offset() +
2881 0 * target::kWordSize));
2882 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2883 __ movl(temp, FieldAddress(left, target::Double::value_offset() +
2884 1 * target::kWordSize));
2885 __ cmpl(temp, FieldAddress(right, target::Double::value_offset() +
2886 1 * target::kWordSize));
2887 __ jmp(&done, Assembler::kNearJump);
2888
2889 __ Bind(&check_mint);
2890 __ CompareClassId(left, kMintCid, temp);
2891 __ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
2892 __ CompareClassId(right, kMintCid, temp);
2893 __ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
2894 __ movl(temp, FieldAddress(left, target::Mint::value_offset() +
2895 0 * target::kWordSize));
2896 __ cmpl(temp, FieldAddress(right, target::Mint::value_offset() +
2897 0 * target::kWordSize));
2898 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2899 __ movl(temp, FieldAddress(left, target::Mint::value_offset() +
2900 1 * target::kWordSize));
2901 __ cmpl(temp, FieldAddress(right, target::Mint::value_offset() +
2902 1 * target::kWordSize));
2903 __ jmp(&done, Assembler::kNearJump);
2904
2905 __ Bind(&reference_compare);
2906 __ cmpl(left, right);
2907 __ Bind(&done);
2908}
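// For illustration only (not part of the stub): the semantics implemented
// above, written with hypothetical classification/unboxing helpers. Doubles
// and Mints compare by their 64-bit payloads (two 32-bit words on IA32);
// Smis and everything else compare by (tagged) reference.
#if 0
bool IsSmi(uintptr_t obj);             // Hypothetical helpers for the sketch.
bool IsDouble(uintptr_t obj);
bool IsMint(uintptr_t obj);
uint64_t DoubleBits(uintptr_t obj);    // Raw payload of a boxed double.
int64_t MintValue(uintptr_t obj);      // Payload of a boxed mint.

bool IdenticalWithNumberCheck(uintptr_t left, uintptr_t right) {
  if (IsSmi(left) || IsSmi(right)) return left == right;
  if (IsDouble(left) && IsDouble(right))
    return DoubleBits(left) == DoubleBits(right);  // Bitwise: NaN == NaN here.
  if (IsMint(left) && IsMint(right)) return MintValue(left) == MintValue(right);
  return left == right;  // Plain reference comparison otherwise.
}
#endif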
2909
2910// Called only from unoptimized code. All relevant registers have been saved.
2911// TOS + 0: return address
2912// TOS + 1: right argument.
2913// TOS + 2: left argument.
2914// Returns ZF set.
2915void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
2916#if !defined(PRODUCT)
2917 // Check single stepping.
2918 Label stepping, done_stepping;
2919 __ LoadIsolate(EAX);
2920 __ movzxb(EAX, Address(EAX, target::Isolate::single_step_offset()));
2921 __ cmpl(EAX, Immediate(0));
2922 __ j(NOT_EQUAL, &stepping);
2923 __ Bind(&done_stepping);
2924#endif
2925
2926 const Register left = EAX;
2927 const Register right = EDX;
2928 const Register temp = ECX;
2929 __ movl(left, Address(ESP, 2 * target::kWordSize));
2930 __ movl(right, Address(ESP, 1 * target::kWordSize));
2931 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
2932 __ ret();
2933
2934#if !defined(PRODUCT)
2935 __ Bind(&stepping);
2936 __ EnterStubFrame();
2937 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2938 __ LeaveFrame();
2939 __ jmp(&done_stepping);
2940#endif
2941}
2942
2943// Called from optimized code only.
2944// TOS + 0: return address
2945// TOS + 1: right argument.
2946// TOS + 2: left argument.
2947// Returns ZF set.
2948void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
2949 const Register left = EAX;
2950 const Register right = EDX;
2951 const Register temp = ECX;
2952 __ movl(left, Address(ESP, 2 * target::kWordSize));
2953 __ movl(right, Address(ESP, 1 * target::kWordSize));
2954 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
2955 __ ret();
2956}
2957
2958// Called from megamorphic calls.
2959// EBX: receiver (passed to target)
2960// IC_DATA_REG: target::MegamorphicCache (preserved)
2961// Passed to target:
2962// EBX: target entry point
2963// FUNCTION_REG: target function
2964// ARGS_DESC_REG: argument descriptor
2965void StubCodeCompiler::GenerateMegamorphicCallStub() {
2966 // Jump if receiver is a smi.
2967 Label smi_case;
2968 // Check if the receiver (in EBX) is a Smi.
2969 __ testl(EBX, Immediate(kSmiTagMask));
2970 // Jump out of line for smi case.
2971 __ j(ZERO, &smi_case, Assembler::kNearJump);
2972
2973 // Loads the cid of the instance.
2974 __ LoadClassId(EAX, EBX);
2975
2976 Label cid_loaded;
2977 __ Bind(&cid_loaded);
2978 __ pushl(EBX); // save receiver
2979 __ movl(EBX,
2980 FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
2981 __ movl(EDI, FieldAddress(IC_DATA_REG,
2982 target::MegamorphicCache::buckets_offset()));
2983 // EDI: cache buckets array.
2984 // EBX: mask as a smi.
2985
2986 // Tag cid as a smi.
2987 __ addl(EAX, EAX);
2988
2989 // Compute the table index.
2990 ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
2991 // Use leal and subl to multiply by 7 == 8 - 1.
2992 __ leal(EDX, Address(EAX, TIMES_8, 0));
2993 __ subl(EDX, EAX);
2994
2995 Label loop;
2996 __ Bind(&loop);
2997 __ andl(EDX, EBX);
2998
2999 const intptr_t base = target::Array::data_offset();
3000 Label probe_failed;
3001 // EDX is smi tagged, but table entries are two words, so TIMES_4.
3002 __ cmpl(EAX, FieldAddress(EDI, EDX, TIMES_4, base));
3003 __ j(NOT_EQUAL, &probe_failed, Assembler::kNearJump);
3004
3005 Label load_target;
3006 __ Bind(&load_target);
3007 // Call the target found in the cache. For a class id match, this is a
3008 // proper target for the given name and arguments descriptor. If the
3009 // illegal class id was found, the target is a cache miss handler that can
3010 // be invoked as a normal Dart function.
3011 __ movl(FUNCTION_REG,
3012 FieldAddress(EDI, EDX, TIMES_4, base + target::kWordSize));
3013 __ movl(ARGS_DESC_REG,
3014 FieldAddress(IC_DATA_REG,
3015 target::CallSiteData::arguments_descriptor_offset()));
3016 __ popl(EBX); // restore receiver
3017 __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
3018
3019 __ Bind(&probe_failed);
3020 // Probe failed, check if it is a miss.
3021 __ cmpl(FieldAddress(EDI, EDX, TIMES_4, base),
3022 Immediate(target::ToRawSmi(kIllegalCid)));
3023 Label miss;
3024 __ j(ZERO, &miss, Assembler::kNearJump);
3025
3026 // Try next entry in the table.
3027 __ AddImmediate(EDX, Immediate(target::ToRawSmi(1)));
3028 __ jmp(&loop);
3029
3030 // Load cid for the Smi case.
3031 __ Bind(&smi_case);
3032 __ movl(EAX, Immediate(kSmiCid));
3033 __ jmp(&cid_loaded);
3034
3035 __ Bind(&miss);
3036 __ popl(EBX); // restore receiver
3037 GenerateSwitchableCallMissStub();
3038}
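// For illustration only (not part of the stub): the probe sequence above on
// untagged values. The bucket array holds (cid, target) pairs, its size is a
// power of two with mask == size - 1, and the start index spreads class ids
// by kSpreadFactor == 7 (computed as cid * 8 - cid via leal/subl). Collisions
// probe linearly; a kIllegalCid entry marks the miss handler.
#if 0
intptr_t FindBucket(const intptr_t* buckets, intptr_t mask, intptr_t cid) {
  intptr_t index = (cid * 7) & mask;
  while (buckets[2 * index] != cid && buckets[2 * index] != kIllegalCid) {
    index = (index + 1) & mask;  // Linear probing on collision.
  }
  return index;  // Caller jumps to this pair's target function.
}
#endif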
3039
3040void StubCodeCompiler::GenerateICCallThroughCodeStub() {
3041 __ int3(); // AOT only.
3042}
3043
3044void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
3045 __ int3(); // AOT only.
3046}
3047
3048// Called from switchable IC calls.
3049// EBX: receiver
3050void StubCodeCompiler::GenerateSwitchableCallMissStub() {
3051 __ movl(CODE_REG,
3052 Address(THR, target::Thread::switchable_call_miss_stub_offset()));
3053 __ EnterStubFrame();
3054 __ pushl(EBX); // Preserve receiver.
3055
3056 __ pushl(Immediate(0)); // Result slot.
3057 __ pushl(Immediate(0)); // Arg0: stub out.
3058 __ pushl(EBX); // Arg1: Receiver
3059 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3060 __ popl(ECX); // Discard argument.
3061 __ popl(CODE_REG); // result = stub
3062 __ popl(ECX); // result = IC
3063
3064 __ popl(EBX); // Restore receiver.
3065 __ LeaveFrame();
3066
3067 __ movl(EAX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3068 CodeEntryKind::kNormal)));
3069 __ jmp(EAX);
3070}
3071
3072void StubCodeCompiler::GenerateSingleTargetCallStub() {
3073 __ int3(); // AOT only.
3074}
3075
3076static ScaleFactor GetScaleFactor(intptr_t size) {
3077 switch (size) {
3078 case 1:
3079 return TIMES_1;
3080 case 2:
3081 return TIMES_2;
3082 case 4:
3083 return TIMES_4;
3084 case 8:
3085 return TIMES_8;
3086 case 16:
3087 return TIMES_16;
3088 }
3089 UNREACHABLE();
3090 return static_cast<ScaleFactor>(0);
3091}
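// For illustration only (not part of the stub): an IA32 SIB byte can scale an
// index by at most 8, so TIMES_16 cannot be encoded directly. The typed-data
// allocation stub below compensates by doubling the untagged length first,
// since length * 16 == (length * 2) * 8. A sketch of that rewrite:
#if 0
intptr_t ScaledSize(intptr_t length, intptr_t element_size) {
  if (element_size == 16) {  // No hardware scale factor for 16.
    length *= 2;
    element_size = 8;
  }
  return length * element_size;  // Now encodable as index * scale.
}
#endif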
3092
3093 void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
3094 const intptr_t element_size = TypedDataElementSizeInBytes(cid);
3095 const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
3096 ScaleFactor scale_factor = GetScaleFactor(element_size);
3097
3098 COMPILE_ASSERT(AllocateTypedDataArrayABI::kLengthReg == EAX);
3099 COMPILE_ASSERT(AllocateTypedDataArrayABI::kResultReg == EAX);
3100
3101 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3102 // Save length argument for possible runtime call, as
3103 // EAX is clobbered.
3104 Label call_runtime;
3105 __ pushl(AllocateTypedDataArrayABI::kLengthReg);
3106
3107 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, ECX));
3108 __ movl(EDI, AllocateTypedDataArrayABI::kLengthReg);
3109 /* Check that length is a positive Smi. */
3110 /* EDI: requested array length argument. */
3111 __ testl(EDI, Immediate(kSmiTagMask));
3112 __ j(NOT_ZERO, &call_runtime);
3113 __ SmiUntag(EDI);
3114 /* Check for length >= 0 && length <= max_len. */
3115 /* EDI: untagged array length. */
3116 __ cmpl(EDI, Immediate(max_len));
3117 __ j(ABOVE, &call_runtime);
3118 /* Special case for scaling by 16. */
3119 if (scale_factor == TIMES_16) {
3120 /* double length of array. */
3121 __ addl(EDI, EDI);
3122 /* only scale by 8. */
3123 scale_factor = TIMES_8;
3124 }
3125
3126 const intptr_t fixed_size_plus_alignment_padding =
3127 target::TypedData::HeaderSize() +
3128 target::ObjectAlignment::kObjectAlignment - 1;
3129 __ leal(EDI, Address(EDI, scale_factor, fixed_size_plus_alignment_padding));
3130 __ andl(EDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
3131 __ movl(EAX, Address(THR, target::Thread::top_offset()));
3132 __ movl(EBX, EAX);
3133 /* EDI: allocation size. */
3134 __ addl(EBX, EDI);
3135 __ j(CARRY, &call_runtime);
3136
3137 /* Check if the allocation fits into the remaining space. */
3138 /* EAX: potential new object start. */
3139 /* EBX: potential next object start. */
3140 /* EDI: allocation size. */
3141 __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
3142 __ j(ABOVE_EQUAL, &call_runtime);
3143 __ CheckAllocationCanary(EAX);
3144
3145 /* Successfully allocated the object(s), now update top to point to */
3146 /* next object start and initialize the object. */
3147 __ movl(Address(THR, target::Thread::top_offset()), EBX);
3148 __ addl(EAX, Immediate(kHeapObjectTag));
3149
3150 /* Initialize the tags. */
3151 /* EAX: new object start as a tagged pointer. */
3152 /* EBX: new object end address. */
3153 /* EDI: allocation size. */
3154 {
3155 Label size_tag_overflow, done;
3156 __ cmpl(EDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
3157 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
3158 __ shll(EDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
3159 target::ObjectAlignment::kObjectAlignmentLog2));
3160 __ jmp(&done, Assembler::kNearJump);
3161 __ Bind(&size_tag_overflow);
3162 __ movl(EDI, Immediate(0));
3163 __ Bind(&done);
3164 /* Get the class index and insert it into the tags. */
3165 uword tags =
3166 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
3167 __ orl(EDI, Immediate(tags));
3168 __ movl(FieldAddress(EAX, target::Object::tags_offset()),
3169 EDI); /* Tags. */
3170 }
3171
3172 /* Set the length field. */
3173 /* EAX: new object start as a tagged pointer. */
3174 /* EBX: new object end address. */
3175 __ popl(EDI); /* Array length. */
3176 __ StoreIntoObjectNoBarrier(
3177 EAX, FieldAddress(EAX, target::TypedDataBase::length_offset()), EDI);
3178
3179 /* Initialize all array elements to 0. */
3180 /* EAX: new object start as a tagged pointer. */
3181 /* EBX: new object end address. */
3182 /* EDI: iterator which initially points to the start of the variable */
3183 /* data area to be initialized. */
3184 /* ECX: scratch register. */
3185 __ xorl(ECX, ECX); /* Zero. */
3186 __ leal(EDI, FieldAddress(EAX, target::TypedData::HeaderSize()));
3187 __ StoreInternalPointer(
3188 EAX, FieldAddress(EAX, target::PointerBase::data_offset()), EDI);
3189 Label loop;
3190 __ Bind(&loop);
3191 for (intptr_t offset = 0; offset < target::kObjectAlignment;
3192 offset += target::kWordSize) {
3193 __ movl(Address(EDI, offset), ECX);
3194 }
3195 // Safe to only check every kObjectAlignment bytes instead of each word.
3196 ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
3197 __ addl(EDI, Immediate(target::kObjectAlignment));
3198 __ cmpl(EDI, EBX);
3199 __ j(UNSIGNED_LESS, &loop);
3200 __ WriteAllocationCanary(EBX); // Fix overshoot.
3201
3202 __ ret();
3203
3204 __ Bind(&call_runtime);
3205 __ popl(AllocateTypedDataArrayABI::kLengthReg); // Restore length.
3206 }
3207
3208 __ EnterStubFrame();
3209 __ PushObject(Object::null_object()); // Make room for the result.
3210 __ pushl(Immediate(target::ToRawSmi(cid)));
3211 __ pushl(AllocateTypedDataArrayABI::kLengthReg);
3212 __ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
3213 __ Drop(2); // Drop arguments.
3214 __ popl(AllocateTypedDataArrayABI::kResultReg);
3215 __ LeaveStubFrame();
3216 __ ret();
3217}
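// For illustration only (not part of the stub): the size computation behind
// the leal/andl pair in the fast path above. Adding the worst-case padding up
// front and masking with -alignment rounds the total size up to object
// alignment. Parameter names are illustrative.
#if 0
intptr_t TypedDataInstanceSize(intptr_t length, intptr_t element_size,
                               intptr_t header_size, intptr_t alignment) {
  intptr_t size = length * element_size + header_size + alignment - 1;
  return size & -alignment;  // Equivalent to RoundUp(header + payload).
}
#endif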
3218
3219} // namespace compiler
3220
3221} // namespace dart
3222
3223#endif // defined(TARGET_ARCH_IA32)