stub_code_compiler_ia32.cc
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"

// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
#include "vm/compiler/backend/il.h"

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/compiler/stub_code_compiler.h"

#if defined(TARGET_ARCH_IA32)

#include "vm/class_id.h"
#include "vm/code_entry_kind.h"
#include "vm/constants.h"
#include "vm/instructions.h"
#include "vm/tags.h"

#define __ assembler->
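// Note: with this macro, `__ movl(...)` below reads as assembler->movl(...);
// each such statement appends one ia32 instruction to the stub under
// construction.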

namespace dart {
namespace compiler {

// Ensures that [EAX] is a new object, if not it will be added to the remembered
// set via a leaf runtime call.
//
// WARNING: This might clobber all registers except for [EAX], [THR] and [FP].
// The caller should simply call LeaveFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered() {
  // If the object is not in an active TLAB, we call a leaf-runtime to add it to
  // the remembered set and/or deferred marking worklist. This test assumes a
  // Page's TLAB use is always ascending.
  Label done;
  __ AndImmediate(ECX, EAX, target::kPageMask);
  __ LoadFromOffset(ECX, ECX, target::Page::original_top_offset());
  __ CompareRegisters(EAX, ECX);
  __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);

  {
    LeafRuntimeScope rt(assembler,
                        /*frame_size=*/2 * target::kWordSize,
                        /*preserve_registers=*/false);
    __ movl(Address(ESP, 1 * target::kWordSize), THR);
    __ movl(Address(ESP, 0 * target::kWordSize), EAX);
    rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
  }

  __ Bind(&done);
}

// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : address of last argument in argument array.
//   ESP + 4*EDX : address of first argument in argument array.
//   ESP + 4*EDX + 4 : address of return value.
//   ECX : address of the runtime function to call.
//   EDX : number of arguments to the call.
// Must preserve callee saved registers EDI and EBX.
void StubCodeCompiler::GenerateCallToRuntimeStub() {
  const intptr_t thread_offset = target::NativeArguments::thread_offset();
  const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
  const intptr_t argv_offset = target::NativeArguments::argv_offset();
  const intptr_t retval_offset = target::NativeArguments::retval_offset();
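
  // Once the frame is aligned below, the NativeArguments structure is built
  // in place in the outgoing argument area:
  //   [ESP + thread_offset]   current Thread*
  //   [ESP + argc_tag_offset] argument count and kind tag (from EDX)
  //   [ESP + argv_offset]     pointer to the first Dart argument
  //   [ESP + retval_offset]   pointer to the return-value slot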

  __ movl(CODE_REG,
          Address(THR, target::Thread::call_to_runtime_stub_offset()));
  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to Dart VM C++ code.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);

  // Mark that the thread exited generated code through a runtime call.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(target::Thread::exit_through_runtime_call()));

#if defined(DEBUG)
  {
    Label ok;
    // Check that we are always entering from Dart code.
    __ cmpl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
    __ j(EQUAL, &ok, Assembler::kNearJump);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing VM code.
  __ movl(Assembler::VMTagAddress(), ECX);

  // Reserve space for arguments and align frame before entering C++ world.
  __ AddImmediate(
      ESP,
      Immediate(-static_cast<int32_t>(target::NativeArguments::StructSize())));
  if (OS::ActivationFrameAlignment() > 1) {
    __ andl(ESP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
  }

  // Pass NativeArguments structure by value and call runtime.
  __ movl(Address(ESP, thread_offset), THR);    // Set thread in NativeArgs.
  __ movl(Address(ESP, argc_tag_offset), EDX);  // Set argc in NativeArguments.
  // Compute argv.
  __ leal(EAX,
          Address(EBP, EDX, TIMES_4,
                  target::frame_layout.param_end_from_fp * target::kWordSize));
  __ movl(Address(ESP, argv_offset), EAX);  // Set argv in NativeArguments.
  __ addl(EAX,
          Immediate(1 * target::kWordSize));  // Retval is next to 1st argument.
  __ movl(Address(ESP, retval_offset), EAX);  // Set retval in NativeArguments.
  __ call(ECX);

  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));

  // Mark that the thread has not exited generated Dart code.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));

  // Reset exit frame information in Isolate's mutator thread structure.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  __ LeaveFrame();

  // The following return can jump to a lazy-deopt stub, which assumes EAX
  // contains a return value and will save it in a GC-visible way. We therefore
  // have to ensure EAX does not contain any garbage value left from the C
  // function we called (which has return type "void").
  // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
  __ xorl(EAX, EAX);
  __ ret();
}
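
// The safepoint transition stubs below save every general-purpose register
// with pushal, plus XMM0 (the only FPU register preserved here), around a
// leaf runtime call that performs the actual safepoint transition.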
void StubCodeCompiler::GenerateEnterSafepointStub() {
  __ pushal();
  __ subl(SPREG, Immediate(8));
  __ movsd(Address(SPREG, 0), XMM0);

  __ EnterFrame(0);
  __ ReserveAlignedFrameSpace(0);
  __ movl(EAX, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
  __ call(EAX);
  __ LeaveFrame();

  __ movsd(XMM0, Address(SPREG, 0));
  __ addl(SPREG, Immediate(8));
  __ popal();
  __ ret();
}

static void GenerateExitSafepointStubCommon(Assembler* assembler,
                                            uword runtime_entry_offset) {
  __ pushal();
  __ subl(SPREG, Immediate(8));
  __ movsd(Address(SPREG, 0), XMM0);

  __ EnterFrame(0);
  __ ReserveAlignedFrameSpace(0);

  // Set the execution state to VM while waiting for the safepoint to end.
  // This isn't strictly necessary but enables tests to check that we're not
  // in native code anymore. See tests/ffi/function_gc_test.dart for example.
  __ movl(Address(THR, target::Thread::execution_state_offset()),
          Immediate(target::Thread::vm_execution_state()));

  __ movl(EAX, Address(THR, runtime_entry_offset));
  __ call(EAX);
  __ LeaveFrame();

  __ movsd(XMM0, Address(SPREG, 0));
  __ addl(SPREG, Immediate(8));
  __ popal();
  __ ret();
}

void StubCodeCompiler::GenerateExitSafepointStub() {
  GenerateExitSafepointStubCommon(
      assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}

void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
  GenerateExitSafepointStubCommon(
      assembler,
      kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
}

void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
                                            Register dst,
                                            Register tmp) {
  // Only used in AOT.
  __ Breakpoint();
}

// Calls a native function inside a safepoint.
//
// On entry:
//   Stack: set up for native call
//   EAX: target to call
//
// On exit:
//   Stack: preserved
//   EBX: clobbered (even though it's normally callee-saved)
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
  __ popl(EBX);

  __ movl(ECX, compiler::Immediate(target::Thread::exit_through_ffi()));
  __ TransitionGeneratedToNative(EAX, FPREG, ECX /*volatile*/,
                                 /*enter_safepoint=*/true);
  __ call(EAX);
  __ TransitionNativeToGenerated(ECX /*volatile*/, /*leave_safepoint=*/true);

  __ jmp(EBX);
}
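
// Overview: ia32 has no PC-relative addressing, so each trampoline on the
// page performs a call to a local label to discover its own entry point,
// then jumps to the shared stub below, which uses that address as the key
// into the FfiCallbackMetadata table.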
void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
  Label ret_4;

  // EAX is volatile and doesn't hold any arguments.
  COMPILE_ASSERT(!IsArgumentRegister(EAX) && !IsCalleeSavedRegister(EAX));

  Label body, load_tramp_addr;
  const intptr_t kCallLength = 5;
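  // (A near call with a 32-bit relative displacement encodes as opcode E8
  // followed by a 4-byte immediate, i.e. 5 bytes in total.)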
  for (intptr_t i = 0; i < FfiCallbackMetadata::NumCallbackTrampolinesPerPage();
       ++i) {
    // The FfiCallbackMetadata table is keyed by the trampoline entry point. So
    // look up the current PC, then jump to the shared section. There's no easy
    // way to get the PC in ia32 so we have to do a call, grab the return
    // address from the stack, then return here (mismatched call/ret causes
    // problems), then jump to the shared section.
    const intptr_t size_before = __ CodeSize();
    __ call(&load_tramp_addr);
    const intptr_t size_after = __ CodeSize();
    ASSERT_EQUAL(size_after - size_before, kCallLength);
    __ jmp(&body);
  }

  ASSERT_EQUAL(__ CodeSize(),
               FfiCallbackMetadata::kNativeCallbackTrampolineSize *
                   FfiCallbackMetadata::NumCallbackTrampolinesPerPage());

  const intptr_t shared_stub_start = __ CodeSize();

  __ Bind(&load_tramp_addr);
  // Load the return address into EAX, and subtract the size of the call
  // instruction. This is our original trampoline address.
  __ movl(EAX, Address(SPREG, 0));
  __ subl(EAX, Immediate(kCallLength));
  __ ret();

  __ Bind(&body);

  // Save THR and EBX which are callee-saved.
  __ pushl(THR);
  __ pushl(EBX);

  // THR & return address
  COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 4);

  // Load the thread, verify the callback ID and exit the safepoint.
  //
  // We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
  // code size on this shared stub.
  {
    __ EnterFrame(0);
    // entry_point, trampoline_type, &trampoline_type, &entry_point, trampoline
    //        ^------ GetFfiCallbackMetadata args ------^
    __ ReserveAlignedFrameSpace(5 * target::kWordSize);

    // Trampoline arg.
    __ movl(Address(SPREG, 0 * target::kWordSize), EAX);

    // Pointer to trampoline type stack slot.
    __ movl(EAX, SPREG);
    __ addl(EAX, Immediate(3 * target::kWordSize));
    __ movl(Address(SPREG, 2 * target::kWordSize), EAX);

    // Pointer to entry point stack slot.
    __ addl(EAX, Immediate(target::kWordSize));
    __ movl(Address(SPREG, 1 * target::kWordSize), EAX);

    __ movl(EAX,
            Immediate(reinterpret_cast<int64_t>(DLRT_GetFfiCallbackMetadata)));
    __ call(EAX);
    __ movl(THR, EAX);

    // Save the trampoline type in EBX, and the entry point in ECX.
    __ movl(EBX, Address(SPREG, 3 * target::kWordSize));
    __ movl(ECX, Address(SPREG, 4 * target::kWordSize));

    __ LeaveFrame();

    // Save the trampoline type to the stack, because we'll need it after the
    // call to decide whether to ret() or ret(4).
    __ pushl(EBX);
  }

  Label async_callback;
  Label done;

  // If GetFfiCallbackMetadata returned a null thread, it means that the async
  // callback was invoked after it was deleted. In this case, do nothing.
  __ cmpl(THR, Immediate(0));
  __ j(EQUAL, &done);

  // Check the trampoline type to see how the callback should be invoked.
  __ cmpl(EBX, Immediate(static_cast<uword>(
                   FfiCallbackMetadata::TrampolineType::kSync)));
  __ j(NOT_EQUAL, &async_callback);

  // Sync callback. The entry point contains the target function, so just call
  // it. DLRT_GetThreadForNativeCallbackTrampoline exited the safepoint, so
  // re-enter it afterwards.

  // On entry to the function, there will be two extra slots on the stack:
  // the saved THR and the return address. The target will know to skip them.
  __ call(ECX);

  // Takes care to not clobber *any* registers (besides scratch).
  __ EnterFullSafepoint(/*scratch=*/ECX);

  // Pop the trampoline type into ECX.
  __ popl(ECX);

  // Restore callee-saved registers.
  __ popl(EBX);
  __ popl(THR);

  __ cmpl(ECX, Immediate(static_cast<uword>(
                   FfiCallbackMetadata::TrampolineType::kSyncStackDelta4)));
  __ j(EQUAL, &ret_4);
  __ ret();

  __ Bind(&ret_4);
  __ ret(Immediate(4));

  __ Bind(&async_callback);

  // Async callback. The entrypoint marshals the arguments into a message and
  // sends it over the send port. DLRT_GetThreadForNativeCallbackTrampoline
  // entered a temporary isolate, so exit it afterwards.

  // On entry to the function, there will be two extra slots on the stack:
  // the saved THR and the return address. The target will know to skip them.
  __ call(ECX);

  // Exit the temporary isolate.
  {
    __ EnterFrame(0);
    __ ReserveAlignedFrameSpace(0);

    __ movl(EAX,
            Immediate(reinterpret_cast<int64_t>(DLRT_ExitTemporaryIsolate)));
    __ CallCFunction(EAX);

    __ LeaveFrame();
  }

  __ Bind(&done);

  // Pop the trampoline type into ECX.
  __ popl(ECX);

  // Restore callee-saved registers.
  __ popl(EBX);
  __ popl(THR);

  // Stack delta is always 0 for async callbacks.
  __ ret();

  // 'kNativeCallbackSharedStubSize' is an upper bound because the exact
  // instruction size can vary slightly based on OS calling conventions.
  ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
                       FfiCallbackMetadata::kNativeCallbackSharedStubSize);

#if defined(DEBUG)
  while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
    __ Breakpoint();
  }
#endif
}

void StubCodeCompiler::GenerateSharedStubGeneric(
    bool save_fpu_registers,
    intptr_t self_code_stub_offset_from_thread,
    bool allow_return,
    std::function<void()> perform_runtime_call) {
  // Only used in AOT.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateSharedStub(
    bool save_fpu_registers,
    const RuntimeEntry* target,
    intptr_t self_code_stub_offset_from_thread,
    bool allow_return,
    bool store_runtime_result_in_result_register) {
  // Only used in AOT.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
  // Only used in AOT.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
  // Only used in AOT.
  __ Breakpoint();
}

void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
  // Only used in AOT.
  __ Breakpoint();
}

// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : address of return value.
//   EAX : address of first argument in argument array.
//   ECX : address of the native function to call.
//   EDX : argc_tag including number of arguments and function kind.
static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
                                              Address wrapper_address) {
  const intptr_t native_args_struct_offset =
      target::NativeEntry::kNumCallWrapperArguments * target::kWordSize;
  const intptr_t thread_offset =
      target::NativeArguments::thread_offset() + native_args_struct_offset;
  const intptr_t argc_tag_offset =
      target::NativeArguments::argc_tag_offset() + native_args_struct_offset;
  const intptr_t argv_offset =
      target::NativeArguments::argv_offset() + native_args_struct_offset;
  const intptr_t retval_offset =
      target::NativeArguments::retval_offset() + native_args_struct_offset;

  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to Dart VM code.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()), EBP);

  // Mark that the thread exited generated code through a runtime call.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(target::Thread::exit_through_runtime_call()));

#if defined(DEBUG)
  {
    Label ok;
    // Check that we are always entering from Dart code.
    __ cmpl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
    __ j(EQUAL, &ok, Assembler::kNearJump);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing native code.
  __ movl(Assembler::VMTagAddress(), ECX);

  // Reserve space for the native arguments structure, the outgoing parameters
  // (pointer to the native arguments structure, the C function entry point)
  // and align frame before entering the C++ world.
  __ AddImmediate(
      ESP,
      Immediate(-static_cast<int32_t>(target::NativeArguments::StructSize()) -
                (2 * target::kWordSize)));
  if (OS::ActivationFrameAlignment() > 1) {
    __ andl(ESP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
  }

  // Pass NativeArguments structure by value and call native function.
  // Set thread in NativeArgs.
  __ movl(Address(ESP, thread_offset), THR);
  // Set argc in NativeArguments.
  __ movl(Address(ESP, argc_tag_offset), EDX);
  // Set argv in NativeArguments.
  __ movl(Address(ESP, argv_offset), EAX);
  // Compute return value addr.
  __ leal(EAX, Address(EBP, (target::frame_layout.param_end_from_fp + 1) *
                                target::kWordSize));
  // Set retval in NativeArguments.
  __ movl(Address(ESP, retval_offset), EAX);
  // Pointer to the NativeArguments.
  __ leal(EAX, Address(ESP, 2 * target::kWordSize));
  // Pass the pointer to the NativeArguments.
  __ movl(Address(ESP, 0), EAX);

  __ movl(Address(ESP, target::kWordSize), ECX);  // Function to call.
  __ call(wrapper_address);

  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));

  // Mark that the thread has not exited generated Dart code.
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));

  // Reset exit frame information in Isolate's mutator thread structure.
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  __ LeaveFrame();
  __ ret();
}

void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR,
              target::Thread::no_scope_native_wrapper_entry_point_offset()));
}

void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR,
              target::Thread::auto_scope_native_wrapper_entry_point_offset()));
}

// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : address of return value.
//   EAX : address of first argument in argument array.
//   ECX : address of the native function to call.
//   EDX : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR,
              target::Thread::bootstrap_native_wrapper_entry_point_offset()));
}

// Input parameters:
//   ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
  __ EnterStubFrame();
  __ pushl(ARGS_DESC_REG);  // Preserve arguments descriptor array.
  __ pushl(Immediate(0));   // Setup space on stack for return value.
  __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
  __ popl(EAX);            // Get Code object result.
  __ popl(ARGS_DESC_REG);  // Restore arguments descriptor array.
  // Remove the stub frame as we are about to jump to the dart function.
  __ LeaveFrame();

  __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
}

// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub() {
  Label monomorphic;
  __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);

  // This was a static call.
  __ EnterStubFrame();
  __ pushl(ARGS_DESC_REG);  // Preserve arguments descriptor array.
  __ pushl(Immediate(0));   // Setup space on stack for return value.
  __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
  __ popl(EAX);            // Get Code object.
  __ popl(ARGS_DESC_REG);  // Restore arguments descriptor array.
  __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();

  __ Bind(&monomorphic);
  // This was a switchable call.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Result slot.
  __ pushl(EBX);           // Preserve receiver.
  __ pushl(ECX);           // Old cache value (also 2nd return value).
  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
  __ popl(ECX);       // Get target cache object.
  __ popl(EBX);       // Restore receiver.
  __ popl(CODE_REG);  // Get target Code object.
  __ movl(EAX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
                                          CodeEntryKind::kMonomorphic)));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();
}

// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
  __ popl(EAX);  // Get Code object.
  __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();
}

// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
  __ EnterStubFrame();
  // Preserve type arguments register.
  __ pushl(AllocateObjectABI::kTypeArgumentsReg);
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
  __ popl(EAX);  // Get Code object.
  // Restore type arguments register.
  __ popl(AllocateObjectABI::kTypeArgumentsReg);
  __ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
  __ LeaveFrame();
  __ jmp(EAX);
  __ int3();
}

// Input parameters:
//   EDX: smi-tagged argument count, may be zero.
//   EBP[target::frame_layout.param_end_from_fp + 1]: last argument.
// Uses EAX, EBX, ECX, EDX, EDI.
static void PushArrayOfArguments(Assembler* assembler) {
  // Allocate array to store arguments of caller.
  const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
  __ movl(ECX, raw_null);  // Null element type for raw Array.
  __ Call(StubCodeAllocateArray());
  __ SmiUntag(EDX);
  // EAX: newly allocated array.
  // EDX: length of the array (was preserved by the stub).
  __ pushl(EAX);  // Array is in EAX and on top of stack.
  __ leal(EBX,
          Address(EBP, EDX, TIMES_4,
                  target::frame_layout.param_end_from_fp * target::kWordSize));
  __ leal(ECX, FieldAddress(EAX, target::Array::data_offset()));
  // EBX: address of first argument on stack.
  // ECX: address of first argument in array.
  Label loop, loop_condition;
  __ jmp(&loop_condition, Assembler::kNearJump);
  __ Bind(&loop);
  __ movl(EDI, Address(EBX, 0));
  // Generational barrier is needed, array is not necessarily in new space.
  __ StoreIntoObject(EAX, Address(ECX, 0), EDI);
  __ AddImmediate(ECX, Immediate(target::kWordSize));
  __ AddImmediate(EBX, Immediate(-target::kWordSize));
  __ Bind(&loop_condition);
  __ decl(EDX);
  __ j(POSITIVE, &loop, Assembler::kNearJump);
}

// Used by eager and lazy deoptimization. Preserve result in EAX if necessary.
// This stub translates optimized frame into unoptimized frame. The optimized
// frame can contain values in registers and on stack, the unoptimized
// frame contains all values on stack.
// Deoptimization occurs in following steps:
// - Push all registers that can contain values.
// - Call C routine to copy the stack and saved registers into temporary buffer.
// - Adjust caller's frame to correct unoptimized frame size.
// - Fill the unoptimized frame.
// - Materialize objects that require allocation (e.g. Double instances).
// GC can occur only after frame is fully rewritten.
// Stack after EnterDartFrame(0) below:
//   +------------------+
//   | PC marker        |  <- TOS
//   +------------------+
//   | Saved FP         |  <- FP of stub
//   +------------------+
//   | return-address   |  (deoptimization point)
//   +------------------+
//   | ...              |  <- SP of optimized frame
//
// Parts of the code cannot GC, part of the code can GC.
static void GenerateDeoptimizationSequence(Assembler* assembler,
                                           DeoptStubKind kind) {
  // Leaf runtime function DeoptimizeCopyFrame expects a Dart frame.
  __ EnterDartFrame(0);
  // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
  // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
  const intptr_t saved_result_slot_from_fp =
      target::frame_layout.first_local_from_fp + 1 -
      (kNumberOfCpuRegisters - EAX);
  const intptr_t saved_exception_slot_from_fp =
      target::frame_layout.first_local_from_fp + 1 -
      (kNumberOfCpuRegisters - EAX);
  const intptr_t saved_stacktrace_slot_from_fp =
      target::frame_layout.first_local_from_fp + 1 -
      (kNumberOfCpuRegisters - EDX);
  // Result in EAX is preserved as part of pushing all registers below.

  // Push registers in their enumeration order: lowest register number at
  // lowest address.
  for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
    if (i == CODE_REG) {
      // Save the original value of CODE_REG pushed before invoking this stub
      // instead of the value used to call this stub.
      __ pushl(Address(EBP, 2 * target::kWordSize));
    } else {
      __ pushl(static_cast<Register>(i));
    }
  }
  __ subl(ESP, Immediate(kNumberOfXmmRegisters * kFpuRegisterSize));
  intptr_t offset = 0;
  for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
    XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
    __ movups(Address(ESP, offset), xmm_reg);
    offset += kFpuRegisterSize;
  }

  {
    __ movl(ECX, ESP);  // Preserve saved registers block.
    LeafRuntimeScope rt(assembler,
                        /*frame_size=*/2 * target::kWordSize,
                        /*preserve_registers=*/false);
    bool is_lazy =
        (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
    __ movl(Address(ESP, 0 * target::kWordSize),
            ECX);  // Start of register block.
    __ movl(Address(ESP, 1 * target::kWordSize), Immediate(is_lazy ? 1 : 0));
    rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
    // Result (EAX) is stack-size (FP - SP) in bytes.
  }

  if (kind == kLazyDeoptFromReturn) {
    // Restore result into EBX temporarily.
    __ movl(EBX, Address(EBP, saved_result_slot_from_fp * target::kWordSize));
  } else if (kind == kLazyDeoptFromThrow) {
    // Restore result into EBX temporarily.
    __ movl(EBX,
            Address(EBP, saved_exception_slot_from_fp * target::kWordSize));
    __ movl(ECX,
            Address(EBP, saved_stacktrace_slot_from_fp * target::kWordSize));
  }

  __ LeaveDartFrame();
  __ popl(EDX);       // Preserve return address.
  __ movl(ESP, EBP);  // Discard optimized frame.
  __ subl(ESP, EAX);  // Reserve space for deoptimized frame.
  __ pushl(EDX);      // Restore return address.

  // Leaf runtime function DeoptimizeFillFrame expects a Dart frame.
  __ EnterDartFrame(0);
  if (kind == kLazyDeoptFromReturn) {
    __ pushl(EBX);  // Preserve result as first local.
  } else if (kind == kLazyDeoptFromThrow) {
    __ pushl(EBX);  // Preserve exception as first local.
    __ pushl(ECX);  // Preserve stacktrace as first local.
  }
  {
    LeafRuntimeScope rt(assembler,
                        /*frame_size=*/1 * target::kWordSize,
                        /*preserve_registers=*/false);
    __ movl(Address(ESP, 0), EBP);  // Pass last FP as parameter on stack.
    rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
  }
  if (kind == kLazyDeoptFromReturn) {
    // Restore result into EBX.
    __ movl(EBX, Address(EBP, target::frame_layout.first_local_from_fp *
                                  target::kWordSize));
  } else if (kind == kLazyDeoptFromThrow) {
    // Restore result into EBX.
    __ movl(EBX, Address(EBP, target::frame_layout.first_local_from_fp *
                                  target::kWordSize));
    __ movl(ECX, Address(EBP, (target::frame_layout.first_local_from_fp - 1) *
                                  target::kWordSize));
  }
  // Code above cannot cause GC.
  __ LeaveDartFrame();

  // Frame is fully rewritten at this point and it is safe to perform a GC.
  // Materialize any objects that were deferred by FillFrame because they
  // require allocation.
  __ EnterStubFrame();
  if (kind == kLazyDeoptFromReturn) {
    __ pushl(EBX);  // Preserve result, it will be GC-d here.
  } else if (kind == kLazyDeoptFromThrow) {
    // Preserve CODE_REG for one more runtime call.
    __ pushl(CODE_REG);
    __ pushl(EBX);  // Preserve exception, it will be GC-d here.
    __ pushl(ECX);  // Preserve stacktrace, it will be GC-d here.
  }
  __ pushl(Immediate(target::ToRawSmi(0)));  // Space for the result.
  __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
  // Result tells stub how many bytes to remove from the expression stack
  // of the bottom-most frame. They were used as materialization arguments.
  __ popl(EBX);
  __ SmiUntag(EBX);
  if (kind == kLazyDeoptFromReturn) {
    __ popl(EAX);  // Restore result.
  } else if (kind == kLazyDeoptFromThrow) {
    __ popl(EDX);  // Restore stacktrace.
    __ popl(EAX);  // Restore exception.
    __ popl(CODE_REG);
  }
  __ LeaveStubFrame();

  __ popl(ECX);       // Pop return address.
  __ addl(ESP, EBX);  // Remove materialization arguments.
  __ pushl(ECX);      // Push return address.
  // The caller is responsible for emitting the return instruction.

  if (kind == kLazyDeoptFromThrow) {
    // Unoptimized frame is now ready to accept the exception. Rethrow it to
    // find the right handler. Ask the rethrow machinery to bypass the debugger,
    // which was already notified about this exception.
    __ EnterStubFrame();
    __ pushl(Immediate(target::ToRawSmi(0)));  // Space for the result.
    __ pushl(EAX);                             // Exception.
    __ pushl(EDX);                             // Stacktrace.
    __ pushl(Immediate(target::ToRawSmi(1)));  // Bypass debugger.
    __ CallRuntime(kReThrowRuntimeEntry, 3);
    __ LeaveStubFrame();
  }
}

// EAX: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
  // Return address for "call" to deopt stub.
  __ pushl(Immediate(kZapReturnAddress));
  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
  __ ret();
}

// EAX: exception, must be preserved
// EDX: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
  // Return address for "call" to deopt stub.
  __ pushl(Immediate(kZapReturnAddress));
  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
  __ ret();
}

void StubCodeCompiler::GenerateDeoptimizeStub() {
  GenerateDeoptimizationSequence(assembler, kEagerDeopt);
  __ ret();
}

static void GenerateNoSuchMethodDispatcherCode(Assembler* assembler) {
  __ EnterStubFrame();
  __ movl(EDX, FieldAddress(
                   ECX, target::CallSiteData::arguments_descriptor_offset()));

  // Load the receiver.
  __ movl(EDI, FieldAddress(EDX, target::ArgumentsDescriptor::size_offset()));
  __ movl(EAX,
          Address(EBP, EDI, TIMES_HALF_WORD_SIZE,
                  target::frame_layout.param_end_from_fp * target::kWordSize));
  __ pushl(Immediate(0));  // Setup space on stack for result.
  __ pushl(EAX);           // Receiver.
  __ pushl(ECX);           // ICData/MegamorphicCache.
  __ pushl(EDX);           // Arguments descriptor array.

  // Adjust arguments count.
  __ cmpl(
      FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
      Immediate(0));
  __ movl(EDX, EDI);
  Label args_count_ok;
  __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
  __ addl(EDX, Immediate(target::ToRawSmi(1)));  // Include the type arguments.
  __ Bind(&args_count_ok);

  // EDX: Smi-tagged arguments array length.
  PushArrayOfArguments(assembler);
  const intptr_t kNumArgs = 4;
  __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
  __ Drop(4);
  __ popl(EAX);  // Return value.
  __ LeaveFrame();
  __ ret();
}

static void GenerateDispatcherCode(Assembler* assembler,
                                   Label* call_target_function) {
  __ Comment("NoSuchMethodDispatch");
  // When lazily generated invocation dispatchers are disabled, the
  // miss-handler may return null.
  const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
  __ cmpl(EAX, raw_null);
  __ j(NOT_EQUAL, call_target_function);
  GenerateNoSuchMethodDispatcherCode(assembler);
}

void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
  GenerateNoSuchMethodDispatcherCode(assembler);
}

// Called for inline allocation of arrays.
// Input registers (preserved):
//   AllocateArrayABI::kLengthReg: array length as Smi.
//   AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
// Output registers:
//   AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
//   EBX, EDI
void StubCodeCompiler::GenerateAllocateArrayStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    Label slow_case;
    // Compute the size to be allocated, it is based on the array length
    // and is computed as:
    // RoundedAllocationSize(
    //     (array_length * kWordSize) + target::Array::header_size()).
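    // Worked example (a sketch, assuming a 12-byte array header and 8-byte
    // object alignment on ia32): a 3-element array needs 12 + 3 * 4 = 24
    // bytes, which is already 8-byte aligned, so 24 bytes are allocated.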
    // Assert that length is a Smi.
    __ testl(AllocateArrayABI::kLengthReg, Immediate(kSmiTagMask));
    __ j(NOT_ZERO, &slow_case);

    // Check for maximum allowed length.
    const Immediate& max_len =
        Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
    __ cmpl(AllocateArrayABI::kLengthReg, max_len);
    __ j(ABOVE, &slow_case);

    NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, &slow_case,
                                           AllocateArrayABI::kResultReg));

    const intptr_t fixed_size_plus_alignment_padding =
        target::Array::header_size() +
        target::ObjectAlignment::kObjectAlignment - 1;
    // AllocateArrayABI::kLengthReg is Smi.
    __ leal(EBX, Address(AllocateArrayABI::kLengthReg, TIMES_2,
                         fixed_size_plus_alignment_padding));
    ASSERT(kSmiTagShift == 1);
    __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));

    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: array length as Smi.
    // EBX: allocation size.

    const intptr_t cid = kArrayCid;
    __ movl(AllocateArrayABI::kResultReg,
            Address(THR, target::Thread::top_offset()));
    __ addl(EBX, AllocateArrayABI::kResultReg);
    __ j(CARRY, &slow_case);

    // Check if the allocation fits into the remaining space.
    // AllocateArrayABI::kResultReg: potential new object start.
    // EBX: potential next object start.
    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: array length as Smi.
    __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
    __ j(ABOVE_EQUAL, &slow_case);
    __ CheckAllocationCanary(AllocateArrayABI::kResultReg);

    // Successfully allocated the object(s), now update top to point to
    // next object start and initialize the object.
    __ movl(Address(THR, target::Thread::top_offset()), EBX);
    __ addl(AllocateArrayABI::kResultReg, Immediate(kHeapObjectTag));

    // Initialize the tags.
    // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
    // EBX: allocation size.
    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: array length as Smi.
    {
      Label size_tag_overflow, done;
      __ movl(EDI, EBX);
      __ cmpl(EDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
      __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
      __ shll(EDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
                             target::ObjectAlignment::kObjectAlignmentLog2));
      __ jmp(&done, Assembler::kNearJump);

      __ Bind(&size_tag_overflow);
      __ movl(EDI, Immediate(0));
      __ Bind(&done);

      // Get the class index and insert it into the tags.
      uword tags = target::MakeTagWordForNewSpaceObject(cid, 0);
      __ orl(EDI, Immediate(tags));
      __ movl(FieldAddress(AllocateArrayABI::kResultReg,
                           target::Object::tags_offset()),
              EDI);  // Tags.
    }
    // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
    // EBX: allocation size.
    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: Array length as Smi (preserved).
    // Store the type argument field.
    // No generational barrier needed, since we store into a new object.
    __ StoreIntoObjectNoBarrier(
        AllocateArrayABI::kResultReg,
        FieldAddress(AllocateArrayABI::kResultReg,
                     target::Array::type_arguments_offset()),
        AllocateArrayABI::kTypeArgumentsReg);

    // Set the length field.
    __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
                                FieldAddress(AllocateArrayABI::kResultReg,
                                             target::Array::length_offset()),
                                AllocateArrayABI::kLengthReg);

    // Initialize all array elements to raw_null.
    // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
    // EBX: allocation size.
    // EDI: iterator which initially points to the start of the variable
    //      data area to be initialized.
    // AllocateArrayABI::kTypeArgumentsReg: array type arguments.
    // AllocateArrayABI::kLengthReg: array length as Smi.
    __ leal(EBX, FieldAddress(AllocateArrayABI::kResultReg, EBX, TIMES_1, 0));
    __ leal(EDI, FieldAddress(AllocateArrayABI::kResultReg,
                              target::Array::header_size()));
    Label loop;
    __ Bind(&loop);
    for (intptr_t offset = 0; offset < target::kObjectAlignment;
         offset += target::kWordSize) {
      // No generational barrier needed, since we are storing null.
      __ StoreObjectIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
                                        Address(EDI, offset), NullObject());
    }
    // Safe to only check every kObjectAlignment bytes instead of each word.
    ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
    __ addl(EDI, Immediate(target::kObjectAlignment));
    __ cmpl(EDI, EBX);
    __ j(UNSIGNED_LESS, &loop);
    __ WriteAllocationCanary(EBX);  // Fix overshoot.
    __ ret();

    // Unable to allocate the array using the fast inline code, just call
    // into the runtime.
    __ Bind(&slow_case);
  }
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ pushl(AllocateArrayABI::kLengthReg);         // Array length as Smi.
  __ pushl(AllocateArrayABI::kTypeArgumentsReg);  // Type arguments.
  __ CallRuntime(kAllocateArrayRuntimeEntry, 2);

  // Write-barrier elimination might be enabled for this array (depending on
  // the array length). To be sure we will check if the allocated object is in
  // old space and if so call a leaf runtime to add it to the remembered set.
  __ movl(AllocateArrayABI::kResultReg, Address(ESP, 2 * target::kWordSize));
  EnsureIsNewOrRemembered();

  __ popl(AllocateArrayABI::kTypeArgumentsReg);  // Pop type arguments.
  __ popl(AllocateArrayABI::kLengthReg);  // Pop array length argument.
  __ popl(AllocateArrayABI::kResultReg);  // Pop return value from return slot.
  __ LeaveFrame();
  __ ret();
}

// Called when invoking dart code from C++ (VM code).
// Input parameters:
//   ESP : points to return address.
//   ESP + 4 : code object of the dart function to call.
//   ESP + 8 : arguments descriptor array.
//   ESP + 12 : arguments array.
//   ESP + 16 : current thread.
// Uses EAX, EDX, ECX, EDI as temporary registers.
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
  const intptr_t kTargetCodeOffset = 2 * target::kWordSize;
  const intptr_t kArgumentsDescOffset = 3 * target::kWordSize;
  const intptr_t kArgumentsOffset = 4 * target::kWordSize;
  const intptr_t kThreadOffset = 5 * target::kWordSize;
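  // After EnterFrame(0) below, slot 0 at EBP holds the saved EBP and slot 1
  // the return address, so the first C++ argument (the code object) lives at
  // EBP + 2 words; that is why the offsets above start at 2 * kWordSize.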
  __ EnterFrame(0);

  // Push code object to PC marker slot.
  __ movl(EAX, Address(EBP, kThreadOffset));
  __ pushl(Address(EAX, target::Thread::invoke_dart_code_stub_offset()));

  // Save C++ ABI callee-saved registers.
  __ pushl(EBX);
  __ pushl(ESI);
  __ pushl(EDI);

  // Set up THR, which caches the current thread in Dart code.
  __ movl(THR, EAX);

#if defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Save the current VMTag on the stack.
  __ movl(ECX, Assembler::VMTagAddress());
  __ pushl(ECX);

  // Save top resource and top exit frame info. Use EDX as a temporary register.
  // StackFrameIterator reads the top exit frame info saved in this frame.
  __ movl(EDX, Address(THR, target::Thread::top_resource_offset()));
  __ pushl(EDX);
  __ movl(Address(THR, target::Thread::top_resource_offset()), Immediate(0));
  __ movl(EAX, Address(THR, target::Thread::exit_through_ffi_offset()));
  __ pushl(EAX);
  __ movl(Address(THR, target::Thread::exit_through_ffi_offset()),
          Immediate(0));
  // The constant target::frame_layout.exit_link_slot_from_entry_fp must be
  // kept in sync with the code below.
  ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -8);
  __ movl(EDX, Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ pushl(EDX);
  __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
          Immediate(0));

  // In debug mode, verify that we've pushed the top exit frame info at the
  // correct offset from FP.
  __ EmitEntryFrameVerification();

  // Mark that the thread is executing Dart code. Do this after initializing the
  // exit link for the profiler.
  __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));

  // Load arguments descriptor array into EDX.
  __ movl(EDX, Address(EBP, kArgumentsDescOffset));

  // Load number of arguments into EBX and adjust count for type arguments.
  __ movl(EBX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
  __ cmpl(
      FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
      Immediate(0));
  Label args_count_ok;
  __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
  __ addl(EBX, Immediate(target::ToRawSmi(1)));  // Include the type arguments.
  __ Bind(&args_count_ok);
  // Save number of arguments as Smi on stack, replacing ArgumentsDesc.
  __ movl(Address(EBP, kArgumentsDescOffset), EBX);
  __ SmiUntag(EBX);

  // Set up arguments for the dart call.
  Label push_arguments;
  Label done_push_arguments;
  __ testl(EBX, EBX);  // Check if there are arguments.
  __ j(ZERO, &done_push_arguments, Assembler::kNearJump);
  __ movl(EAX, Immediate(0));

  // Compute address of 'arguments array' data area into EDI.
  __ movl(EDI, Address(EBP, kArgumentsOffset));
  __ leal(EDI, FieldAddress(EDI, target::Array::data_offset()));

  __ Bind(&push_arguments);
  __ movl(ECX, Address(EDI, EAX, TIMES_4, 0));
  __ pushl(ECX);
  __ incl(EAX);
  __ cmpl(EAX, EBX);
  __ j(LESS, &push_arguments, Assembler::kNearJump);
  __ Bind(&done_push_arguments);

  // Call the dart code entrypoint.
  __ movl(EAX, Address(EBP, kTargetCodeOffset));
  __ call(FieldAddress(EAX, target::Code::entry_point_offset()));

  // Read the saved number of passed arguments as Smi.
  __ movl(EDX, Address(EBP, kArgumentsDescOffset));
  // Get rid of arguments pushed on the stack.
  __ leal(ESP, Address(ESP, EDX, TIMES_2, 0));  // EDX is a Smi.
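  // (EDX holds the count as a Smi, i.e. count << 1, so scaling it by TIMES_2
  // adds count * 4 bytes: exactly the space the pushed arguments occupied.)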

  // Restore the saved top exit frame info and top resource back into the
  // Isolate structure.
  __ popl(Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ popl(Address(THR, target::Thread::exit_through_ffi_offset()));
  __ popl(Address(THR, target::Thread::top_resource_offset()));

  // Restore the current VMTag from the stack.
  __ popl(Assembler::VMTagAddress());

#if defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Restore C++ ABI callee-saved registers.
  __ popl(EDI);
  __ popl(ESI);
  __ popl(EBX);

  // Restore the frame pointer.
  __ LeaveFrame();

  __ ret();
}

// Helper to generate space allocation of context stub.
// This does not initialize the fields of the context.
// Input:
//   EDX: number of context variables.
// Output:
//   EAX: new allocated Context object.
// Clobbered:
//   EBX
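// (Size sketch, assuming a 12-byte Context header (tags, variable count,
// parent) and 8-byte object alignment on ia32: 3 variables give
// 12 + 3 * 4 = 24 bytes, already aligned; 4 variables give 28 -> 32 bytes.)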
static void GenerateAllocateContextSpaceStub(Assembler* assembler,
                                             Label* slow_case) {
  // First compute the rounded instance size.
  // EDX: number of context variables.
  intptr_t fixed_size_plus_alignment_padding =
      (target::Context::header_size() +
       target::ObjectAlignment::kObjectAlignment - 1);
  __ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
  __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));

  NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, EAX));

  // Now allocate the object.
  // EDX: number of context variables.
  __ movl(EAX, Address(THR, target::Thread::top_offset()));
  __ addl(EBX, EAX);
  // Check if the allocation fits into the remaining space.
  // EAX: potential new object.
  // EBX: potential next object start.
  // EDX: number of context variables.
  __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
#if defined(DEBUG)
  static auto const kJumpLength = Assembler::kFarJump;
#else
  static auto const kJumpLength = Assembler::kNearJump;
#endif  // DEBUG
  __ j(ABOVE_EQUAL, slow_case, kJumpLength);
  __ CheckAllocationCanary(EAX);

  // Successfully allocated the object, now update top to point to
  // next object start and initialize the object.
  // EAX: new object.
  // EBX: next object start.
  // EDX: number of context variables.
  __ movl(Address(THR, target::Thread::top_offset()), EBX);
  // EBX: Size of allocation in bytes.
  __ subl(EBX, EAX);
  __ addl(EAX, Immediate(kHeapObjectTag));
  // Generate isolate-independent code to allow sharing between isolates.

  // Calculate the size tag.
  // EAX: new object.
  // EDX: number of context variables.
  {
    Label size_tag_overflow, done;
    __ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
    __ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
    __ cmpl(EBX, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
    __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
    __ shll(EBX, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
                           target::ObjectAlignment::kObjectAlignmentLog2));
    __ jmp(&done);

    __ Bind(&size_tag_overflow);
    // Set overflow size tag value.
    __ movl(EBX, Immediate(0));

    __ Bind(&done);
    // EAX: new object.
    // EDX: number of context variables.
    // EBX: size and bit tags.
    uword tags = target::MakeTagWordForNewSpaceObject(kContextCid, 0);
    __ orl(EBX, Immediate(tags));
    __ movl(FieldAddress(EAX, target::Object::tags_offset()), EBX);  // Tags.
  }

  // Set up the number of context variables field.
  // EAX: new object.
  // EDX: number of context variables as integer value (not object).
  __ movl(FieldAddress(EAX, target::Context::num_variables_offset()), EDX);
}

// Called for inline allocation of contexts.
// Input:
//   EDX: number of context variables.
// Output:
//   EAX: new allocated Context object.
// Clobbered:
//   EBX, EDX
void StubCodeCompiler::GenerateAllocateContextStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    Label slow_case;

    GenerateAllocateContextSpaceStub(assembler, &slow_case);

    // Setup the parent field.
    // EAX: new object.
    // EDX: number of context variables.
    // No generational barrier needed, since we are storing null.
    __ StoreObjectIntoObjectNoBarrier(
        EAX, FieldAddress(EAX, target::Context::parent_offset()), NullObject());

    // Initialize the context variables.
    // EAX: new object.
    // EDX: number of context variables.
    {
      Label loop, entry;
      __ leal(EBX, FieldAddress(EAX, target::Context::variable_offset(0)));

      __ jmp(&entry, Assembler::kNearJump);
      __ Bind(&loop);
      __ decl(EDX);
      // No generational barrier needed, since we are storing null.
      __ StoreObjectIntoObjectNoBarrier(EAX, Address(EBX, EDX, TIMES_4, 0),
                                        NullObject());
      __ Bind(&entry);
      __ cmpl(EDX, Immediate(0));
      __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
    }

    // Done allocating and initializing the context.
    // EAX: new object.
    __ ret();

    __ Bind(&slow_case);
  }
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ SmiTag(EDX);
  __ pushl(EDX);
  __ CallRuntime(kAllocateContextRuntimeEntry, 1);  // Allocate context.
  __ popl(EAX);  // Pop number of context variables argument.
  __ popl(EAX);  // Pop the new context object.

  // Write-barrier elimination might be enabled for this context (depending on
  // the size). To be sure we will check if the allocated object is in old
  // space and if so call a leaf runtime to add it to the remembered set.
  EnsureIsNewOrRemembered();

  // EAX: new object
  // Restore the frame pointer.
  __ LeaveFrame();

  __ ret();
}

// Called for clone of contexts.
// Input:
//   ECX: context variable.
// Output:
//   EAX: new allocated Context object.
// Clobbered:
//   EBX, ECX, EDX
void StubCodeCompiler::GenerateCloneContextStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    Label slow_case;

    // Load the number of variables in the existing context.
    __ movl(EDX, FieldAddress(ECX, target::Context::num_variables_offset()));

    GenerateAllocateContextSpaceStub(assembler, &slow_case);

    // Setup the parent field.
    // EAX: new object.
    // ECX: old object to clone.
    __ movl(EBX, FieldAddress(ECX, target::Context::parent_offset()));
    __ StoreIntoObjectNoBarrier(
        EAX, FieldAddress(EAX, target::Context::parent_offset()), EBX);

    // Initialize the context variables.
    // EAX: new context.
    // ECX: context to clone.
    // EDX: number of context variables.
    {
      Label loop, entry;
      __ jmp(&entry, Assembler::kNearJump);

      __ Bind(&loop);
      __ decl(EDX);

      __ movl(EBX, FieldAddress(ECX, EDX, TIMES_4,
                                target::Context::variable_offset(0)));
      __ StoreIntoObjectNoBarrier(
          EAX,
          FieldAddress(EAX, EDX, TIMES_4, target::Context::variable_offset(0)),
          EBX);

      __ Bind(&entry);
      __ cmpl(EDX, Immediate(0));
      __ j(NOT_EQUAL, &loop, Assembler::kNearJump);
    }

    // Done allocating and initializing the context.
    // EAX: new object.
    __ ret();

    __ Bind(&slow_case);
  }

  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  __ pushl(Immediate(0));  // Setup space on stack for return value.
  __ pushl(ECX);
  __ CallRuntime(kCloneContextRuntimeEntry, 1);  // Allocate context.
  __ popl(EAX);  // Pop the context argument.
  __ popl(EAX);  // Pop the new context object.

  // Write-barrier elimination might be enabled for this context (depending on
  // the size). To be sure we will check if the allocated object is in old
  // space and if so call a leaf runtime to add it to the remembered set.
  EnsureIsNewOrRemembered();

  // EAX: new object
  // Restore the frame pointer.
  __ LeaveFrame();
  __ ret();
}
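
// One wrapper is emitted per Dart-available register: each saves
// kWriteBarrierObjectReg, moves its register into it, and calls the barrier
// through the Thread, so compiled code can invoke the write barrier without
// first shuffling the object into a fixed register.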
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
  for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
    if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;

    Register reg = static_cast<Register>(i);
    intptr_t start = __ CodeSize();
    __ pushl(kWriteBarrierObjectReg);
    __ movl(kWriteBarrierObjectReg, reg);
    __ call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
    __ popl(kWriteBarrierObjectReg);
    __ ret();
    intptr_t end = __ CodeSize();

    RELEASE_ASSERT(end - start == kStoreBufferWrapperSize);
  }
}

// Helper stub to implement Assembler::StoreIntoObject/Array.
// Input parameters:
//   EDX: Object (old)
//   EBX: Value (old or new)
//   EDI: Slot
// If EBX is new, add EDX to the store buffer. Otherwise EBX is old, mark EBX
// and add it to the mark list.
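// Fast-path filter used below: the object's tag word is shifted right by
// kBarrierOverlapShift and ANDed with the value's tag word, so the
// generational-bit test fires only when the object is old-and-not-remembered
// *and* the value is new; the incremental-marking bit is checked analogously
// against the thread's write barrier mask.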
static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
  // Save values being destroyed.
  __ pushl(EAX);
  __ pushl(ECX);

  Label skip_marking;
  __ movl(EAX, FieldAddress(EBX, target::Object::tags_offset()));
  __ andl(EAX, Address(THR, target::Thread::write_barrier_mask_offset()));
  __ testl(EAX, Immediate(target::UntaggedObject::kIncrementalBarrierMask));
  __ j(ZERO, &skip_marking);

  {
    // Atomically clear kNotMarkedBit.
    Label retry, done;
    __ movl(EAX, FieldAddress(EBX, target::Object::tags_offset()));
    __ Bind(&retry);
    __ movl(ECX, EAX);
    __ testl(ECX, Immediate(1 << target::UntaggedObject::kNotMarkedBit));
    __ j(ZERO, &done);  // Marked by another thread.
    __ andl(ECX, Immediate(~(1 << target::UntaggedObject::kNotMarkedBit)));
    // Cmpxchgl: compare value = implicit operand EAX, new value = ECX.
    // On failure, EAX is updated with the current value.
    __ LockCmpxchgl(FieldAddress(EBX, target::Object::tags_offset()), ECX);
    __ j(NOT_EQUAL, &retry, Assembler::kNearJump);

    __ movl(EAX, Address(THR, target::Thread::marking_stack_block_offset()));
    __ movl(ECX, Address(EAX, target::MarkingStackBlock::top_offset()));
    __ movl(Address(EAX, ECX, TIMES_4,
                    target::MarkingStackBlock::pointers_offset()),
            EBX);
    __ incl(ECX);
    __ movl(Address(EAX, target::MarkingStackBlock::top_offset()), ECX);
    __ cmpl(ECX, Immediate(target::MarkingStackBlock::kSize));
    __ j(NOT_EQUAL, &done);

    {
      LeafRuntimeScope rt(assembler,
                          /*frame_size=*/1 * target::kWordSize,
                          /*preserve_registers=*/true);
      __ movl(Address(ESP, 0), THR);  // Push the thread as the only argument.
      rt.Call(kMarkingStackBlockProcessRuntimeEntry, 1);
    }

    __ Bind(&done);
  }

  Label add_to_remembered_set, remember_card;
  __ Bind(&skip_marking);
  __ movl(EAX, FieldAddress(EDX, target::Object::tags_offset()));
  __ shrl(EAX, Immediate(target::UntaggedObject::kBarrierOverlapShift));
  __ andl(EAX, FieldAddress(EBX, target::Object::tags_offset()));
  __ testl(EAX, Immediate(target::UntaggedObject::kGenerationalBarrierMask));
  __ j(NOT_ZERO, &add_to_remembered_set, Assembler::kNearJump);
  __ popl(ECX);  // Unspill.
  __ popl(EAX);  // Unspill.
  __ ret();

  __ Bind(&add_to_remembered_set);
  if (cards) {
    __ testl(FieldAddress(EDX, target::Object::tags_offset()),
             Immediate(1 << target::UntaggedObject::kCardRememberedBit));
    __ j(NOT_ZERO, &remember_card, Assembler::kFarJump);  // Unlikely.
  } else {
#if defined(DEBUG)
    Label ok;
    __ testl(FieldAddress(EDX, target::Object::tags_offset()),
             Immediate(1 << target::UntaggedObject::kCardRememberedBit));
    __ j(ZERO, &ok, Assembler::kNearJump);
    __ Stop("Wrong barrier");
    __ Bind(&ok);
#endif
  }

  {
    // Atomically clear kOldAndNotRememberedBit.
    Label retry, done;
    __ movl(EAX, FieldAddress(EDX, target::Object::tags_offset()));
    __ Bind(&retry);
    __ movl(ECX, EAX);
    __ testl(ECX,
             Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
    __ j(ZERO, &done);  // Remembered by another thread.
    __ andl(ECX,
            Immediate(~(1 << target::UntaggedObject::kOldAndNotRememberedBit)));
    // Cmpxchgl: compare value = implicit operand EAX, new value = ECX.
    // On failure, EAX is updated with the current value.
    __ LockCmpxchgl(FieldAddress(EDX, target::Object::tags_offset()), ECX);
    __ j(NOT_EQUAL, &retry, Assembler::kNearJump);

    // Load the StoreBuffer block out of the thread. Then load top_ out of the
    // StoreBufferBlock and add the address to the pointers_.
    // Spilled: EAX, ECX
    // EDX: Address being stored
    __ movl(EAX, Address(THR, target::Thread::store_buffer_block_offset()));
    __ movl(ECX, Address(EAX, target::StoreBufferBlock::top_offset()));
    __ movl(
        Address(EAX, ECX, TIMES_4, target::StoreBufferBlock::pointers_offset()),
        EDX);

    // Increment top_ and check for overflow.
    // Spilled: EAX, ECX
    // ECX: top_
    // EAX: StoreBufferBlock
    __ incl(ECX);
    __ movl(Address(EAX, target::StoreBufferBlock::top_offset()), ECX);
    __ cmpl(ECX, Immediate(target::StoreBufferBlock::kSize));
    __ j(NOT_EQUAL, &done);

    {
      LeafRuntimeScope rt(assembler,
                          /*frame_size=*/1 * target::kWordSize,
                          /*preserve_registers=*/true);
      __ movl(Address(ESP, 0), THR);  // Push the thread as the only argument.
      rt.Call(kStoreBufferBlockProcessRuntimeEntry, 1);
    }

    __ Bind(&done);
    __ popl(ECX);
    __ popl(EAX);
    __ ret();
  }
  if (cards) {
    Label remember_card_slow;

    // Get card table.
    __ Bind(&remember_card);
    __ movl(EAX, EDX);                           // Object.
    __ andl(EAX, Immediate(target::kPageMask));  // Page.
    __ cmpl(Address(EAX, target::Page::card_table_offset()), Immediate(0));
    __ j(EQUAL, &remember_card_slow, Assembler::kNearJump);

    // Dirty the card. Not atomic: we assume mutable arrays are not shared
    // between threads.
    __ pushl(EBX);
    __ subl(EDI, EAX);  // Offset in page.
    __ movl(EAX,
            Address(EAX, target::Page::card_table_offset()));  // Card table.
    __ movl(ECX, EDI);
    __ shrl(EDI,
            Immediate(target::Page::kBytesPerCardLog2 +
                      target::kBitsPerWordLog2));  // Word offset.
    __ shrl(ECX, Immediate(target::Page::kBytesPerCardLog2));
    __ movl(EBX, Immediate(1));
    __ shll(EBX, ECX);  // Bit mask. (Shift amount is mod 32.)
    __ orl(Address(EAX, EDI, TIMES_4, 0), EBX);
    __ popl(EBX);
    __ popl(ECX);
    __ popl(EAX);
    __ ret();

    // Card table not yet allocated.
    __ Bind(&remember_card_slow);

    {
      LeafRuntimeScope rt(assembler,
                          /*frame_size=*/2 * target::kWordSize,
                          /*preserve_registers=*/true);
      __ movl(Address(ESP, 0 * target::kWordSize), EDX);  // Object
      __ movl(Address(ESP, 1 * target::kWordSize), EDI);  // Slot
      rt.Call(kRememberCardRuntimeEntry, 2);
    }
    __ popl(ECX);
    __ popl(EAX);
    __ ret();
  }
}

void StubCodeCompiler::GenerateWriteBarrierStub() {
  GenerateWriteBarrierStubHelper(assembler, false);
}

void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
  GenerateWriteBarrierStubHelper(assembler, true);
}

void StubCodeCompiler::GenerateAllocateObjectStub() {
  __ int3();
}

void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
  __ int3();
}

void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
  __ int3();
}
1608
1609// Called for inline allocation of objects.
1610// Input parameters:
1611// ESP : points to return address.
1612// AllocateObjectABI::kTypeArgumentsPos : type arguments object
1613// (only if class is parameterized).
1614// Uses AllocateObjectABI::kResultReg, EBX, ECX, EDI as temporary registers.
1615// Returns patch_code_pc offset where patching code for disabling the stub
1616// has been generated (similar to regularly generated Dart code).
1617void StubCodeCompiler::GenerateAllocationStubForClass(
1618 UnresolvedPcRelativeCalls* unresolved_calls,
1619 const Class& cls,
1620 const Code& allocate_object,
1621 const Code& allocat_object_parametrized) {
1622 const Immediate& raw_null = Immediate(target::ToRawPointer(NullObject()));
1623 // The generated code is different if the class is parameterized.
1624 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
1625 ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
1626 cls) != target::Class::kNoTypeArguments);
1627 // kInlineInstanceSize is a constant used as a threshold for determining
1628 // when the object initialization should be done as a loop or as
1629 // straight line code.
1630 const int kInlineInstanceSize = 12; // In words.
1631 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
1632 ASSERT(instance_size > 0);
1633
1634 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1635 // (if is_cls_parameterized).
1636 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
1637 target::Heap::IsAllocatableInNewSpace(instance_size) &&
1638 !target::Class::TraceAllocation(cls)) {
1639 Label slow_case;
1640 // Allocate the object and update top to point to
1641 // next object start and initialize the allocated object.
1642 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1643 // (if is_cls_parameterized).
1644 __ movl(AllocateObjectABI::kResultReg,
1645 Address(THR, target::Thread::top_offset()));
1646 __ leal(EBX, Address(AllocateObjectABI::kResultReg, instance_size));
1647 // Check if the allocation fits into the remaining space.
1648 // AllocateObjectABI::kResultReg: potential new object start.
1649 // EBX: potential next object start.
1650 __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
1651 __ j(ABOVE_EQUAL, &slow_case);
1652 __ CheckAllocationCanary(AllocateObjectABI::kResultReg);
1653 __ movl(Address(THR, target::Thread::top_offset()), EBX);
1654
1655 // AllocateObjectABI::kResultReg: new object start (untagged).
1656 // EBX: next object start.
1657 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1658 // (if is_cls_parameterized).
1659 // Set the tags.
1660 ASSERT(target::Class::GetId(cls) != kIllegalCid);
1661 uword tags = target::MakeTagWordForNewSpaceObject(target::Class::GetId(cls),
1662 instance_size);
1663 __ movl(
1664 Address(AllocateObjectABI::kResultReg, target::Object::tags_offset()),
1665 Immediate(tags));
1666 __ addl(AllocateObjectABI::kResultReg, Immediate(kHeapObjectTag));
1667
1668 // Initialize the remaining words of the object.
1669
1670 // AllocateObjectABI::kResultReg: new object (tagged).
1671 // EBX: next object start.
1672 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1673 // (if is_cls_parameterized).
1674 // First try inlining the initialization without a loop.
1675 if (instance_size < (kInlineInstanceSize * target::kWordSize)) {
1676 // Check if the object contains any non-header fields.
1677 // Small objects are initialized using a consecutive set of writes.
1678 for (intptr_t current_offset = target::Instance::first_field_offset();
1679 current_offset < instance_size;
1680 current_offset += target::kWordSize) {
1681 __ StoreObjectIntoObjectNoBarrier(
1682 AllocateObjectABI::kResultReg,
1683 FieldAddress(AllocateObjectABI::kResultReg, current_offset),
1684 NullObject());
1685 }
1686 } else {
1687 __ leal(ECX, FieldAddress(AllocateObjectABI::kResultReg,
1688 target::Instance::first_field_offset()));
1689 // Loop until the whole object is initialized.
1690 // AllocateObjectABI::kResultReg: new object (tagged).
1691 // EBX: next object start.
1692 // ECX: next word to be initialized.
1693 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments
1694 // (if is_cls_parameterized).
1695 Label loop;
1696 __ Bind(&loop);
1697 for (intptr_t offset = 0; offset < target::kObjectAlignment;
1698 offset += target::kWordSize) {
1699 __ StoreObjectIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
1700 Address(ECX, offset), NullObject());
1701 }
1702 // Safe to only check every kObjectAlignment bytes instead of each word.
1703 ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
1704 __ addl(ECX, Immediate(target::kObjectAlignment));
1705 __ cmpl(ECX, EBX);
1706 __ j(UNSIGNED_LESS, &loop);
1707 __ WriteAllocationCanary(EBX); // Fix overshoot.
1708 }
1709 if (is_cls_parameterized) {
1710 // AllocateObjectABI::kResultReg: new object (tagged).
1711 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments.
1712 // Set the type arguments in the new object.
1713 const intptr_t offset = target::Class::TypeArgumentsFieldOffset(cls);
1714 __ StoreIntoObjectNoBarrier(
1715 AllocateObjectABI::kResultReg,
1716 FieldAddress(AllocateObjectABI::kResultReg, offset),
1717 AllocateObjectABI::kTypeArgumentsReg);
1718 }
1719 // Done allocating and initializing the instance.
1720 // AllocateObjectABI::kResultReg: new object (tagged).
1721 __ ret();
1722
1723 __ Bind(&slow_case);
1724 }
1725 // If is_cls_parameterized:
1726 // AllocateObjectABI::kTypeArgumentsReg: new object type arguments.
1727 // Create a stub frame as we are pushing some objects on the stack before
1728 // calling into the runtime.
1729 __ EnterStubFrame();
1730 __ pushl(raw_null); // Setup space on stack for return value.
1731 __ PushObject(
1732 CastHandle<Object>(cls)); // Push class of object to be allocated.
1733 if (is_cls_parameterized) {
1734 // Push type arguments of object to be allocated.
1735 __ pushl(AllocateObjectABI::kTypeArgumentsReg);
1736 } else {
1737 __ pushl(raw_null); // Push null type arguments.
1738 }
1739 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
1740 __ popl(AllocateObjectABI::kResultReg); // Drop type arguments.
1741 __ popl(AllocateObjectABI::kResultReg); // Drop class.
1742 __ popl(AllocateObjectABI::kResultReg); // Pop allocated object.
1743
1744 if (AllocateObjectInstr::WillAllocateNewOrRemembered(cls)) {
1745 // Write-barrier elimination is enabled for [cls] and we therefore need to
1746 // ensure that the object is in new-space or has remembered bit set.
1747 EnsureIsNewOrRemembered();
1748 }
1749
1750 // AllocateObjectABI::kResultReg: new object
1751 // Restore the frame pointer.
1752 __ LeaveFrame();
1753 __ ret();
1754}
1755
1756// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
1757 // from the entry code of a Dart function after an error in passed argument
1758// name or number is detected.
1759// Input parameters:
1760// ESP : points to return address.
1761// ESP + 4 : address of last argument.
1762// EDX : arguments descriptor array.
1763// Uses EAX, EBX, EDI as temporary registers.
1764void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
1765 __ EnterStubFrame();
1766
1767 // Load the receiver.
1768 __ movl(EDI, FieldAddress(EDX, target::ArgumentsDescriptor::size_offset()));
1769 __ movl(EAX,
1770 Address(EBP, EDI, TIMES_2,
1771 target::frame_layout.param_end_from_fp * target::kWordSize));
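// EDI holds the Smi-tagged argument count, so the TIMES_2 scale yields
// count * kWordSize on IA32 (the Smi tag already contributes a factor of
// two), addressing the receiver as the last argument of the caller frame.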
1772
1773 // Load the function.
1774 __ movl(EBX, FieldAddress(EAX, target::Closure::function_offset()));
1775
1776 __ pushl(Immediate(0)); // Setup space on stack for result from noSuchMethod.
1777 __ pushl(EAX); // Receiver.
1778 __ pushl(EBX); // Function.
1779 __ pushl(EDX); // Arguments descriptor array.
1780
1781 // Adjust arguments count.
1782 __ cmpl(
1783 FieldAddress(EDX, target::ArgumentsDescriptor::type_args_len_offset()),
1784 Immediate(0));
1785 __ movl(EDX, EDI);
1786 Label args_count_ok;
1787 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
1788 __ addl(EDX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
1789 __ Bind(&args_count_ok);
1790
1791 // EDX: Smi-tagged arguments array length.
1792 PushArrayOfArguments(assembler);
1793
1794 const intptr_t kNumArgs = 4;
1795 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
1796 // noSuchMethod on closures always throws an error, so it will never return.
1797 __ int3();
1798}
1799
1800// Cannot use function object from ICData as it may be the inlined
1801// function and not the top-scope function.
1802void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
1803 Register ic_reg = ECX;
1804 Register func_reg = EAX;
1805 if (FLAG_trace_optimized_ic_calls) {
1806 __ EnterStubFrame();
1807 __ pushl(func_reg); // Preserve.
1808 __ pushl(ic_reg); // Preserve.
1809 __ pushl(ic_reg); // Argument.
1810 __ pushl(func_reg); // Argument.
1811 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
1812 __ popl(EAX); // Discard argument.
1813 __ popl(EAX); // Discard argument.
1814 __ popl(ic_reg); // Restore.
1815 __ popl(func_reg); // Restore.
1816 __ LeaveFrame();
1817 }
1818 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
1819}
1820
1821// Loads function into 'temp_reg'.
1822void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
1823 if (FLAG_optimization_counter_threshold >= 0) {
1824 Register func_reg = temp_reg;
1825 ASSERT(func_reg != IC_DATA_REG);
1826 __ Comment("Increment function counter");
1827 __ movl(func_reg,
1828 FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
1829 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
1830 }
1831}
1832
1833// Note: ECX must be preserved.
1834// Attempt a quick Smi operation for known operations ('kind'). The ICData
1835// must have been primed with a Smi/Smi check that will be used for counting
1836// the invocations.
1837static void EmitFastSmiOp(Assembler* assembler,
1838 Token::Kind kind,
1839 intptr_t num_args,
1840 Label* not_smi_or_overflow) {
1841 __ Comment("Fast Smi op");
1842 ASSERT(num_args == 2);
1843 __ movl(EAX, Address(ESP, +2 * target::kWordSize)); // Left
1844 __ movl(EDI, Address(ESP, +1 * target::kWordSize)); // Right
1845 __ movl(EBX, EDI);
1846 __ orl(EBX, EAX);
1847 __ testl(EBX, Immediate(kSmiTagMask));
1848 __ j(NOT_ZERO, not_smi_or_overflow, Assembler::kNearJump);
1849 switch (kind) {
1850 case Token::kADD: {
1851 __ addl(EAX, EDI);
1852 __ j(OVERFLOW, not_smi_or_overflow, Assembler::kNearJump);
1853 break;
1854 }
1855 case Token::kLT: {
1856 Label done, is_true;
1857 __ cmpl(EAX, EDI);
1858 __ setcc(GREATER_EQUAL, AL);
1859 __ movzxb(EAX, AL); // EAX := EAX < EDI ? 0 : 1
1860 __ movl(EAX,
1861 Address(THR, EAX, TIMES_4, target::Thread::bool_true_offset()));
1862 ASSERT(target::Thread::bool_true_offset() + 4 ==
1863 target::Thread::bool_false_offset());
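// The setcc/movzxb pair materializes the (inverted) condition as 0 or 1,
// which indexes the adjacent bool_true/bool_false entries in the Thread:
// index 0 selects true, index 1 selects false, avoiding a branch.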
1864 break;
1865 }
1866 case Token::kEQ: {
1867 Label done, is_true;
1868 __ cmpl(EAX, EDI);
1869 __ setcc(NOT_EQUAL, AL);
1870 __ movzxb(EAX, AL); // EAX := EAX == EDI ? 0 : 1
1871 __ movl(EAX,
1872 Address(THR, EAX, TIMES_4, target::Thread::bool_true_offset()));
1873 ASSERT(target::Thread::bool_true_offset() + 4 ==
1874 target::Thread::bool_false_offset());
1875 break;
1876 }
1877 default:
1878 UNIMPLEMENTED();
1879 }
1880
1881 // ECX: IC data object.
1882 __ movl(EBX, FieldAddress(ECX, target::ICData::entries_offset()));
1883 // EBX: ic_data_array with check entries: classes and target functions.
1884 __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
1885#if defined(DEBUG)
1886 // Check that first entry is for Smi/Smi.
1887 Label error, ok;
1888 const Immediate& imm_smi_cid = Immediate(target::ToRawSmi(kSmiCid));
1889 __ cmpl(Address(EBX, 0 * target::kWordSize), imm_smi_cid);
1890 __ j(NOT_EQUAL, &error, Assembler::kNearJump);
1891 __ cmpl(Address(EBX, 1 * target::kWordSize), imm_smi_cid);
1892 __ j(EQUAL, &ok, Assembler::kNearJump);
1893 __ Bind(&error);
1894 __ Stop("Incorrect IC data");
1895 __ Bind(&ok);
1896#endif
1897 if (FLAG_optimization_counter_threshold >= 0) {
1898 const intptr_t count_offset =
1899 target::ICData::CountIndexFor(num_args) * target::kWordSize;
1900 // Update counter, ignore overflow.
1901 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
1902 }
1903 __ ret();
1904}
1905
1906// Generate inline cache check for 'num_args'.
1907// EBX: receiver (if instance call)
1908// ECX: ICData
1909// ESP[0]: return address
1910// Control flow:
1911// - If receiver is null -> jump to IC miss.
1912// - If receiver is Smi -> load Smi class.
1913// - If receiver is not-Smi -> load receiver's class.
1914// - Check if 'num_args' (including receiver) match any IC data group.
1915// - Match found -> jump to target.
1916// - Match not found -> jump to IC miss.
1917void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
1918 intptr_t num_args,
1919 const RuntimeEntry& handle_ic_miss,
1920 Token::Kind kind,
1921 Optimized optimized,
1922 CallType type,
1923 Exactness exactness) {
1924 GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
1925 optimized, type, exactness,
1926 CodeEntryKind::kNormal);
1927 __ BindUncheckedEntryPoint();
1928 GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
1929 optimized, type, exactness,
1930 CodeEntryKind::kUnchecked);
1931}
1932
1933void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
1934 intptr_t num_args,
1935 const RuntimeEntry& handle_ic_miss,
1936 Token::Kind kind,
1937 Optimized optimized,
1938 CallType type,
1939 Exactness exactness,
1940 CodeEntryKind entry_kind) {
1941 if (optimized == kOptimized) {
1942 GenerateOptimizedUsageCounterIncrement();
1943 } else {
1944 GenerateUsageCounterIncrement(/* scratch */ EAX);
1945 }
1946
1947 ASSERT(num_args == 1 || num_args == 2);
1948#if defined(DEBUG)
1949 {
1950 Label ok;
1951 // Check that the IC data array has NumArgsTested() == num_args.
1952 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
1953 __ movl(EAX, FieldAddress(ECX, target::ICData::state_bits_offset()));
1954 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
1955 __ andl(EAX, Immediate(target::ICData::NumArgsTestedMask()));
1956 __ cmpl(EAX, Immediate(num_args));
1957 __ j(EQUAL, &ok, Assembler::kNearJump);
1958 __ Stop("Incorrect stub for IC data");
1959 __ Bind(&ok);
1960 }
1961#endif // DEBUG
1962
1963#if !defined(PRODUCT)
1964 Label stepping, done_stepping;
1965 if (optimized == kUnoptimized) {
1966 __ Comment("Check single stepping");
1967 __ LoadIsolate(EAX);
1968 __ cmpb(Address(EAX, target::Isolate::single_step_offset()), Immediate(0));
1969 __ j(NOT_EQUAL, &stepping);
1970 __ Bind(&done_stepping);
1971 }
1972#endif
1973 Label not_smi_or_overflow;
1974 if (kind != Token::kILLEGAL) {
1975 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
1976 }
1977 __ Bind(&not_smi_or_overflow);
1978
1979 __ Comment("Extract ICData initial values and receiver cid");
1980 // ECX: IC data object (preserved).
1981 // Load arguments descriptor into EDX.
1982 __ movl(
1983 ARGS_DESC_REG,
1984 FieldAddress(ECX, target::CallSiteData::arguments_descriptor_offset()));
1985 // Loop that checks if there is an IC data match.
1986 Label loop, found, miss;
1987 // ECX: IC data object (preserved).
1988 __ movl(EBX, FieldAddress(ECX, target::ICData::entries_offset()));
1989 // EBX: ic_data_array with check entries: classes and target functions.
1990 __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
1991 // EBX: points directly to the first ic data array element.
1992
1993 // Load the argument count (a Smi) from the arguments descriptor into EAX.
1994 // In the 1-argument case this is the last time we need it, and EAX is
1995 // reused for the class IDs from the IC descriptor. In the 2-argument case
1996 // the count is kept live in EAX.
1997 __ movl(EAX, FieldAddress(ARGS_DESC_REG,
1998 target::ArgumentsDescriptor::count_offset()));
1999 if (num_args == 1) {
2000 // Load receiver into EDI.
2001 __ movl(EDI,
2002 Address(ESP, EAX, TIMES_2, 0)); // EAX (argument count) is Smi.
2003 __ LoadTaggedClassIdMayBeSmi(EAX, EDI);
2004 // EAX: receiver class ID as Smi.
2005 }
2006
2007 __ Comment("ICData loop");
2008
2009 // Unroll more for the generic stub (kind == kILLEGAL); it is generated only once.
2010 bool optimize = kind == Token::kILLEGAL;
2011 const intptr_t target_offset =
2012 target::ICData::TargetIndexFor(num_args) * target::kWordSize;
2013 const intptr_t count_offset =
2014 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2015 const intptr_t exactness_offset =
2016 target::ICData::ExactnessIndexFor(num_args) * target::kWordSize;
2017 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
2018 num_args, exactness == kCheckExactness) *
2019 target::kWordSize;
2020
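// Illustrative layout of one check group for num_args == 1 without an
// exactness entry: [receiver cid (Smi), target (Function), count (Smi)],
// with the array terminated by a kIllegalCid sentinel; target_offset and
// count_offset above index into such a group.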
2021 __ Bind(&loop);
2022 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
2023 Label update;
2024 if (num_args == 1) {
2025 __ movl(EDI, Address(EBX, 0));
2026 __ cmpl(EDI, EAX); // Class id match?
2027 __ j(EQUAL, &found); // Break.
2028 __ addl(EBX, Immediate(entry_size)); // Next entry.
2029 __ cmpl(EDI, Immediate(target::ToRawSmi(kIllegalCid))); // Done?
2030 } else {
2031 ASSERT(num_args == 2);
2032 // Load receiver into EDI.
2033 __ movl(EDI, Address(ESP, EAX, TIMES_2, 0));
2034 __ LoadTaggedClassIdMayBeSmi(EDI, EDI);
2035 __ cmpl(EDI, Address(EBX, 0)); // Class id match?
2036 __ j(NOT_EQUAL, &update); // Continue.
2037
2038 // Load second argument into EDI.
2039 __ movl(EDI, Address(ESP, EAX, TIMES_2, -target::kWordSize));
2040 __ LoadTaggedClassIdMayBeSmi(EDI, EDI);
2041 __ cmpl(EDI, Address(EBX, target::kWordSize)); // Class id match?
2042 __ j(EQUAL, &found); // Break.
2043
2044 __ Bind(&update);
2045 __ addl(EBX, Immediate(entry_size)); // Next entry.
2046 __ cmpl(Address(EBX, -entry_size),
2047 Immediate(target::ToRawSmi(kIllegalCid))); // Done?
2048 }
2049
2050 if (unroll == 0) {
2051 __ j(NOT_EQUAL, &loop);
2052 } else {
2053 __ j(EQUAL, &miss);
2054 }
2055 }
2056
2057 __ Bind(&miss);
2058 __ Comment("IC miss");
2059 // Compute address of arguments (first read number of arguments from
2060 // arguments descriptor array and then compute address on the stack).
2061 __ movl(EAX, FieldAddress(ARGS_DESC_REG,
2062 target::ArgumentsDescriptor::count_offset()));
2063 __ leal(EAX, Address(ESP, EAX, TIMES_2, 0)); // EAX is Smi.
2064 // Create a stub frame as we are pushing some objects on the stack before
2065 // calling into the runtime.
2066 __ EnterStubFrame();
2067 __ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
2068 __ pushl(ECX); // Preserve IC data object.
2069 __ pushl(Immediate(0)); // Result slot.
2070 // Push call arguments.
2071 for (intptr_t i = 0; i < num_args; i++) {
2072 __ movl(EBX, Address(EAX, -target::kWordSize * i));
2073 __ pushl(EBX);
2074 }
2075 __ pushl(ECX); // Pass IC data object.
2076 __ CallRuntime(handle_ic_miss, num_args + 1);
2077 // Remove the call arguments pushed earlier, including the IC data object.
2078 for (intptr_t i = 0; i < num_args + 1; i++) {
2079 __ popl(EAX);
2080 }
2081 __ popl(FUNCTION_REG); // Pop returned function object into EAX.
2082 __ popl(ECX); // Restore IC data array.
2083 __ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
2084 __ LeaveFrame();
2085 Label call_target_function;
2086 if (!FLAG_lazy_dispatchers) {
2087 GenerateDispatcherCode(assembler, &call_target_function);
2088 } else {
2089 __ jmp(&call_target_function);
2090 }
2091
2092 __ Bind(&found);
2093 // EBX: Pointer to an IC data check group.
2094 Label call_target_function_through_unchecked_entry;
2095 if (exactness == kCheckExactness) {
2096 Label exactness_ok;
2097 ASSERT(num_args == 1);
2098 __ movl(EDI, Address(EBX, exactness_offset));
2099 __ cmpl(EDI, Immediate(target::ToRawSmi(
2100 StaticTypeExactnessState::HasExactSuperType().Encode())));
2101 __ j(LESS, &exactness_ok);
2102 __ j(EQUAL, &call_target_function_through_unchecked_entry);
2103
2104 // Check trivial exactness.
2105 // Note: UntaggedICData::receivers_static_type_ is guaranteed to be not null
2106 // because we only emit calls to this stub when it is not null.
2107 __ movl(EAX, FieldAddress(ARGS_DESC_REG,
2108 target::ArgumentsDescriptor::count_offset()));
2109 __ movl(EAX, Address(ESP, EAX, TIMES_2, 0)); // Receiver
2110 // EDI contains an offset to type arguments in words as a smi,
2111 // hence TIMES_2. EAX is guaranteed to be non-smi because it is expected
2112 // to have type arguments.
2113 __ movl(EDI,
2114 FieldAddress(EAX, EDI, TIMES_2, 0)); // Receiver's type arguments
2115 __ movl(EAX,
2116 FieldAddress(ECX, target::ICData::receivers_static_type_offset()));
2117 __ cmpl(EDI, FieldAddress(EAX, target::Type::arguments_offset()));
2118 __ j(EQUAL, &call_target_function_through_unchecked_entry);
2119
2120 // Update exactness state (not-exact anymore).
2121 __ movl(Address(EBX, exactness_offset),
2122 Immediate(target::ToRawSmi(
2123 StaticTypeExactnessState::NotExact().Encode())));
2124 __ Bind(&exactness_ok);
2125 }
2126
2127 if (FLAG_optimization_counter_threshold >= 0) {
2128 __ Comment("Update caller's counter");
2129 // Ignore overflow.
2130 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
2131 }
2132
2133 __ movl(FUNCTION_REG, Address(EBX, target_offset));
2134 __ Bind(&call_target_function);
2135 __ Comment("Call target");
2136 // EAX: Target function.
2137 __ jmp(FieldAddress(FUNCTION_REG,
2138 target::Function::entry_point_offset(entry_kind)));
2139
2140 if (exactness == kCheckExactness) {
2141 __ Bind(&call_target_function_through_unchecked_entry);
2142 if (FLAG_optimization_counter_threshold >= 0) {
2143 __ Comment("Update ICData counter");
2144 // Ignore overflow.
2145 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
2146 }
2147 __ Comment("Call target (via unchecked entry point)");
2148 __ LoadCompressed(FUNCTION_REG, Address(EBX, target_offset));
2149 __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset(
2150 CodeEntryKind::kUnchecked)));
2151 }
2152
2153#if !defined(PRODUCT)
2154 if (optimized == kUnoptimized) {
2155 __ Bind(&stepping);
2156 __ EnterStubFrame();
2157 __ pushl(EBX); // Preserve receiver.
2158 __ pushl(ECX); // Preserve ICData.
2159 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2160 __ popl(ECX); // Restore ICData.
2161 __ popl(EBX); // Restore receiver.
2162 __ LeaveFrame();
2163 __ jmp(&done_stepping);
2164 }
2165#endif
2166}
2167
2168// EBX: receiver
2169// ECX: ICData
2170// ESP[0]: return address
2171void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
2172 GenerateNArgsCheckInlineCacheStub(
2173 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2174 kUnoptimized, kInstanceCall, kIgnoreExactness);
2175}
2176
2177// EBX: receiver
2178// ECX: ICData
2179// ESP[0]: return address
2180void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
2181 GenerateNArgsCheckInlineCacheStub(
2182 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2183 kUnoptimized, kInstanceCall, kCheckExactness);
2184}
2185
2186void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
2187 __ Stop("Unimplemented");
2188}
2189
2190void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
2191 __ Stop("Unimplemented");
2192}
2193
2194// EBX: receiver
2195// ECX: ICData
2196// ESP[0]: return address
2197void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
2198 GenerateNArgsCheckInlineCacheStub(
2199 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2200 kUnoptimized, kInstanceCall, kIgnoreExactness);
2201}
2202
2203// EBX: receiver
2204// ECX: ICData
2205// ESP[0]: return address
2206void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
2207 GenerateNArgsCheckInlineCacheStub(
2208 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
2209 kInstanceCall, kIgnoreExactness);
2210}
2211
2212// EBX: receiver
2213// ECX: ICData
2214// ESP[0]: return address
2215void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
2216 GenerateNArgsCheckInlineCacheStub(
2217 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
2218 kInstanceCall, kIgnoreExactness);
2219}
2220
2221// EBX: receiver
2222// ECX: ICData
2223// ESP[0]: return address
2224void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
2225 GenerateNArgsCheckInlineCacheStub(
2226 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
2227 kInstanceCall, kIgnoreExactness);
2228}
2229
2230// EBX: receiver
2231// ECX: ICData
2232// EAX: Function
2233// ESP[0]: return address
2234void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
2235 GenerateNArgsCheckInlineCacheStub(
2236 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2237 kInstanceCall, kIgnoreExactness);
2238}
2239
2240// EBX: receiver
2241// ECX: ICData
2242// EAX: Function
2243// ESP[0]: return address
2244void StubCodeCompiler::
2245 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
2246 GenerateNArgsCheckInlineCacheStub(
2247 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2248 kInstanceCall, kCheckExactness);
2249}
2250
2251// EBX: receiver
2252// ECX: ICData
2253// EAX: Function
2254// ESP[0]: return address
2255void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
2256 GenerateNArgsCheckInlineCacheStub(
2257 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2258 kOptimized, kInstanceCall, kIgnoreExactness);
2259}
2260
2261// ECX: ICData
2262// ESP[0]: return address
2263static void GenerateZeroArgsUnoptimizedStaticCallForEntryKind(
2264 StubCodeCompiler* stub_code_compiler,
2265 CodeEntryKind entry_kind) {
2266 stub_code_compiler->GenerateUsageCounterIncrement(/* scratch */ EAX);
2267 auto* const assembler = stub_code_compiler->assembler;
2268
2269#if defined(DEBUG)
2270 {
2271 Label ok;
2272 // Check that the IC data array has NumArgsTested() == num_args.
2273 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2274 __ movl(EBX, FieldAddress(ECX, target::ICData::state_bits_offset()));
2275 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2276 __ andl(EBX, Immediate(target::ICData::NumArgsTestedMask()));
2277 __ cmpl(EBX, Immediate(0));
2278 __ j(EQUAL, &ok, Assembler::kNearJump);
2279 __ Stop("Incorrect IC data for unoptimized static call");
2280 __ Bind(&ok);
2281 }
2282#endif // DEBUG
2283
2284#if !defined(PRODUCT)
2285 // Check single stepping.
2286 Label stepping, done_stepping;
2287 __ LoadIsolate(EAX);
2288 __ cmpb(Address(EAX, target::Isolate::single_step_offset()), Immediate(0));
2289 __ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
2290 __ Bind(&done_stepping);
2291#endif
2292
2293 // ECX: IC data object (preserved).
2294 __ movl(EBX, FieldAddress(ECX, target::ICData::entries_offset()));
2295 // EBX: ic_data_array with entries: target functions and count.
2296 __ leal(EBX, FieldAddress(EBX, target::Array::data_offset()));
2297 // EBX: points directly to the first ic data array element.
2298 const intptr_t target_offset =
2299 target::ICData::TargetIndexFor(0) * target::kWordSize;
2300 const intptr_t count_offset =
2301 target::ICData::CountIndexFor(0) * target::kWordSize;
2302
2303 if (FLAG_optimization_counter_threshold >= 0) {
2304 // Increment count for this call, ignore overflow.
2305 __ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
2306 }
2307
2308 // Load arguments descriptor into EDX.
2309 __ movl(
2310 ARGS_DESC_REG,
2311 FieldAddress(ECX, target::CallSiteData::arguments_descriptor_offset()));
2312
2313 // Get function and call it, if possible.
2314 __ movl(FUNCTION_REG, Address(EBX, target_offset));
2315 __ jmp(FieldAddress(FUNCTION_REG,
2316 target::Function::entry_point_offset(entry_kind)));
2317
2318#if !defined(PRODUCT)
2319 __ Bind(&stepping);
2320 __ EnterStubFrame();
2321 __ pushl(ECX);
2322 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2323 __ popl(ECX);
2324 __ LeaveFrame();
2325 __ jmp(&done_stepping, Assembler::kNearJump);
2326#endif
2327}
2328
2329void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
2330 GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
2331 CodeEntryKind::kNormal);
2332 __ BindUncheckedEntryPoint();
2333 GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
2334 CodeEntryKind::kUnchecked);
2335}
2336
2337// ECX: ICData
2338// ESP[0]: return address
2339void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
2340 GenerateNArgsCheckInlineCacheStub(
2341 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2342 kUnoptimized, kStaticCall, kIgnoreExactness);
2343}
2344
2345// ECX: ICData
2346// ESP[0]: return address
2347void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
2348 GenerateNArgsCheckInlineCacheStub(
2349 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2350 kUnoptimized, kStaticCall, kIgnoreExactness);
2351}
2352
2353// Stub for compiling a function and jumping to the compiled code.
2354// ARGS_DESC_REG: Arguments descriptor.
2355// FUNCTION_REG: Function.
2356void StubCodeCompiler::GenerateLazyCompileStub() {
2357 __ EnterStubFrame();
2358 __ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
2359 __ pushl(FUNCTION_REG); // Pass function.
2360 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
2361 __ popl(FUNCTION_REG); // Restore function.
2362 __ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
2363 __ LeaveFrame();
2364
2365 __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2366}
2367
2368// ECX: Contains an ICData.
2369void StubCodeCompiler::GenerateICCallBreakpointStub() {
2370#if defined(PRODUCT)
2371 __ Stop("No debugging in PRODUCT mode");
2372#else
2373 __ EnterStubFrame();
2374 __ pushl(EBX); // Preserve receiver.
2375 __ pushl(ECX); // Preserve ICData.
2376 __ pushl(Immediate(0)); // Room for result.
2377 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2378 __ popl(EAX); // Code of original stub.
2379 __ popl(ECX); // Restore ICData.
2380 __ popl(EBX); // Restore receiver.
2381 __ LeaveFrame();
2382 // Jump to original stub.
2383 __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
2384#endif // defined(PRODUCT)
2385}
2386
2387void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
2388#if defined(PRODUCT)
2389 __ Stop("No debugging in PRODUCT mode");
2390#else
2391 __ EnterStubFrame();
2392 __ pushl(ECX); // Preserve ICData.
2393 __ pushl(Immediate(0)); // Room for result.
2394 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2395 __ popl(EAX); // Code of original stub.
2396 __ popl(ECX); // Restore ICData.
2397 __ LeaveFrame();
2398 // Jump to original stub.
2399 __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
2400#endif // defined(PRODUCT)
2401}
2402
2403void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
2404#if defined(PRODUCT)
2405 __ Stop("No debugging in PRODUCT mode");
2406#else
2407 __ EnterStubFrame();
2408 // Room for result. Debugger stub returns address of the
2409 // unpatched runtime stub.
2410 __ pushl(Immediate(0)); // Room for result.
2411 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2412 __ popl(EAX); // Code of the original stub
2413 __ LeaveFrame();
2414 // Jump to original stub.
2415 __ jmp(FieldAddress(EAX, target::Code::entry_point_offset()));
2416#endif // defined(PRODUCT)
2417}
2418
2419// Called only from unoptimized code.
2420void StubCodeCompiler::GenerateDebugStepCheckStub() {
2421#if defined(PRODUCT)
2422 __ Stop("No debugging in PRODUCT mode");
2423#else
2424 // Check single stepping.
2425 Label stepping, done_stepping;
2426 __ LoadIsolate(EAX);
2427 __ movzxb(EAX, Address(EAX, target::Isolate::single_step_offset()));
2428 __ cmpl(EAX, Immediate(0));
2429 __ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
2430 __ Bind(&done_stepping);
2431 __ ret();
2432
2433 __ Bind(&stepping);
2434 __ EnterStubFrame();
2435 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2436 __ LeaveFrame();
2437 __ jmp(&done_stepping, Assembler::kNearJump);
2438#endif // defined(PRODUCT)
2439}
2440
2441// Constants used for generating subtype test cache lookup stubs.
2442 // We represent the depth of a value as a depth from the top of the stack at the
2443// start of the stub. That is, depths for input values are non-negative and
2444// depths for values pushed during the stub are negative.
2445
2446struct STCInternal : AllStatic {
2447 // Used to initialize depths for conditionally-pushed values.
2448 static constexpr intptr_t kNoDepth = kIntptrMin;
2449
2450 // These inputs are always on the stack when the SubtypeNTestCacheStub is
2451 // called. These absolute depths will be converted to relative depths within
2452 // the stub to compensate for additional pushed values.
2453 static constexpr intptr_t kFunctionTypeArgumentsDepth = 1;
2454 static constexpr intptr_t kInstantiatorTypeArgumentsDepth = 2;
2455 static constexpr intptr_t kDestinationTypeDepth = 3;
2456 static constexpr intptr_t kInstanceDepth = 4;
2457 static constexpr intptr_t kCacheDepth = 5;
2458
2459 // Non-stack values are stored in non-kInstanceReg registers from TypeTestABI.
2460 static constexpr Register kCacheArrayReg =
2461 TypeTestABI::kInstantiatorTypeArgumentsReg;
2462 static constexpr Register kScratchReg = TypeTestABI::kSubtypeTestCacheReg;
2463 static constexpr Register kInstanceCidOrSignatureReg =
2464 TypeTestABI::kFunctionTypeArgumentsReg;
2465 static constexpr Register kInstanceInstantiatorTypeArgumentsReg =
2466 TypeTestABI::kDstTypeReg;
2467};
2468
2469static void GenerateSubtypeTestCacheLoop(
2470 Assembler* assembler,
2471 int n,
2472 intptr_t original_tos_offset,
2473 intptr_t parent_function_type_args_depth,
2474 intptr_t delayed_type_args_depth,
2475 Label* found,
2476 Label* not_found,
2477 Label* next_iteration) {
2478 const auto& raw_null = Immediate(target::ToRawPointer(NullObject()));
2479
2480 // Compares a value at the given depth from the stack to the value in src.
2481 auto compare_to_stack = [&](Register src, intptr_t depth) {
2482 ASSERT(original_tos_offset + depth >= 0);
2483 __ CompareToStack(src, original_tos_offset + depth);
2484 };
2485
2486 __ LoadAcquireCompressedFromOffset(
2487 STCInternal::kScratchReg, STCInternal::kCacheArrayReg,
2488 target::kCompressedWordSize *
2489 target::SubtypeTestCache::kInstanceCidOrSignature);
2490 __ cmpl(STCInternal::kScratchReg, raw_null);
2491 __ j(EQUAL, not_found, Assembler::kNearJump);
2492 __ cmpl(STCInternal::kScratchReg, STCInternal::kInstanceCidOrSignatureReg);
2493 if (n == 1) {
2494 __ j(EQUAL, found, Assembler::kNearJump);
2495 return;
2496 }
2497 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2498 __ cmpl(STCInternal::kInstanceInstantiatorTypeArgumentsReg,
2499 Address(STCInternal::kCacheArrayReg,
2500 target::kWordSize *
2501 target::SubtypeTestCache::kInstanceTypeArguments));
2502 if (n == 2) {
2503 __ j(EQUAL, found, Assembler::kNearJump);
2504 return;
2505 }
2506 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2507 __ movl(STCInternal::kScratchReg,
2508 Address(STCInternal::kCacheArrayReg,
2509 target::kWordSize *
2510 target::SubtypeTestCache::kInstantiatorTypeArguments));
2511 compare_to_stack(STCInternal::kScratchReg,
2512 STCInternal::kInstantiatorTypeArgumentsDepth);
2513 if (n == 3) {
2514 __ j(EQUAL, found, Assembler::kNearJump);
2515 return;
2516 }
2517 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2518 __ movl(STCInternal::kScratchReg,
2519 Address(STCInternal::kCacheArrayReg,
2520 target::kWordSize *
2521 target::SubtypeTestCache::kFunctionTypeArguments));
2522 compare_to_stack(STCInternal::kScratchReg,
2523 STCInternal::kFunctionTypeArgumentsDepth);
2524 if (n == 4) {
2525 __ j(EQUAL, found, Assembler::kNearJump);
2526 return;
2527 }
2528 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2529 __ movl(
2530 STCInternal::kScratchReg,
2531 Address(
2532 STCInternal::kCacheArrayReg,
2533 target::kWordSize *
2534 target::SubtypeTestCache::kInstanceParentFunctionTypeArguments));
2535 compare_to_stack(STCInternal::kScratchReg, parent_function_type_args_depth);
2536 if (n == 5) {
2537 __ j(EQUAL, found, Assembler::kNearJump);
2538 return;
2539 }
2540 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2541 __ movl(
2542 STCInternal::kScratchReg,
2543 Address(
2544 STCInternal::kCacheArrayReg,
2545 target::kWordSize *
2546 target::SubtypeTestCache::kInstanceDelayedFunctionTypeArguments));
2547 compare_to_stack(STCInternal::kScratchReg, delayed_type_args_depth);
2548 if (n == 6) {
2549 __ j(EQUAL, found, Assembler::kNearJump);
2550 return;
2551 }
2552 __ j(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2553 __ movl(
2554 STCInternal::kScratchReg,
2555 Address(STCInternal::kCacheArrayReg,
2556 target::kWordSize * target::SubtypeTestCache::kDestinationType));
2557 compare_to_stack(STCInternal::kScratchReg,
2558 STCInternal::kDestinationTypeDepth);
2559 __ j(EQUAL, found, Assembler::kNearJump);
2560}
2561
2562// Used to check class and type arguments. Arguments passed on stack:
2563// TOS + 0: return address.
2564// TOS + 1: function type arguments (only used if n >= 4, can be raw_null).
2565// TOS + 2: instantiator type arguments (only used if n >= 3, can be raw_null).
2566// TOS + 3: destination_type (only used if n >= 7).
2567// TOS + 4: instance.
2568// TOS + 5: SubtypeTestCache.
2569//
2570// No registers are preserved by this stub.
2571//
2572 // Result in TypeTestABI::kSubtypeTestCacheResultReg: null -> not found,
2573 // otherwise the result (true or false).
2574void StubCodeCompiler::GenerateSubtypeNTestCacheStub(Assembler* assembler,
2575 int n) {
2576 ASSERT(n >= 1);
2577 ASSERT(n <= SubtypeTestCache::kMaxInputs);
2578 // If we need the parent function type arguments for a closure, we also need
2579 // the delayed type arguments, so this case will never happen.
2580 ASSERT(n != 5);
2581
2582 const auto& raw_null = Immediate(target::ToRawPointer(NullObject()));
2583
2584 __ LoadFromStack(TypeTestABI::kInstanceReg, STCInternal::kInstanceDepth);
2585
2586 // Loop initialization (moved up here to avoid having all dependent loads
2587 // after each other)
2588 __ LoadFromStack(STCInternal::kCacheArrayReg, STCInternal::kCacheDepth);
2589#if defined(DEBUG)
2590 // Verify the STC we received has exactly as many inputs as this stub expects.
2591 Label search_stc;
2592 __ LoadFromSlot(STCInternal::kScratchReg, STCInternal::kCacheArrayReg,
2593 Slot::SubtypeTestCache_num_inputs());
2594 __ CompareImmediate(STCInternal::kScratchReg, n);
2595 __ BranchIf(EQUAL, &search_stc, Assembler::kNearJump);
2596 __ Breakpoint();
2597 __ Bind(&search_stc);
2598#endif
2599 // We avoid a load-acquire barrier here by relying on the fact that all other
2600 // loads from the array are data-dependent loads.
2601 __ movl(STCInternal::kCacheArrayReg,
2602 FieldAddress(STCInternal::kCacheArrayReg,
2603 target::SubtypeTestCache::cache_offset()));
2604
2605 // There is a maximum size for linear caches that is smaller than the size
2606 // of any hash-based cache, so we check the size of the backing array to
2607 // determine if this is a linear or hash-based cache.
2608 __ LoadFromSlot(STCInternal::kScratchReg, STCInternal::kCacheArrayReg,
2609 Slot::Array_length());
2610 __ CompareImmediate(STCInternal::kScratchReg,
2611 target::ToRawSmi(SubtypeTestCache::kMaxLinearCacheSize));
2612 // For IA32, we never handle hash caches in the stub, as there's too much
2613 // register pressure.
2614 Label is_linear;
2615 __ BranchIf(LESS_EQUAL, &is_linear, Assembler::kNearJump);
2616 // Return null so that we'll continue to the runtime for hash-based caches.
2617 __ movl(TypeTestABI::kSubtypeTestCacheResultReg, raw_null);
2618 __ ret();
2619 __ Bind(&is_linear);
2620 __ AddImmediate(STCInternal::kCacheArrayReg,
2621 target::Array::data_offset() - kHeapObjectTag);
2622
2623 Label loop, not_closure;
2624 if (n >= 3) {
2625 __ LoadClassIdMayBeSmi(STCInternal::kInstanceCidOrSignatureReg,
2626 TypeTestABI::kInstanceReg);
2627 } else {
2628 __ LoadClassId(STCInternal::kInstanceCidOrSignatureReg,
2629 TypeTestABI::kInstanceReg);
2630 }
2631 __ cmpl(STCInternal::kInstanceCidOrSignatureReg, Immediate(kClosureCid));
2632 __ j(NOT_EQUAL, &not_closure, Assembler::kNearJump);
2633
2634 // Closure handling.
2635 {
2636 __ movl(STCInternal::kInstanceCidOrSignatureReg,
2637 FieldAddress(TypeTestABI::kInstanceReg,
2638 target::Closure::function_offset()));
2639 __ movl(STCInternal::kInstanceCidOrSignatureReg,
2640 FieldAddress(STCInternal::kInstanceCidOrSignatureReg,
2641 target::Function::signature_offset()));
2642 if (n >= 2) {
2643 __ movl(
2644 STCInternal::kInstanceInstantiatorTypeArgumentsReg,
2645 FieldAddress(TypeTestABI::kInstanceReg,
2646 target::Closure::instantiator_type_arguments_offset()));
2647 }
2648 if (n >= 5) {
2649 __ pushl(FieldAddress(TypeTestABI::kInstanceReg,
2650 target::Closure::function_type_arguments_offset()));
2651 }
2652 if (n >= 6) {
2653 __ pushl(FieldAddress(TypeTestABI::kInstanceReg,
2654 target::Closure::delayed_type_arguments_offset()));
2655 }
2656 __ jmp(&loop, Assembler::kNearJump);
2657 }
2658
2659 // Non-Closure handling.
2660 {
2661 __ Bind(&not_closure);
2662 if (n >= 2) {
2663 Label has_no_type_arguments;
2664 __ LoadClassById(STCInternal::kScratchReg,
2665 STCInternal::kInstanceCidOrSignatureReg);
2666 __ movl(STCInternal::kInstanceInstantiatorTypeArgumentsReg, raw_null);
2667 __ movl(
2668 STCInternal::kScratchReg,
2669 FieldAddress(STCInternal::kScratchReg,
2670 target::Class::
2671 host_type_arguments_field_offset_in_words_offset()));
2672 __ cmpl(STCInternal::kScratchReg,
2673 Immediate(target::Class::kNoTypeArguments));
2674 __ j(EQUAL, &has_no_type_arguments, Assembler::kNearJump);
2675 __ movl(STCInternal::kInstanceInstantiatorTypeArgumentsReg,
2676 FieldAddress(TypeTestABI::kInstanceReg, STCInternal::kScratchReg,
2677 TIMES_4, 0));
2678 __ Bind(&has_no_type_arguments);
2679 }
2680 __ SmiTag(STCInternal::kInstanceCidOrSignatureReg);
2681 if (n >= 5) {
2682 __ pushl(raw_null); // parent function.
2683 }
2684 if (n >= 6) {
2685 __ pushl(raw_null); // delayed.
2686 }
2687 }
2688
2689 // Offset of the original top of the stack from the current top of stack.
2690 intptr_t original_tos_offset = 0;
2691
2692 // Additional data conditionally stored on the stack use negative depths
2693 // that will be non-negative when adjusted for original_tos_offset. We
2694 // initialize conditionally pushed values to kNoDepth for extra checking.
2695 intptr_t kInstanceParentFunctionTypeArgumentsDepth = STCInternal::kNoDepth;
2696 intptr_t kInstanceDelayedFunctionTypeArgumentsDepth = STCInternal::kNoDepth;
2697
2698 // Now that instance handling is done, both the delayed and parent function
2699 // type arguments stack slots have been set, so any input uses must be
2700 // offset by the new values and the new values can now be accessed in
2701 // the following code without issue when n >= 6.
2702 if (n >= 5) {
2703 original_tos_offset++;
2704 kInstanceParentFunctionTypeArgumentsDepth = -original_tos_offset;
2705 }
2706 if (n >= 6) {
2707 original_tos_offset++;
2708 kInstanceDelayedFunctionTypeArgumentsDepth = -original_tos_offset;
2709 }
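// Worked example (illustrative): for n == 6 both values are pushed, so
// original_tos_offset == 2; an input such as kInstanceDepth (4) is found at
// depth 4 + 2 == 6 by compare_to_stack, while the pushed parent and delayed
// type arguments sit at adjusted depths 1 and 0 respectively.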
2710
2711 Label found, not_found, done, next_iteration;
2712
2713 // Loop header.
2714 __ Bind(&loop);
2715 GenerateSubtypeTestCacheLoop(assembler, n, original_tos_offset,
2716 kInstanceParentFunctionTypeArgumentsDepth,
2717 kInstanceDelayedFunctionTypeArgumentsDepth,
2718 &found, &not_found, &next_iteration);
2719 __ Bind(&next_iteration);
2720 __ addl(STCInternal::kCacheArrayReg,
2721 Immediate(target::kWordSize *
2722 target::SubtypeTestCache::kTestEntryLength));
2723 __ jmp(&loop, Assembler::kNearJump);
2724
2725 __ Bind(&found);
2726 if (n >= 5) {
2727 __ Drop(original_tos_offset);
2728 }
2729 __ movl(TypeTestABI::kSubtypeTestCacheResultReg,
2730 Address(STCInternal::kCacheArrayReg,
2731 target::kWordSize * target::SubtypeTestCache::kTestResult));
2732 __ ret();
2733
2734 __ Bind(&not_found);
2735 if (n >= 5) {
2736 __ Drop(original_tos_offset);
2737 }
2738 // In the not found case, even though the field that determines occupancy was
2739 // null, another thread might be updating the cache and in the middle of
2740 // filling in the entry. Thus, we load the null object explicitly instead of
2741 // just using the (possibly mid-update) test result field.
2742 __ movl(TypeTestABI::kSubtypeTestCacheResultReg, raw_null);
2743 __ ret();
2744}
2745
2746// Return the current stack pointer address, used to do stack alignment checks.
2747// TOS + 0: return address
2748// Result in EAX.
2749void StubCodeCompiler::GenerateGetCStackPointerStub() {
2750 __ leal(EAX, Address(ESP, target::kWordSize));
2751 __ ret();
2752}
2753
2754// Jump to a frame on the call stack.
2755// TOS + 0: return address
2756// TOS + 1: program_counter
2757// TOS + 2: stack_pointer
2758// TOS + 3: frame_pointer
2759// TOS + 4: thread
2760// No Result.
2761void StubCodeCompiler::GenerateJumpToFrameStub() {
2762 __ movl(THR, Address(ESP, 4 * target::kWordSize)); // Load target thread.
2763 __ movl(EBP,
2764 Address(ESP, 3 * target::kWordSize)); // Load target frame_pointer.
2765 __ movl(EBX,
2766 Address(ESP, 1 * target::kWordSize)); // Load target PC into EBX.
2767 __ movl(ESP,
2768 Address(ESP, 2 * target::kWordSize)); // Load target stack_pointer.
2769#if defined(USING_SHADOW_CALL_STACK)
2770#error Unimplemented
2771#endif
2772
2773 Label exit_through_non_ffi;
2774 // Check if we exited generated code through an FFI call. If so, do the
2775 // transition here: runtime calls normally transition back to generated code
2776 // via the destructor of TransitionGeneratedToVM/Native, which is part of the
2777 // runtime boilerplate (see DEFINE_RUNTIME_ENTRY_IMPL in runtime_entry.h).
2778 // FFI calls don't have that boilerplate or stack resource, so they have to
2779 // transition explicitly.
2780 __ cmpl(compiler::Address(
2781 THR, compiler::target::Thread::exit_through_ffi_offset()),
2782 compiler::Immediate(target::Thread::exit_through_ffi()));
2783 __ j(NOT_EQUAL, &exit_through_non_ffi, compiler::Assembler::kNearJump);
2784 __ TransitionNativeToGenerated(ECX, /*leave_safepoint=*/true,
2785 /*ignore_unwind_in_progress=*/true);
2786 __ Bind(&exit_through_non_ffi);
2787
2788 // Set tag.
2789 __ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
2790 // Clear top exit frame.
2791 __ movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
2792 Immediate(0));
2793 __ jmp(EBX); // Jump to the exception handler code.
2794}
2795
2796// Run an exception handler. Execution comes from JumpToFrame stub.
2797//
2798// The arguments are stored in the Thread object.
2799// No result.
2800void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
2801 ASSERT(kExceptionObjectReg == EAX);
2802 ASSERT(kStackTraceObjectReg == EDX);
2803 __ movl(EBX, Address(THR, target::Thread::resume_pc_offset()));
2804
2806 __ movl(ECX, Address(THR, target::Thread::OffsetFromThread(NullObject())));
2807
2808 // Load the exception from the current thread.
2809 Address exception_addr(THR, target::Thread::active_exception_offset());
2810 __ movl(kExceptionObjectReg, exception_addr);
2811 __ movl(exception_addr, ECX);
2812
2813 // Load the stacktrace from the current thread.
2814 Address stacktrace_addr(THR, target::Thread::active_stacktrace_offset());
2815 __ movl(kStackTraceObjectReg, stacktrace_addr);
2816 __ movl(stacktrace_addr, ECX);
2817
2818 __ jmp(EBX); // Jump to continuation point.
2819}
2820
2821// Deoptimize a frame on the call stack before rewinding.
2822// The arguments are stored in the Thread object.
2823// No result.
2824void StubCodeCompiler::GenerateDeoptForRewindStub() {
2825 // Push the deopt pc.
2826 __ pushl(Address(THR, target::Thread::resume_pc_offset()));
2827 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
2828
2829 // After we have deoptimized, jump to the correct frame.
2830 __ EnterStubFrame();
2831 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
2832 __ LeaveFrame();
2833 __ int3();
2834}
2835
2836// Calls to the runtime to optimize the given function.
2837// EBX: function to be reoptimized.
2838// ARGS_DESC_REG: argument descriptor (preserved).
2839void StubCodeCompiler::GenerateOptimizeFunctionStub() {
2840 __ movl(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
2841 __ EnterStubFrame();
2842 __ pushl(ARGS_DESC_REG);
2843 __ pushl(Immediate(0)); // Setup space on stack for return value.
2844 __ pushl(EBX);
2845 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
2846 __ popl(EAX); // Discard argument.
2847 __ popl(FUNCTION_REG); // Get Function object
2848 __ popl(ARGS_DESC_REG); // Restore argument descriptor.
2849 __ LeaveFrame();
2850 __ movl(CODE_REG,
2851 FieldAddress(FUNCTION_REG, target::Function::code_offset()));
2852 __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2853 __ int3();
2854}
2855
2856// Does identical check (object references are equal or not equal) with special
2857// checks for boxed numbers.
2858// Return ZF set.
2859// Note: A Mint cannot contain a value that would fit in Smi.
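// For example, identical() on two distinct Double boxes holding the same
// bits must report true, so doubles and mints are compared by value (two
// 32-bit words each on IA32) rather than by reference.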
2860static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
2861 const Register left,
2862 const Register right,
2863 const Register temp) {
2864 Label reference_compare, done, check_mint;
2865 // If any of the arguments is Smi do reference compare.
2866 __ testl(left, Immediate(kSmiTagMask));
2867 __ j(ZERO, &reference_compare, Assembler::kNearJump);
2868 __ testl(right, Immediate(kSmiTagMask));
2869 __ j(ZERO, &reference_compare, Assembler::kNearJump);
2870
2871 // Value compare for two doubles.
2872 __ CompareClassId(left, kDoubleCid, temp);
2873 __ j(NOT_EQUAL, &check_mint, Assembler::kNearJump);
2874 __ CompareClassId(right, kDoubleCid, temp);
2875 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2876
2877 // Double values bitwise compare.
2878 __ movl(temp, FieldAddress(left, target::Double::value_offset() +
2879 0 * target::kWordSize));
2880 __ cmpl(temp, FieldAddress(right, target::Double::value_offset() +
2881 0 * target::kWordSize));
2882 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2883 __ movl(temp, FieldAddress(left, target::Double::value_offset() +
2884 1 * target::kWordSize));
2885 __ cmpl(temp, FieldAddress(right, target::Double::value_offset() +
2886 1 * target::kWordSize));
2887 __ jmp(&done, Assembler::kNearJump);
2888
2889 __ Bind(&check_mint);
2890 __ CompareClassId(left, kMintCid, temp);
2891 __ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
2892 __ CompareClassId(right, kMintCid, temp);
2893 __ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
2894 __ movl(temp, FieldAddress(left, target::Mint::value_offset() +
2895 0 * target::kWordSize));
2896 __ cmpl(temp, FieldAddress(right, target::Mint::value_offset() +
2897 0 * target::kWordSize));
2898 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2899 __ movl(temp, FieldAddress(left, target::Mint::value_offset() +
2900 1 * target::kWordSize));
2901 __ cmpl(temp, FieldAddress(right, target::Mint::value_offset() +
2902 1 * target::kWordSize));
2903 __ jmp(&done, Assembler::kNearJump);
2904
2905 __ Bind(&reference_compare);
2906 __ cmpl(left, right);
2907 __ Bind(&done);
2908}
2909
2910// Called only from unoptimized code. All relevant registers have been saved.
2911// TOS + 0: return address
2912// TOS + 1: right argument.
2913// TOS + 2: left argument.
2914// Returns ZF set.
2915void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
2916#if !defined(PRODUCT)
2917 // Check single stepping.
2918 Label stepping, done_stepping;
2919 __ LoadIsolate(EAX);
2920 __ movzxb(EAX, Address(EAX, target::Isolate::single_step_offset()));
2921 __ cmpl(EAX, Immediate(0));
2922 __ j(NOT_EQUAL, &stepping);
2923 __ Bind(&done_stepping);
2924#endif
2925
2926 const Register left = EAX;
2927 const Register right = EDX;
2928 const Register temp = ECX;
2929 __ movl(left, Address(ESP, 2 * target::kWordSize));
2930 __ movl(right, Address(ESP, 1 * target::kWordSize));
2931 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
2932 __ ret();
2933
2934#if !defined(PRODUCT)
2935 __ Bind(&stepping);
2936 __ EnterStubFrame();
2937 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2938 __ LeaveFrame();
2939 __ jmp(&done_stepping);
2940#endif
2941}
2942
2943// Called from optimized code only.
2944// TOS + 0: return address
2945// TOS + 1: right argument.
2946// TOS + 2: left argument.
2947// Returns ZF set.
2948void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
2949 const Register left = EAX;
2950 const Register right = EDX;
2951 const Register temp = ECX;
2952 __ movl(left, Address(ESP, 2 * target::kWordSize));
2953 __ movl(right, Address(ESP, 1 * target::kWordSize));
2954 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
2955 __ ret();
2956}
2957
2958// Called from megamorphic calls.
2959// EBX: receiver (passed to target)
2960// IC_DATA_REG: target::MegamorphicCache (preserved)
2961// Passed to target:
2962// EBX: target entry point
2963// FUNCTION_REG: target function
2964// ARGS_DESC_REG: argument descriptor
2965void StubCodeCompiler::GenerateMegamorphicCallStub() {
2966 // Jump if receiver is a smi.
2967 Label smi_case;
2968 // Check if the receiver (in EBX) is a Smi.
2969 __ testl(EBX, Immediate(kSmiTagMask));
2970 // Jump out of line for smi case.
2971 __ j(ZERO, &smi_case, Assembler::kNearJump);
2972
2973 // Loads the cid of the instance.
2974 __ LoadClassId(EAX, EBX);
2975
2976 Label cid_loaded;
2977 __ Bind(&cid_loaded);
2978 __ pushl(EBX); // save receiver
2979 __ movl(EBX,
2980 FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
2981 __ movl(EDI, FieldAddress(IC_DATA_REG,
2982 target::MegamorphicCache::buckets_offset()));
2983 // EDI: cache buckets array.
2984 // EBX: mask as a smi.
2985
2986 // Tag cid as a smi.
2987 __ addl(EAX, EAX);
2988
2989 // Compute the table index.
2990 ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
2991 // Use leal and subl multiply with 7 == 8 - 1.
2992 __ leal(EDX, Address(EAX, TIMES_8, 0));
2993 __ subl(EDX, EAX);
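// The leal/subl pair computes EDX = EAX * 8 - EAX == EAX * 7 without a
// multiply instruction, spreading entries by kSpreadFactor; the andl with
// the Smi-tagged mask in the loop below wraps the probe index to the
// power-of-two table size.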
2994
2995 Label loop;
2996 __ Bind(&loop);
2997 __ andl(EDX, EBX);
2998
2999 const intptr_t base = target::Array::data_offset();
3000 Label probe_failed;
3001 // EDX is smi tagged, but table entries are two words, so TIMES_4.
3002 __ cmpl(EAX, FieldAddress(EDI, EDX, TIMES_4, base));
3003 __ j(NOT_EQUAL, &probe_failed, Assembler::kNearJump);
3004
3005 Label load_target;
3006 __ Bind(&load_target);
3007 // Call the target found in the cache. For a class id match, this is a
3008 // proper target for the given name and arguments descriptor. If the
3009 // illegal class id was found, the target is a cache miss handler that can
3010 // be invoked as a normal Dart function.
3011 __ movl(FUNCTION_REG,
3012 FieldAddress(EDI, EDX, TIMES_4, base + target::kWordSize));
3013 __ movl(ARGS_DESC_REG,
3014 FieldAddress(IC_DATA_REG,
3015 target::CallSiteData::arguments_descriptor_offset()));
3016 __ popl(EBX); // restore receiver
3017 __ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
3018
3019 __ Bind(&probe_failed);
3020 // Probe failed, check if it is a miss.
3021 __ cmpl(FieldAddress(EDI, EDX, TIMES_4, base),
3022 Immediate(target::ToRawSmi(kIllegalCid)));
3023 Label miss;
3024 __ j(ZERO, &miss, Assembler::kNearJump);
3025
3026 // Try next entry in the table.
3027 __ AddImmediate(EDX, Immediate(target::ToRawSmi(1)));
3028 __ jmp(&loop);
3029
3030 // Load cid for the Smi case.
3031 __ Bind(&smi_case);
3032 __ movl(EAX, Immediate(kSmiCid));
3033 __ jmp(&cid_loaded);
3034
3035 __ Bind(&miss);
3036 __ popl(EBX); // restore receiver
3037 GenerateSwitchableCallMissStub();
3038}
3039
3040void StubCodeCompiler::GenerateICCallThroughCodeStub() {
3041 __ int3(); // AOT only.
3042}
3043
3044void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
3045 __ int3(); // AOT only.
3046}
3047
3048// Called from switchable IC calls.
3049// EBX: receiver
3050void StubCodeCompiler::GenerateSwitchableCallMissStub() {
3051 __ movl(CODE_REG,
3052 Address(THR, target::Thread::switchable_call_miss_stub_offset()));
3053 __ EnterStubFrame();
3054 __ pushl(EBX); // Preserve receiver.
3055
3056 __ pushl(Immediate(0)); // Result slot.
3057 __ pushl(Immediate(0)); // Arg0: stub out.
3058 __ pushl(EBX); // Arg1: Receiver
3059 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3060 __ popl(ECX);
3061 __ popl(CODE_REG); // result = stub
3062 __ popl(ECX); // result = IC
3063
3064 __ popl(EBX); // Restore receiver.
3065 __ LeaveFrame();
3066
3067 __ movl(EAX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3068 CodeEntryKind::kNormal)));
3069 __ jmp(EAX);
3070}
3071
3072void StubCodeCompiler::GenerateSingleTargetCallStub() {
3073 __ int3(); // AOT only.
3074}
3075
3076static ScaleFactor GetScaleFactor(intptr_t size) {
3077 switch (size) {
3078 case 1:
3079 return TIMES_1;
3080 case 2:
3081 return TIMES_2;
3082 case 4:
3083 return TIMES_4;
3084 case 8:
3085 return TIMES_8;
3086 case 16:
3087 return TIMES_16;
3088 }
3089 UNREACHABLE();
3090 return static_cast<ScaleFactor>(0);
3091}
3092
3093void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
3094 const intptr_t element_size = TypedDataElementSizeInBytes(cid);
3095 const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
3096 ScaleFactor scale_factor = GetScaleFactor(element_size);
3097
3097
3098 COMPILE_ASSERT(AllocateTypedDataArrayABI::kLengthReg == EAX);
3099 COMPILE_ASSERT(AllocateTypedDataArrayABI::kResultReg == EAX);
3100
3101 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3102 // Save length argument for possible runtime call, as
3103 // EAX is clobbered.
3104 Label call_runtime;
3105 __ pushl(AllocateTypedDataArrayABI::kLengthReg);
3106
3107 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, ECX));
3108 __ movl(EDI, AllocateTypedDataArrayABI::kLengthReg);
3109 /* Check that length is a positive Smi. */
3110 /* EDI: requested array length argument. */
3111 __ testl(EDI, Immediate(kSmiTagMask));
3112 __ j(NOT_ZERO, &call_runtime);
3113 __ SmiUntag(EDI);
3114 /* Check for length >= 0 && length <= max_len. */
3115 /* EDI: untagged array length. */
3116 __ cmpl(EDI, Immediate(max_len));
3117 __ j(ABOVE, &call_runtime);
3118 /* Special case for scaling by 16. */
3119 if (scale_factor == TIMES_16) {
3120 /* double length of array. */
3121 __ addl(EDI, EDI);
3122 /* only scale by 8. */
3123 scale_factor = TIMES_8;
3124 }
3125
3126 const intptr_t fixed_size_plus_alignment_padding =
3127 target::TypedData::HeaderSize() +
3128 target::ObjectAlignment::kObjectAlignment - 1;
3129 __ leal(EDI, Address(EDI, scale_factor, fixed_size_plus_alignment_padding));
3130 __ andl(EDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
3131 __ movl(EAX, Address(THR, target::Thread::top_offset()));
3132 __ movl(EBX, EAX);
3133 /* EDI: allocation size. */
3134 __ addl(EBX, EDI);
3135 __ j(CARRY, &call_runtime);
3136
3137 /* Check if the allocation fits into the remaining space. */
3138 /* EAX: potential new object start. */
3139 /* EBX: potential next object start. */
3140 /* EDI: allocation size. */
3141 __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
3142 __ j(ABOVE_EQUAL, &call_runtime);
3143 __ CheckAllocationCanary(EAX);
3144
3145 /* Successfully allocated the object(s), now update top to point to */
3146 /* next object start and initialize the object. */
3147 __ movl(Address(THR, target::Thread::top_offset()), EBX);
3148 __ addl(EAX, Immediate(kHeapObjectTag));
3149
3150 /* Initialize the tags. */
3151 /* EAX: new object start as a tagged pointer. */
3152 /* EBX: new object end address. */
3153 /* EDI: allocation size. */
3154 {
3155 Label size_tag_overflow, done;
3156 __ cmpl(EDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
3157 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
3157 __ shll(EDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
3158 target::ObjectAlignment::kObjectAlignmentLog2));
3159 __ jmp(&done, Assembler::kNearJump);
3160
3161 __ Bind(&size_tag_overflow);
3162 __ movl(EDI, Immediate(0));
3163 __ Bind(&done);
3164 /* Get the class index and insert it into the tags. */
3165 uword tags =
3166 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
3167 __ orl(EDI, Immediate(tags));
3168 __ movl(FieldAddress(EAX, target::Object::tags_offset()),
3169 EDI); /* Tags. */
3170 }
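    /* A size tag of zero is the overflow marker: for allocations too */
    /* large for the inline size field, the heap recomputes the object */
    /* size from the class id and the length field instead. */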
3171
3172 /* Set the length field. */
3173 /* EAX: new object start as a tagged pointer. */
3174 /* EBX: new object end address. */
3175 __ popl(EDI); /* Array length. */
3176 __ StoreIntoObjectNoBarrier(
3177 EAX, FieldAddress(EAX, target::TypedDataBase::length_offset()), EDI);
3178
3179 /* Initialize all array elements to 0. */
3180 /* EAX: new object start as a tagged pointer. */
3181 /* EBX: new object end address. */
3182    /* EDI: iterator which initially points to the start of the variable */
3183    /*      data area to be initialized. */
3184    /* ECX: scratch register. */
3185 __ xorl(ECX, ECX); /* Zero. */
3186 __ leal(EDI, FieldAddress(EAX, target::TypedData::HeaderSize()));
3187 __ StoreInternalPointer(
3188 EAX, FieldAddress(EAX, target::PointerBase::data_offset()), EDI);
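    /* Internal typed data stores a pointer into its own payload here, so */
    /* that typed-data views and external typed data can reuse the same */
    /* data_offset slot for their backing store. */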
3189 Label loop;
3190 __ Bind(&loop);
3191 for (intptr_t offset = 0; offset < target::kObjectAlignment;
3192 offset += target::kWordSize) {
3193 __ movl(Address(EDI, offset), ECX);
3194 }
3195 // Safe to only check every kObjectAlignment bytes instead of each word.
3196 ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
3197 __ addl(EDI, Immediate(target::kObjectAlignment));
3198 __ cmpl(EDI, EBX);
3199 __ j(UNSIGNED_LESS, &loop);
3200 __ WriteAllocationCanary(EBX); // Fix overshoot.
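    /* A C sketch of the unrolled loop above (illustrative only): */
    /*   for (p = data; p < end; p += kObjectAlignment / kWordSize) */
    /*     for (i = 0; i < kObjectAlignment / kWordSize; ++i) p[i] = 0; */
    /* The last iteration may overshoot the object end by up to */
    /* kObjectAlignment - kWordSize bytes; this stays inside the red zone */
    /* asserted above, and WriteAllocationCanary repairs the canary. */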
3201
3202 __ ret();
3203
3204    __ Bind(&call_runtime);
3205    __ popl(AllocateTypedDataArrayABI::kLengthReg);  // Restore length.
3206  }
3207
3208 __ EnterStubFrame();
3209 __ PushObject(Object::null_object()); // Make room for the result.
3210  __ pushl(Immediate(target::ToRawSmi(cid)));
3211  __ pushl(AllocateTypedDataArrayABI::kLengthReg);  // Array length.
3212  __ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
3213  __ Drop(2);  // Drop arguments.
3214  __ popl(AllocateTypedDataArrayABI::kResultReg);
3215  __ LeaveStubFrame();
3216 __ ret();
3217}
3218
3219} // namespace compiler
3220
3221} // namespace dart
3222
3223#endif // defined(TARGET_ARCH_IA32)