Flutter Engine
assembler_x64.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // NOLINT
7#if defined(TARGET_ARCH_X64)
8
9#define SHOULD_NOT_INCLUDE_RUNTIME
10
11#include "vm/class_id.h"
14#include "vm/instructions.h"
15#include "vm/tags.h"
16
17namespace dart {
18
19DECLARE_FLAG(bool, check_code_pointer);
20DECLARE_FLAG(bool, precompiled_mode);
21
22namespace compiler {
23
24Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
25 intptr_t far_branch_level)
26 : AssemblerBase(object_pool_builder), constant_pool_allowed_(false) {
27 // Far branching mode is only needed and implemented for ARM.
28 ASSERT(far_branch_level == 0);
29
30 generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
31 call(Address(THR,
32 target::Thread::write_barrier_wrappers_thread_offset(reg)));
33 };
34 generate_invoke_array_write_barrier_ = [&]() {
35 call(
36 Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
37 };
38}
39
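// Near relative call: opcode 0xE8 followed by a signed 32-bit displacement,
// so the instruction is kSize == 5 bytes and EmitLabel fixes up the
// displacement relative to the end of the instruction.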
40void Assembler::call(Label* label) {
41 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
42 const int kSize = 5;
43 EmitUint8(0xE8);
44 EmitLabel(label, kSize);
45}
46
47void Assembler::LoadNativeEntry(
48 Register dst,
49 const ExternalLabel* label,
50 ObjectPoolBuilderEntry::Patchability patchable) {
51 const intptr_t index =
52 object_pool_builder().FindNativeFunction(label, patchable);
53 LoadWordFromPoolIndex(dst, index);
54}
55
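// The absolute target is materialized with the REX.W + 0xB8 (movabs) form so
// the full 64-bit address is always encoded (presumably so the call site has
// a fixed size), and the actual transfer is an indirect call through TMP.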
56void Assembler::call(const ExternalLabel* label) {
57 { // Encode movq(TMP, Immediate(label->address())), but always as imm64.
58 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
59 EmitRegisterREX(TMP, REX_W);
60 EmitUint8(0xB8 | (TMP & 7));
61 EmitInt64(label->address());
62 }
63 call(TMP);
64}
65
66void Assembler::CallCodeThroughPool(intptr_t target_code_pool_index,
67 CodeEntryKind entry_kind) {
68 // Avoid clobbering CODE_REG when invoking code in precompiled mode.
69 // We don't actually use CODE_REG in the callee, and the caller might
70 // be using CODE_REG for a live value (e.g. a value that is alive
71 // across invocation of a shared stub like the one we use for
72 // allocating Mint boxes).
73 const Register code_reg = FLAG_precompiled_mode ? TMP : CODE_REG;
74 LoadWordFromPoolIndex(code_reg, target_code_pool_index);
75 call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
76}
77
78void Assembler::CallPatchable(
79 const Code& target,
80 CodeEntryKind entry_kind,
81 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
82 ASSERT(constant_pool_allowed());
83 const intptr_t idx = object_pool_builder().AddObject(
84 ToObject(target), ObjectPoolBuilderEntry::kPatchable, snapshot_behavior);
85 CallCodeThroughPool(idx, entry_kind);
86}
87
88void Assembler::CallWithEquivalence(const Code& target,
89 const Object& equivalence,
90 CodeEntryKind entry_kind) {
91 ASSERT(constant_pool_allowed());
92 const intptr_t idx =
93 object_pool_builder().FindObject(ToObject(target), equivalence);
94 CallCodeThroughPool(idx, entry_kind);
95}
96
97void Assembler::Call(
98 const Code& target,
99 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
100 ASSERT(constant_pool_allowed());
101 const intptr_t idx = object_pool_builder().FindObject(
102 ToObject(target), ObjectPoolBuilderEntry::kNotPatchable,
103 snapshot_behavior);
104 CallCodeThroughPool(idx, CodeEntryKind::kNormal);
105}
106
107void Assembler::pushq(Register reg) {
108 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
109 EmitRegisterREX(reg, REX_NONE);
110 EmitUint8(0x50 | (reg & 7));
111}
112
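// Immediate pushes use the short 0x6A (sign-extended imm8) or 0x68 (imm32)
// encodings when the value fits; anything wider is staged through TMP, since
// x64 has no push with a 64-bit immediate.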
113void Assembler::pushq(const Immediate& imm) {
114 if (imm.is_int8()) {
115 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
116 EmitUint8(0x6A);
117 EmitUint8(imm.value() & 0xFF);
118 } else if (imm.is_int32()) {
119 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
120 EmitUint8(0x68);
121 EmitImmediate(imm);
122 } else {
123 movq(TMP, imm);
124 pushq(TMP);
125 }
126}
127
128void Assembler::popq(Register reg) {
129 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
130 EmitRegisterREX(reg, REX_NONE);
131 EmitUint8(0x58 | (reg & 7));
132}
133
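// setcc is encoded as 0x0F 0x90+cc with a ModRM byte selecting the byte
// register; a REX prefix with REX.B is emitted for byte registers numbered 8
// and above so the encoding can reach R8B-R15B.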
134void Assembler::setcc(Condition condition, ByteRegister dst) {
135 ASSERT(dst != kNoByteRegister);
136 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
137 if (dst >= 8) {
138 EmitUint8(REX_PREFIX | (((dst & 0x08) != 0) ? REX_B : REX_NONE));
139 }
140 EmitUint8(0x0F);
141 EmitUint8(0x90 + condition);
142 EmitUint8(0xC0 + (dst & 0x07));
143}
144
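// The fast path below relies on LOCK CMPXCHG: RAX holds the expected
// (unacquired) state and TMP the desired (acquired) state. On success the
// state word is swapped atomically; on failure RAX receives the current
// value. The copy of RAX saved in TMP is compared against the expected state
// afterwards to decide whether the slow-path stub is still needed.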
145void Assembler::EnterFullSafepoint() {
146 // We generate the same number of instructions whether or not the slow-path is
147 // forced, to simplify GenerateJitCallbackTrampolines.
148 // For TSAN, we always go to the runtime so TSAN is aware of the release
149 // semantics of entering the safepoint.
150 Label done, slow_path;
151 if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
152 jmp(&slow_path);
153 }
154
155 // Compare and swap the value at Thread::safepoint_state from
156 // unacquired to acquired. If the CAS fails, go to a slow-path stub.
157 pushq(RAX);
158 movq(RAX, Immediate(target::Thread::full_safepoint_state_unacquired()));
159 movq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
160 LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
161 movq(TMP, RAX);
162 popq(RAX);
163 cmpq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
164
165 if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
166 j(EQUAL, &done);
167 }
168
169 Bind(&slow_path);
170 movq(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
171 movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
172
173 // Use call instead of CallCFunction to avoid having to clean up shadow space
174 // afterwards. This is possible because the safepoint stub does not use the
175 // shadow space as scratch and has no arguments.
176 call(TMP);
177
178 Bind(&done);
179}
180
181void Assembler::TransitionGeneratedToNative(Register destination_address,
182 Register new_exit_frame,
183 Register new_exit_through_ffi,
184 bool enter_safepoint) {
185 // Save exit frame information to enable stack walking.
186 movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
187 new_exit_frame);
188
189 movq(compiler::Address(THR,
190 compiler::target::Thread::exit_through_ffi_offset()),
191 new_exit_through_ffi);
192
193 movq(Assembler::VMTagAddress(), destination_address);
194 movq(Address(THR, target::Thread::execution_state_offset()),
195 Immediate(target::Thread::native_execution_state()));
196
197 if (enter_safepoint) {
198 EnterFullSafepoint();
199 }
200}
201
202void Assembler::ExitFullSafepoint(bool ignore_unwind_in_progress) {
203 // We generate the same number of instructions whether or not the slow-path is
204 // forced, for consistency with EnterFullSafepoint.
205 // For TSAN, we always go to the runtime so TSAN is aware of the acquire
206 // semantics of leaving the safepoint.
207 Label done, slow_path;
208 if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
209 jmp(&slow_path);
210 }
211
212 // Compare and swap the value at Thread::safepoint_state from
213 // acquired to unacquired. If the CAS fails, fall through to the
214 // slow-path stub.
215
216 pushq(RAX);
217 movq(RAX, Immediate(target::Thread::full_safepoint_state_acquired()));
218 movq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
219 LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
220 movq(TMP, RAX);
221 popq(RAX);
222 cmpq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
223
224 if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
225 j(EQUAL, &done);
226 }
227
228 Bind(&slow_path);
229 if (ignore_unwind_in_progress) {
230 movq(TMP,
231 Address(THR,
232 target::Thread::
233 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
234 } else {
235 movq(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
236 }
237 movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
238
239 // Use call instead of CallCFunction to avoid having to clean up shadow space
240 // afterwards. This is possible because the safepoint stub does not use the
241 // shadow space as scratch and has no arguments.
242 call(TMP);
243
244 Bind(&done);
245}
246
247void Assembler::TransitionNativeToGenerated(bool leave_safepoint,
248 bool ignore_unwind_in_progress) {
249 if (leave_safepoint) {
250 ExitFullSafepoint(ignore_unwind_in_progress);
251 } else {
252 // The flag only makes sense if we are leaving the safepoint.
253 ASSERT(!ignore_unwind_in_progress);
254#if defined(DEBUG)
255 // Ensure we've already left the safepoint.
256 movq(TMP, Address(THR, target::Thread::safepoint_state_offset()));
257 andq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
258 Label ok;
259 j(ZERO, &ok);
260 Breakpoint();
261 Bind(&ok);
262#endif
263 }
264
265 movq(Assembler::VMTagAddress(), Immediate(target::Thread::vm_tag_dart_id()));
266 movq(Address(THR, target::Thread::execution_state_offset()),
267 Immediate(target::Thread::generated_execution_state()));
268
269 // Reset exit frame information in Isolate's mutator thread structure.
270 movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
271 Immediate(0));
272 movq(compiler::Address(THR,
273 compiler::target::Thread::exit_through_ffi_offset()),
274 compiler::Immediate(0));
275}
276
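// The EmitQ/EmitL/EmitW helpers below share one encoding pattern: an optional
// mandatory prefix (prefix1, e.g. 0x66/0xF2/0xF3), then the REX byte, then an
// optional escape byte (prefix2, usually 0x0F), then the opcode and ModRM
// operand. REX_W in EmitQ selects the 64-bit operand size.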
277void Assembler::EmitQ(int reg,
278 const Address& address,
279 int opcode,
280 int prefix2,
281 int prefix1) {
282 ASSERT(reg <= XMM15);
283 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
284 if (prefix1 >= 0) {
285 EmitUint8(prefix1);
286 }
287 EmitOperandREX(reg, address, REX_W);
288 if (prefix2 >= 0) {
289 EmitUint8(prefix2);
290 }
291 EmitUint8(opcode);
292 EmitOperand(reg & 7, address);
293}
294
295void Assembler::EmitL(int reg,
296 const Address& address,
297 int opcode,
298 int prefix2,
299 int prefix1) {
300 ASSERT(reg <= XMM15);
301 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
302 if (prefix1 >= 0) {
303 EmitUint8(prefix1);
304 }
305 EmitOperandREX(reg, address, REX_NONE);
306 if (prefix2 >= 0) {
307 EmitUint8(prefix2);
308 }
309 EmitUint8(opcode);
310 EmitOperand(reg & 7, address);
311}
312
313void Assembler::EmitW(Register reg,
314 const Address& address,
315 int opcode,
316 int prefix2,
317 int prefix1) {
318 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
319 if (prefix1 >= 0) {
320 EmitUint8(prefix1);
321 }
322 EmitOperandSizeOverride();
323 EmitOperandREX(reg, address, REX_NONE);
324 if (prefix2 >= 0) {
325 EmitUint8(prefix2);
326 }
327 EmitUint8(opcode);
328 EmitOperand(reg & 7, address);
329}
330
331void Assembler::EmitB(int reg, const Address& address, int opcode) {
332 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
333 EmitOperandREX(reg & ~0x10, address, reg >= 8 ? REX_PREFIX : REX_NONE);
334 EmitUint8(opcode);
335 EmitOperand(reg & 7, address);
336}
337
338void Assembler::movl(Register dst, const Immediate& imm) {
339 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
340 Operand operand(dst);
341 EmitOperandREX(0, operand, REX_NONE);
342 EmitUint8(0xC7);
343 EmitOperand(0, operand);
344 ASSERT(imm.is_int32());
345 EmitImmediate(imm);
346}
347
348void Assembler::movl(const Address& dst, const Immediate& imm) {
349 movl(TMP, imm);
350 movl(dst, TMP);
351}
352
353void Assembler::movb(const Address& dst, const Immediate& imm) {
354 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
355 EmitOperandREX(0, dst, REX_NONE);
356 EmitUint8(0xC6);
357 EmitOperand(0, dst);
358 ASSERT(imm.is_int8());
359 EmitUint8(imm.value() & 0xFF);
360}
361
362void Assembler::movw(Register dst, const Address& src) {
363 // This would leave 16 bits above the 2 byte value undefined.
364 // If we ever want to purposefully have those undefined, remove this.
365 // TODO(40210): Allow this.
366 FATAL("Use movzxw or movsxw instead.");
367}
368
369void Assembler::movw(const Address& dst, const Immediate& imm) {
370 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
371 EmitOperandSizeOverride();
372 EmitOperandREX(0, dst, REX_NONE);
373 EmitUint8(0xC7);
374 EmitOperand(0, dst);
375 EmitUint8(imm.value() & 0xFF);
376 EmitUint8((imm.value() >> 8) & 0xFF);
377}
378
379void Assembler::movq(Register dst, const Immediate& imm) {
380 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
381 if (imm.is_uint32()) {
382 // Pick the single-byte-opcode B8 encoding if possible. If dst < 8 then we
383 // also omit the REX byte.
384 EmitRegisterREX(dst, REX_NONE);
385 EmitUint8(0xB8 | (dst & 7));
386 EmitUInt32(imm.value());
387 } else if (imm.is_int32()) {
388 // Sign extended C7 Cx encoding if we have a negative input.
389 Operand operand(dst);
390 EmitOperandREX(0, operand, REX_W);
391 EmitUint8(0xC7);
392 EmitOperand(0, operand);
393 EmitImmediate(imm);
394 } else {
395 // Full 64 bit immediate encoding.
396 EmitRegisterREX(dst, REX_W);
397 EmitUint8(0xB8 | (dst & 7));
398 EmitImmediate(imm);
399 }
400}
401
402void Assembler::movq(const Address& dst, const Immediate& imm) {
403 if (imm.is_int32()) {
404 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
405 EmitOperandREX(0, dst, REX_W);
406 EmitUint8(0xC7);
407 EmitOperand(0, dst);
408 EmitImmediate(imm);
409 } else {
410 movq(TMP, imm);
411 movq(dst, TMP);
412 }
413}
414
415void Assembler::EmitSimple(int opcode, int opcode2, int opcode3) {
416 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
417 EmitUint8(opcode);
418 if (opcode2 != -1) {
419 EmitUint8(opcode2);
420 if (opcode3 != -1) {
421 EmitUint8(opcode3);
422 }
423 }
424}
425
426void Assembler::EmitQ(int dst, int src, int opcode, int prefix2, int prefix1) {
427 ASSERT(src <= XMM15);
428 ASSERT(dst <= XMM15);
429 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
430 if (prefix1 >= 0) {
431 EmitUint8(prefix1);
432 }
433 EmitRegRegRex(dst, src, REX_W);
434 if (prefix2 >= 0) {
435 EmitUint8(prefix2);
436 }
437 EmitUint8(opcode);
438 EmitRegisterOperand(dst & 7, src);
439}
440
441void Assembler::EmitL(int dst, int src, int opcode, int prefix2, int prefix1) {
442 ASSERT(src <= XMM15);
443 ASSERT(dst <= XMM15);
444 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
445 if (prefix1 >= 0) {
446 EmitUint8(prefix1);
447 }
448 EmitRegRegRex(dst, src);
449 if (prefix2 >= 0) {
450 EmitUint8(prefix2);
451 }
452 EmitUint8(opcode);
453 EmitRegisterOperand(dst & 7, src);
454}
455
456void Assembler::EmitW(Register dst,
457 Register src,
458 int opcode,
459 int prefix2,
460 int prefix1) {
461 ASSERT(src <= R15);
462 ASSERT(dst <= R15);
463 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
464 if (prefix1 >= 0) {
465 EmitUint8(prefix1);
466 }
467 EmitOperandSizeOverride();
468 EmitRegRegRex(dst, src);
469 if (prefix2 >= 0) {
470 EmitUint8(prefix2);
471 }
472 EmitUint8(opcode);
473 EmitRegisterOperand(dst & 7, src);
474}
475
476#define UNARY_XMM_WITH_CONSTANT(name, constant, op) \
477 void Assembler::name(XmmRegister dst, XmmRegister src) { \
478 movq(TMP, Address(THR, target::Thread::constant##_address_offset())); \
479 if (dst == src) { \
480 op(dst, Address(TMP, 0)); \
481 } else { \
482 movups(dst, Address(TMP, 0)); \
483 op(dst, src); \
484 } \
485 }
486
487// TODO(erikcorry): For the case where dst != src, we could construct these
488// with pcmpeqw xmm0,xmm0 followed by left and right shifts. This would avoid
489// memory traffic.
490// { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
491UNARY_XMM_WITH_CONSTANT(notps, float_not, xorps)
492// { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }
493UNARY_XMM_WITH_CONSTANT(negateps, float_negate, xorps)
494// { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF }
495UNARY_XMM_WITH_CONSTANT(absps, float_absolute, andps)
496// { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 }
497UNARY_XMM_WITH_CONSTANT(zerowps, float_zerow, andps)
498// { 0x8000000000000000LL, 0x8000000000000000LL }
499UNARY_XMM_WITH_CONSTANT(negatepd, double_negate, xorpd)
500// { 0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL }
501UNARY_XMM_WITH_CONSTANT(abspd, double_abs, andpd)
502// {0x8000000000000000LL, 0x8000000000000000LL}
503UNARY_XMM_WITH_CONSTANT(DoubleNegate, double_negate, xorpd)
504// {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL}
505UNARY_XMM_WITH_CONSTANT(DoubleAbs, double_abs, andpd)
506
507#undef UNARY_XMM_WITH_CONSTANT
508
509void Assembler::CmpPS(XmmRegister dst, XmmRegister src, int condition) {
510 EmitL(dst, src, 0xC2, 0x0F);
511 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
512 EmitUint8(condition);
513}
514
515void Assembler::set1ps(XmmRegister dst, Register tmp1, const Immediate& imm) {
516 // Load 32-bit immediate value into tmp1.
517 movl(tmp1, imm);
518 // Move value from tmp1 into dst.
519 movd(dst, tmp1);
520 // Broadcast low lane into other three lanes.
521 shufps(dst, dst, Immediate(0x0));
522}
523
524void Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate& imm) {
525 EmitL(dst, src, 0xC6, 0x0F);
526 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
527 ASSERT(imm.is_uint8());
528 EmitUint8(imm.value());
529}
530
531void Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
532 EmitL(dst, src, 0xC6, 0x0F, 0x66);
533 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
534 ASSERT(imm.is_uint8());
535 EmitUint8(imm.value());
536}
537
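// roundsd uses the SSE4.1 encoding 66 (REX) 0F 3A 0B /r ib; the low bits of
// the immediate select the rounding mode and bit 3 suppresses the precision
// exception.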
538void Assembler::roundsd(XmmRegister dst, XmmRegister src, RoundingMode mode) {
539 ASSERT(src <= XMM15);
540 ASSERT(dst <= XMM15);
541 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
542 EmitUint8(0x66);
543 EmitRegRegRex(dst, src);
544 EmitUint8(0x0F);
545 EmitUint8(0x3A);
546 EmitUint8(0x0B);
547 EmitRegisterOperand(dst & 7, src);
548 // Mask precision exception.
549 EmitUint8(static_cast<uint8_t>(mode) | 0x8);
550}
551
552void Assembler::fldl(const Address& src) {
553 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
554 EmitUint8(0xDD);
555 EmitOperand(0, src);
556}
557
558void Assembler::fstpl(const Address& dst) {
559 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
560 EmitUint8(0xDD);
561 EmitOperand(3, dst);
562}
563
564void Assembler::ffree(intptr_t value) {
565 ASSERT(value < 7);
566 EmitSimple(0xDD, 0xC0 + value);
567}
568
569void Assembler::CompareImmediate(Register reg,
570 const Immediate& imm,
571 OperandSize width) {
572 if (width == kEightBytes) {
573 if (imm.is_int32()) {
574 cmpq(reg, imm);
575 } else {
576 ASSERT(reg != TMP);
577 LoadImmediate(TMP, imm);
578 cmpq(reg, TMP);
579 }
580 } else {
581 ASSERT(width == kFourBytes);
582 cmpl(reg, imm);
583 }
584}
585
586void Assembler::CompareImmediate(const Address& address,
587 const Immediate& imm,
588 OperandSize width) {
589 if (width == kEightBytes) {
590 if (imm.is_int32()) {
591 cmpq(address, imm);
592 } else {
593 LoadImmediate(TMP, imm);
594 cmpq(address, TMP);
595 }
596 } else {
597 ASSERT(width == kFourBytes);
598 cmpl(address, imm);
599 }
600}
601
602void Assembler::testb(const Address& address, const Immediate& imm) {
603 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
604 EmitOperandREX(0, address, REX_NONE);
605 EmitUint8(0xF6);
606 EmitOperand(0, address);
607 ASSERT(imm.is_int8());
608 EmitUint8(imm.value() & 0xFF);
609}
610
611void Assembler::testb(const Address& address, Register reg) {
612 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
613 EmitOperandREX(reg, address, REX_NONE);
614 EmitUint8(0x84);
615 EmitOperand(reg & 7, address);
616}
617
618void Assembler::testq(Register reg, const Immediate& imm) {
619 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
620 if (imm.is_uint8()) {
621 // Use zero-extended 8-bit immediate.
622 if (reg >= 4) {
623 // We need the Rex byte to give access to the SIL and DIL registers (the
624 // low bytes of RSI and RDI).
625 EmitRegisterREX(reg, REX_NONE, /* force = */ true);
626 }
627 if (reg == RAX) {
628 EmitUint8(0xA8);
629 } else {
630 EmitUint8(0xF6);
631 EmitUint8(0xC0 + (reg & 7));
632 }
633 EmitUint8(imm.value() & 0xFF);
634 } else if (imm.is_uint32()) {
635 if (reg == RAX) {
636 EmitUint8(0xA9);
637 } else {
638 EmitRegisterREX(reg, REX_NONE);
639 EmitUint8(0xF7);
640 EmitUint8(0xC0 | (reg & 7));
641 }
642 EmitUInt32(imm.value());
643 } else {
644 // Sign extended version of 32 bit test.
645 ASSERT(imm.is_int32());
646 EmitRegisterREX(reg, REX_W);
647 if (reg == RAX) {
648 EmitUint8(0xA9);
649 } else {
650 EmitUint8(0xF7);
651 EmitUint8(0xC0 | (reg & 7));
652 }
653 EmitImmediate(imm);
654 }
655}
656
657void Assembler::TestImmediate(Register dst,
658 const Immediate& imm,
659 OperandSize width) {
660 if (width == kEightBytes) {
661 if (imm.is_int32() || imm.is_uint32()) {
662 testq(dst, imm);
663 } else {
664 ASSERT(dst != TMP);
665 LoadImmediate(TMP, imm);
666 testq(dst, TMP);
667 }
668 } else {
669 ASSERT(width == kFourBytes);
670 testl(dst, imm);
671 }
672}
673
674void Assembler::AluL(uint8_t modrm_opcode, Register dst, const Immediate& imm) {
675 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
676 EmitRegisterREX(dst, REX_NONE);
677 EmitComplex(modrm_opcode, Operand(dst), imm);
678}
679
680void Assembler::AluB(uint8_t modrm_opcode,
681 const Address& dst,
682 const Immediate& imm) {
683 ASSERT(imm.is_uint8() || imm.is_int8());
684 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
685 EmitOperandREX(modrm_opcode, dst, REX_NONE);
686 EmitUint8(0x80);
687 EmitOperand(modrm_opcode, dst);
688 EmitUint8(imm.value() & 0xFF);
689}
690
691void Assembler::AluW(uint8_t modrm_opcode,
692 const Address& dst,
693 const Immediate& imm) {
694 ASSERT(imm.is_int16() || imm.is_uint16());
695 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
696 EmitOperandSizeOverride();
697 EmitOperandREX(modrm_opcode, dst, REX_NONE);
698 if (imm.is_int8()) {
699 EmitSignExtendedInt8(modrm_opcode, dst, imm);
700 } else {
701 EmitUint8(0x81);
702 EmitOperand(modrm_opcode, dst);
703 EmitUint8(imm.value() & 0xFF);
704 EmitUint8((imm.value() >> 8) & 0xFF);
705 }
706}
707
708void Assembler::AluL(uint8_t modrm_opcode,
709 const Address& dst,
710 const Immediate& imm) {
711 ASSERT(imm.is_int32());
712 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
713 EmitOperandREX(modrm_opcode, dst, REX_NONE);
714 EmitComplex(modrm_opcode, dst, imm);
715}
716
717void Assembler::AluQ(uint8_t modrm_opcode,
718 uint8_t opcode,
719 Register dst,
720 const Immediate& imm) {
721 Operand operand(dst);
722 if (modrm_opcode == 4 && imm.is_uint32()) {
723 // We can use andl for andq.
724 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
725 EmitRegisterREX(dst, REX_NONE);
726 // Would like to use EmitComplex here, but it doesn't like uint32
727 // immediates.
728 if (imm.is_int8()) {
729 EmitSignExtendedInt8(modrm_opcode, operand, imm);
730 } else {
731 if (dst == RAX) {
732 EmitUint8(0x25);
733 } else {
734 EmitUint8(0x81);
735 EmitOperand(modrm_opcode, operand);
736 }
737 EmitUInt32(imm.value());
738 }
739 } else if (imm.is_int32()) {
740 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
741 EmitRegisterREX(dst, REX_W);
742 EmitComplex(modrm_opcode, operand, imm);
743 } else {
744 ASSERT(dst != TMP);
745 movq(TMP, imm);
746 EmitQ(dst, TMP, opcode);
747 }
748}
749
750void Assembler::AluQ(uint8_t modrm_opcode,
751 uint8_t opcode,
752 const Address& dst,
753 const Immediate& imm) {
754 if (imm.is_int32()) {
755 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
756 EmitOperandREX(modrm_opcode, dst, REX_W);
757 EmitComplex(modrm_opcode, dst, imm);
758 } else {
759 movq(TMP, imm);
760 EmitQ(TMP, dst, opcode);
761 }
762}
763
764void Assembler::AndImmediate(Register dst, const Immediate& imm) {
765 if (imm.is_int32() || imm.is_uint32()) {
766 andq(dst, imm);
767 } else {
768 ASSERT(dst != TMP);
769 LoadImmediate(TMP, imm);
770 andq(dst, TMP);
771 }
772}
773
774void Assembler::AndRegisters(Register dst, Register src1, Register src2) {
775 ASSERT(src1 != src2); // Likely a mistake.
776 if (src2 == kNoRegister) {
777 src2 = dst;
778 }
779 if (dst == src1) {
780 andq(dst, src2);
781 } else if (dst == src2) {
782 andq(dst, src1);
783 } else {
784 movq(dst, src1);
785 andq(dst, src2);
786 }
787}
788
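// Variable shifts on x64 take their count in CL. When the count is not
// already in RCX, RCX is parked in TMP, the count is moved into RCX, the
// shift is applied (to TMP when dst is itself RCX), and TMP is written back
// to RCX, which restores the original RCX or, when dst == RCX, installs the
// shifted result.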
789void Assembler::LslRegister(Register dst, Register shift) {
790 if (shift != RCX) {
791 movq(TMP, RCX);
792 movq(RCX, shift);
793 shlq(dst == RCX ? TMP : dst, RCX);
794 movq(RCX, TMP);
795 } else {
796 shlq(dst, shift);
797 }
798}
799
800void Assembler::OrImmediate(Register dst, const Immediate& imm) {
801 if (imm.is_int32()) {
802 orq(dst, imm);
803 } else {
804 ASSERT(dst != TMP);
805 LoadImmediate(TMP, imm);
806 orq(dst, TMP);
807 }
808}
809
810void Assembler::XorImmediate(Register dst, const Immediate& imm) {
811 if (imm.is_int32()) {
812 xorq(dst, imm);
813 } else {
814 ASSERT(dst != TMP);
815 LoadImmediate(TMP, imm);
816 xorq(dst, TMP);
817 }
818}
819
820void Assembler::cqo() {
821 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
822 EmitRegisterREX(RAX, REX_W);
823 EmitUint8(0x99);
824}
825
826void Assembler::EmitUnaryQ(Register reg, int opcode, int modrm_code) {
827 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
828 EmitRegisterREX(reg, REX_W);
829 EmitUint8(opcode);
830 EmitOperand(modrm_code, Operand(reg));
831}
832
833void Assembler::EmitUnaryL(Register reg, int opcode, int modrm_code) {
834 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
835 EmitRegisterREX(reg, REX_NONE);
836 EmitUint8(opcode);
837 EmitOperand(modrm_code, Operand(reg));
838}
839
840void Assembler::EmitUnaryQ(const Address& address, int opcode, int modrm_code) {
841 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
842 Operand operand(address);
843 EmitOperandREX(modrm_code, operand, REX_W);
844 EmitUint8(opcode);
845 EmitOperand(modrm_code, operand);
846}
847
848void Assembler::EmitUnaryL(const Address& address, int opcode, int modrm_code) {
849 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
850 Operand operand(address);
851 EmitOperandREX(modrm_code, operand, REX_NONE);
852 EmitUint8(opcode);
853 EmitOperand(modrm_code, operand);
854}
855
856void Assembler::imull(Register reg, const Immediate& imm) {
857 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
858 Operand operand(reg);
859 EmitOperandREX(reg, operand, REX_NONE);
860 EmitUint8(0x69);
861 EmitOperand(reg & 7, Operand(reg));
862 EmitImmediate(imm);
863}
864
865void Assembler::imulq(Register reg, const Immediate& imm) {
866 if (imm.is_int32()) {
867 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
868 Operand operand(reg);
869 EmitOperandREX(reg, operand, REX_W);
870 EmitUint8(0x69);
871 EmitOperand(reg & 7, Operand(reg));
872 EmitImmediate(imm);
873 } else {
874 ASSERT(reg != TMP);
875 movq(TMP, imm);
876 imulq(reg, TMP);
877 }
878}
879
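// Multiplication by a power of two is strength-reduced to a shift; other
// immediates that fit in 32 bits use imul with an immediate operand, and the
// remaining 64-bit cases go through TMP.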
880void Assembler::MulImmediate(Register reg,
881 const Immediate& imm,
882 OperandSize width) {
883 ASSERT(width == kFourBytes || width == kEightBytes);
884 if (Utils::IsPowerOfTwo(imm.value())) {
885 const intptr_t shift = Utils::ShiftForPowerOfTwo(imm.value());
886 if (width == kFourBytes) {
887 shll(reg, Immediate(shift));
888 } else {
889 shlq(reg, Immediate(shift));
890 }
891 } else if (imm.is_int32()) {
892 if (width == kFourBytes) {
893 imull(reg, imm);
894 } else {
895 imulq(reg, imm);
896 }
897 } else {
898 ASSERT(reg != TMP);
899 ASSERT(width == kEightBytes);
900 movq(TMP, imm);
901 imulq(reg, TMP);
902 }
903}
904
905void Assembler::shll(Register reg, const Immediate& imm) {
906 EmitGenericShift(false, 4, reg, imm);
907}
908
909void Assembler::shll(Register operand, Register shifter) {
910 EmitGenericShift(false, 4, operand, shifter);
911}
912
913void Assembler::shrl(Register reg, const Immediate& imm) {
914 EmitGenericShift(false, 5, reg, imm);
915}
916
917void Assembler::shrl(Register operand, Register shifter) {
918 EmitGenericShift(false, 5, operand, shifter);
919}
920
921void Assembler::sarl(Register reg, const Immediate& imm) {
922 EmitGenericShift(false, 7, reg, imm);
923}
924
925void Assembler::sarl(Register operand, Register shifter) {
926 EmitGenericShift(false, 7, operand, shifter);
927}
928
929void Assembler::shldl(Register dst, Register src, const Immediate& imm) {
930 EmitL(src, dst, 0xA4, 0x0F);
931 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
932 ASSERT(imm.is_int8());
933 EmitUint8(imm.value() & 0xFF);
934}
935
936void Assembler::shlq(Register reg, const Immediate& imm) {
937 EmitGenericShift(true, 4, reg, imm);
938}
939
940void Assembler::shlq(Register operand, Register shifter) {
941 EmitGenericShift(true, 4, operand, shifter);
942}
943
944void Assembler::shrq(Register reg, const Immediate& imm) {
945 EmitGenericShift(true, 5, reg, imm);
946}
947
948void Assembler::shrq(Register operand, Register shifter) {
949 EmitGenericShift(true, 5, operand, shifter);
950}
951
952void Assembler::sarq(Register reg, const Immediate& imm) {
953 EmitGenericShift(true, 7, reg, imm);
954}
955
956void Assembler::sarq(Register operand, Register shifter) {
957 EmitGenericShift(true, 7, operand, shifter);
958}
959
960void Assembler::shldq(Register dst, Register src, const Immediate& imm) {
961 EmitQ(src, dst, 0xA4, 0x0F);
962 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
963 ASSERT(imm.is_int8());
964 EmitUint8(imm.value() & 0xFF);
965}
966
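// bt r/m64, imm8 is encoded as 0F BA /4 ib; REX.W is only emitted for bit
// indices >= 32, since the 32-bit form already covers bits 0-31 and saves a
// prefix byte.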
967void Assembler::btq(Register base, int bit) {
968 ASSERT(bit >= 0 && bit < 64);
969 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
970 Operand operand(base);
971 EmitOperandREX(4, operand, bit >= 32 ? REX_W : REX_NONE);
972 EmitUint8(0x0F);
973 EmitUint8(0xBA);
974 EmitOperand(4, operand);
975 EmitUint8(bit);
976}
977
978void Assembler::enter(const Immediate& imm) {
979 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
980 EmitUint8(0xC8);
981 ASSERT(imm.is_uint16());
982 EmitUint8(imm.value() & 0xFF);
983 EmitUint8((imm.value() >> 8) & 0xFF);
984 EmitUint8(0x00);
985}
986
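// Multi-byte nops follow the recommended 0F 1F (nop r/m32) forms with
// increasingly wide ModRM/SIB/displacement encodings, optionally preceded by
// a 0x66 operand-size prefix, so the filler executes as a single instruction.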
987void Assembler::nop(int size) {
988 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
989 // There are nops up to size 15, but for now just provide up to size 8.
990 ASSERT(0 < size && size <= MAX_NOP_SIZE);
991 switch (size) {
992 case 1:
993 EmitUint8(0x90);
994 break;
995 case 2:
996 EmitUint8(0x66);
997 EmitUint8(0x90);
998 break;
999 case 3:
1000 EmitUint8(0x0F);
1001 EmitUint8(0x1F);
1002 EmitUint8(0x00);
1003 break;
1004 case 4:
1005 EmitUint8(0x0F);
1006 EmitUint8(0x1F);
1007 EmitUint8(0x40);
1008 EmitUint8(0x00);
1009 break;
1010 case 5:
1011 EmitUint8(0x0F);
1012 EmitUint8(0x1F);
1013 EmitUint8(0x44);
1014 EmitUint8(0x00);
1015 EmitUint8(0x00);
1016 break;
1017 case 6:
1018 EmitUint8(0x66);
1019 EmitUint8(0x0F);
1020 EmitUint8(0x1F);
1021 EmitUint8(0x44);
1022 EmitUint8(0x00);
1023 EmitUint8(0x00);
1024 break;
1025 case 7:
1026 EmitUint8(0x0F);
1027 EmitUint8(0x1F);
1028 EmitUint8(0x80);
1029 EmitUint8(0x00);
1030 EmitUint8(0x00);
1031 EmitUint8(0x00);
1032 EmitUint8(0x00);
1033 break;
1034 case 8:
1035 EmitUint8(0x0F);
1036 EmitUint8(0x1F);
1037 EmitUint8(0x84);
1038 EmitUint8(0x00);
1039 EmitUint8(0x00);
1040 EmitUint8(0x00);
1041 EmitUint8(0x00);
1042 EmitUint8(0x00);
1043 break;
1044 default:
1045 UNIMPLEMENTED();
1046 }
1047}
1048
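// Conditional jumps to a bound label pick between the short form (0x70+cc,
// rel8) and the near form (0x0F 0x80+cc, rel32); the displacement is relative
// to the end of the instruction, hence the kShortSize/kLongSize adjustments.
// Unbound labels are linked for later patching in Bind.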
1049void Assembler::j(Condition condition, Label* label, JumpDistance distance) {
1050 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1051 if (label->IsBound()) {
1052 const int kShortSize = 2;
1053 const int kLongSize = 6;
1054 intptr_t offset = label->Position() - buffer_.Size();
1055 ASSERT(offset <= 0);
1056 if (Utils::IsInt(8, offset - kShortSize)) {
1057 EmitUint8(0x70 + condition);
1058 EmitUint8((offset - kShortSize) & 0xFF);
1059 } else {
1060 EmitUint8(0x0F);
1061 EmitUint8(0x80 + condition);
1062 EmitInt32(offset - kLongSize);
1063 }
1064 } else if (distance == kNearJump) {
1065 EmitUint8(0x70 + condition);
1066 EmitNearLabelLink(label);
1067 } else {
1068 EmitUint8(0x0F);
1069 EmitUint8(0x80 + condition);
1070 EmitLabelLink(label);
1071 }
1072}
1073
1074void Assembler::J(Condition condition, const Code& target, Register pp) {
1075 Label no_jump;
1076 // Negate condition.
1077 j(static_cast<Condition>(condition ^ 1), &no_jump, kNearJump);
1078 Jmp(target, pp);
1079 Bind(&no_jump);
1080}
1081
1082void Assembler::jmp(Label* label, JumpDistance distance) {
1083 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1084 if (label->IsBound()) {
1085 const int kShortSize = 2;
1086 const int kLongSize = 5;
1087 intptr_t offset = label->Position() - buffer_.Size();
1088 ASSERT(offset <= 0);
1089 if (Utils::IsInt(8, offset - kShortSize)) {
1090 EmitUint8(0xEB);
1091 EmitUint8((offset - kShortSize) & 0xFF);
1092 } else {
1093 EmitUint8(0xE9);
1094 EmitInt32(offset - kLongSize);
1095 }
1096 } else if (distance == kNearJump) {
1097 EmitUint8(0xEB);
1098 EmitNearLabelLink(label);
1099 } else {
1100 EmitUint8(0xE9);
1101 EmitLabelLink(label);
1102 }
1103}
1104
1105void Assembler::jmp(const ExternalLabel* label) {
1106 { // Encode movq(TMP, Immediate(label->address())), but always as imm64.
1107 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1108 EmitRegisterREX(TMP, REX_W);
1109 EmitUint8(0xB8 | (TMP & 7));
1110 EmitInt64(label->address());
1111 }
1112 jmp(TMP);
1113}
1114
1115void Assembler::JmpPatchable(const Code& target, Register pp) {
1116 ASSERT((pp != PP) || constant_pool_allowed());
1117 const intptr_t idx = object_pool_builder().AddObject(
1118 ToObject(target), ObjectPoolBuilderEntry::kPatchable);
1119 const int32_t offset = target::ObjectPool::element_offset(idx);
1120 movq(CODE_REG, Address(pp, offset - kHeapObjectTag));
1121 movq(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1122 jmp(TMP);
1123}
1124
1125void Assembler::Jmp(const Code& target, Register pp) {
1126 ASSERT((pp != PP) || constant_pool_allowed());
1127 const intptr_t idx = object_pool_builder().FindObject(
1128 ToObject(target), ObjectPoolBuilderEntry::kNotPatchable);
1129 const int32_t offset = target::ObjectPool::element_offset(idx);
1130 movq(CODE_REG, FieldAddress(pp, offset));
1131 jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1132}
1133
1134void Assembler::CompareRegisters(Register a, Register b) {
1135 cmpq(a, b);
1136}
1137
1138void Assembler::LoadFromStack(Register dst, intptr_t depth) {
1139 ASSERT(depth >= 0);
1140 movq(dst, Address(SPREG, depth * target::kWordSize));
1141}
1142
1143void Assembler::StoreToStack(Register src, intptr_t depth) {
1144 ASSERT(depth >= 0);
1145 movq(Address(SPREG, depth * target::kWordSize), src);
1146}
1147
1148void Assembler::CompareToStack(Register src, intptr_t depth) {
1149 ASSERT(depth >= 0);
1150 cmpq(src, Address(SPREG, depth * target::kWordSize));
1151}
1152
1153void Assembler::ExtendValue(Register to, Register from, OperandSize sz) {
1154 switch (sz) {
1155 case kEightBytes:
1156 if (to == from) return; // No operation needed.
1157 return movq(to, from);
1158 case kUnsignedFourBytes:
1159 return movl(to, from);
1160 case kFourBytes:
1161 return movsxd(to, from);
1162 case kUnsignedTwoBytes:
1163 return movzxw(to, from);
1164 case kTwoBytes:
1165 return movsxw(to, from);
1166 case kUnsignedByte:
1167 return movzxb(to, from);
1168 case kByte:
1169 return movsxb(to, from);
1170 default:
1171 UNIMPLEMENTED();
1172 break;
1173 }
1174}
1175
1176void Assembler::PushRegister(Register r) {
1177 pushq(r);
1178}
1179
1180void Assembler::PopRegister(Register r) {
1181 popq(r);
1182}
1183
1184void Assembler::AddImmediate(Register reg,
1185 const Immediate& imm,
1186 OperandSize width) {
1187 ASSERT(width == kFourBytes || width == kEightBytes);
1188 const int64_t value = imm.value();
1189 if (value == 0) {
1190 return;
1191 }
1192 if ((value > 0) || (value == kMinInt64)) {
1193 if (value == 1) {
1194 if (width == kFourBytes) {
1195 incl(reg);
1196 } else {
1197 incq(reg);
1198 }
1199 } else {
1200 if (imm.is_int32() || (width == kFourBytes && imm.is_uint32())) {
1201 if (width == kFourBytes) {
1202 addl(reg, imm);
1203 } else {
1204 addq(reg, imm);
1205 }
1206 } else {
1207 ASSERT(reg != TMP);
1208 ASSERT(width == kEightBytes);
1209 LoadImmediate(TMP, imm);
1210 addq(reg, TMP);
1211 }
1212 }
1213 } else {
1214 SubImmediate(reg, Immediate(-value), width);
1215 }
1216}
1217
1218void Assembler::AddImmediate(Register dest, Register src, int64_t value) {
1219 if (dest == src) {
1220 AddImmediate(dest, value);
1221 return;
1222 }
1223 if (value == 0) {
1224 MoveRegister(dest, src);
1225 return;
1226 }
1227 if (Utils::IsInt(32, value)) {
1228 leaq(dest, Address(src, value));
1229 return;
1230 }
1231 LoadImmediate(dest, value);
1232 addq(dest, src);
1233}
1234
1235void Assembler::AddImmediate(const Address& address, const Immediate& imm) {
1236 const int64_t value = imm.value();
1237 if (value == 0) {
1238 return;
1239 }
1240 if ((value > 0) || (value == kMinInt64)) {
1241 if (value == 1) {
1242 incq(address);
1243 } else {
1244 if (imm.is_int32()) {
1245 addq(address, imm);
1246 } else {
1247 LoadImmediate(TMP, imm);
1248 addq(address, TMP);
1249 }
1250 }
1251 } else {
1252 SubImmediate(address, Immediate(-value));
1253 }
1254}
1255
1256void Assembler::SubImmediate(Register reg,
1257 const Immediate& imm,
1258 OperandSize width) {
1259 ASSERT(width == kFourBytes || width == kEightBytes);
1260 const int64_t value = imm.value();
1261 if (value == 0) {
1262 return;
1263 }
1264 if ((value > 0) || (value == kMinInt64) ||
1265 (value == kMinInt32 && width == kFourBytes)) {
1266 if (value == 1) {
1267 if (width == kFourBytes) {
1268 decl(reg);
1269 } else {
1270 decq(reg);
1271 }
1272 } else {
1273 if (imm.is_int32()) {
1274 if (width == kFourBytes) {
1275 subl(reg, imm);
1276 } else {
1277 subq(reg, imm);
1278 }
1279 } else {
1280 ASSERT(reg != TMP);
1281 ASSERT(width == kEightBytes);
1282 LoadImmediate(TMP, imm);
1283 subq(reg, TMP);
1284 }
1285 }
1286 } else {
1287 AddImmediate(reg, Immediate(-value), width);
1288 }
1289}
1290
1291void Assembler::SubImmediate(const Address& address, const Immediate& imm) {
1292 const int64_t value = imm.value();
1293 if (value == 0) {
1294 return;
1295 }
1296 if ((value > 0) || (value == kMinInt64)) {
1297 if (value == 1) {
1298 decq(address);
1299 } else {
1300 if (imm.is_int32()) {
1301 subq(address, imm);
1302 } else {
1303 LoadImmediate(TMP, imm);
1304 subq(address, TMP);
1305 }
1306 }
1307 } else {
1308 AddImmediate(address, Immediate(-value));
1309 }
1310}
1311
1312void Assembler::Drop(intptr_t stack_elements, Register tmp) {
1313 ASSERT(stack_elements >= 0);
1314 if (stack_elements <= 4) {
1315 for (intptr_t i = 0; i < stack_elements; i++) {
1316 popq(tmp);
1317 }
1318 return;
1319 }
1320 addq(RSP, Immediate(stack_elements * target::kWordSize));
1321}
1322
1323bool Assembler::CanLoadFromObjectPool(const Object& object) const {
1324 ASSERT(IsOriginalObject(object));
1325 if (!constant_pool_allowed()) {
1326 return false;
1327 }
1328
1329 DEBUG_ASSERT(IsNotTemporaryScopedHandle(object));
1330 ASSERT(IsInOldSpace(object));
1331 return true;
1332}
1333
1334void Assembler::LoadWordFromPoolIndex(Register dst, intptr_t idx) {
1335 ASSERT(constant_pool_allowed());
1336 ASSERT(dst != PP);
1337 // PP is tagged on X64.
1338 movq(dst, FieldAddress(PP, target::ObjectPool::element_offset(idx)));
1339}
1340
1341void Assembler::StoreWordToPoolIndex(Register src, intptr_t idx) {
1342 ASSERT(constant_pool_allowed());
1343 ASSERT(src != PP);
1344 // PP is tagged on X64.
1345 movq(FieldAddress(PP, target::ObjectPool::element_offset(idx)), src);
1346}
1347
1348void Assembler::LoadInt64FromBoxOrSmi(Register result, Register value) {
1349 compiler::Label done;
1350#if !defined(DART_COMPRESSED_POINTERS)
1351 // Optimistically untag value.
1352 SmiUntag(result, value);
1353 j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
1354 // Undo untagging by multiplying value by 2.
1355 // [reg + reg + disp8] has a shorter encoding than [reg*2 + disp32]
1356 movq(result, compiler::Address(result, result, TIMES_1,
1357 target::Mint::value_offset()));
1358#else
1359 if (result == value) {
1360 ASSERT(TMP != value);
1361 MoveRegister(TMP, value);
1362 value = TMP;
1363 }
1364 ASSERT(value != result);
1365 // Cannot speculatively untag with value == result because it erases the
1366 // upper bits needed to dereference when it is a Mint.
1367 SmiUntagAndSignExtend(result, value);
1368 j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
1369 movq(result, compiler::FieldAddress(value, target::Mint::value_offset()));
1370#endif
1371 Bind(&done);
1372}
1373
1374void Assembler::LoadInt32FromBoxOrSmi(Register result, Register value) {
1375 compiler::Label done;
1376#if !defined(DART_COMPRESSED_POINTERS)
1377 // Optimistically untag value.
1378 SmiUntag(result, value);
1379 j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
1380 // Undo untagging by multiplying value by 2.
1381 // [reg + reg + disp8] has a shorter encoding than [reg*2 + disp32]
1382 movsxd(result, compiler::Address(result, result, TIMES_1,
1383 compiler::target::Mint::value_offset()));
1384#else
1385 if (result == value) {
1386 ASSERT(TMP != value);
1387 MoveRegister(TMP, value);
1388 value = TMP;
1389 }
1390 ASSERT(value != result);
1391 // Cannot speculatively untag with value == result because it erases the
1392 // upper bits needed to dereference when it is a Mint.
1393 SmiUntagAndSignExtend(result, value);
1394 j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
1395 movsxd(result,
1396 compiler::FieldAddress(value, compiler::target::Mint::value_offset()));
1397#endif
1398 Bind(&done);
1399}
1400
1401void Assembler::LoadIsolate(Register dst) {
1402 movq(dst, Address(THR, target::Thread::isolate_offset()));
1403}
1404
1405void Assembler::LoadIsolateGroup(Register dst) {
1406 movq(dst, Address(THR, target::Thread::isolate_group_offset()));
1407}
1408
1409void Assembler::LoadDispatchTable(Register dst) {
1410 movq(dst, Address(THR, target::Thread::dispatch_table_array_offset()));
1411}
1412
1413void Assembler::LoadObjectHelper(
1414 Register dst,
1415 const Object& object,
1416 bool is_unique,
1417 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1418 ASSERT(IsOriginalObject(object));
1419
1420 // `is_unique == true` effectively means object has to be patchable.
1421 if (!is_unique) {
1422 intptr_t offset;
1423 if (target::CanLoadFromThread(object, &offset)) {
1424 movq(dst, Address(THR, offset));
1425 return;
1426 }
1427 if (target::IsSmi(object)) {
1428 LoadImmediate(dst, Immediate(target::ToRawSmi(object)));
1429 return;
1430 }
1431 }
1432 RELEASE_ASSERT(CanLoadFromObjectPool(object));
1433 const intptr_t index =
1434 is_unique
1435 ? object_pool_builder().AddObject(
1436 object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
1437 : object_pool_builder().FindObject(
1438 object, ObjectPoolBuilderEntry::kNotPatchable,
1439 snapshot_behavior);
1440 LoadWordFromPoolIndex(dst, index);
1441}
1442
1443void Assembler::LoadObject(Register dst, const Object& object) {
1444 LoadObjectHelper(dst, object, false);
1445}
1446
1447void Assembler::LoadUniqueObject(
1448 Register dst,
1449 const Object& object,
1450 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1451 LoadObjectHelper(dst, object, true, snapshot_behavior);
1452}
1453
1454void Assembler::StoreObject(const Address& dst,
1455 const Object& object,
1456 OperandSize size) {
1457 ASSERT(IsOriginalObject(object));
1458 ASSERT(size == kWordBytes || size == kObjectBytes);
1459
1460 intptr_t offset_from_thread;
1461 if (target::CanLoadFromThread(object, &offset_from_thread)) {
1462 movq(TMP, Address(THR, offset_from_thread));
1463 Store(TMP, dst, size);
1464 } else if (target::IsSmi(object)) {
1465 MoveImmediate(dst, Immediate(target::ToRawSmi(object)), size);
1466 } else {
1467 LoadObject(TMP, object);
1468 Store(TMP, dst, size);
1469 }
1470}
1471
1472void Assembler::PushObject(const Object& object) {
1473 ASSERT(IsOriginalObject(object));
1474
1475 intptr_t offset_from_thread;
1476 if (target::CanLoadFromThread(object, &offset_from_thread)) {
1477 pushq(Address(THR, offset_from_thread));
1478 } else if (target::IsSmi(object)) {
1479 PushImmediate(Immediate(target::ToRawSmi(object)));
1480 } else {
1481 LoadObject(TMP, object);
1482 pushq(TMP);
1483 }
1484}
1485
1486void Assembler::CompareObject(Register reg, const Object& object) {
1487 ASSERT(IsOriginalObject(object));
1488
1489 intptr_t offset_from_thread;
1490 if (target::CanLoadFromThread(object, &offset_from_thread)) {
1491 OBJ(cmp)(reg, Address(THR, offset_from_thread));
1492 } else if (target::IsSmi(object)) {
1493 CompareImmediate(reg, Immediate(target::ToRawSmi(object)), kObjectBytes);
1494 } else {
1495 RELEASE_ASSERT(CanLoadFromObjectPool(object));
1496 const intptr_t idx = object_pool_builder().FindObject(
1497 object, ObjectPoolBuilderEntry::kNotPatchable);
1498 const int32_t offset = target::ObjectPool::element_offset(idx);
1499 OBJ(cmp)(reg, Address(PP, offset - kHeapObjectTag));
1500 }
1501}
1502
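// Zero is materialized with xorl, the usual zero idiom (writing a 32-bit
// register clears the upper 32 bits). Values that fit in a sign-extended
// 32-bit immediate use movq directly; anything wider is loaded from the
// object pool when the pool is available, otherwise encoded as a full imm64.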
1503void Assembler::LoadImmediate(Register reg, const Immediate& imm) {
1504 if (imm.value() == 0) {
1505 xorl(reg, reg);
1506 } else if (imm.is_int32() || !constant_pool_allowed()) {
1507 movq(reg, imm);
1508 } else {
1509 const intptr_t idx = object_pool_builder().FindImmediate(imm.value());
1510 LoadWordFromPoolIndex(reg, idx);
1511 }
1512}
1513
1514void Assembler::MoveImmediate(const Address& dst,
1515 const Immediate& imm,
1516 OperandSize size) {
1517 if (imm.is_int32()) {
1518 if (size == kFourBytes) {
1519 movl(dst, imm);
1520 } else {
1521 ASSERT(size == kEightBytes);
1522 movq(dst, imm);
1523 }
1524 } else {
1525 LoadImmediate(TMP, imm);
1526 if (size == kFourBytes) {
1527 movl(dst, TMP);
1528 } else {
1529 ASSERT(size == kEightBytes);
1530 movq(dst, TMP);
1531 }
1532 }
1533}
1534
1535void Assembler::LoadSImmediate(FpuRegister dst, float immediate) {
1536 int32_t bits = bit_cast<int32_t>(immediate);
1537 if (bits == 0) {
1538 xorps(dst, dst);
1539 } else {
1540 intptr_t index = object_pool_builder().FindImmediate(bits);
1541 LoadUnboxedSingle(
1542 dst, PP, target::ObjectPool::element_offset(index) - kHeapObjectTag);
1543 }
1544}
1545
1546void Assembler::LoadDImmediate(FpuRegister dst, double immediate) {
1547 int64_t bits = bit_cast<int64_t>(immediate);
1548 if (bits == 0) {
1549 xorps(dst, dst);
1550 } else {
1551 intptr_t index = object_pool_builder().FindImmediate64(bits);
1552 LoadUnboxedDouble(
1553 dst, PP, target::ObjectPool::element_offset(index) - kHeapObjectTag);
1554 }
1555}
1556
1557void Assembler::LoadQImmediate(FpuRegister dst, simd128_value_t immediate) {
1558 intptr_t index = object_pool_builder().FindImmediate128(immediate);
1559 movups(dst, Address(PP, target::ObjectPool::element_offset(index) -
1560 kHeapObjectTag));
1561}
1562
1563#if defined(DART_COMPRESSED_POINTERS)
1564void Assembler::LoadCompressed(Register dest, const Address& slot) {
1565 movl(dest, slot); // Zero-extension.
1566 addq(dest, Address(THR, target::Thread::heap_base_offset()));
1567}
1568#endif
1569
1570void Assembler::StoreBarrier(Register object,
1571 Register value,
1572 CanBeSmi can_be_smi,
1573 Register scratch) {
1574 // x.slot = x. The barrier should have been removed at the IL level.
1575 ASSERT(object != value);
1576 ASSERT(object != scratch);
1577 ASSERT(value != scratch);
1578 ASSERT(scratch != kNoRegister);
1579
1580 // In parallel, test whether
1581 // - object is old and not remembered and value is new, or
1582 // - object is old and value is old and not marked and concurrent marking is
1583 // in progress
1584 // If so, call the WriteBarrier stub, which will either add object to the
1585 // store buffer (case 1) or add value to the marking stack (case 2).
1586 // Compare UntaggedObject::StorePointer.
1587 Label done;
1588 if (can_be_smi == kValueCanBeSmi) {
1589 BranchIfSmi(value, &done, kNearJump);
1590 } else {
1591#if defined(DEBUG)
1592 Label passed_check;
1593 BranchIfNotSmi(value, &passed_check, kNearJump);
1594 Breakpoint();
1595 Bind(&passed_check);
1596#endif
1597 }
1598 movb(ByteRegisterOf(scratch),
1599 FieldAddress(object, target::Object::tags_offset()));
1600 shrl(scratch, Immediate(target::UntaggedObject::kBarrierOverlapShift));
1601 andl(scratch, Address(THR, target::Thread::write_barrier_mask_offset()));
1602 testb(FieldAddress(value, target::Object::tags_offset()), scratch);
1603 j(ZERO, &done, kNearJump);
1604
1605 Register object_for_call = object;
1606 if (value != kWriteBarrierValueReg) {
1607 // Unlikely. Only non-graph intrinsics.
1608 // TODO(rmacnak): Shuffle registers in intrinsics.
1609 pushq(kWriteBarrierValueReg);
1610 if (object == kWriteBarrierValueReg) {
1611 COMPILE_ASSERT(RBX != kWriteBarrierValueReg);
1612 COMPILE_ASSERT(RCX != kWriteBarrierValueReg);
1613 object_for_call = (value == RBX) ? RCX : RBX;
1614 pushq(object_for_call);
1615 movq(object_for_call, object);
1616 }
1617 movq(kWriteBarrierValueReg, value);
1618 }
1619 generate_invoke_write_barrier_wrapper_(object_for_call);
1620 if (value != kWriteBarrierValueReg) {
1621 if (object == kWriteBarrierValueReg) {
1622 popq(object_for_call);
1623 }
1624 popq(kWriteBarrierValueReg);
1625 }
1626 Bind(&done);
1627}
1628
1629void Assembler::ArrayStoreBarrier(Register object,
1630 Register slot,
1631 Register value,
1632 CanBeSmi can_be_smi,
1633 Register scratch) {
1634 ASSERT(object != scratch);
1635 ASSERT(value != scratch);
1636 ASSERT(slot != scratch);
1637 ASSERT(scratch != kNoRegister);
1638
1639 // In parallel, test whether
1640 // - object is old and not remembered and value is new, or
1641 // - object is old and value is old and not marked and concurrent marking is
1642 // in progress
1643 // If so, call the WriteBarrier stub, which will either add object to the
1644 // store buffer (case 1) or add value to the marking stack (case 2).
1645 // Compare UntaggedObject::StorePointer.
1646 Label done;
1647 if (can_be_smi == kValueCanBeSmi) {
1648 BranchIfSmi(value, &done, kNearJump);
1649 } else {
1650#if defined(DEBUG)
1651 Label passed_check;
1652 BranchIfNotSmi(value, &passed_check, kNearJump);
1653 Breakpoint();
1654 Bind(&passed_check);
1655#endif
1656 }
1657 movb(ByteRegisterOf(scratch),
1658 FieldAddress(object, target::Object::tags_offset()));
1659 shrl(scratch, Immediate(target::UntaggedObject::kBarrierOverlapShift));
1660 andl(scratch, Address(THR, target::Thread::write_barrier_mask_offset()));
1661 testb(FieldAddress(value, target::Object::tags_offset()), scratch);
1662 j(ZERO, &done, kNearJump);
1663
1664 if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
1665 (slot != kWriteBarrierSlotReg)) {
1666 // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
1667 // from StoreIndexInstr, which gets these exact registers from the register
1668 // allocator.
1669 UNIMPLEMENTED();
1670 }
1671
1672 generate_invoke_array_write_barrier_();
1673
1674 Bind(&done);
1675}
1676
1677void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
1678 Register value) {
1679 // We can't assert the incremental barrier is not needed here, only the
1680 // generational barrier. We sometimes omit the write barrier when 'value' is
1681 // a constant, but we don't eagerly mark 'value' and instead assume it is also
1682 // reachable via a constant pool, so it doesn't matter if it is not traced via
1683 // 'object'.
1684 Label done;
1685 BranchIfSmi(value, &done, kNearJump);
1686 testb(FieldAddress(value, target::Object::tags_offset()),
1687 Immediate(1 << target::UntaggedObject::kNewBit));
1688 j(ZERO, &done, Assembler::kNearJump);
1689 testb(FieldAddress(object, target::Object::tags_offset()),
1690 Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
1691 j(ZERO, &done, Assembler::kNearJump);
1692 Stop("Write barrier is required");
1693 Bind(&done);
1694}
1695
1696void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
1697 const Address& dest,
1698 const Object& value,
1699 MemoryOrder memory_order,
1700 OperandSize size) {
1701 if (memory_order == kRelease) {
1702 LoadObject(TMP, value);
1703 StoreIntoObjectNoBarrier(object, dest, TMP, memory_order, size);
1704 } else {
1705 StoreObject(dest, value, size);
1706 }
1707}
1708
1709void Assembler::StoreInternalPointer(Register object,
1710 const Address& dest,
1711 Register value) {
1712 movq(dest, value);
1713}
1714
1715void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
1716#if defined(DEBUG)
1717 Label done;
1718 testq(value, Immediate(kHeapObjectTag));
1719 j(ZERO, &done);
1720 Stop("New value must be Smi.");
1721 Bind(&done);
1722#endif // defined(DEBUG)
1723 movq(dest, value);
1724}
1725
1726void Assembler::ZeroInitSmiField(const Address& dest) {
1727 Immediate zero(target::ToRawSmi(0));
1728 movq(dest, zero);
1729}
1730
1731void Assembler::ZeroInitCompressedSmiField(const Address& dest) {
1732 Immediate zero(target::ToRawSmi(0));
1733 OBJ(mov)(dest, zero);
1734}
1735
1736void Assembler::IncrementCompressedSmiField(const Address& dest,
1737 int64_t increment) {
1738 // Note: FlowGraphCompiler::EdgeCounterIncrementSizeInBytes depends on
1739 // the length of this instruction sequence.
1740 Immediate inc_imm(target::ToRawSmi(increment));
1741 OBJ(add)(dest, inc_imm);
1742}
1743
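// Unresolved uses of a label are threaded through the instruction stream as a
// linked list of 32-bit placeholders (plus a separate list of 8-bit near
// placeholders). Bind walks both lists and patches each placeholder with the
// displacement to the bound position. Typical use (sketch):
//   Label done;
//   j(ZERO, &done, kNearJump);
//   ...
//   Bind(&done);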
1744void Assembler::Bind(Label* label) {
1745 intptr_t bound = buffer_.Size();
1746 ASSERT(!label->IsBound()); // Labels can only be bound once.
1747 while (label->IsLinked()) {
1748 intptr_t position = label->LinkPosition();
1749 intptr_t next = buffer_.Load<int32_t>(position);
1750 buffer_.Store<int32_t>(position, bound - (position + 4));
1751 label->position_ = next;
1752 }
1753 while (label->HasNear()) {
1754 intptr_t position = label->NearPosition();
1755 intptr_t offset = bound - (position + 1);
1756 ASSERT(Utils::IsInt(8, offset));
1757 buffer_.Store<int8_t>(position, offset);
1758 }
1759 label->BindTo(bound);
1760}
1761
1762void Assembler::Load(Register reg, const Address& address, OperandSize sz) {
1763 switch (sz) {
1764 case kByte:
1765 return movsxb(reg, address);
1766 case kUnsignedByte:
1767 return movzxb(reg, address);
1768 case kTwoBytes:
1769 return movsxw(reg, address);
1770 case kUnsignedTwoBytes:
1771 return movzxw(reg, address);
1772 case kFourBytes:
1773 return movsxd(reg, address);
1774 case kUnsignedFourBytes:
1775 return movl(reg, address);
1776 case kEightBytes:
1777 return movq(reg, address);
1778 default:
1779 UNREACHABLE();
1780 break;
1781 }
1782}
1783
1784void Assembler::Store(Register reg, const Address& address, OperandSize sz) {
1785 switch (sz) {
1786 case kByte:
1787 case kUnsignedByte:
1788 return movb(address, ByteRegisterOf(reg));
1789 case kTwoBytes:
1790 case kUnsignedTwoBytes:
1791 return movw(address, reg);
1792 case kFourBytes:
1793 case kUnsignedFourBytes:
1794 return movl(address, reg);
1795 case kEightBytes:
1796 return movq(address, reg);
1797 default:
1798 UNREACHABLE();
1799 break;
1800 }
1801}
1802
1803void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
1804 sarq(reg, Immediate(shift));
1805}
1806
1807void Assembler::CompareWords(Register reg1,
1808 Register reg2,
1809 intptr_t offset,
1810 Register count,
1811 Register temp,
1812 Label* equals) {
1813 Label loop;
1814 Bind(&loop);
1815 decq(count);
1816 j(LESS, equals, Assembler::kNearJump);
1817 COMPILE_ASSERT(target::kWordSize == 8);
1818 movq(temp, FieldAddress(reg1, count, TIMES_8, offset));
1819 cmpq(temp, FieldAddress(reg2, count, TIMES_8, offset));
1820 BranchIf(EQUAL, &loop, Assembler::kNearJump);
1821}
1822
1823void Assembler::EnterFrame(intptr_t frame_size) {
1824 if (prologue_offset_ == -1) {
1825 prologue_offset_ = CodeSize();
1826 Comment("PrologueOffset = %" Pd "", CodeSize());
1827 }
1828#ifdef DEBUG
1829 intptr_t check_offset = CodeSize();
1830#endif
1831 pushq(RBP);
1832 movq(RBP, RSP);
1833#ifdef DEBUG
1834 ProloguePattern pp(CodeAddress(check_offset));
1835 ASSERT(pp.IsValid());
1836#endif
1837 if (frame_size != 0) {
1838 Immediate frame_space(frame_size);
1839 subq(RSP, frame_space);
1840 }
1841}
1842
1843void Assembler::LeaveFrame() {
1844 movq(RSP, RBP);
1845 popq(RBP);
1846}
1847
1848void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
1849 // Reserve space for arguments and align frame before entering
1850 // the C++ world.
1851 if (frame_space != 0) {
1852 subq(RSP, Immediate(frame_space));
1853 }
1854 if (OS::ActivationFrameAlignment() > 1) {
1855 andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
1856 }
1857}
1858
1859void Assembler::EmitEntryFrameVerification() {
1860#if defined(DEBUG)
1861 Label ok;
1862 leaq(RAX, Address(RBP, target::frame_layout.exit_link_slot_from_entry_fp *
1863 target::kWordSize));
1864 cmpq(RAX, RSP);
1865 j(EQUAL, &ok);
1866 Stop("target::frame_layout.exit_link_slot_from_entry_fp mismatch");
1867 Bind(&ok);
1868#endif
1869}
1870
1871void Assembler::PushRegisters(const RegisterSet& register_set) {
1872 const intptr_t xmm_regs_count = register_set.FpuRegisterCount();
1873 if (xmm_regs_count > 0) {
1874 AddImmediate(RSP, Immediate(-xmm_regs_count * kFpuRegisterSize));
1875 // Store XMM registers with the lowest register number at the lowest
1876 // address.
1877 intptr_t offset = 0;
1878 for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
1879 XmmRegister xmm_reg = static_cast<XmmRegister>(i);
1880 if (register_set.ContainsFpuRegister(xmm_reg)) {
1881 movups(Address(RSP, offset), xmm_reg);
1882 offset += kFpuRegisterSize;
1883 }
1884 }
1885 ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
1886 }
1887
1888 // The order in which the registers are pushed must match the order
1889 // in which the registers are encoded in the safe point's stack map.
1890 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1891 Register reg = static_cast<Register>(i);
1892 if (register_set.ContainsRegister(reg)) {
1893 pushq(reg);
1894 }
1895 }
1896}
1897
1898void Assembler::PopRegisters(const RegisterSet& register_set) {
1899 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1900 Register reg = static_cast<Register>(i);
1901 if (register_set.ContainsRegister(reg)) {
1902 popq(reg);
1903 }
1904 }
1905
1906 const intptr_t xmm_regs_count = register_set.FpuRegisterCount();
1907 if (xmm_regs_count > 0) {
1908 // XMM registers have the lowest register number at the lowest address.
1909 intptr_t offset = 0;
1910 for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
1911 XmmRegister xmm_reg = static_cast<XmmRegister>(i);
1912 if (register_set.ContainsFpuRegister(xmm_reg)) {
1913 movups(xmm_reg, Address(RSP, offset));
1914 offset += kFpuRegisterSize;
1915 }
1916 }
1917 ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
1918 AddImmediate(RSP, Immediate(offset));
1919 }
1920}
1921
1922void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
1923 for (Register reg : regs) {
1924 PushRegister(reg);
1925 }
1926}
1927
1928static const RegisterSet kVolatileRegisterSet(
1929 CallingConventions::kVolatileCpuRegisters,
1930 CallingConventions::kVolatileXmmRegisters);
1931
1932void Assembler::CallCFunction(Register reg, bool restore_rsp) {
1933 // Reserve shadow space for outgoing arguments.
1934 if (CallingConventions::kShadowSpaceBytes != 0) {
1935 subq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1936 }
1937 call(reg);
1938 // Restore stack.
1939 if (restore_rsp && CallingConventions::kShadowSpaceBytes != 0) {
1940 addq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1941 }
1942}
1943void Assembler::CallCFunction(Address address, bool restore_rsp) {
1944 // Reserve shadow space for outgoing arguments.
1945 if (CallingConventions::kShadowSpaceBytes != 0) {
1946 subq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1947 }
1948 call(address);
1949 // Restore stack.
1950 if (restore_rsp && CallingConventions::kShadowSpaceBytes != 0) {
1951 addq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1952 }
1953}
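// Note: kShadowSpaceBytes reflects the target calling convention. On Windows
// x64 the ABI requires 32 bytes of shadow space for the callee; on System V
// targets it is expected to be 0, in which case the subq/addq pair above is
// skipped entirely.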
1954
1955void Assembler::CallRuntime(const RuntimeEntry& entry,
1956 intptr_t argument_count) {
1957 ASSERT(!entry.is_leaf());
1958 // Argument count is not checked here, but in the runtime entry for a more
1959 // informative error message.
1960 movq(RBX, compiler::Address(THR, entry.OffsetFromThread()));
1961 LoadImmediate(R10, compiler::Immediate(argument_count));
1962 call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
1963}
1964
1965#define __ assembler_->
1966
1967LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
1968 intptr_t frame_size,
1969 bool preserve_registers)
1970 : assembler_(assembler), preserve_registers_(preserve_registers) {
1971 __ Comment("EnterCallRuntimeFrame");
1972 __ EnterFrame(0);
1973
1974 if (preserve_registers_) {
1975 // TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
1976 __ PushRegisters(kVolatileRegisterSet);
1977 } else {
1978 // These registers must always be preserved.
1979 ASSERT(IsCalleeSavedRegister(THR));
1980 ASSERT(IsCalleeSavedRegister(PP));
1981 ASSERT(IsCalleeSavedRegister(CODE_REG));
1982 }
1983
1984 __ ReserveAlignedFrameSpace(frame_size);
1985}
1986
1987void LeafRuntimeScope::Call(const RuntimeEntry& entry,
1988 intptr_t argument_count) {
1989 ASSERT(entry.is_leaf());
1990 ASSERT(entry.argument_count() == argument_count);
1991 COMPILE_ASSERT(CallingConventions::kVolatileCpuRegisters & (1 << RAX));
1992 __ movq(RAX, compiler::Address(THR, entry.OffsetFromThread()));
1993 __ movq(compiler::Assembler::VMTagAddress(), RAX);
1994 __ CallCFunction(RAX);
1995 __ movq(compiler::Assembler::VMTagAddress(),
1996 compiler::Immediate(VMTag::kDartTagId));
1997}
1998
1999LeafRuntimeScope::~LeafRuntimeScope() {
2000 if (preserve_registers_) {
2001 // RSP might have been modified to reserve space for arguments
2002 // and ensure proper alignment of the stack frame.
2003 // We need to restore it before restoring registers.
2004 const intptr_t kPushedCpuRegistersCount =
2005 RegisterSet::RegisterCount(CallingConventions::kVolatileCpuRegisters);
2006 const intptr_t kPushedXmmRegistersCount =
2007 RegisterSet::RegisterCount(CallingConventions::kVolatileXmmRegisters);
2008 const intptr_t kPushedRegistersSize =
2009 kPushedCpuRegistersCount * target::kWordSize +
2010 kPushedXmmRegistersCount * kFpuRegisterSize;
2011
2012 __ leaq(RSP, Address(RBP, -kPushedRegistersSize));
2013
2014 // TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
2015 __ PopRegisters(kVolatileRegisterSet);
2016 } else {
2017 const intptr_t kPushedRegistersSize =
2018 (target::frame_layout.dart_fixed_frame_size - 2) *
2019 target::kWordSize; // From EnterStubFrame (excluding PC / FP)
2020 __ leaq(RSP, Address(RBP, -kPushedRegistersSize));
2021 }
2022
2023 __ LeaveFrame();
2024}
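// A minimal usage sketch (the runtime entry name is hypothetical), following
// the same pattern as the TSAN helpers below:
//   {
//     LeafRuntimeScope rt(assembler, /*frame_size=*/0,
//                         /*preserve_registers=*/true);
//     assembler->movq(CallingConventions::kArg1Reg, RAX);  // pass the argument
//     rt.Call(kSomeLeafRuntimeEntry, /*argument_count=*/1);  // hypothetical
//   }  // ~LeafRuntimeScope restores registers and leaves the frame.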
2025
2026#if defined(TARGET_USES_THREAD_SANITIZER)
2027void Assembler::TsanLoadAcquire(Address addr) {
2028 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
2029 leaq(CallingConventions::kArg1Reg, addr);
2030 rt.Call(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
2031}
2032
2033void Assembler::TsanStoreRelease(Address addr) {
2034 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
2035 leaq(CallingConventions::kArg1Reg, addr);
2036 rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
2037}
2038#endif
2039
2040void Assembler::RestoreCodePointer() {
2041 movq(CODE_REG,
2042 Address(RBP, target::frame_layout.code_from_fp * target::kWordSize));
2043}
2044
2045void Assembler::LoadPoolPointer(Register pp) {
2046 // Load new pool pointer.
2047 CheckCodePointer();
2048 movq(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
2049 set_constant_pool_allowed(pp == PP);
2050}
2051
2052void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
2053 ASSERT(!constant_pool_allowed());
2054 EnterFrame(0);
2055 if (!FLAG_precompiled_mode) {
2056 pushq(CODE_REG);
2057 pushq(PP);
2058 if (new_pp == kNoRegister) {
2059 LoadPoolPointer(PP);
2060 } else {
2061 movq(PP, new_pp);
2062 }
2063 }
2064 set_constant_pool_allowed(true);
2065 if (frame_size != 0) {
2066 subq(RSP, Immediate(frame_size));
2067 }
2068}
2069
2070void Assembler::LeaveDartFrame() {
2071 // Restore caller's PP register that was pushed in EnterDartFrame.
2072 if (!FLAG_precompiled_mode) {
2073 movq(PP, Address(RBP, (target::frame_layout.saved_caller_pp_from_fp *
2074 target::kWordSize)));
2075 }
2076 set_constant_pool_allowed(false);
2077 LeaveFrame();
2078}
2079
2080void Assembler::CheckCodePointer() {
2081#ifdef DEBUG
2082 if (!FLAG_check_code_pointer) {
2083 return;
2084 }
2085 Comment("CheckCodePointer");
2086 Label cid_ok, instructions_ok;
2087 pushq(RAX);
2088 LoadClassId(RAX, CODE_REG);
2089 cmpq(RAX, Immediate(kCodeCid));
2090 j(EQUAL, &cid_ok);
2091 int3();
2092 Bind(&cid_ok);
2093 {
2094 const intptr_t kRIPRelativeLeaqSize = 7;
2095 const intptr_t header_to_entry_offset =
2096 (target::Instructions::HeaderSize() - kHeapObjectTag);
2097 const intptr_t header_to_rip_offset =
2098 CodeSize() + kRIPRelativeLeaqSize + header_to_entry_offset;
2099 leaq(RAX, Address::AddressRIPRelative(-header_to_rip_offset));
2100 ASSERT(CodeSize() == (header_to_rip_offset - header_to_entry_offset));
2101 }
2102 cmpq(RAX, FieldAddress(CODE_REG, target::Code::instructions_offset()));
2103 j(EQUAL, &instructions_ok);
2104 int3();
2105 Bind(&instructions_ok);
2106 popq(RAX);
2107#endif
2108}
2109
2110// On entry to a function compiled for OSR, the caller's frame pointer, the
2111// stack locals, and any copied parameters are already in place. The frame
2112// pointer is already set up. The PC marker is not correct for the
2113// optimized function and there may be extra space for spill slots to
2114// allocate.
2115void Assembler::EnterOsrFrame(intptr_t extra_size) {
2116 ASSERT(!constant_pool_allowed());
2117 if (prologue_offset_ == -1) {
2118 Comment("PrologueOffset = %" Pd "", CodeSize());
2119 prologue_offset_ = CodeSize();
2120 }
2121 RestoreCodePointer();
2122 LoadPoolPointer();
2123
2124 if (extra_size != 0) {
2125 subq(RSP, Immediate(extra_size));
2126 }
2127}
2128
2129void Assembler::EnterStubFrame() {
2130 EnterDartFrame(0, kNoRegister);
2131}
2132
2133void Assembler::LeaveStubFrame() {
2134 LeaveDartFrame();
2135}
2136
2137void Assembler::EnterCFrame(intptr_t frame_space) {
2138 // Already saved.
2139 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
2140 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
2141
2142 EnterFrame(0);
2143 ReserveAlignedFrameSpace(frame_space);
2144}
2145
2146void Assembler::LeaveCFrame() {
2147 LeaveFrame();
2148}
2149
2150 // RDX: receiver, RBX: ICData entries array.
2151 // Preserve R10 (ARGS_DESC_REG); this is not required today, but may be later.
2152void Assembler::MonomorphicCheckedEntryJIT() {
2153 has_monomorphic_entry_ = true;
2154 intptr_t start = CodeSize();
2155 Label have_cid, miss;
2156 Bind(&miss);
2157 jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));
2158
2159 // Ensure the monomorphic entry is 2-byte aligned (so the GC can see these
2160 // entries if we store them in ICData / MegamorphicCache arrays).
2161 nop(1);
2162
2163 Comment("MonomorphicCheckedEntry");
2164 ASSERT_EQUAL(CodeSize() - start,
2165 target::Instructions::kMonomorphicEntryOffsetJIT);
2166 ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);
2167
2168 const intptr_t cid_offset = target::Array::element_offset(0);
2169 const intptr_t count_offset = target::Array::element_offset(1);
2170
2171 LoadTaggedClassIdMayBeSmi(RAX, RDX);
2172
2173 OBJ(cmp)(RAX, FieldAddress(RBX, cid_offset));
2174 j(NOT_EQUAL, &miss, Assembler::kNearJump);
2175 OBJ(add)(FieldAddress(RBX, count_offset), Immediate(target::ToRawSmi(1)));
2176 xorq(R10, R10); // GC-safe for OptimizeInvokedFunction.
2177#if defined(DART_COMPRESSED_POINTERS)
2178 nop(4);
2179#else
2180 nop(1);
2181#endif
2182
2183 // Fall through to unchecked entry.
2184 ASSERT_EQUAL(CodeSize() - start,
2185 target::Instructions::kPolymorphicEntryOffsetJIT);
2186 ASSERT(((CodeSize() - start) & kSmiTagMask) == kSmiTag);
2187}
2188
2189// RBX - input: class id smi
2190// RDX - input: receiver object
2191void Assembler::MonomorphicCheckedEntryAOT() {
2192 has_monomorphic_entry_ = true;
2193 intptr_t start = CodeSize();
2194 Label have_cid, miss;
2195 Bind(&miss);
2196 jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));
2197
2198 // Ensure the monomorphic entry is 2-byte aligned (so the GC can see these
2199 // entries if we store them in ICData / MegamorphicCache arrays).
2200 nop(1);
2201
2202 Comment("MonomorphicCheckedEntry");
2203 ASSERT_EQUAL(CodeSize() - start,
2204 target::Instructions::kMonomorphicEntryOffsetAOT);
2205 ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);
2206
2207 SmiUntag(RBX);
2208 LoadClassId(RAX, RDX);
2209 cmpq(RAX, RBX);
2210 j(NOT_EQUAL, &miss, Assembler::kNearJump);
2211
2212 // Ensure the unchecked entry is 2-byte aligned (so the GC can see these
2213 // entries if we store them in ICData / MegamorphicCache arrays).
2214#if defined(DART_COMPRESSED_POINTERS)
2215 nop(1);
2216#endif
2217
2218 // Fall through to unchecked entry.
2219 ASSERT_EQUAL(CodeSize() - start,
2220 target::Instructions::kPolymorphicEntryOffsetAOT);
2221 ASSERT(((CodeSize() - start) & kSmiTagMask) == kSmiTag);
2222}
2223
2224void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
2225 has_monomorphic_entry_ = true;
2226 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
2227 int3();
2228 }
2229 jmp(label);
2230 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
2231 int3();
2232 }
2233}
2234
2235void Assembler::CombineHashes(Register dst, Register other) {
2236 // hash += other_hash
2237 addl(dst, other);
2238 // hash += hash << 10
2239 movl(other, dst);
2240 shll(other, Immediate(10));
2241 addl(dst, other);
2242 // hash ^= hash >> 6
2243 movl(other, dst);
2244 shrl(other, Immediate(6));
2245 xorl(dst, other);
2246}
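// Equivalent C sketch of the combine step above (32-bit arithmetic assumed):
//   hash += other;
//   hash += hash << 10;
//   hash ^= hash >> 6;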
2247
2248void Assembler::FinalizeHashForSize(intptr_t bit_size,
2249 Register dst,
2250 Register scratch) {
2251 ASSERT(bit_size > 0); // Can't avoid returning 0 if there are no hash bits!
2252 // While any 32-bit hash value trivially fits in X bits for X > 32, the caller
2253 // may reasonably expect the returned values to fill the entire bit space,
2253 // which this routine cannot guarantee.
2254 ASSERT(bit_size <= kBitsPerInt32);
2255 ASSERT(scratch != kNoRegister);
2256 // hash += hash << 3;
2257 movl(scratch, dst);
2258 shll(scratch, Immediate(3));
2259 addl(dst, scratch);
2260 // hash ^= hash >> 11; // Logical shift, unsigned hash.
2261 movl(scratch, dst);
2262 shrl(scratch, Immediate(11));
2263 xorl(dst, scratch);
2264 // hash += hash << 15;
2265 movl(scratch, dst);
2266 shll(scratch, Immediate(15));
2267 addl(dst, scratch);
2268 // Size to fit.
2269 if (bit_size < kBitsPerInt32) {
2270 andl(dst, Immediate(Utils::NBitMask(bit_size)));
2271 }
2272 // return (hash == 0) ? 1 : hash;
2273 Label done;
2274 j(NOT_ZERO, &done, kNearJump);
2275 incl(dst);
2276 Bind(&done);
2277}
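// Equivalent C sketch of the finalization above (32-bit arithmetic assumed):
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   if (bit_size < 32) hash &= (1u << bit_size) - 1;
//   return hash == 0 ? 1 : hash;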
2278
2279#ifndef PRODUCT
2280void Assembler::MaybeTraceAllocation(Register cid,
2281 Label* trace,
2282 Register temp_reg,
2283 JumpDistance distance) {
2284 if (temp_reg == kNoRegister) {
2285 temp_reg = TMP;
2286 }
2287 ASSERT(temp_reg != cid);
2288 LoadIsolateGroup(temp_reg);
2289 movq(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
2290
2291 movq(temp_reg,
2292 Address(temp_reg,
2293 target::ClassTable::allocation_tracing_state_table_offset()));
2294 cmpb(Address(temp_reg, cid, TIMES_1,
2295 target::ClassTable::AllocationTracingStateSlotOffsetFor(0)),
2296 Immediate(0));
2297 // If allocation tracing is enabled for this class, jump to the trace label,
2298 // which will use the allocation stub.
2299 j(NOT_ZERO, trace, distance);
2300}
2301
2302void Assembler::MaybeTraceAllocation(intptr_t cid,
2303 Label* trace,
2304 Register temp_reg,
2305 JumpDistance distance) {
2306 ASSERT(cid > 0);
2307
2308 if (temp_reg == kNoRegister) {
2309 temp_reg = TMP;
2310 }
2311 LoadIsolateGroup(temp_reg);
2312 movq(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
2313 movq(temp_reg,
2314 Address(temp_reg,
2315 target::ClassTable::allocation_tracing_state_table_offset()));
2316 cmpb(Address(temp_reg,
2317 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid)),
2318 Immediate(0));
2319 // If allocation tracing is enabled for this class, jump to the trace label,
2320 // which will use the allocation stub.
2321 j(NOT_ZERO, trace, distance);
2322}
2323#endif // !PRODUCT
2324
2325void Assembler::TryAllocateObject(intptr_t cid,
2326 intptr_t instance_size,
2327 Label* failure,
2328 JumpDistance distance,
2329 Register instance_reg,
2330 Register temp_reg) {
2331 ASSERT(failure != nullptr);
2332 ASSERT(instance_size != 0);
2333 ASSERT(Utils::IsAligned(instance_size,
2334 target::ObjectAlignment::kObjectAlignment));
2335 if (FLAG_inline_alloc &&
2336 target::Heap::IsAllocatableInNewSpace(instance_size)) {
2337 // If this allocation is traced, the program will jump to the failure path
2338 // (i.e. the allocation stub), which will allocate the object and trace the
2339 // allocation call site.
2340 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg, distance));
2341 movq(instance_reg, Address(THR, target::Thread::top_offset()));
2342 addq(instance_reg, Immediate(instance_size));
2343 // instance_reg: potential next object start.
2344 cmpq(instance_reg, Address(THR, target::Thread::end_offset()));
2345 j(ABOVE_EQUAL, failure, distance);
2346 CheckAllocationCanary(instance_reg);
2347 // Successfully allocated the object, now update top to point to
2348 // next object start and store the class in the class field of object.
2349 movq(Address(THR, target::Thread::top_offset()), instance_reg);
2350 ASSERT(instance_size >= kHeapObjectTag);
2351 AddImmediate(instance_reg, Immediate(kHeapObjectTag - instance_size));
2352 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
2353 MoveImmediate(FieldAddress(instance_reg, target::Object::tags_offset()),
2354 Immediate(tags));
2355 } else {
2356 jmp(failure);
2357 }
2358}
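// The fast path above is a bump-pointer allocation in new space: it
// tentatively advances Thread::top_ by instance_size, bails out to `failure`
// if that would cross Thread::end_, and otherwise commits the new top, tags
// the pointer, and writes the object's tag word.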
2359
2360void Assembler::TryAllocateArray(intptr_t cid,
2361 intptr_t instance_size,
2362 Label* failure,
2363 JumpDistance distance,
2364 Register instance,
2365 Register end_address,
2366 Register temp) {
2367 ASSERT(failure != nullptr);
2368 if (FLAG_inline_alloc &&
2369 target::Heap::IsAllocatableInNewSpace(instance_size)) {
2370 // If this allocation is traced, the program will jump to the failure path
2371 // (i.e. the allocation stub), which will allocate the object and trace the
2372 // allocation call site.
2373 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp, distance));
2374 movq(instance, Address(THR, target::Thread::top_offset()));
2375 movq(end_address, instance);
2376
2377 addq(end_address, Immediate(instance_size));
2378 j(CARRY, failure);
2379
2380 // Check if the allocation fits into the remaining space.
2381 // instance: potential new object start.
2382 // end_address: potential next object start.
2383 cmpq(end_address, Address(THR, target::Thread::end_offset()));
2384 j(ABOVE_EQUAL, failure);
2385 CheckAllocationCanary(instance);
2386
2387 // Successfully allocated the object(s), now update top to point to
2388 // next object start and initialize the object.
2389 movq(Address(THR, target::Thread::top_offset()), end_address);
2390 addq(instance, Immediate(kHeapObjectTag));
2391
2392 // Initialize the tags.
2393 // instance: new object start as a tagged pointer.
2394 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
2395 movq(FieldAddress(instance, target::Object::tags_offset()),
2396 Immediate(tags));
2397 } else {
2398 jmp(failure);
2399 }
2400}
2401
2402void Assembler::CopyMemoryWords(Register src,
2403 Register dst,
2404 Register size,
2405 Register temp) {
2406 // This loop is equivalent to
2407 // shrq(size, Immediate(target::kWordSizeLog2));
2408 // rep_movsq()
2409 // but shows better performance on certain micro-benchmarks.
2410 Label loop, done;
2411 cmpq(size, Immediate(0));
2412 j(EQUAL, &done, kNearJump);
2413 Bind(&loop);
2414 movq(temp, Address(src, 0));
2415 addq(src, Immediate(target::kWordSize));
2416 movq(Address(dst, 0), temp);
2417 addq(dst, Immediate(target::kWordSize));
2418 subq(size, Immediate(target::kWordSize));
2419 j(NOT_ZERO, &loop, kNearJump);
2420 Bind(&done);
2421}
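// Equivalent C sketch (size is a byte count, assumed to be word-aligned; src,
// dst are word pointers):
//   while (size != 0) {
//     *dst++ = *src++;            // copy one word at a time
//     size -= target::kWordSize;
//   }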
2422
2423void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
2424 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2425 buffer_.Emit<uint8_t>(0xe8);
2426 buffer_.Emit<int32_t>(0);
2427
2428 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
2429 PcRelativeCallPattern::kLengthInBytes);
2430 pattern.set_distance(offset_into_target);
2431}
2432
2433void Assembler::GenerateUnRelocatedPcRelativeTailCall(
2434 intptr_t offset_into_target) {
2435 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2436 buffer_.Emit<uint8_t>(0xe9);
2437 buffer_.Emit<int32_t>(0);
2438
2439 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
2440 PcRelativeCallPattern::kLengthInBytes);
2441 pattern.set_distance(offset_into_target);
2442}
2443
2444void Assembler::Align(int alignment, intptr_t offset) {
2445 ASSERT(Utils::IsPowerOfTwo(alignment));
2446 intptr_t pos = offset + buffer_.GetPosition();
2447 int mod = pos & (alignment - 1);
2448 if (mod == 0) {
2449 return;
2450 }
2451 intptr_t bytes_needed = alignment - mod;
2452 while (bytes_needed > MAX_NOP_SIZE) {
2453 nop(MAX_NOP_SIZE);
2454 bytes_needed -= MAX_NOP_SIZE;
2455 }
2456 if (bytes_needed != 0) {
2457 nop(bytes_needed);
2458 }
2459 ASSERT(((offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
2460}
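// Worked example: if the current position (plus `offset`) is 13 bytes into a
// 16-byte-aligned region, bytes_needed is 3 and a single 3-byte nop is
// emitted; larger gaps are filled with nops of at most MAX_NOP_SIZE bytes
// each.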
2461
2462void Assembler::EmitOperand(int rm, const Operand& operand) {
2463 ASSERT(rm >= 0 && rm < 8);
2464 const intptr_t length = operand.length_;
2465 ASSERT(length > 0);
2466 // Emit the ModRM byte updated with the given RM value.
2467 ASSERT((operand.encoding_[0] & 0x38) == 0);
2468 EmitUint8(operand.encoding_[0] + (rm << 3));
2469 // Emit the rest of the encoded operand.
2470 for (intptr_t i = 1; i < length; i++) {
2471 EmitUint8(operand.encoding_[i]);
2472 }
2473}
2474
2475void Assembler::EmitRegisterOperand(int rm, int reg) {
2476 Operand operand;
2477 operand.SetModRM(3, static_cast<Register>(reg));
2478 EmitOperand(rm, operand);
2479}
2480
2481void Assembler::EmitImmediate(const Immediate& imm) {
2482 if (imm.is_int32()) {
2483 EmitInt32(static_cast<int32_t>(imm.value()));
2484 } else {
2485 EmitInt64(imm.value());
2486 }
2487}
2488
2489void Assembler::EmitSignExtendedInt8(int rm,
2490 const Operand& operand,
2491 const Immediate& immediate) {
2492 EmitUint8(0x83);
2493 EmitOperand(rm, operand);
2494 EmitUint8(immediate.value() & 0xFF);
2495}
2496
2497void Assembler::EmitComplex(int rm,
2498 const Operand& operand,
2499 const Immediate& immediate) {
2500 ASSERT(rm >= 0 && rm < 8);
2501 ASSERT(immediate.is_int32());
2502 if (immediate.is_int8()) {
2503 EmitSignExtendedInt8(rm, operand, immediate);
2504 } else if (operand.IsRegister(RAX)) {
2505 // Use short form if the destination is rax.
2506 EmitUint8(0x05 + (rm << 3));
2507 EmitImmediate(immediate);
2508 } else {
2509 EmitUint8(0x81);
2510 EmitOperand(rm, operand);
2511 EmitImmediate(immediate);
2512 }
2513}
2514
2515void Assembler::EmitLabel(Label* label, intptr_t instruction_size) {
2516 if (label->IsBound()) {
2517 intptr_t offset = label->Position() - buffer_.Size();
2518 ASSERT(offset <= 0);
2519 EmitInt32(offset - instruction_size);
2520 } else {
2521 EmitLabelLink(label);
2522 }
2523}
2524
2525void Assembler::EmitLabelLink(Label* label) {
2526 ASSERT(!label->IsBound());
2527 intptr_t position = buffer_.Size();
2528 EmitInt32(label->position_);
2529 label->LinkTo(position);
2530}
2531
2532void Assembler::EmitNearLabelLink(Label* label) {
2533 ASSERT(!label->IsBound());
2534 intptr_t position = buffer_.Size();
2535 EmitUint8(0);
2536 label->NearLinkTo(position);
2537}
2538
2539void Assembler::EmitGenericShift(bool wide,
2540 int rm,
2541 Register reg,
2542 const Immediate& imm) {
2543 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2544 ASSERT(imm.is_int8());
2545 if (wide) {
2546 EmitRegisterREX(reg, REX_W);
2547 } else {
2548 EmitRegisterREX(reg, REX_NONE);
2549 }
2550 if (imm.value() == 1) {
2551 EmitUint8(0xD1);
2552 EmitOperand(rm, Operand(reg));
2553 } else {
2554 EmitUint8(0xC1);
2555 EmitOperand(rm, Operand(reg));
2556 EmitUint8(imm.value() & 0xFF);
2557 }
2558}
2559
2560void Assembler::EmitGenericShift(bool wide,
2561 int rm,
2562 Register operand,
2563 Register shifter) {
2564 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2565 ASSERT(shifter == RCX);
2566 EmitRegisterREX(operand, wide ? REX_W : REX_NONE);
2567 EmitUint8(0xD3);
2568 EmitOperand(rm, Operand(operand));
2569}
2570
2571void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
2572 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2573 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2574 movl(result, tags);
2575 shrl(result, Immediate(12));
2576}
2577
2578void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
2579 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
2580 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
2581 movzxw(result, tags);
2582 shrl(result, Immediate(target::UntaggedObject::kSizeTagPos -
2583 target::ObjectAlignment::kObjectAlignmentLog2));
2584 AndImmediate(result,
2585 Immediate(Utils::NBitMask(target::UntaggedObject::kSizeTagSize)
2586 << target::ObjectAlignment::kObjectAlignmentLog2));
2587}
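// Tag word layout assumed by the two helpers above (per the ASSERTs):
//   bits [8..11]  - size tag, in units of kObjectAlignment
//   bits [12..31] - class id
// ExtractInstanceSizeFromTags therefore yields the instance size in bytes,
// while ExtractClassIdFromTags reads the class id directly.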
2588
2589void Assembler::LoadClassId(Register result, Register object) {
2590 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2591 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2592 movl(result, FieldAddress(object, target::Object::tags_offset()));
2593 shrl(result, Immediate(target::UntaggedObject::kClassIdTagPos));
2594}
2595
2596void Assembler::LoadClassById(Register result, Register class_id) {
2597 ASSERT(result != class_id);
2598 const intptr_t table_offset =
2599 target::IsolateGroup::cached_class_table_table_offset();
2600
2601 LoadIsolateGroup(result);
2602 movq(result, Address(result, table_offset));
2603 movq(result, Address(result, class_id, TIMES_8, 0));
2604}
2605
2606void Assembler::CompareClassId(Register object,
2607 intptr_t class_id,
2608 Register scratch) {
2609 LoadClassId(TMP, object);
2610 cmpl(TMP, Immediate(class_id));
2611}
2612
2613void Assembler::SmiUntagOrCheckClass(Register object,
2614 intptr_t class_id,
2615 Label* is_smi) {
2616#if !defined(DART_COMPRESSED_POINTERS)
2617 ASSERT(kSmiTagShift == 1);
2618 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2619 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2620 // Untag optimistically. The tag bit is shifted into the CARRY flag.
2621 SmiUntag(object);
2622 j(NOT_CARRY, is_smi, kNearJump);
2623 // Load the cid. LoadClassId can't be used because the object is untagged;
2624 // use a TIMES_2 scale factor in the addressing mode to compensate.
2625 movl(TMP, Address(object, TIMES_2,
2626 target::Object::tags_offset() + kHeapObjectTag));
2627 shrl(TMP, Immediate(target::UntaggedObject::kClassIdTagPos));
2628 cmpl(TMP, Immediate(class_id));
2629#else
2630 // Cannot speculatively untag compressed Smis because it erases upper address
2631 // bits.
2632 UNREACHABLE();
2633#endif
2634}
2635
2636void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
2637 Label smi;
2638
2639 if (result == object) {
2640 Label join;
2641
2642 testq(object, Immediate(kSmiTagMask));
2643 j(EQUAL, &smi, Assembler::kNearJump);
2644 LoadClassId(result, object);
2645 jmp(&join, Assembler::kNearJump);
2646
2647 Bind(&smi);
2648 movq(result, Immediate(kSmiCid));
2649
2650 Bind(&join);
2651 } else {
2652 testq(object, Immediate(kSmiTagMask));
2653 movq(result, Immediate(kSmiCid));
2654 j(EQUAL, &smi, Assembler::kNearJump);
2655 LoadClassId(result, object);
2656
2657 Bind(&smi);
2658 }
2659}
2660
2661void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
2662 Label smi;
2663
2664 if (result == object) {
2665 Label join;
2666
2667 testq(object, Immediate(kSmiTagMask));
2668 j(EQUAL, &smi, Assembler::kNearJump);
2669 LoadClassId(result, object);
2670 SmiTag(result);
2671 jmp(&join, Assembler::kNearJump);
2672
2673 Bind(&smi);
2674 movq(result, Immediate(target::ToRawSmi(kSmiCid)));
2675
2676 Bind(&join);
2677 } else {
2678 testq(object, Immediate(kSmiTagMask));
2679 movq(result, Immediate(target::ToRawSmi(kSmiCid)));
2680 j(EQUAL, &smi, Assembler::kNearJump);
2681 LoadClassId(result, object);
2682 SmiTag(result);
2683
2684 Bind(&smi);
2685 }
2686}
2687
2688void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
2689 Register src,
2690 Register scratch,
2691 bool can_be_null) {
2692#if defined(DEBUG)
2693 Comment("Check that object in register has cid %" Pd "", cid);
2694 Label matches;
2695 LoadClassIdMayBeSmi(scratch, src);
2696 CompareImmediate(scratch, cid);
2697 BranchIf(EQUAL, &matches, Assembler::kNearJump);
2698 if (can_be_null) {
2699 CompareImmediate(scratch, kNullCid);
2700 BranchIf(EQUAL, &matches, Assembler::kNearJump);
2701 }
2702 Breakpoint();
2703 Bind(&matches);
2704#endif
2705}
2706
2707Address Assembler::VMTagAddress() {
2708 return Address(THR, target::Thread::vm_tag_offset());
2709}
2710
2711bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
2712 bool is_external,
2713 intptr_t cid,
2714 intptr_t index_scale) {
2715 if (!IsSafeSmi(constant)) return false;
2716 const int64_t index = target::SmiValue(constant);
2717 const int64_t disp =
2718 index * index_scale +
2719 (is_external ? 0 : target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
2720 return Utils::IsInt(32, disp);
2721}
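// Worked example: for an external typed-data access with index_scale 8 and a
// Smi constant index of 1000, disp is 8000, which fits in a signed 32-bit
// displacement, so the element can be addressed with a constant offset.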
2722
2723Address Assembler::ElementAddressForIntIndex(bool is_external,
2724 intptr_t cid,
2725 intptr_t index_scale,
2726 Register array,
2727 intptr_t index) {
2728 if (is_external) {
2729 return Address(array, index * index_scale);
2730 } else {
2731 const int64_t disp = static_cast<int64_t>(index) * index_scale +
2732 target::Instance::DataOffsetFor(cid);
2733 ASSERT(Utils::IsInt(32, disp));
2734 return FieldAddress(array, static_cast<int32_t>(disp));
2735 }
2736}
2737
2738Address Assembler::ElementAddressForRegIndex(bool is_external,
2739 intptr_t cid,
2740 intptr_t index_scale,
2741 bool index_unboxed,
2742 Register array,
2743 Register index) {
2744 if (is_external) {
2745 return Address(array, index, ToScaleFactor(index_scale, index_unboxed), 0);
2746 } else {
2747 return FieldAddress(array, index, ToScaleFactor(index_scale, index_unboxed),
2748 target::Instance::DataOffsetFor(cid));
2749 }
2750}
2751
2752void Assembler::RangeCheck(Register value,
2753 Register temp,
2754 intptr_t low,
2755 intptr_t high,
2756 RangeCheckCondition condition,
2757 Label* target) {
2758 auto cc = condition == kIfInRange ? BELOW_EQUAL : ABOVE;
2759 Register to_check = value;
2760 if (temp != kNoRegister) {
2761 movq(temp, value);
2762 to_check = temp;
2763 }
2764 subq(to_check, Immediate(low));
2765 cmpq(to_check, Immediate(high - low));
2766 j(cc, target);
2767}
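// RangeCheck uses the standard unsigned-compare trick: after subtracting
// `low`, a value inside [low, high] maps to [0, high - low], so a single
// unsigned comparison (BELOW_EQUAL for kIfInRange, ABOVE for kIfNotInRange)
// replaces two signed comparisons.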
2768
2769} // namespace compiler
2770} // namespace dart
2771
2772#endif // defined(TARGET_ARCH_X64)