assembler_x64.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // NOLINT
7#if defined(TARGET_ARCH_X64)
8
9#define SHOULD_NOT_INCLUDE_RUNTIME
10
11#include "vm/class_id.h"
14#include "vm/instructions.h"
15#include "vm/tags.h"
16
17namespace dart {
18
19DECLARE_FLAG(bool, check_code_pointer);
20DECLARE_FLAG(bool, precompiled_mode);
21
22namespace compiler {
23
24Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
25 intptr_t far_branch_level)
26 : AssemblerBase(object_pool_builder), constant_pool_allowed_(false) {
27 // Far branching mode is only needed and implemented for ARM.
28 ASSERT(far_branch_level == 0);
29
30 generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
31 call(Address(THR,
32 target::Thread::write_barrier_wrappers_thread_offset(reg)));
33 };
34 generate_invoke_array_write_barrier_ = [&]() {
35 call(
36 Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
37 };
38}
39
40void Assembler::call(Label* label) {
41 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
42 const int kSize = 5;
43 EmitUint8(0xE8);
44 EmitLabel(label, kSize);
45}
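// The call above always occupies kSize == 5 bytes: the 0xE8 opcode followed by
// a 32-bit displacement measured from the end of the instruction. For example,
// a call whose target begins 0x100 bytes past the end of the call encodes as
// E8 00 01 00 00.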
46
47void Assembler::LoadNativeEntry(
48 Register dst,
49 const ExternalLabel* label,
50 ObjectPoolBuilderEntry::Patchability patchable) {
51 const intptr_t index =
52 object_pool_builder().FindNativeFunction(label, patchable);
53 LoadWordFromPoolIndex(dst, index);
54}
55
56void Assembler::call(const ExternalLabel* label) {
57 { // Encode movq(TMP, Immediate(label->address())), but always as imm64.
58 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
59 EmitRegisterREX(TMP, REX_W);
60 EmitUint8(0xB8 | (TMP & 7));
61 EmitInt64(label->address());
62 }
63 call(TMP);
64}
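// Byte-level sketch of the sequence above (assuming TMP maps to R11):
//   49 BB <imm64>   movq r11, label->address()
//   41 FF D3        call r11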
65
66void Assembler::CallCodeThroughPool(intptr_t target_code_pool_index,
67 CodeEntryKind entry_kind) {
68 // Avoid clobbering CODE_REG when invoking code in precompiled mode.
69 // We don't actually use CODE_REG in the callee, and the caller might
70 // be using CODE_REG for a live value (e.g. a value that is alive
71 // across invocation of a shared stub like the one we use for
72 // allocating Mint boxes).
73 const Register code_reg = FLAG_precompiled_mode ? TMP : CODE_REG;
74 LoadWordFromPoolIndex(code_reg, target_code_pool_index);
75 call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
76}
77
78void Assembler::CallPatchable(
79 const Code& target,
80 CodeEntryKind entry_kind,
81 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
82 ASSERT(constant_pool_allowed());
83 const intptr_t idx = object_pool_builder().AddObject(
84 ToObject(target), ObjectPoolBuilderEntry::kPatchable, snapshot_behavior);
85 CallCodeThroughPool(idx, entry_kind);
86}
87
88void Assembler::CallWithEquivalence(const Code& target,
89 const Object& equivalence,
90 CodeEntryKind entry_kind) {
91 ASSERT(constant_pool_allowed());
92 const intptr_t idx =
93 object_pool_builder().FindObject(ToObject(target), equivalence);
94 CallCodeThroughPool(idx, entry_kind);
95}
96
97void Assembler::Call(
98 const Code& target,
99 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
100 ASSERT(constant_pool_allowed());
101 const intptr_t idx = object_pool_builder().FindObject(
102 ToObject(target), ObjectPoolBuilderEntry::kNotPatchable,
103 snapshot_behavior);
104 CallCodeThroughPool(idx, CodeEntryKind::kNormal);
105}
106
107void Assembler::pushq(Register reg) {
108 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
109 EmitRegisterREX(reg, REX_NONE);
110 EmitUint8(0x50 | (reg & 7));
111}
112
113void Assembler::pushq(const Immediate& imm) {
114 if (imm.is_int8()) {
115 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
116 EmitUint8(0x6A);
117 EmitUint8(imm.value() & 0xFF);
118 } else if (imm.is_int32()) {
119 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
120 EmitUint8(0x68);
121 EmitImmediate(imm);
122 } else {
123 movq(TMP, imm);
124 pushq(TMP);
125 }
126}
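// Example encodings for the three cases above: pushq(Immediate(1)) emits
// 6A 01 (sign-extended 8-bit form), pushq(Immediate(0x1000)) emits
// 68 00 10 00 00 (32-bit form), and immediates outside the int32 range are
// materialized in TMP and pushed as a register.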
127
128void Assembler::popq(Register reg) {
129 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
130 EmitRegisterREX(reg, REX_NONE);
131 EmitUint8(0x58 | (reg & 7));
132}
133
134void Assembler::setcc(Condition condition, ByteRegister dst) {
135 ASSERT(dst != kNoByteRegister);
136 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
137 if (dst >= 8) {
138 EmitUint8(REX_PREFIX | (((dst & 0x08) != 0) ? REX_B : REX_NONE));
139 }
140 EmitUint8(0x0F);
141 EmitUint8(0x90 + condition);
142 EmitUint8(0xC0 + (dst & 0x07));
143}
144
145void Assembler::EnterFullSafepoint() {
146 // We generate the same number of instructions whether or not the slow-path is
147 // forced, to simplify GenerateJitCallbackTrampolines.
148 // For TSAN, we always go to the runtime so TSAN is aware of the release
149 // semantics of entering the safepoint.
150 Label done, slow_path;
151 if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
152 jmp(&slow_path);
153 }
154
155 // Compare and swap the value at Thread::safepoint_state from
156 // unacquired to acquired. If the CAS fails, go to a slow-path stub.
157 pushq(RAX);
158 movq(RAX, Immediate(target::Thread::full_safepoint_state_unacquired()));
159 movq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
160 LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
161 movq(TMP, RAX);
162 popq(RAX);
163 cmpq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
164
165 if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
166 j(EQUAL, &done);
167 }
168
169 Bind(&slow_path);
170 movq(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
171 movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
172
173 // Use call instead of CallCFunction to avoid having to clean up shadow space
174 // afterwards. This is possible because the safepoint stub does not use the
175 // shadow space as scratch and has no arguments.
176 call(TMP);
177
178 Bind(&done);
179}
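// In effect this performs
//   lock cmpxchgq [THR + safepoint_state_offset], acquired   ; RAX = unacquired
// and falls through to the enter-safepoint stub when the compare fails (or
// unconditionally when the slow path or TSAN is forced).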
180
181void Assembler::TransitionGeneratedToNative(Register destination_address,
182 Register new_exit_frame,
183 Register new_exit_through_ffi,
184 bool enter_safepoint) {
185 // Save exit frame information to enable stack walking.
186 movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
187 new_exit_frame);
188
189 movq(compiler::Address(THR,
190 compiler::target::Thread::exit_through_ffi_offset()),
191 new_exit_through_ffi);
192
193 movq(Assembler::VMTagAddress(), destination_address);
194 movq(Address(THR, target::Thread::execution_state_offset()),
195 Immediate(target::Thread::native_execution_state()));
196
197 if (enter_safepoint) {
198 EnterFullSafepoint();
199 }
200}
201
202void Assembler::ExitFullSafepoint(bool ignore_unwind_in_progress) {
203 // We generate the same number of instructions whether or not the slow-path is
204 // forced, for consistency with EnterFullSafepoint.
205 // For TSAN, we always go to the runtime so TSAN is aware of the acquire
206 // semantics of leaving the safepoint.
207 Label done, slow_path;
208 if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
209 jmp(&slow_path);
210 }
211
212 // Compare and swap the value at Thread::safepoint_state from
213 // acquired to unacquired. On success, jump to 'success'; otherwise,
214 // fallthrough.
215
216 pushq(RAX);
217 movq(RAX, Immediate(target::Thread::full_safepoint_state_acquired()));
218 movq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
219 LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
220 movq(TMP, RAX);
221 popq(RAX);
222 cmpq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
223
224 if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
225 j(EQUAL, &done);
226 }
227
228 Bind(&slow_path);
229 if (ignore_unwind_in_progress) {
230 movq(TMP,
231 Address(THR,
232 target::Thread::
233 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
234 } else {
235 movq(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
236 }
237 movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
238
239 // Use call instead of CallCFunction to avoid having to clean up shadow space
240 // afterwards. This is possible because the safepoint stub does not use the
241 // shadow space as scratch and has no arguments.
242 call(TMP);
243
244 Bind(&done);
245}
246
247void Assembler::TransitionNativeToGenerated(bool leave_safepoint,
248 bool ignore_unwind_in_progress,
249 bool set_tag) {
250 if (leave_safepoint) {
251 ExitFullSafepoint(ignore_unwind_in_progress);
252 } else {
253 // This flag only makes sense when we are leaving the safepoint.
254 ASSERT(!ignore_unwind_in_progress);
255#if defined(DEBUG)
256 // Ensure we've already left the safepoint.
257 movq(TMP, Address(THR, target::Thread::safepoint_state_offset()));
258 andq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
259 Label ok;
260 j(ZERO, &ok);
261 Breakpoint();
262 Bind(&ok);
263#endif
264 }
265
266 if (set_tag) {
267 movq(Assembler::VMTagAddress(),
268 Immediate(target::Thread::vm_tag_dart_id()));
269 }
270 movq(Address(THR, target::Thread::execution_state_offset()),
271 Immediate(target::Thread::generated_execution_state()));
272
273 // Reset exit frame information in Isolate's mutator thread structure.
274 movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
275 Immediate(0));
276 movq(compiler::Address(THR,
277 compiler::target::Thread::exit_through_ffi_offset()),
278 compiler::Immediate(0));
279}
280
281void Assembler::EmitQ(int reg,
282 const Address& address,
283 int opcode,
284 int prefix2,
285 int prefix1) {
286 ASSERT(reg <= XMM15);
287 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
288 if (prefix1 >= 0) {
289 EmitUint8(prefix1);
290 }
291 EmitOperandREX(reg, address, REX_W);
292 if (prefix2 >= 0) {
293 EmitUint8(prefix2);
294 }
295 EmitUint8(opcode);
296 EmitOperand(reg & 7, address);
297}
298
299void Assembler::EmitL(int reg,
300 const Address& address,
301 int opcode,
302 int prefix2,
303 int prefix1) {
304 ASSERT(reg <= XMM15);
305 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
306 if (prefix1 >= 0) {
307 EmitUint8(prefix1);
308 }
309 EmitOperandREX(reg, address, REX_NONE);
310 if (prefix2 >= 0) {
311 EmitUint8(prefix2);
312 }
313 EmitUint8(opcode);
314 EmitOperand(reg & 7, address);
315}
316
317void Assembler::EmitW(Register reg,
318 const Address& address,
319 int opcode,
320 int prefix2,
321 int prefix1) {
322 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
323 if (prefix1 >= 0) {
324 EmitUint8(prefix1);
325 }
326 EmitOperandSizeOverride();
327 EmitOperandREX(reg, address, REX_NONE);
328 if (prefix2 >= 0) {
329 EmitUint8(prefix2);
330 }
331 EmitUint8(opcode);
332 EmitOperand(reg & 7, address);
333}
334
335void Assembler::EmitB(int reg, const Address& address, int opcode) {
336 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
337 EmitOperandREX(reg & ~0x10, address, reg >= 8 ? REX_PREFIX : REX_NONE);
338 EmitUint8(opcode);
339 EmitOperand(reg & 7, address);
340}
341
342void Assembler::movl(Register dst, const Immediate& imm) {
343 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
344 Operand operand(dst);
345 EmitOperandREX(0, operand, REX_NONE);
346 EmitUint8(0xC7);
347 EmitOperand(0, operand);
348 ASSERT(imm.is_int32());
349 EmitImmediate(imm);
350}
351
352void Assembler::movl(const Address& dst, const Immediate& imm) {
353 movl(TMP, imm);
354 movl(dst, TMP);
355}
356
357void Assembler::movb(const Address& dst, const Immediate& imm) {
358 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
359 EmitOperandREX(0, dst, REX_NONE);
360 EmitUint8(0xC6);
361 EmitOperand(0, dst);
362 ASSERT(imm.is_int8());
363 EmitUint8(imm.value() & 0xFF);
364}
365
366void Assembler::movw(Register dst, const Address& src) {
367 // This would leave 16 bits above the 2 byte value undefined.
368 // If we ever want to purposefully have those undefined, remove this.
369 // TODO(40210): Allow this.
370 FATAL("Use movzxw or movsxw instead.");
371}
372
373void Assembler::movw(const Address& dst, const Immediate& imm) {
374 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
375 EmitOperandSizeOverride();
376 EmitOperandREX(0, dst, REX_NONE);
377 EmitUint8(0xC7);
378 EmitOperand(0, dst);
379 EmitUint8(imm.value() & 0xFF);
380 EmitUint8((imm.value() >> 8) & 0xFF);
381}
382
383void Assembler::movq(Register dst, const Immediate& imm) {
384 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
385 if (imm.is_uint32()) {
386 // Pick single byte B8 encoding if possible. If dst < 8 then we also omit
387 // the Rex byte.
388 EmitRegisterREX(dst, REX_NONE);
389 EmitUint8(0xB8 | (dst & 7));
390 EmitUInt32(imm.value());
391 } else if (imm.is_int32()) {
392 // Sign extended C7 Cx encoding if we have a negative input.
393 Operand operand(dst);
394 EmitOperandREX(0, operand, REX_W);
395 EmitUint8(0xC7);
396 EmitOperand(0, operand);
397 EmitImmediate(imm);
398 } else {
399 // Full 64 bit immediate encoding.
400 EmitRegisterREX(dst, REX_W);
401 EmitUint8(0xB8 | (dst & 7));
402 EmitImmediate(imm);
403 }
404}
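// Worked examples of the three encodings selected above:
//   movq(RAX, Immediate(1))           => B8 01 00 00 00              (B8+rd, zero-extended)
//   movq(RAX, Immediate(-1))          => 48 C7 C0 FF FF FF FF        (REX.W C7 /0, sign-extended)
//   movq(RAX, Immediate(0x123456789)) => 48 B8 89 67 45 23 01 00 00 00  (full imm64)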
405
406void Assembler::movq(const Address& dst, const Immediate& imm) {
407 if (imm.is_int32()) {
408 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
409 EmitOperandREX(0, dst, REX_W);
410 EmitUint8(0xC7);
411 EmitOperand(0, dst);
412 EmitImmediate(imm);
413 } else {
414 movq(TMP, imm);
415 movq(dst, TMP);
416 }
417}
418
419void Assembler::EmitSimple(int opcode, int opcode2, int opcode3) {
420 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
421 EmitUint8(opcode);
422 if (opcode2 != -1) {
423 EmitUint8(opcode2);
424 if (opcode3 != -1) {
425 EmitUint8(opcode3);
426 }
427 }
428}
429
430void Assembler::EmitQ(int dst, int src, int opcode, int prefix2, int prefix1) {
431 ASSERT(src <= XMM15);
432 ASSERT(dst <= XMM15);
433 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
434 if (prefix1 >= 0) {
435 EmitUint8(prefix1);
436 }
437 EmitRegRegRex(dst, src, REX_W);
438 if (prefix2 >= 0) {
439 EmitUint8(prefix2);
440 }
441 EmitUint8(opcode);
442 EmitRegisterOperand(dst & 7, src);
443}
444
445void Assembler::EmitL(int dst, int src, int opcode, int prefix2, int prefix1) {
446 ASSERT(src <= XMM15);
447 ASSERT(dst <= XMM15);
448 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
449 if (prefix1 >= 0) {
450 EmitUint8(prefix1);
451 }
452 EmitRegRegRex(dst, src);
453 if (prefix2 >= 0) {
454 EmitUint8(prefix2);
455 }
456 EmitUint8(opcode);
457 EmitRegisterOperand(dst & 7, src);
458}
459
460void Assembler::EmitW(Register dst,
461 Register src,
462 int opcode,
463 int prefix2,
464 int prefix1) {
465 ASSERT(src <= R15);
466 ASSERT(dst <= R15);
467 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
468 if (prefix1 >= 0) {
469 EmitUint8(prefix1);
470 }
471 EmitOperandSizeOverride();
472 EmitRegRegRex(dst, src);
473 if (prefix2 >= 0) {
474 EmitUint8(prefix2);
475 }
476 EmitUint8(opcode);
477 EmitRegisterOperand(dst & 7, src);
478}
479
480#define UNARY_XMM_WITH_CONSTANT(name, constant, op) \
481 void Assembler::name(XmmRegister dst, XmmRegister src) { \
482 movq(TMP, Address(THR, target::Thread::constant##_address_offset())); \
483 if (dst == src) { \
484 op(dst, Address(TMP, 0)); \
485 } else { \
486 movups(dst, Address(TMP, 0)); \
487 op(dst, src); \
488 } \
489 }
490
491// TODO(erikcorry): For the case where dst != src, we could construct these
492// with pcmpeqw xmm0,xmm0 followed by left and right shifts. This would avoid
493// memory traffic.
494// { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
495UNARY_XMM_WITH_CONSTANT(notps, float_not, xorps)
496// { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }
497UNARY_XMM_WITH_CONSTANT(negateps, float_negate, xorps)
498// { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF }
499UNARY_XMM_WITH_CONSTANT(absps, float_absolute, andps)
500// { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 }
501UNARY_XMM_WITH_CONSTANT(zerowps, float_zerow, andps)
502// { 0x8000000000000000LL, 0x8000000000000000LL }
503UNARY_XMM_WITH_CONSTANT(negatepd, double_negate, xorpd)
504// { 0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL }
505UNARY_XMM_WITH_CONSTANT(abspd, double_abs, andpd)
506// {0x8000000000000000LL, 0x8000000000000000LL}
507UNARY_XMM_WITH_CONSTANT(DoubleNegate, double_negate, xorpd)
508// {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL}
509UNARY_XMM_WITH_CONSTANT(DoubleAbs, double_abs, andpd)
510
511#undef UNARY_XMM_WITH_CONSTANT
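// For illustration, the first instantiation above expands (modulo formatting) to:
//   void Assembler::notps(XmmRegister dst, XmmRegister src) {
//     movq(TMP, Address(THR, target::Thread::float_not_address_offset()));
//     if (dst == src) {
//       xorps(dst, Address(TMP, 0));
//     } else {
//       movups(dst, Address(TMP, 0));
//       xorps(dst, src);
//     }
//   }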
512
513void Assembler::CmpPS(XmmRegister dst, XmmRegister src, int condition) {
514 EmitL(dst, src, 0xC2, 0x0F);
515 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
516 EmitUint8(condition);
517}
518
519void Assembler::set1ps(XmmRegister dst, Register tmp1, const Immediate& imm) {
520 // Load 32-bit immediate value into tmp1.
521 movl(tmp1, imm);
522 // Move value from tmp1 into dst.
523 movd(dst, tmp1);
524 // Broadcast low lane into other three lanes.
525 shufps(dst, dst, Immediate(0x0));
526}
527
528void Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate& imm) {
529 EmitL(dst, src, 0xC6, 0x0F);
530 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
531 ASSERT(imm.is_uint8());
532 EmitUint8(imm.value());
533}
534
535void Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
536 EmitL(dst, src, 0xC6, 0x0F, 0x66);
537 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
538 ASSERT(imm.is_uint8());
539 EmitUint8(imm.value());
540}
541
542void Assembler::roundsd(XmmRegister dst, XmmRegister src, RoundingMode mode) {
543 ASSERT(src <= XMM15);
544 ASSERT(dst <= XMM15);
545 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
546 EmitUint8(0x66);
547 EmitRegRegRex(dst, src);
548 EmitUint8(0x0F);
549 EmitUint8(0x3A);
550 EmitUint8(0x0B);
551 EmitRegisterOperand(dst & 7, src);
552 // Mask precision exception.
553 EmitUint8(static_cast<uint8_t>(mode) | 0x8);
554}
555
556void Assembler::fldl(const Address& src) {
557 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
558 EmitUint8(0xDD);
559 EmitOperand(0, src);
560}
561
562void Assembler::fstpl(const Address& dst) {
563 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
564 EmitUint8(0xDD);
565 EmitOperand(3, dst);
566}
567
568void Assembler::ffree(intptr_t value) {
569 ASSERT(value < 7);
570 EmitSimple(0xDD, 0xC0 + value);
571}
572
573void Assembler::CompareImmediate(Register reg,
574 const Immediate& imm,
575 OperandSize width) {
576 if (width == kEightBytes) {
577 if (imm.is_int32()) {
578 cmpq(reg, imm);
579 } else {
580 ASSERT(reg != TMP);
581 LoadImmediate(TMP, imm);
582 cmpq(reg, TMP);
583 }
584 } else {
585 ASSERT(width == kFourBytes);
586 cmpl(reg, imm);
587 }
588}
589
590void Assembler::CompareImmediate(const Address& address,
591 const Immediate& imm,
592 OperandSize width) {
593 if (width == kEightBytes) {
594 if (imm.is_int32()) {
595 cmpq(address, imm);
596 } else {
597 LoadImmediate(TMP, imm);
598 cmpq(address, TMP);
599 }
600 } else {
601 ASSERT(width == kFourBytes);
602 cmpl(address, imm);
603 }
604}
605
606void Assembler::testb(const Address& address, const Immediate& imm) {
607 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
608 EmitOperandREX(0, address, REX_NONE);
609 EmitUint8(0xF6);
610 EmitOperand(0, address);
611 ASSERT(imm.is_int8());
612 EmitUint8(imm.value() & 0xFF);
613}
614
615void Assembler::testb(const Address& address, Register reg) {
616 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
617 EmitOperandREX(reg, address, REX_NONE);
618 EmitUint8(0x84);
619 EmitOperand(reg & 7, address);
620}
621
622void Assembler::testq(Register reg, const Immediate& imm) {
623 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
624 if (imm.is_uint8()) {
625 // Use zero-extended 8-bit immediate.
626 if (reg >= 4) {
627 // We need the Rex byte to give access to the SIL and DIL registers (the
628 // low bytes of RSI and RDI).
629 EmitRegisterREX(reg, REX_NONE, /* force = */ true);
630 }
631 if (reg == RAX) {
632 EmitUint8(0xA8);
633 } else {
634 EmitUint8(0xF6);
635 EmitUint8(0xC0 + (reg & 7));
636 }
637 EmitUint8(imm.value() & 0xFF);
638 } else if (imm.is_uint32()) {
639 if (reg == RAX) {
640 EmitUint8(0xA9);
641 } else {
642 EmitRegisterREX(reg, REX_NONE);
643 EmitUint8(0xF7);
644 EmitUint8(0xC0 | (reg & 7));
645 }
646 EmitUInt32(imm.value());
647 } else {
648 // Sign extended version of 32 bit test.
649 ASSERT(imm.is_int32());
650 EmitRegisterREX(reg, REX_W);
651 if (reg == RAX) {
652 EmitUint8(0xA9);
653 } else {
654 EmitUint8(0xF7);
655 EmitUint8(0xC0 | (reg & 7));
656 }
657 EmitImmediate(imm);
658 }
659}
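// Example encodings for the three cases above:
//   testq(RAX, Immediate(0x10))       => A8 10             (8-bit AL form)
//   testq(RSI, Immediate(0x10))       => 40 F6 C6 10       (REX needed to address SIL)
//   testq(RAX, Immediate(0x12345678)) => A9 78 56 34 12    (32-bit EAX form)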
660
661void Assembler::TestImmediate(Register dst,
662 const Immediate& imm,
663 OperandSize width) {
664 if (width == kEightBytes) {
665 if (imm.is_int32() || imm.is_uint32()) {
666 testq(dst, imm);
667 } else {
668 ASSERT(dst != TMP);
669 LoadImmediate(TMP, imm);
670 testq(dst, TMP);
671 }
672 } else {
673 ASSERT(width == kFourBytes);
674 testl(dst, imm);
675 }
676}
677
678void Assembler::AluL(uint8_t modrm_opcode, Register dst, const Immediate& imm) {
679 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
680 EmitRegisterREX(dst, REX_NONE);
681 EmitComplex(modrm_opcode, Operand(dst), imm);
682}
683
684void Assembler::AluB(uint8_t modrm_opcode,
685 const Address& dst,
686 const Immediate& imm) {
687 ASSERT(imm.is_uint8() || imm.is_int8());
688 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
689 EmitOperandREX(modrm_opcode, dst, REX_NONE);
690 EmitUint8(0x80);
691 EmitOperand(modrm_opcode, dst);
692 EmitUint8(imm.value() & 0xFF);
693}
694
695void Assembler::AluW(uint8_t modrm_opcode,
696 const Address& dst,
697 const Immediate& imm) {
698 ASSERT(imm.is_int16() || imm.is_uint16());
699 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
700 EmitOperandSizeOverride();
701 EmitOperandREX(modrm_opcode, dst, REX_NONE);
702 if (imm.is_int8()) {
703 EmitSignExtendedInt8(modrm_opcode, dst, imm);
704 } else {
705 EmitUint8(0x81);
706 EmitOperand(modrm_opcode, dst);
707 EmitUint8(imm.value() & 0xFF);
708 EmitUint8((imm.value() >> 8) & 0xFF);
709 }
710}
711
712void Assembler::AluL(uint8_t modrm_opcode,
713 const Address& dst,
714 const Immediate& imm) {
715 ASSERT(imm.is_int32());
716 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
717 EmitOperandREX(modrm_opcode, dst, REX_NONE);
718 EmitComplex(modrm_opcode, dst, imm);
719}
720
721void Assembler::AluQ(uint8_t modrm_opcode,
722 uint8_t opcode,
723 Register dst,
724 const Immediate& imm) {
725 Operand operand(dst);
726 if (modrm_opcode == 4 && imm.is_uint32()) {
727 // We can use andl for andq.
728 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
729 EmitRegisterREX(dst, REX_NONE);
730 // Would like to use EmitComplex here, but it doesn't like uint32
731 // immediates.
732 if (imm.is_int8()) {
733 EmitSignExtendedInt8(modrm_opcode, operand, imm);
734 } else {
735 if (dst == RAX) {
736 EmitUint8(0x25);
737 } else {
738 EmitUint8(0x81);
739 EmitOperand(modrm_opcode, operand);
740 }
741 EmitUInt32(imm.value());
742 }
743 } else if (imm.is_int32()) {
744 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
745 EmitRegisterREX(dst, REX_W);
746 EmitComplex(modrm_opcode, operand, imm);
747 } else {
748 ASSERT(dst != TMP);
749 movq(TMP, imm);
750 EmitQ(dst, TMP, opcode);
751 }
752}
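// Example of the uint32 special case above: andq(RAX, Immediate(0xFFFFFFFF))
// is emitted as the 32-bit form 25 FF FF FF FF. Since 32-bit operations
// zero-extend their result on x64, this matches the 64-bit and with the
// zero-extended mask while avoiding a movq through TMP.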
753
754void Assembler::AluQ(uint8_t modrm_opcode,
755 uint8_t opcode,
756 const Address& dst,
757 const Immediate& imm) {
758 if (imm.is_int32()) {
759 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
760 EmitOperandREX(modrm_opcode, dst, REX_W);
761 EmitComplex(modrm_opcode, dst, imm);
762 } else {
763 movq(TMP, imm);
764 EmitQ(TMP, dst, opcode);
765 }
766}
767
768void Assembler::AndImmediate(Register dst, const Immediate& imm) {
769 if (imm.is_int32() || imm.is_uint32()) {
770 andq(dst, imm);
771 } else {
772 ASSERT(dst != TMP);
773 LoadImmediate(TMP, imm);
774 andq(dst, TMP);
775 }
776}
777
778void Assembler::AndRegisters(Register dst, Register src1, Register src2) {
779 ASSERT(src1 != src2); // Likely a mistake.
780 if (src2 == kNoRegister) {
781 src2 = dst;
782 }
783 if (dst == src1) {
784 andq(dst, src2);
785 } else if (dst == src2) {
786 andq(dst, src1);
787 } else {
788 movq(dst, src1);
789 andq(dst, src2);
790 }
791}
792
793void Assembler::LslRegister(Register dst, Register shift) {
794 if (shift != RCX) {
795 movq(TMP, RCX);
796 movq(RCX, shift);
797 shlq(dst == RCX ? TMP : dst, RCX);
798 movq(RCX, TMP);
799 } else {
800 shlq(dst, shift);
801 }
802}
803
804void Assembler::OrImmediate(Register dst, const Immediate& imm) {
805 if (imm.is_int32()) {
806 orq(dst, imm);
807 } else {
808 ASSERT(dst != TMP);
809 LoadImmediate(TMP, imm);
810 orq(dst, TMP);
811 }
812}
813
814void Assembler::XorImmediate(Register dst, const Immediate& imm) {
815 if (imm.is_int32()) {
816 xorq(dst, imm);
817 } else {
818 ASSERT(dst != TMP);
819 LoadImmediate(TMP, imm);
820 xorq(dst, TMP);
821 }
822}
823
824void Assembler::cqo() {
825 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
826 EmitRegisterREX(RAX, REX_W);
827 EmitUint8(0x99);
828}
829
830void Assembler::EmitUnaryQ(Register reg, int opcode, int modrm_code) {
831 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
832 EmitRegisterREX(reg, REX_W);
833 EmitUint8(opcode);
834 EmitOperand(modrm_code, Operand(reg));
835}
836
837void Assembler::EmitUnaryL(Register reg, int opcode, int modrm_code) {
838 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
839 EmitRegisterREX(reg, REX_NONE);
840 EmitUint8(opcode);
841 EmitOperand(modrm_code, Operand(reg));
842}
843
844void Assembler::EmitUnaryQ(const Address& address, int opcode, int modrm_code) {
845 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
846 Operand operand(address);
847 EmitOperandREX(modrm_code, operand, REX_W);
848 EmitUint8(opcode);
849 EmitOperand(modrm_code, operand);
850}
851
852void Assembler::EmitUnaryL(const Address& address, int opcode, int modrm_code) {
853 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
854 Operand operand(address);
855 EmitOperandREX(modrm_code, operand, REX_NONE);
856 EmitUint8(opcode);
857 EmitOperand(modrm_code, operand);
858}
859
860void Assembler::imull(Register reg, const Immediate& imm) {
861 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
862 Operand operand(reg);
863 EmitOperandREX(reg, operand, REX_NONE);
864 EmitUint8(0x69);
865 EmitOperand(reg & 7, Operand(reg));
866 EmitImmediate(imm);
867}
868
869void Assembler::imulq(Register reg, const Immediate& imm) {
870 if (imm.is_int32()) {
871 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
872 Operand operand(reg);
873 EmitOperandREX(reg, operand, REX_W);
874 EmitUint8(0x69);
875 EmitOperand(reg & 7, Operand(reg));
876 EmitImmediate(imm);
877 } else {
878 ASSERT(reg != TMP);
879 movq(TMP, imm);
880 imulq(reg, TMP);
881 }
882}
883
884void Assembler::MulImmediate(Register reg,
885 const Immediate& imm,
886 OperandSize width) {
887 ASSERT(width == kFourBytes || width == kEightBytes);
888 if (Utils::IsPowerOfTwo(imm.value())) {
889 const intptr_t shift = Utils::ShiftForPowerOfTwo(imm.value());
890 if (width == kFourBytes) {
891 shll(reg, Immediate(shift));
892 } else {
893 shlq(reg, Immediate(shift));
894 }
895 } else if (imm.is_int32()) {
896 if (width == kFourBytes) {
897 imull(reg, imm);
898 } else {
899 imulq(reg, imm);
900 }
901 } else {
902 ASSERT(reg != TMP);
903 ASSERT(width != kFourBytes);
904 movq(TMP, imm);
905 imulq(reg, TMP);
906 }
907}
908
909void Assembler::shll(Register reg, const Immediate& imm) {
910 EmitGenericShift(false, 4, reg, imm);
911}
912
913void Assembler::shll(Register operand, Register shifter) {
914 EmitGenericShift(false, 4, operand, shifter);
915}
916
917void Assembler::shrl(Register reg, const Immediate& imm) {
918 EmitGenericShift(false, 5, reg, imm);
919}
920
921void Assembler::shrl(Register operand, Register shifter) {
922 EmitGenericShift(false, 5, operand, shifter);
923}
924
925void Assembler::sarl(Register reg, const Immediate& imm) {
926 EmitGenericShift(false, 7, reg, imm);
927}
928
929void Assembler::sarl(Register operand, Register shifter) {
930 EmitGenericShift(false, 7, operand, shifter);
931}
932
933void Assembler::shldl(Register dst, Register src, const Immediate& imm) {
934 EmitL(src, dst, 0xA4, 0x0F);
935 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
936 ASSERT(imm.is_int8());
937 EmitUint8(imm.value() & 0xFF);
938}
939
940void Assembler::shlq(Register reg, const Immediate& imm) {
941 EmitGenericShift(true, 4, reg, imm);
942}
943
944void Assembler::shlq(Register operand, Register shifter) {
945 EmitGenericShift(true, 4, operand, shifter);
946}
947
948void Assembler::shrq(Register reg, const Immediate& imm) {
949 EmitGenericShift(true, 5, reg, imm);
950}
951
952void Assembler::shrq(Register operand, Register shifter) {
953 EmitGenericShift(true, 5, operand, shifter);
954}
955
956void Assembler::sarq(Register reg, const Immediate& imm) {
957 EmitGenericShift(true, 7, reg, imm);
958}
959
960void Assembler::sarq(Register operand, Register shifter) {
961 EmitGenericShift(true, 7, operand, shifter);
962}
963
964void Assembler::shldq(Register dst, Register src, const Immediate& imm) {
965 EmitQ(src, dst, 0xA4, 0x0F);
966 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
967 ASSERT(imm.is_int8());
968 EmitUint8(imm.value() & 0xFF);
969}
970
971void Assembler::btq(Register base, int bit) {
972 ASSERT(bit >= 0 && bit < 64);
973 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
974 Operand operand(base);
975 EmitOperandREX(4, operand, bit >= 32 ? REX_W : REX_NONE);
976 EmitUint8(0x0F);
977 EmitUint8(0xBA);
978 EmitOperand(4, operand);
979 EmitUint8(bit);
980}
981
982void Assembler::enter(const Immediate& imm) {
983 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
984 EmitUint8(0xC8);
985 ASSERT(imm.is_uint16());
986 EmitUint8(imm.value() & 0xFF);
987 EmitUint8((imm.value() >> 8) & 0xFF);
988 EmitUint8(0x00);
989}
990
991void Assembler::nop(int size) {
992 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
993 // There are nops up to size 15, but for now just provide up to size 8.
994 ASSERT(0 < size && size <= MAX_NOP_SIZE);
995 switch (size) {
996 case 1:
997 EmitUint8(0x90);
998 break;
999 case 2:
1000 EmitUint8(0x66);
1001 EmitUint8(0x90);
1002 break;
1003 case 3:
1004 EmitUint8(0x0F);
1005 EmitUint8(0x1F);
1006 EmitUint8(0x00);
1007 break;
1008 case 4:
1009 EmitUint8(0x0F);
1010 EmitUint8(0x1F);
1011 EmitUint8(0x40);
1012 EmitUint8(0x00);
1013 break;
1014 case 5:
1015 EmitUint8(0x0F);
1016 EmitUint8(0x1F);
1017 EmitUint8(0x44);
1018 EmitUint8(0x00);
1019 EmitUint8(0x00);
1020 break;
1021 case 6:
1022 EmitUint8(0x66);
1023 EmitUint8(0x0F);
1024 EmitUint8(0x1F);
1025 EmitUint8(0x44);
1026 EmitUint8(0x00);
1027 EmitUint8(0x00);
1028 break;
1029 case 7:
1030 EmitUint8(0x0F);
1031 EmitUint8(0x1F);
1032 EmitUint8(0x80);
1033 EmitUint8(0x00);
1034 EmitUint8(0x00);
1035 EmitUint8(0x00);
1036 EmitUint8(0x00);
1037 break;
1038 case 8:
1039 EmitUint8(0x0F);
1040 EmitUint8(0x1F);
1041 EmitUint8(0x84);
1042 EmitUint8(0x00);
1043 EmitUint8(0x00);
1044 EmitUint8(0x00);
1045 EmitUint8(0x00);
1046 EmitUint8(0x00);
1047 break;
1048 default:
1049 UNIMPLEMENTED();
1050 }
1051}
1052
1053void Assembler::j(Condition condition, Label* label, JumpDistance distance) {
1054 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1055 if (label->IsBound()) {
1056 const int kShortSize = 2;
1057 const int kLongSize = 6;
1058 intptr_t offset = label->Position() - buffer_.Size();
1059 ASSERT(offset <= 0);
1060 if (Utils::IsInt(8, offset - kShortSize)) {
1061 EmitUint8(0x70 + condition);
1062 EmitUint8((offset - kShortSize) & 0xFF);
1063 } else {
1064 EmitUint8(0x0F);
1065 EmitUint8(0x80 + condition);
1066 EmitInt32(offset - kLongSize);
1067 }
1068 } else if (distance == kNearJump) {
1069 EmitUint8(0x70 + condition);
1070 EmitNearLabelLink(label);
1071 } else {
1072 EmitUint8(0x0F);
1073 EmitUint8(0x80 + condition);
1074 EmitLabelLink(label);
1075 }
1076}
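// Example: a je back to a label bound 0x20 bytes before this jump fits the
// short form and emits 74 DE (rel8 = -(0x20 + 2)); offsets outside the int8
// range use the 6-byte 0F 84 rel32 form instead.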
1077
1078void Assembler::J(Condition condition, const Code& target, Register pp) {
1079 Label no_jump;
1080 // Negate condition.
1081 j(static_cast<Condition>(condition ^ 1), &no_jump, kNearJump);
1082 Jmp(target, pp);
1083 Bind(&no_jump);
1084}
1085
1086void Assembler::jmp(Label* label, JumpDistance distance) {
1087 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1088 if (label->IsBound()) {
1089 const int kShortSize = 2;
1090 const int kLongSize = 5;
1091 intptr_t offset = label->Position() - buffer_.Size();
1092 ASSERT(offset <= 0);
1093 if (Utils::IsInt(8, offset - kShortSize)) {
1094 EmitUint8(0xEB);
1095 EmitUint8((offset - kShortSize) & 0xFF);
1096 } else {
1097 EmitUint8(0xE9);
1098 EmitInt32(offset - kLongSize);
1099 }
1100 } else if (distance == kNearJump) {
1101 EmitUint8(0xEB);
1102 EmitNearLabelLink(label);
1103 } else {
1104 EmitUint8(0xE9);
1105 EmitLabelLink(label);
1106 }
1107}
1108
1109void Assembler::jmp(const ExternalLabel* label) {
1110 { // Encode movq(TMP, Immediate(label->address())), but always as imm64.
1111 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1112 EmitRegisterREX(TMP, REX_W);
1113 EmitUint8(0xB8 | (TMP & 7));
1114 EmitInt64(label->address());
1115 }
1116 jmp(TMP);
1117}
1118
1119void Assembler::JmpPatchable(const Code& target, Register pp) {
1120 ASSERT((pp != PP) || constant_pool_allowed());
1121 const intptr_t idx = object_pool_builder().AddObject(
1122 ToObject(target), ObjectPoolBuilderEntry::kPatchable);
1123 const int32_t offset = target::ObjectPool::element_offset(idx);
1124 movq(CODE_REG, Address(pp, offset - kHeapObjectTag));
1125 movq(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1126 jmp(TMP);
1127}
1128
1129void Assembler::Jmp(const Code& target, Register pp) {
1130 ASSERT((pp != PP) || constant_pool_allowed());
1131 const intptr_t idx = object_pool_builder().FindObject(
1132 ToObject(target), ObjectPoolBuilderEntry::kNotPatchable);
1133 const int32_t offset = target::ObjectPool::element_offset(idx);
1134 movq(CODE_REG, FieldAddress(pp, offset));
1135 jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1136}
1137
1138void Assembler::CompareRegisters(Register a, Register b) {
1139 cmpq(a, b);
1140}
1141
1142void Assembler::LoadFromStack(Register dst, intptr_t depth) {
1143 ASSERT(depth >= 0);
1144 movq(dst, Address(SPREG, depth * target::kWordSize));
1145}
1146
1147void Assembler::StoreToStack(Register src, intptr_t depth) {
1148 ASSERT(depth >= 0);
1149 movq(Address(SPREG, depth * target::kWordSize), src);
1150}
1151
1152void Assembler::CompareToStack(Register src, intptr_t depth) {
1153 ASSERT(depth >= 0);
1154 cmpq(src, Address(SPREG, depth * target::kWordSize));
1155}
1156
1157void Assembler::ExtendValue(Register to, Register from, OperandSize sz) {
1158 switch (sz) {
1159 case kEightBytes:
1160 if (to == from) return; // No operation needed.
1161 return movq(to, from);
1162 case kUnsignedFourBytes:
1163 return movl(to, from);
1164 case kFourBytes:
1165 return movsxd(to, from);
1166 case kUnsignedTwoBytes:
1167 return movzxw(to, from);
1168 case kTwoBytes:
1169 return movsxw(to, from);
1170 case kUnsignedByte:
1171 return movzxb(to, from);
1172 case kByte:
1173 return movsxb(to, from);
1174 default:
1175 UNIMPLEMENTED();
1176 break;
1177 }
1178}
1179
1180void Assembler::PushRegister(Register r) {
1181 pushq(r);
1182}
1183
1184void Assembler::PopRegister(Register r) {
1185 popq(r);
1186}
1187
1188void Assembler::AddImmediate(Register reg,
1189 const Immediate& imm,
1190 OperandSize width) {
1191 ASSERT(width == kFourBytes || width == kEightBytes);
1192 const int64_t value = imm.value();
1193 if (value == 0) {
1194 return;
1195 }
1196 if ((value > 0) || (value == kMinInt64)) {
1197 if (value == 1) {
1198 if (width == kFourBytes) {
1199 incl(reg);
1200 } else {
1201 incq(reg);
1202 }
1203 } else {
1204 if (imm.is_int32() || (width == kFourBytes && imm.is_uint32())) {
1205 if (width == kFourBytes) {
1206 addl(reg, imm);
1207 } else {
1208 addq(reg, imm);
1209 }
1210 } else {
1211 ASSERT(reg != TMP);
1212 ASSERT(width != kFourBytes);
1213 LoadImmediate(TMP, imm);
1214 addq(reg, TMP);
1215 }
1216 }
1217 } else {
1218 SubImmediate(reg, Immediate(-value), width);
1219 }
1220}
1221
1222void Assembler::AddImmediate(Register dest, Register src, int64_t value) {
1223 if (dest == src) {
1224 AddImmediate(dest, value);
1225 return;
1226 }
1227 if (value == 0) {
1228 MoveRegister(dest, src);
1229 return;
1230 }
1231 if (Utils::IsInt(32, value)) {
1232 leaq(dest, Address(src, value));
1233 return;
1234 }
1235 LoadImmediate(dest, value);
1236 addq(dest, src);
1237}
1238
1239void Assembler::AddImmediate(const Address& address, const Immediate& imm) {
1240 const int64_t value = imm.value();
1241 if (value == 0) {
1242 return;
1243 }
1244 if ((value > 0) || (value == kMinInt64)) {
1245 if (value == 1) {
1246 incq(address);
1247 } else {
1248 if (imm.is_int32()) {
1249 addq(address, imm);
1250 } else {
1251 LoadImmediate(TMP, imm);
1252 addq(address, TMP);
1253 }
1254 }
1255 } else {
1256 SubImmediate(address, Immediate(-value));
1257 }
1258}
1259
1260void Assembler::SubImmediate(Register reg,
1261 const Immediate& imm,
1262 OperandSize width) {
1263 ASSERT(width == kFourBytes || width == kEightBytes);
1264 const int64_t value = imm.value();
1265 if (value == 0) {
1266 return;
1267 }
1268 if ((value > 0) || (value == kMinInt64) ||
1269 (value == kMinInt32 && width == kFourBytes)) {
1270 if (value == 1) {
1271 if (width == kFourBytes) {
1272 decl(reg);
1273 } else {
1274 decq(reg);
1275 }
1276 } else {
1277 if (imm.is_int32()) {
1278 if (width == kFourBytes) {
1279 subl(reg, imm);
1280 } else {
1281 subq(reg, imm);
1282 }
1283 } else {
1284 ASSERT(reg != TMP);
1285 ASSERT(width != kFourBytes);
1286 LoadImmediate(TMP, imm);
1287 subq(reg, TMP);
1288 }
1289 }
1290 } else {
1291 AddImmediate(reg, Immediate(-value), width);
1292 }
1293}
1294
1295void Assembler::SubImmediate(const Address& address, const Immediate& imm) {
1296 const int64_t value = imm.value();
1297 if (value == 0) {
1298 return;
1299 }
1300 if ((value > 0) || (value == kMinInt64)) {
1301 if (value == 1) {
1302 decq(address);
1303 } else {
1304 if (imm.is_int32()) {
1305 subq(address, imm);
1306 } else {
1307 LoadImmediate(TMP, imm);
1308 subq(address, TMP);
1309 }
1310 }
1311 } else {
1312 AddImmediate(address, Immediate(-value));
1313 }
1314}
1315
1316void Assembler::Drop(intptr_t stack_elements, Register tmp) {
1317 ASSERT(stack_elements >= 0);
1318 if (stack_elements <= 4) {
1319 for (intptr_t i = 0; i < stack_elements; i++) {
1320 popq(tmp);
1321 }
1322 return;
1323 }
1324 addq(RSP, Immediate(stack_elements * target::kWordSize));
1325}
1326
1327bool Assembler::CanLoadFromObjectPool(const Object& object) const {
1328 ASSERT(IsOriginalObject(object));
1329 if (!constant_pool_allowed()) {
1330 return false;
1331 }
1332
1333 DEBUG_ASSERT(IsNotTemporaryScopedHandle(object));
1334 ASSERT(IsInOldSpace(object));
1335 return true;
1336}
1337
1338void Assembler::LoadWordFromPoolIndex(Register dst, intptr_t idx) {
1339 ASSERT(constant_pool_allowed());
1340 ASSERT(dst != PP);
1341 // PP is tagged on X64.
1342 movq(dst, FieldAddress(PP, target::ObjectPool::element_offset(idx)));
1343}
1344
1345void Assembler::StoreWordToPoolIndex(Register src, intptr_t idx) {
1346 ASSERT(constant_pool_allowed());
1347 ASSERT(src != PP);
1348 // PP is tagged on X64.
1349 movq(FieldAddress(PP, target::ObjectPool::element_offset(idx)), src);
1350}
1351
1352void Assembler::LoadInt64FromBoxOrSmi(Register result, Register value) {
1353 compiler::Label done;
1354#if !defined(DART_COMPRESSED_POINTERS)
1355 // Optimistically untag value.
1356 SmiUntag(result, value);
1357 j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
1358 // Undo untagging by multiplying value by 2.
1359 // [reg + reg + disp8] has a shorter encoding than [reg*2 + disp32]
1360 movq(result, compiler::Address(result, result, TIMES_1,
1361 target::Mint::value_offset()));
1362#else
1363 if (result == value) {
1364 ASSERT(TMP != value);
1365 MoveRegister(TMP, value);
1366 value = TMP;
1367 }
1368 ASSERT(value != result);
1369 // Cannot speculatively untag with value == result because it erases the
1370 // upper bits needed to dereference when it is a Mint.
1371 SmiUntagAndSignExtend(result, value);
1372 j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
1373 movq(result, compiler::FieldAddress(value, target::Mint::value_offset()));
1374#endif
1375 Bind(&done);
1376}
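// SmiUntag shifts the tag bit into the carry flag, so NOT_CARRY means the
// value really was a Smi and 'result' already holds the untagged integer.
// Otherwise result + result == value - kHeapObjectTag, i.e. the untagged Mint
// pointer, so adding the raw value_offset() yields the boxed 64-bit payload.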
1377
1378void Assembler::LoadInt32FromBoxOrSmi(Register result, Register value) {
1379 compiler::Label done;
1380#if !defined(DART_COMPRESSED_POINTERS)
1381 // Optimistically untag value.
1382 SmiUntag(result, value);
1383 j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
1384 // Undo untagging by multiplying value by 2.
1385 // [reg + reg + disp8] has a shorter encoding than [reg*2 + disp32]
1386 movsxd(result, compiler::Address(result, result, TIMES_1,
1387 compiler::target::Mint::value_offset()));
1388#else
1389 if (result == value) {
1390 ASSERT(TMP != value);
1391 MoveRegister(TMP, value);
1392 value = TMP;
1393 }
1394 ASSERT(value != result);
1395 // Cannot speculatively untag with value == result because it erases the
1396 // upper bits needed to dereference when it is a Mint.
1397 SmiUntagAndSignExtend(result, value);
1398 j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
1399 movsxd(result,
1400 compiler::FieldAddress(value, compiler::target::Mint::value_offset()));
1401#endif
1402 Bind(&done);
1403}
1404
1405void Assembler::LoadIsolate(Register dst) {
1406 movq(dst, Address(THR, target::Thread::isolate_offset()));
1407}
1408
1409void Assembler::LoadIsolateGroup(Register dst) {
1410 movq(dst, Address(THR, target::Thread::isolate_group_offset()));
1411}
1412
1413void Assembler::LoadDispatchTable(Register dst) {
1414 movq(dst, Address(THR, target::Thread::dispatch_table_array_offset()));
1415}
1416
1417void Assembler::LoadObjectHelper(
1418 Register dst,
1419 const Object& object,
1420 bool is_unique,
1421 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1422 ASSERT(IsOriginalObject(object));
1423
1424 // `is_unique == true` effectively means object has to be patchable.
1425 if (!is_unique) {
1426 intptr_t offset;
1427 if (target::CanLoadFromThread(object, &offset)) {
1428 movq(dst, Address(THR, offset));
1429 return;
1430 }
1431 if (target::IsSmi(object)) {
1432 LoadImmediate(dst, Immediate(target::ToRawSmi(object)));
1433 return;
1434 }
1435 }
1436 RELEASE_ASSERT(CanLoadFromObjectPool(object));
1437 const intptr_t index =
1438 is_unique
1439 ? object_pool_builder().AddObject(
1440 object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
1441 : object_pool_builder().FindObject(
1442 object, ObjectPoolBuilderEntry::kNotPatchable,
1443 snapshot_behavior);
1444 LoadWordFromPoolIndex(dst, index);
1445}
1446
1447void Assembler::LoadObject(Register dst, const Object& object) {
1448 LoadObjectHelper(dst, object, false);
1449}
1450
1451void Assembler::LoadUniqueObject(
1452 Register dst,
1453 const Object& object,
1454 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1455 LoadObjectHelper(dst, object, true, snapshot_behavior);
1456}
1457
1458void Assembler::StoreObject(const Address& dst,
1459 const Object& object,
1460 OperandSize size) {
1461 ASSERT(IsOriginalObject(object));
1463
1464 intptr_t offset_from_thread;
1465 if (target::CanLoadFromThread(object, &offset_from_thread)) {
1466 movq(TMP, Address(THR, offset_from_thread));
1467 Store(TMP, dst, size);
1468 } else if (target::IsSmi(object)) {
1469 MoveImmediate(dst, Immediate(target::ToRawSmi(object)), size);
1470 } else {
1471 LoadObject(TMP, object);
1472 Store(TMP, dst, size);
1473 }
1474}
1475
1476void Assembler::PushObject(const Object& object) {
1477 ASSERT(IsOriginalObject(object));
1478
1479 intptr_t offset_from_thread;
1480 if (target::CanLoadFromThread(object, &offset_from_thread)) {
1481 pushq(Address(THR, offset_from_thread));
1482 } else if (target::IsSmi(object)) {
1483 PushImmediate(Immediate(target::ToRawSmi(object)));
1484 } else {
1485 LoadObject(TMP, object);
1486 pushq(TMP);
1487 }
1488}
1489
1490void Assembler::CompareObject(Register reg, const Object& object) {
1491 ASSERT(IsOriginalObject(object));
1492
1493 intptr_t offset_from_thread;
1494 if (target::CanLoadFromThread(object, &offset_from_thread)) {
1495 OBJ(cmp)(reg, Address(THR, offset_from_thread));
1496 } else if (target::IsSmi(object)) {
1497 CompareImmediate(reg, Immediate(target::ToRawSmi(object)), kObjectBytes);
1498 } else {
1499 RELEASE_ASSERT(CanLoadFromObjectPool(object));
1500 const intptr_t idx = object_pool_builder().FindObject(
1501 object, ObjectPoolBuilderEntry::kNotPatchable);
1502 const int32_t offset = target::ObjectPool::element_offset(idx);
1503 OBJ(cmp)(reg, Address(PP, offset - kHeapObjectTag));
1504 }
1505}
1506
1507void Assembler::LoadImmediate(Register reg, const Immediate& imm) {
1508 if (imm.value() == 0) {
1509 xorl(reg, reg);
1510 } else if (imm.is_int32() || !constant_pool_allowed()) {
1511 movq(reg, imm);
1512 } else {
1513 const intptr_t idx = object_pool_builder().FindImmediate(imm.value());
1514 LoadWordFromPoolIndex(reg, idx);
1515 }
1516}
1517
1518void Assembler::MoveImmediate(const Address& dst,
1519 const Immediate& imm,
1520 OperandSize size) {
1521 if (imm.is_int32()) {
1522 if (size == kFourBytes) {
1523 movl(dst, imm);
1524 } else {
1525 ASSERT(size == kEightBytes);
1526 movq(dst, imm);
1527 }
1528 } else {
1529 LoadImmediate(TMP, imm);
1530 if (size == kFourBytes) {
1531 movl(dst, TMP);
1532 } else {
1533 ASSERT(size == kEightBytes);
1534 movq(dst, TMP);
1535 }
1536 }
1537}
1538
1539void Assembler::LoadSImmediate(FpuRegister dst, float immediate) {
1540 int32_t bits = bit_cast<int32_t>(immediate);
1541 if (bits == 0) {
1542 xorps(dst, dst);
1543 } else {
1544 intptr_t index = object_pool_builder().FindImmediate(bits);
1545 LoadUnboxedSingle(
1546 dst, PP, target::ObjectPool::element_offset(index) - kHeapObjectTag);
1547 }
1548}
1549
1550void Assembler::LoadDImmediate(FpuRegister dst, double immediate) {
1551 int64_t bits = bit_cast<int64_t>(immediate);
1552 if (bits == 0) {
1553 xorps(dst, dst);
1554 } else {
1555 intptr_t index = object_pool_builder().FindImmediate64(bits);
1556 LoadUnboxedDouble(
1557 dst, PP, target::ObjectPool::element_offset(index) - kHeapObjectTag);
1558 }
1559}
1560
1561void Assembler::LoadQImmediate(FpuRegister dst, simd128_value_t immediate) {
1562 intptr_t index = object_pool_builder().FindImmediate128(immediate);
1563 movups(dst, Address(PP, target::ObjectPool::element_offset(index) -
1564 kHeapObjectTag));
1565}
1566
1567#if defined(DART_COMPRESSED_POINTERS)
1568void Assembler::LoadCompressed(Register dest, const Address& slot) {
1569 movl(dest, slot); // Zero-extension.
1570 addq(dest, Address(THR, target::Thread::heap_base_offset()));
1571}
1572#endif
1573
1574void Assembler::StoreBarrier(Register object,
1575 Register value,
1576 CanBeSmi can_be_smi,
1577 Register scratch) {
1578 // x.slot = x. The barrier should have been removed at the IL level.
1579 ASSERT(object != value);
1580 ASSERT(object != scratch);
1581 ASSERT(value != scratch);
1582 ASSERT(scratch != kNoRegister);
1583
1584 // In parallel, test whether
1585 // - object is old and not remembered and value is new, or
1586 // - object is old and value is old and not marked and concurrent marking is
1587 // in progress
1588 // If so, call the WriteBarrier stub, which will either add object to the
1589 // store buffer (case 1) or add value to the marking stack (case 2).
1590 // Compare UntaggedObject::StorePointer.
1591 Label done;
1592 if (can_be_smi == kValueCanBeSmi) {
1593 BranchIfSmi(value, &done, kNearJump);
1594 } else {
1595#if defined(DEBUG)
1596 Label passed_check;
1597 BranchIfNotSmi(value, &passed_check, kNearJump);
1598 Breakpoint();
1599 Bind(&passed_check);
1600#endif
1601 }
1602 movb(ByteRegisterOf(scratch),
1603 FieldAddress(object, target::Object::tags_offset()));
1604 shrl(scratch, Immediate(target::UntaggedObject::kBarrierOverlapShift));
1605 andl(scratch, Address(THR, target::Thread::write_barrier_mask_offset()));
1606 testb(FieldAddress(value, target::Object::tags_offset()), scratch);
1607 j(ZERO, &done, kNearJump);
1608
1609 Register object_for_call = object;
1610 if (value != kWriteBarrierValueReg) {
1611 // Unlikely. Only non-graph intrinsics.
1612 // TODO(rmacnak): Shuffle registers in intrinsics.
1613 pushq(kWriteBarrierValueReg);
1614 if (object == kWriteBarrierValueReg) {
1615 COMPILE_ASSERT(RBX != kWriteBarrierValueReg);
1616 COMPILE_ASSERT(RCX != kWriteBarrierValueReg);
1617 object_for_call = (value == RBX) ? RCX : RBX;
1618 pushq(object_for_call);
1619 movq(object_for_call, object);
1620 }
1621 movq(kWriteBarrierValueReg, value);
1622 }
1623 generate_invoke_write_barrier_wrapper_(object_for_call);
1624 if (value != kWriteBarrierValueReg) {
1625 if (object == kWriteBarrierValueReg) {
1626 popq(object_for_call);
1627 }
1628 popq(kWriteBarrierValueReg);
1629 }
1630 Bind(&done);
1631}
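// Schematically, the filter above calls the write-barrier stub only when
//   ((object->tags >> kBarrierOverlapShift) & THR->write_barrier_mask
//     & value->tags) != 0,
// which covers both the generational case (old object, new value) and the
// incremental-marking case (old object, unmarked old value) with one branch.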
1632
1633void Assembler::ArrayStoreBarrier(Register object,
1634 Register slot,
1635 Register value,
1636 CanBeSmi can_be_smi,
1637 Register scratch) {
1638 ASSERT(object != scratch);
1639 ASSERT(value != scratch);
1640 ASSERT(slot != scratch);
1641 ASSERT(scratch != kNoRegister);
1642
1643 // In parallel, test whether
1644 // - object is old and not remembered and value is new, or
1645 // - object is old and value is old and not marked and concurrent marking is
1646 // in progress
1647 // If so, call the WriteBarrier stub, which will either add object to the
1648 // store buffer (case 1) or add value to the marking stack (case 2).
1649 // Compare UntaggedObject::StorePointer.
1650 Label done;
1651 if (can_be_smi == kValueCanBeSmi) {
1652 BranchIfSmi(value, &done, kNearJump);
1653 } else {
1654#if defined(DEBUG)
1655 Label passed_check;
1656 BranchIfNotSmi(value, &passed_check, kNearJump);
1657 Breakpoint();
1658 Bind(&passed_check);
1659#endif
1660 }
1661 movb(ByteRegisterOf(scratch),
1662 FieldAddress(object, target::Object::tags_offset()));
1663 shrl(scratch, Immediate(target::UntaggedObject::kBarrierOverlapShift));
1664 andl(scratch, Address(THR, target::Thread::write_barrier_mask_offset()));
1665 testb(FieldAddress(value, target::Object::tags_offset()), scratch);
1666 j(ZERO, &done, kNearJump);
1667
1668 if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
1669 (slot != kWriteBarrierSlotReg)) {
1670 // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
1671 // from StoreIndexInstr, which gets these exact registers from the register
1672 // allocator.
1673 UNIMPLEMENTED();
1674 }
1675
1676 generate_invoke_array_write_barrier_();
1677
1678 Bind(&done);
1679}
1680
1681void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
1682 Register value) {
1683 // We can't assert the incremental barrier is not needed here, only the
1684 // generational barrier. We sometimes omit the write barrier when 'value' is
1685 // a constant, but we don't eagerly mark 'value' and instead assume it is also
1686 // reachable via a constant pool, so it doesn't matter if it is not traced via
1687 // 'object'.
1688 Label done;
1689 BranchIfSmi(value, &done, kNearJump);
1690 testb(FieldAddress(value, target::Object::tags_offset()),
1691 Immediate(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
1692 j(ZERO, &done, Assembler::kNearJump);
1693 testb(FieldAddress(object, target::Object::tags_offset()),
1694 Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
1695 j(ZERO, &done, Assembler::kNearJump);
1696 Stop("Write barrier is required");
1697 Bind(&done);
1698}
1699
1700void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
1701 const Address& dest,
1702 const Object& value,
1703 MemoryOrder memory_order,
1704 OperandSize size) {
1705 if (memory_order == kRelease) {
1706 LoadObject(TMP, value);
1707 StoreIntoObjectNoBarrier(object, dest, TMP, memory_order, size);
1708 } else {
1709 StoreObject(dest, value, size);
1710 }
1711}
1712
1713void Assembler::StoreInternalPointer(Register object,
1714 const Address& dest,
1715 Register value) {
1716 movq(dest, value);
1717}
1718
1719void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
1720#if defined(DEBUG)
1721 Label done;
1722 testq(value, Immediate(kHeapObjectTag));
1723 j(ZERO, &done);
1724 Stop("New value must be Smi.");
1725 Bind(&done);
1726#endif // defined(DEBUG)
1727 movq(dest, value);
1728}
1729
1730void Assembler::ZeroInitSmiField(const Address& dest) {
1731 Immediate zero(target::ToRawSmi(0));
1732 movq(dest, zero);
1733}
1734
1735void Assembler::ZeroInitCompressedSmiField(const Address& dest) {
1736 Immediate zero(target::ToRawSmi(0));
1737 OBJ(mov)(dest, zero);
1738}
1739
1740void Assembler::IncrementCompressedSmiField(const Address& dest,
1741 int64_t increment) {
1742 // Note: FlowGraphCompiler::EdgeCounterIncrementSizeInBytes depends on
1743 // the length of this instruction sequence.
1744 Immediate inc_imm(target::ToRawSmi(increment));
1745 OBJ(add)(dest, inc_imm);
1746}
1747
1748void Assembler::Bind(Label* label) {
1749 intptr_t bound = buffer_.Size();
1750 ASSERT(!label->IsBound()); // Labels can only be bound once.
1751 while (label->IsLinked()) {
1752 intptr_t position = label->LinkPosition();
1753 intptr_t next = buffer_.Load<int32_t>(position);
1754 buffer_.Store<int32_t>(position, bound - (position + 4));
1755 label->position_ = next;
1756 }
1757 while (label->HasNear()) {
1758 intptr_t position = label->NearPosition();
1759 intptr_t offset = bound - (position + 1);
1760 ASSERT(Utils::IsInt(8, offset));
1761 buffer_.Store<int8_t>(position, offset);
1762 }
1763 label->BindTo(bound);
1764}
1765
1766void Assembler::Load(Register reg, const Address& address, OperandSize sz) {
1767 switch (sz) {
1768 case kByte:
1769 return movsxb(reg, address);
1770 case kUnsignedByte:
1771 return movzxb(reg, address);
1772 case kTwoBytes:
1773 return movsxw(reg, address);
1774 case kUnsignedTwoBytes:
1775 return movzxw(reg, address);
1776 case kFourBytes:
1777 return movsxd(reg, address);
1778 case kUnsignedFourBytes:
1779 return movl(reg, address);
1780 case kEightBytes:
1781 return movq(reg, address);
1782 default:
1783 UNREACHABLE();
1784 break;
1785 }
1786}
1787
1788void Assembler::Store(Register reg, const Address& address, OperandSize sz) {
1789 switch (sz) {
1790 case kByte:
1791 case kUnsignedByte:
1792 return movb(address, ByteRegisterOf(reg));
1793 case kTwoBytes:
1794 case kUnsignedTwoBytes:
1795 return movw(address, reg);
1796 case kFourBytes:
1797 case kUnsignedFourBytes:
1798 return movl(address, reg);
1799 case kEightBytes:
1800 return movq(address, reg);
1801 default:
1802 UNREACHABLE();
1803 break;
1804 }
1805}
1806
1807void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
1808 sarq(reg, Immediate(shift));
1809}
1810
1811void Assembler::CompareWords(Register reg1,
1812 Register reg2,
1813 intptr_t offset,
1814 Register count,
1815 Register temp,
1816 Label* equals) {
1817 Label loop;
1818 Bind(&loop);
1819 decq(count);
1820 j(LESS, equals, Assembler::kNearJump);
1822 movq(temp, FieldAddress(reg1, count, TIMES_8, offset));
1823 cmpq(temp, FieldAddress(reg2, count, TIMES_8, offset));
1824 BranchIf(EQUAL, &loop, Assembler::kNearJump);
1825}
1826
1827void Assembler::EnterFrame(intptr_t frame_size) {
1828 if (prologue_offset_ == -1) {
1829 prologue_offset_ = CodeSize();
1830 Comment("PrologueOffset = %" Pd "", CodeSize());
1831 }
1832#ifdef DEBUG
1833 intptr_t check_offset = CodeSize();
1834#endif
1835 pushq(RBP);
1836 movq(RBP, RSP);
1837#ifdef DEBUG
1838 ProloguePattern pp(CodeAddress(check_offset));
1839 ASSERT(pp.IsValid());
1840#endif
1841 if (frame_size != 0) {
1842 Immediate frame_space(frame_size);
1843 subq(RSP, frame_space);
1844 }
1845}
1846
1847void Assembler::LeaveFrame() {
1848 movq(RSP, RBP);
1849 popq(RBP);
1850}
1851
1852void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
1853 // Reserve space for arguments and align frame before entering
1854 // the C++ world.
1855 if (frame_space != 0) {
1856 subq(RSP, Immediate(frame_space));
1857 }
1858 if (OS::ActivationFrameAlignment() > 1) {
1859 andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
1860 }
1861}
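// Example: with a 16-byte activation frame alignment the andq above becomes
// andq(RSP, Immediate(~15)), encoded as 48 83 E4 F0, which rounds RSP down to
// a 16-byte boundary after the frame space has been reserved.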
1862
1863void Assembler::EmitEntryFrameVerification() {
1864#if defined(DEBUG)
1865 Label ok;
1866 leaq(RAX, Address(RBP, target::frame_layout.exit_link_slot_from_entry_fp *
1867 target::kWordSize));
1868 cmpq(RAX, RSP);
1869 j(EQUAL, &ok);
1870 Stop("target::frame_layout.exit_link_slot_from_entry_fp mismatch");
1871 Bind(&ok);
1872#endif
1873}
1874
1875void Assembler::PushRegisters(const RegisterSet& register_set) {
1876 const intptr_t xmm_regs_count = register_set.FpuRegisterCount();
1877 if (xmm_regs_count > 0) {
1878 AddImmediate(RSP, Immediate(-xmm_regs_count * kFpuRegisterSize));
1879 // Store XMM registers with the lowest register number at the lowest
1880 // address.
1881 intptr_t offset = 0;
1882 for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
1883 XmmRegister xmm_reg = static_cast<XmmRegister>(i);
1884 if (register_set.ContainsFpuRegister(xmm_reg)) {
1885 movups(Address(RSP, offset), xmm_reg);
1886 offset += kFpuRegisterSize;
1887 }
1888 }
1889 ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
1890 }
1891
1892 // The order in which the registers are pushed must match the order
1893 // in which the registers are encoded in the safe point's stack map.
1894 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1895 Register reg = static_cast<Register>(i);
1896 if (register_set.ContainsRegister(reg)) {
1897 pushq(reg);
1898 }
1899 }
1900}
1901
1902void Assembler::PopRegisters(const RegisterSet& register_set) {
1903 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1904 Register reg = static_cast<Register>(i);
1905 if (register_set.ContainsRegister(reg)) {
1906 popq(reg);
1907 }
1908 }
1909
1910 const intptr_t xmm_regs_count = register_set.FpuRegisterCount();
1911 if (xmm_regs_count > 0) {
1912 // XMM registers have the lowest register number at the lowest address.
1913 intptr_t offset = 0;
1914 for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
1915 XmmRegister xmm_reg = static_cast<XmmRegister>(i);
1916 if (register_set.ContainsFpuRegister(xmm_reg)) {
1917 movups(xmm_reg, Address(RSP, offset));
1918 offset += kFpuRegisterSize;
1919 }
1920 }
1921 ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
1922 AddImmediate(RSP, Immediate(offset));
1923 }
1924}
1925
1926void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
1927 for (Register reg : regs) {
1928 PushRegister(reg);
1929 }
1930}
1931
1932static const RegisterSet kVolatileRegisterSet(
1933 CallingConventions::kVolatileCpuRegisters,
1934 CallingConventions::kVolatileXmmRegisters);
1935
1936void Assembler::CallCFunction(Register reg, bool restore_rsp) {
1937 // Reserve shadow space for outgoing arguments.
1938 if (CallingConventions::kShadowSpaceBytes != 0) {
1939 subq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1940 }
1941 call(reg);
1942 // Restore stack.
1943 if (restore_rsp && CallingConventions::kShadowSpaceBytes != 0) {
1944 addq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1945 }
1946}
1947void Assembler::CallCFunction(Address address, bool restore_rsp) {
1948 // Reserve shadow space for outgoing arguments.
1949 if (CallingConventions::kShadowSpaceBytes != 0) {
1950 subq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1951 }
1952 call(address);
1953 // Restore stack.
1954 if (restore_rsp && CallingConventions::kShadowSpaceBytes != 0) {
1955 addq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1956 }
1957}
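// Note: kShadowSpaceBytes is the caller-reserved "home" area some C calling
// conventions require (32 bytes in the Windows x64 convention; 0 in the
// System V AMD64 ABI), so on System V targets the subq/addq pair above is
// simply not emitted.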
1958
1959void Assembler::CallRuntime(const RuntimeEntry& entry,
1960 intptr_t argument_count) {
1961 ASSERT(!entry.is_leaf());
1962 // Argument count is not checked here, but in the runtime entry for a more
1963 // informative error message.
1964 movq(RBX, compiler::Address(THR, entry.OffsetFromThread()));
1965 LoadImmediate(R10, compiler::Immediate(argument_count));
1966 call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
1967}
1968
1969#define __ assembler_->
1970
1971LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
1972 intptr_t frame_size,
1973 bool preserve_registers)
1974 : assembler_(assembler), preserve_registers_(preserve_registers) {
1975 __ Comment("EnterCallRuntimeFrame");
1976 __ EnterFrame(0);
1977
1978 if (preserve_registers_) {
1979 // TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
1980 __ PushRegisters(kVolatileRegisterSet);
1981 } else {
1982 // These registers must always be preserved.
1983 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
1984 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
1985 COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
1986 }
1987
1988 __ ReserveAlignedFrameSpace(frame_size);
1989}
1990
1991void LeafRuntimeScope::Call(const RuntimeEntry& entry,
1992 intptr_t argument_count) {
1993 ASSERT(entry.is_leaf());
1994 ASSERT(entry.argument_count() == argument_count);
1995 COMPILE_ASSERT(CallingConventions::kVolatileCpuRegisters & (1 << RAX));
1996 __ movq(RAX, compiler::Address(THR, entry.OffsetFromThread()));
1997 __ movq(compiler::Assembler::VMTagAddress(), RAX);
1998 __ CallCFunction(RAX);
1999 __ movq(compiler::Assembler::VMTagAddress(),
2000 compiler::Immediate(VMTag::kDartTagId));
2001}
2002
2003LeafRuntimeScope::~LeafRuntimeScope() {
2004 if (preserve_registers_) {
2005 // RSP might have been modified to reserve space for arguments
2006 // and ensure proper alignment of the stack frame.
2007 // We need to restore it before restoring registers.
2008 const intptr_t kPushedCpuRegistersCount =
2009 RegisterSet::RegisterCount(CallingConventions::kVolatileCpuRegisters);
2010 const intptr_t kPushedXmmRegistersCount =
2011 RegisterSet::RegisterCount(CallingConventions::kVolatileXmmRegisters);
2012 const intptr_t kPushedRegistersSize =
2013 kPushedCpuRegistersCount * target::kWordSize +
2014 kPushedXmmRegistersCount * kFpuRegisterSize;
2015
2016 __ leaq(RSP, Address(RBP, -kPushedRegistersSize));
2017
2018 // TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
2019 __ PopRegisters(kVolatileRegisterSet);
2020 } else {
2021 const intptr_t kPushedRegistersSize =
2022 (target::frame_layout.dart_fixed_frame_size - 2) *
2023 target::kWordSize; // From EnterStubFrame (excluding PC / FP)
2024 __ leaq(RSP, Address(RBP, -kPushedRegistersSize));
2025 }
2026
2027 __ LeaveFrame();
2028}
2029
2030void Assembler::TsanLoadAcquire(Address addr) {
2031 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
2032 leaq(CallingConventions::kArg1Reg, addr);
2033 rt.Call(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
2034}
2035
2036void Assembler::TsanStoreRelease(Address addr) {
2037 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
2038 leaq(CallingConventions::kArg1Reg, addr);
2039 rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
2040}
2041
2042void Assembler::RestoreCodePointer() {
2043 movq(CODE_REG,
2044 Address(RBP, target::frame_layout.code_from_fp * target::kWordSize));
2045}
2046
2047void Assembler::LoadPoolPointer(Register pp) {
2048 // Load new pool pointer.
2049 CheckCodePointer();
2050 movq(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
2051 set_constant_pool_allowed(pp == PP);
2052}
2053
2054void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
2055 ASSERT(!constant_pool_allowed());
2056 EnterFrame(0);
2057 if (!FLAG_precompiled_mode) {
2058 pushq(CODE_REG);
2059 pushq(PP);
2060 if (new_pp == kNoRegister) {
2061 LoadPoolPointer(PP);
2062 } else {
2063 movq(PP, new_pp);
2064 }
2065 }
2066 set_constant_pool_allowed(true);
2067 if (frame_size != 0) {
2068 subq(RSP, Immediate(frame_size));
2069 }
2070}
2071
2072void Assembler::LeaveDartFrame() {
2073 // Restore caller's PP register that was pushed in EnterDartFrame.
2074 if (!FLAG_precompiled_mode) {
2075 movq(PP, Address(RBP, (target::frame_layout.saved_caller_pp_from_fp *
2076 target::kWordSize)));
2077 }
2078 set_constant_pool_allowed(false);
2079 LeaveFrame();
2080}
2081
2082void Assembler::CheckCodePointer() {
2083#ifdef DEBUG
2084 if (!FLAG_check_code_pointer) {
2085 return;
2086 }
2087 Comment("CheckCodePointer");
2088 Label cid_ok, instructions_ok;
2089 pushq(RAX);
2090 LoadClassId(RAX, CODE_REG);
2091 cmpq(RAX, Immediate(kCodeCid));
2092 j(EQUAL, &cid_ok);
2093 int3();
2094 Bind(&cid_ok);
2095 {
2096 const intptr_t kRIPRelativeLeaqSize = 7;
2097 const intptr_t header_to_entry_offset =
2098 (target::Instructions::HeaderSize() - kHeapObjectTag);
2099 const intptr_t header_to_rip_offset =
2100 CodeSize() + kRIPRelativeLeaqSize + header_to_entry_offset;
2101 leaq(RAX, Address::AddressRIPRelative(-header_to_rip_offset));
2102 ASSERT(CodeSize() == (header_to_rip_offset - header_to_entry_offset));
2103 }
2104 cmpq(RAX, FieldAddress(CODE_REG, target::Code::instructions_offset()));
2105 j(EQUAL, &instructions_ok);
2106 int3();
2107 Bind(&instructions_ok);
2108 popq(RAX);
2109#endif
2110}
2111
2112// On entry to a function compiled for OSR, the caller's frame pointer, the
2113// stack locals, and any copied parameters are already in place. The frame
2114// pointer is already set up. The PC marker is not correct for the
2115// optimized function and there may be extra space for spill slots to
2116// allocate.
2117void Assembler::EnterOsrFrame(intptr_t extra_size) {
2118 ASSERT(!constant_pool_allowed());
2119 if (prologue_offset_ == -1) {
2120 Comment("PrologueOffset = %" Pd "", CodeSize());
2121 prologue_offset_ = CodeSize();
2122 }
2123 RestoreCodePointer();
2124 LoadPoolPointer();
2125
2126 if (extra_size != 0) {
2127 subq(RSP, Immediate(extra_size));
2128 }
2129}
2130
2131void Assembler::EnterStubFrame() {
2132 EnterDartFrame(0, kNoRegister);
2133}
2134
2135void Assembler::LeaveStubFrame() {
2136 LeaveDartFrame();
2137}
2138
2139void Assembler::EnterCFrame(intptr_t frame_space) {
2140 // Already saved.
2141 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
2142 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
2143
2144 EnterFrame(0);
2145 ReserveAlignedFrameSpace(frame_space);
2146}
2147
2148void Assembler::LeaveCFrame() {
2149 LeaveFrame();
2150}
2151
2152// RDX receiver, RBX ICData entries array
2153// Preserve R10 (ARGS_DESC_REG), not required today, but maybe later.
2154void Assembler::MonomorphicCheckedEntryJIT() {
2155 has_monomorphic_entry_ = true;
2156 intptr_t start = CodeSize();
2157 Label have_cid, miss;
2158 Bind(&miss);
2159 jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));
2160
2161 // Ensure the monomorphic entry is 2-byte aligned (so the GC can see it if
2162 // we store it in ICData / MegamorphicCache arrays).
2163 nop(1);
2164
2165 Comment("MonomorphicCheckedEntry");
2166 ASSERT_EQUAL(CodeSize() - start,
2167 target::Instructions::kMonomorphicEntryOffsetJIT);
2168 ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);
2169
2170 const intptr_t cid_offset = target::Array::element_offset(0);
2171 const intptr_t count_offset = target::Array::element_offset(1);
2172
2173 LoadTaggedClassIdMayBeSmi(RAX, RDX);
2174
2175 OBJ(cmp)(RAX, FieldAddress(RBX, cid_offset));
2176 j(NOT_EQUAL, &miss, Assembler::kNearJump);
2177 OBJ(add)(FieldAddress(RBX, count_offset), Immediate(target::ToRawSmi(1)));
2178 xorq(R10, R10); // GC-safe for OptimizeInvokedFunction.
2179#if defined(DART_COMPRESSED_POINTERS)
2180 nop(4);
2181#else
2182 nop(1);
2183#endif
2184
2185 // Fall through to unchecked entry.
2186 ASSERT_EQUAL(CodeSize() - start,
2187 target::Instructions::kPolymorphicEntryOffsetJIT);
2188 ASSERT(((CodeSize() - start) & kSmiTagMask) == kSmiTag);
2189}
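// Note: the JIT monomorphic entry above compares the receiver's (possibly
// Smi) class id against the cached cid in the ICData entries array, bumps the
// hit count, and falls through to the unchecked entry; a mismatch jumps back
// to `miss`, which tail-jumps to the switchable-call miss handler stored in
// the Thread. The nop padding keeps CodeSize() in sync with the fixed
// kMonomorphicEntryOffsetJIT / kPolymorphicEntryOffsetJIT layout constants.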
2190
2191// RBX - input: class id smi
2192// RDX - input: receiver object
2193void Assembler::MonomorphicCheckedEntryAOT() {
2194 has_monomorphic_entry_ = true;
2195 intptr_t start = CodeSize();
2196 Label have_cid, miss;
2197 Bind(&miss);
2198 jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));
2199
2200 // Ensure the monomorphic entry is 2-byte aligned (so the GC can see it if
2201 // we store it in ICData / MegamorphicCache arrays).
2202 nop(1);
2203
2204 Comment("MonomorphicCheckedEntry");
2205 ASSERT_EQUAL(CodeSize() - start,
2206 target::Instructions::kMonomorphicEntryOffsetAOT);
2207 ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);
2208
2209 SmiUntag(RBX);
2210 LoadClassId(RAX, RDX);
2211 cmpq(RAX, RBX);
2212 j(NOT_EQUAL, &miss, Assembler::kNearJump);
2213
2214 // Ensure the unchecked entry is 2-byte aligned (so the GC can see it if
2215 // we store it in ICData / MegamorphicCache arrays).
2216#if defined(DART_COMPRESSED_POINTERS)
2217 nop(1);
2218#endif
2219
2220 // Fall through to unchecked entry.
2221 ASSERT_EQUAL(CodeSize() - start,
2222 target::Instructions::kPolymorphicEntryOffsetAOT);
2223 ASSERT(((CodeSize() - start) & kSmiTagMask) == kSmiTag);
2224}
2225
2226void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
2227 has_monomorphic_entry_ = true;
2228 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
2229 int3();
2230 }
2231 jmp(label);
2232 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
2233 int3();
2234 }
2235}
2236
2237void Assembler::CombineHashes(Register dst, Register other) {
2238 // hash += other_hash
2239 addl(dst, other);
2240 // hash += hash << 10
2241 movl(other, dst);
2242 shll(other, Immediate(10));
2243 addl(dst, other);
2244 // hash ^= hash >> 6
2245 movl(other, dst);
2246 shrl(other, Immediate(6));
2247 xorl(dst, other);
2248}
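// For illustration, the combine step above corresponds to the following C
// (32-bit arithmetic), matching the inline comments:
//   hash += other_hash;
//   hash += hash << 10;
//   hash ^= hash >> 6;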
2249
2250void Assembler::FinalizeHashForSize(intptr_t bit_size,
2251 Register dst,
2252 Register scratch) {
2253 ASSERT(bit_size > 0); // Can't avoid returning 0 if there are no hash bits!
2254 // While any 32-bit hash value fits in X bits, where X > 32, the caller may
2255 // reasonably expect that the returned values fill the entire bit space.
2256 ASSERT(bit_size <= kBitsPerInt32);
2257 ASSERT(scratch != kNoRegister);
2258 // hash += hash << 3;
2259 movl(scratch, dst);
2260 shll(scratch, Immediate(3));
2261 addl(dst, scratch);
2262 // hash ^= hash >> 11; // Logical shift, unsigned hash.
2263 movl(scratch, dst);
2264 shrl(scratch, Immediate(11));
2265 xorl(dst, scratch);
2266 // hash += hash << 15;
2267 movl(scratch, dst);
2268 shll(scratch, Immediate(15));
2269 addl(dst, scratch);
2270 // Size to fit.
2271 if (bit_size < kBitsPerInt32) {
2272 andl(dst, Immediate(Utils::NBitMask(bit_size)));
2273 }
2274 // return (hash == 0) ? 1 : hash;
2275 Label done;
2276 j(NOT_ZERO, &done, kNearJump);
2277 incl(dst);
2278 Bind(&done);
2279}
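// For illustration, the finalization above is roughly this C routine
// (32-bit arithmetic; hypothetical helper name):
//   uint32_t FinalizeHash(uint32_t hash, int bit_size) {
//     hash += hash << 3;
//     hash ^= hash >> 11;
//     hash += hash << 15;
//     if (bit_size < 32) hash &= (1u << bit_size) - 1;
//     return (hash == 0) ? 1 : hash;
//   }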
2280
2281#ifndef PRODUCT
2282void Assembler::MaybeTraceAllocation(Register cid,
2283 Label* trace,
2284 Register temp_reg,
2285 JumpDistance distance) {
2286 if (temp_reg == kNoRegister) {
2287 temp_reg = TMP;
2288 }
2289 ASSERT(temp_reg != cid);
2290 LoadIsolateGroup(temp_reg);
2291 movq(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
2292
2293 movq(temp_reg,
2294 Address(temp_reg,
2295 target::ClassTable::allocation_tracing_state_table_offset()));
2296 cmpb(Address(temp_reg, cid, TIMES_1,
2297 target::ClassTable::AllocationTracingStateSlotOffsetFor(0)),
2298 Immediate(0));
2299 // Allocations of this class are being traced; jump to the trace label,
2300 // which will use the allocation stub.
2301 j(NOT_ZERO, trace, distance);
2302}
2303
2304void Assembler::MaybeTraceAllocation(intptr_t cid,
2305 Label* trace,
2306 Register temp_reg,
2307 JumpDistance distance) {
2308 ASSERT(cid > 0);
2309
2310 if (temp_reg == kNoRegister) {
2311 temp_reg = TMP;
2312 }
2313 LoadIsolateGroup(temp_reg);
2314 movq(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
2315 movq(temp_reg,
2316 Address(temp_reg,
2317 target::ClassTable::allocation_tracing_state_table_offset()));
2318 cmpb(Address(temp_reg,
2319 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid)),
2320 Immediate(0));
2321 // Allocations of this class are being traced; jump to the trace label,
2322 // which will use the allocation stub.
2323 j(NOT_ZERO, trace, distance);
2324}
2325#endif // !PRODUCT
2326
2327void Assembler::TryAllocateObject(intptr_t cid,
2328 intptr_t instance_size,
2329 Label* failure,
2330 JumpDistance distance,
2331 Register instance_reg,
2332 Register temp_reg) {
2333 ASSERT(failure != nullptr);
2334 ASSERT(instance_size != 0);
2335 ASSERT(Utils::IsAligned(instance_size,
2336 target::ObjectAlignment::kObjectAlignment));
2337 if (FLAG_inline_alloc &&
2338 target::Heap::IsAllocatableInNewSpace(instance_size)) {
2339 // If this allocation is traced, the program will jump to the failure path
2340 // (i.e. the allocation stub), which will allocate the object and trace the
2341 // allocation call site.
2342 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg, distance));
2343 movq(instance_reg, Address(THR, target::Thread::top_offset()));
2344 addq(instance_reg, Immediate(instance_size));
2345 // instance_reg: potential next object start.
2346 cmpq(instance_reg, Address(THR, target::Thread::end_offset()));
2347 j(ABOVE_EQUAL, failure, distance);
2348 CheckAllocationCanary(instance_reg);
2349 // Successfully allocated the object; now update top to point to the next
2350 // object start and store the class id in the object's tags field.
2351 movq(Address(THR, target::Thread::top_offset()), instance_reg);
2352 ASSERT(instance_size >= kHeapObjectTag);
2353 AddImmediate(instance_reg, Immediate(kHeapObjectTag - instance_size));
2354 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
2355 MoveImmediate(FieldAddress(instance_reg, target::Object::tags_offset()),
2356 Immediate(tags));
2357 } else {
2358 jmp(failure);
2359 }
2360}
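// Note: this is the inline bump-allocation fast path: load Thread::top_, add
// the pre-aligned instance size, compare against Thread::end_, then publish
// the new top and write the freshly computed tags word. Traced allocations,
// an exhausted top/end block, sizes too large for new space, or a disabled
// FLAG_inline_alloc all end up on the `failure` path (the allocation stub).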
2361
2362void Assembler::TryAllocateArray(intptr_t cid,
2363 intptr_t instance_size,
2364 Label* failure,
2365 JumpDistance distance,
2366 Register instance,
2367 Register end_address,
2368 Register temp) {
2369 ASSERT(failure != nullptr);
2370 if (FLAG_inline_alloc &&
2371 target::Heap::IsAllocatableInNewSpace(instance_size)) {
2372 // If this allocation is traced, the program will jump to the failure path
2373 // (i.e. the allocation stub), which will allocate the object and trace the
2374 // allocation call site.
2375 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp, distance));
2376 movq(instance, Address(THR, target::Thread::top_offset()));
2377 movq(end_address, instance);
2378
2379 addq(end_address, Immediate(instance_size));
2380 j(CARRY, failure);
2381
2382 // Check if the allocation fits into the remaining space.
2383 // instance: potential new object start.
2384 // end_address: potential next object start.
2385 cmpq(end_address, Address(THR, target::Thread::end_offset()));
2386 j(ABOVE_EQUAL, failure);
2387 CheckAllocationCanary(instance);
2388
2389 // Successfully allocated the object(s); now update top to point to the
2390 // next object start and initialize the object.
2391 movq(Address(THR, target::Thread::top_offset()), end_address);
2392 addq(instance, Immediate(kHeapObjectTag));
2393
2394 // Initialize the tags.
2395 // instance: new object start as a tagged pointer.
2396 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
2397 movq(FieldAddress(instance, target::Object::tags_offset()),
2398 Immediate(tags));
2399 } else {
2400 jmp(failure);
2401 }
2402}
2403
2404void Assembler::CopyMemoryWords(Register src,
2405 Register dst,
2406 Register size,
2407 Register temp) {
2408 // This loop is equivalent to
2409 // shrq(size, Immediate(target::kWordSizeLog2));
2410 // rep_movsq()
2411 // but shows better performance on certain micro-benchmarks.
2412 Label loop, done;
2413 cmpq(size, Immediate(0));
2414 j(EQUAL, &done, kNearJump);
2415 Bind(&loop);
2416 movq(temp, Address(src, 0));
2417 addq(src, Immediate(target::kWordSize));
2418 movq(Address(dst, 0), temp);
2419 addq(dst, Immediate(target::kWordSize));
2420 subq(size, Immediate(target::kWordSize));
2421 j(NOT_ZERO, &loop, kNearJump);
2422 Bind(&done);
2423}
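// For illustration, with src/dst as uword* and size a byte count that is a
// multiple of the word size, the loop above behaves like:
//   while (size != 0) {
//     *dst++ = *src++;            // word copy staged through `temp`
//     size -= sizeof(uword);
//   }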
2424
2425void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
2426 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2427 buffer_.Emit<uint8_t>(0xe8);
2428 buffer_.Emit<int32_t>(0);
2429
2430 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
2431 PcRelativeCallPattern::kLengthInBytes);
2432 pattern.set_distance(offset_into_target);
2433}
2434
2435void Assembler::GenerateUnRelocatedPcRelativeTailCall(
2436 intptr_t offset_into_target) {
2437 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2438 buffer_.Emit<uint8_t>(0xe9);
2439 buffer_.Emit<int32_t>(0);
2440
2441 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
2442 PcRelativeCallPattern::kLengthInBytes);
2443 pattern.set_distance(offset_into_target);
2444}
2445
2446void Assembler::Align(int alignment, intptr_t offset) {
2447 ASSERT(Utils::IsPowerOfTwo(alignment));
2448 intptr_t pos = offset + buffer_.GetPosition();
2449 int mod = pos & (alignment - 1);
2450 if (mod == 0) {
2451 return;
2452 }
2453 intptr_t bytes_needed = alignment - mod;
2454 while (bytes_needed > MAX_NOP_SIZE) {
2455 nop(MAX_NOP_SIZE);
2456 bytes_needed -= MAX_NOP_SIZE;
2457 }
2458 if (bytes_needed != 0) {
2459 nop(bytes_needed);
2460 }
2461 ASSERT(((offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
2462}
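// Note: Align emits multi-byte nops until (offset + current position) is a
// multiple of `alignment`. For example, aligning position 13 to a 16-byte
// boundary needs 16 - (13 & 15) = 3 bytes of nop padding.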
2463
2464void Assembler::EmitOperand(int rm, const Operand& operand) {
2465 ASSERT(rm >= 0 && rm < 8);
2466 const intptr_t length = operand.length_;
2467 ASSERT(length > 0);
2468 // Emit the ModRM byte updated with the given RM value.
2469 ASSERT((operand.encoding_[0] & 0x38) == 0);
2470 EmitUint8(operand.encoding_[0] + (rm << 3));
2471 // Emit the rest of the encoded operand.
2472 for (intptr_t i = 1; i < length; i++) {
2473 EmitUint8(operand.encoding_[i]);
2474 }
2475}
2476
2477void Assembler::EmitRegisterOperand(int rm, int reg) {
2478 Operand operand;
2479 operand.SetModRM(3, static_cast<Register>(reg));
2480 EmitOperand(rm, operand);
2481}
2482
2483void Assembler::EmitImmediate(const Immediate& imm) {
2484 if (imm.is_int32()) {
2485 EmitInt32(static_cast<int32_t>(imm.value()));
2486 } else {
2487 EmitInt64(imm.value());
2488 }
2489}
2490
2491void Assembler::EmitSignExtendedInt8(int rm,
2492 const Operand& operand,
2493 const Immediate& immediate) {
2494 EmitUint8(0x83);
2495 EmitOperand(rm, operand);
2496 EmitUint8(immediate.value() & 0xFF);
2497}
2498
2499void Assembler::EmitComplex(int rm,
2500 const Operand& operand,
2501 const Immediate& immediate) {
2502 ASSERT(rm >= 0 && rm < 8);
2503 ASSERT(immediate.is_int32());
2504 if (immediate.is_int8()) {
2505 EmitSignExtendedInt8(rm, operand, immediate);
2506 } else if (operand.IsRegister(RAX)) {
2507 // Use short form if the destination is rax.
2508 EmitUint8(0x05 + (rm << 3));
2509 EmitImmediate(immediate);
2510 } else {
2511 EmitUint8(0x81);
2512 EmitOperand(rm, operand);
2513 EmitImmediate(immediate);
2514 }
2515}
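// Note: EmitComplex chooses among the standard x86 ALU immediate encodings:
// opcode 0x83 with a sign-extended 8-bit immediate when it fits, the short
// RAX-only form (0x05 + (rm << 3) with a 32-bit immediate) when the operand
// is RAX, and otherwise opcode 0x81 with a 32-bit immediate; `rm` selects the
// ALU operation (add, or, adc, ..., cmp).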
2516
2517void Assembler::EmitLabel(Label* label, intptr_t instruction_size) {
2518 if (label->IsBound()) {
2519 intptr_t offset = label->Position() - buffer_.Size();
2520 ASSERT(offset <= 0);
2521 EmitInt32(offset - instruction_size);
2522 } else {
2523 EmitLabelLink(label);
2524 }
2525}
2526
2527void Assembler::EmitLabelLink(Label* label) {
2528 ASSERT(!label->IsBound());
2529 intptr_t position = buffer_.Size();
2530 EmitInt32(label->position_);
2531 label->LinkTo(position);
2532}
2533
2534void Assembler::EmitNearLabelLink(Label* label) {
2535 ASSERT(!label->IsBound());
2536 intptr_t position = buffer_.Size();
2537 EmitUint8(0);
2538 label->NearLinkTo(position);
2539}
2540
2541void Assembler::EmitGenericShift(bool wide,
2542 int rm,
2543 Register reg,
2544 const Immediate& imm) {
2545 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2546 ASSERT(imm.is_int8());
2547 if (wide) {
2548 EmitRegisterREX(reg, REX_W);
2549 } else {
2550 EmitRegisterREX(reg, REX_NONE);
2551 }
2552 if (imm.value() == 1) {
2553 EmitUint8(0xD1);
2554 EmitOperand(rm, Operand(reg));
2555 } else {
2556 EmitUint8(0xC1);
2557 EmitOperand(rm, Operand(reg));
2558 EmitUint8(imm.value() & 0xFF);
2559 }
2560}
2561
2562void Assembler::EmitGenericShift(bool wide,
2563 int rm,
2564 Register operand,
2565 Register shifter) {
2566 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2567 ASSERT(shifter == RCX);
2568 EmitRegisterREX(operand, wide ? REX_W : REX_NONE);
2569 EmitUint8(0xD3);
2570 EmitOperand(rm, Operand(operand));
2571}
2572
2573void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
2574 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2575 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2576 movl(result, tags);
2577 shrl(result, Immediate(12));
2578}
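// Note: per the asserts above, the class id occupies bits 12..31 of the
// 32-bit tags word, so truncating to 32 bits (movl) and shifting right by
// kClassIdTagPos extracts it with no masking needed, since the field extends
// to the top bit.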
2579
2580void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
2581 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
2582 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
2583 movzxw(result, tags);
2584 shrl(result, Immediate(target::UntaggedObject::kSizeTagPos -
2585 target::ObjectAlignment::kObjectAlignmentLog2));
2586 AndImmediate(result,
2587 Immediate(Utils::NBitMask(target::UntaggedObject::kSizeTagSize)
2588 << target::ObjectAlignment::kObjectAlignmentLog2));
2589}
2590
2591void Assembler::LoadClassId(Register result, Register object) {
2592 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2593 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2594 movl(result, FieldAddress(object, target::Object::tags_offset()));
2595 shrl(result, Immediate(target::UntaggedObject::kClassIdTagPos));
2596}
2597
2598void Assembler::LoadClassById(Register result, Register class_id) {
2599 ASSERT(result != class_id);
2600 const intptr_t table_offset =
2601 target::IsolateGroup::cached_class_table_table_offset();
2602
2603 LoadIsolateGroup(result);
2604 movq(result, Address(result, table_offset));
2605 movq(result, Address(result, class_id, TIMES_8, 0));
2606}
2607
2608void Assembler::CompareClassId(Register object,
2609 intptr_t class_id,
2610 Register scratch) {
2611 LoadClassId(TMP, object);
2612 cmpl(TMP, Immediate(class_id));
2613}
2614
2615void Assembler::SmiUntagOrCheckClass(Register object,
2616 intptr_t class_id,
2617 Label* is_smi) {
2618#if !defined(DART_COMPRESSED_POINTERS)
2619 ASSERT(kSmiTagShift == 1);
2620 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2621 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2622 // Untag optimistically. Tag bit is shifted into the CARRY.
2623 SmiUntag(object);
2624 j(NOT_CARRY, is_smi, kNearJump);
2625 // Load cid: can't use LoadClassId, object is untagged. Use TIMES_2 scale
2626 // factor in the addressing mode to compensate for this.
2627 movl(TMP, Address(object, TIMES_2,
2628 target::Object::tags_offset() + kHeapObjectTag));
2629 shrl(TMP, Immediate(target::UntaggedObject::kClassIdTagPos));
2630 cmpl(TMP, Immediate(class_id));
2631#else
2632 // Cannot speculatively untag compressed Smis because it erases upper address
2633 // bits.
2634 UNREACHABLE();
2635#endif
2636}
2637
2638void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
2639 Label smi;
2640
2641 if (result == object) {
2642 Label join;
2643
2644 testq(object, Immediate(kSmiTagMask));
2645 j(EQUAL, &smi, Assembler::kNearJump);
2646 LoadClassId(result, object);
2647 jmp(&join, Assembler::kNearJump);
2648
2649 Bind(&smi);
2650 movq(result, Immediate(kSmiCid));
2651
2652 Bind(&join);
2653 } else {
2654 testq(object, Immediate(kSmiTagMask));
2655 movq(result, Immediate(kSmiCid));
2656 j(EQUAL, &smi, Assembler::kNearJump);
2657 LoadClassId(result, object);
2658
2659 Bind(&smi);
2660 }
2661}
2662
2663void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
2664 Label smi;
2665
2666 if (result == object) {
2667 Label join;
2668
2669 testq(object, Immediate(kSmiTagMask));
2670 j(EQUAL, &smi, Assembler::kNearJump);
2671 LoadClassId(result, object);
2672 SmiTag(result);
2673 jmp(&join, Assembler::kNearJump);
2674
2675 Bind(&smi);
2676 movq(result, Immediate(target::ToRawSmi(kSmiCid)));
2677
2678 Bind(&join);
2679 } else {
2680 testq(object, Immediate(kSmiTagMask));
2681 movq(result, Immediate(target::ToRawSmi(kSmiCid)));
2682 j(EQUAL, &smi, Assembler::kNearJump);
2683 LoadClassId(result, object);
2684 SmiTag(result);
2685
2686 Bind(&smi);
2687 }
2688}
2689
2690void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
2691 Register src,
2692 Register scratch,
2693 bool can_be_null) {
2694#if defined(DEBUG)
2695 Comment("Check that object in register has cid %" Pd "", cid);
2696 Label matches;
2697 LoadClassIdMayBeSmi(scratch, src);
2698 CompareImmediate(scratch, cid);
2699 BranchIf(EQUAL, &matches, Assembler::kNearJump);
2700 if (can_be_null) {
2701 CompareImmediate(scratch, kNullCid);
2702 BranchIf(EQUAL, &matches, Assembler::kNearJump);
2703 }
2704 Breakpoint();
2705 Bind(&matches);
2706#endif
2707}
2708
2709Address Assembler::VMTagAddress() {
2710 return Address(THR, target::Thread::vm_tag_offset());
2711}
2712
2713bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
2714 bool is_external,
2715 intptr_t cid,
2716 intptr_t index_scale) {
2717 if (!IsSafeSmi(constant)) return false;
2718 const int64_t index = target::SmiValue(constant);
2719 const int64_t disp =
2720 index * index_scale +
2721 (is_external ? 0 : target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
2722 return Utils::IsInt(32, disp);
2723}
2724
2725Address Assembler::ElementAddressForIntIndex(bool is_external,
2726 intptr_t cid,
2727 intptr_t index_scale,
2728 Register array,
2729 intptr_t index) {
2730 if (is_external) {
2731 return Address(array, index * index_scale);
2732 } else {
2733 const int64_t disp = static_cast<int64_t>(index) * index_scale +
2734 target::Instance::DataOffsetFor(cid);
2735 ASSERT(Utils::IsInt(32, disp));
2736 return FieldAddress(array, static_cast<int32_t>(disp));
2737 }
2738}
2739
2740Address Assembler::ElementAddressForRegIndex(bool is_external,
2741 intptr_t cid,
2742 intptr_t index_scale,
2743 bool index_unboxed,
2744 Register array,
2745 Register index) {
2746 if (is_external) {
2747 return Address(array, index, ToScaleFactor(index_scale, index_unboxed), 0);
2748 } else {
2749 return FieldAddress(array, index, ToScaleFactor(index_scale, index_unboxed),
2750 target::Instance::DataOffsetFor(cid));
2751 }
2752}
2753
2754void Assembler::RangeCheck(Register value,
2755 Register temp,
2756 intptr_t low,
2757 intptr_t high,
2758 RangeCheckCondition condition,
2759 Label* target) {
2760 auto cc = condition == kIfInRange ? BELOW_EQUAL : ABOVE;
2761 Register to_check = value;
2762 if (temp != kNoRegister) {
2763 movq(temp, value);
2764 to_check = temp;
2765 }
2766 subq(to_check, Immediate(low));
2767 cmpq(to_check, Immediate(high - low));
2768 j(cc, target);
2769}
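// Note: RangeCheck relies on the unsigned-compare trick: after subtracting
// `low`, one unsigned comparison against (high - low) decides whether
// low <= value <= high, because values below `low` wrap to large unsigned
// numbers. E.g. with low = 10, high = 20: value = 7 becomes 7 - 10 =
// 0xFFFF...FFFD, which is (unsigned) above 10, so it is out of range.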
2770
2771} // namespace compiler
2772} // namespace dart
2773
2774#endif // defined(TARGET_ARCH_X64)