assembler_arm64.cc
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // NOLINT
6#if defined(TARGET_ARCH_ARM64)
7
8#define SHOULD_NOT_INCLUDE_RUNTIME
9
12#include "vm/cpu.h"
13#include "vm/instructions.h"
14#include "vm/simulator.h"
15#include "vm/tags.h"
16
17namespace dart {
18
19DECLARE_FLAG(bool, check_code_pointer);
20DECLARE_FLAG(bool, precompiled_mode);
21
22DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");
23
24// For use by LR related macros (e.g. CLOBBERS_LR).
25#define __ this->
26
27namespace compiler {
28
31 switch (rep) {
32 case kUnboxedFloat:
33 return kSWord;
34 case kUnboxedDouble:
35 return kDWord;
36 case kUnboxedInt32x4:
37 case kUnboxedFloat32x4:
38 case kUnboxedFloat64x2:
39 return kQWord;
40 default:
41 UNREACHABLE();
42 }
43}
44
45Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
46 intptr_t far_branch_level)
47 : AssemblerBase(object_pool_builder),
48 use_far_branches_(far_branch_level != 0),
49 constant_pool_allowed_(false) {
50 generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
51 Call(Address(THR,
52 target::Thread::write_barrier_wrappers_thread_offset(reg)));
53 };
54 generate_invoke_array_write_barrier_ = [&]() {
55 Call(
56 Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
57 };
58}
59
60void Assembler::Emit(int32_t value) {
61 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
62 buffer_.Emit<int32_t>(value);
63}
64
65void Assembler::Emit64(int64_t value) {
66 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
67 buffer_.Emit<int64_t>(value);
68}
69
70int32_t Assembler::BindImm26Branch(int64_t position, int64_t dest) {
71 ASSERT(CanEncodeImm26BranchOffset(dest));
72 const int32_t next = buffer_.Load<int32_t>(position);
73 const int32_t encoded = EncodeImm26BranchOffset(dest, next);
74 buffer_.Store<int32_t>(position, encoded);
75 return DecodeImm26BranchOffset(next);
76}
77
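// Note on the far-branch form handled below: with far branches enabled, an
// unbound conditional, compare, or test branch occupies two instruction
// slots, a conditional "guard" branch (imm19 or imm14) followed by an
// unconditional imm26 branch. Binding either patches the imm26 slot (far
// case) or rewrites the guard as a near branch with the inverted condition
// and turns the imm26 slot into a nop (near case).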
78int32_t Assembler::BindImm19Branch(int64_t position, int64_t dest) {
79 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) {
80 // Far branches are enabled, and we can't encode the branch offset in
81 // 19 bits.
82
83 // Grab the guarding branch instruction.
84 const int32_t guard_branch =
85 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
86
87 // Grab the far branch instruction.
88 const int32_t far_branch =
89 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
90 const Condition c = DecodeImm19BranchCondition(guard_branch);
91 ASSERT(c != NV);
92
93 // Grab the link to the next branch.
94 const int32_t next = DecodeImm26BranchOffset(far_branch);
95
96 // dest is the offset from the guarding branch instruction.
97 // Correct it to be from the following instruction.
98 const int64_t offset = dest - Instr::kInstrSize;
99
100 // Encode the branch.
101 const int32_t encoded_branch = EncodeImm26BranchOffset(offset, far_branch);
102
103 // Write the far branch into the buffer and link to the next branch.
104 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, encoded_branch);
105 return next;
106 } else if (use_far_branches() && CanEncodeImm19BranchOffset(dest)) {
107 // We assembled a far branch, but we don't need it. Replace it with a near
108 // branch.
109
110 // Grab the guarding branch instruction.
111 const int32_t guard_branch =
112 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
113
114 // Grab the far branch instruction.
115 const int32_t far_branch =
116 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
117
118 // Grab the link to the next branch.
119 const int32_t next = DecodeImm26BranchOffset(far_branch);
120
121 // Re-target the guarding branch and flip the conditional sense.
122 int32_t encoded_guard_branch = EncodeImm19BranchOffset(dest, guard_branch);
123 const Condition c = DecodeImm19BranchCondition(encoded_guard_branch);
124 encoded_guard_branch =
125 EncodeImm19BranchCondition(InvertCondition(c), encoded_guard_branch);
126
127 // Write back the re-encoded instructions. The far branch becomes a nop.
128 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
129 encoded_guard_branch);
130 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
131 Instr::kNopInstruction);
132 return next;
133 } else {
134 const int32_t next = buffer_.Load<int32_t>(position);
135 const int32_t encoded = EncodeImm19BranchOffset(dest, next);
136 buffer_.Store<int32_t>(position, encoded);
137 return DecodeImm19BranchOffset(next);
138 }
139}
140
141int32_t Assembler::BindImm14Branch(int64_t position, int64_t dest) {
142 if (use_far_branches() && !CanEncodeImm14BranchOffset(dest)) {
143 // Far branches are enabled, and we can't encode the branch offset in
144 // 14 bits.
145
146 // Grab the guarding branch instruction.
147 const int32_t guard_branch =
148 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
149
150 // Grab the far branch instruction.
151 const int32_t far_branch =
152 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
153 const Condition c = DecodeImm14BranchCondition(guard_branch);
154 ASSERT(c != NV);
155
156 // Grab the link to the next branch.
157 const int32_t next = DecodeImm26BranchOffset(far_branch);
158
159 // dest is the offset from the guarding branch instruction.
160 // Correct it to be from the following instruction.
161 const int64_t offset = dest - Instr::kInstrSize;
162
163 // Encode the branch.
164 const int32_t encoded_branch = EncodeImm26BranchOffset(offset, far_branch);
165
166 // Write the far branch into the buffer and link to the next branch.
167 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, encoded_branch);
168 return next;
169 } else if (use_far_branches() && CanEncodeImm14BranchOffset(dest)) {
170 // We assembled a far branch, but we don't need it. Replace it with a near
171 // branch.
172
173 // Grab the guarding branch instruction.
174 const int32_t guard_branch =
175 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
176
177 // Grab the far branch instruction.
178 const int32_t far_branch =
179 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
180
181 // Grab the link to the next branch.
182 const int32_t next = DecodeImm26BranchOffset(far_branch);
183
184 // Re-target the guarding branch and flip the conditional sense.
185 int32_t encoded_guard_branch = EncodeImm14BranchOffset(dest, guard_branch);
186 const Condition c = DecodeImm14BranchCondition(encoded_guard_branch);
187 encoded_guard_branch =
188 EncodeImm14BranchCondition(InvertCondition(c), encoded_guard_branch);
189
190 // Write back the re-encoded instructions. The far branch becomes a nop.
191 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
192 encoded_guard_branch);
193 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
194 Instr::kNopInstruction);
195 return next;
196 } else {
197 const int32_t next = buffer_.Load<int32_t>(position);
198 const int32_t encoded = EncodeImm14BranchOffset(dest, next);
199 buffer_.Store<int32_t>(position, encoded);
200 return DecodeImm14BranchOffset(next);
201 }
202}
203
204void Assembler::ExtendValue(Register rd, Register rn, OperandSize sz) {
205 switch (sz) {
206 case kEightBytes:
207 if (rd == rn) return; // No operation needed.
208 return mov(rd, rn);
209 case kUnsignedFourBytes:
210 return uxtw(rd, rn);
211 case kFourBytes:
212 return sxtw(rd, rn);
213 case kUnsignedTwoBytes:
214 return uxth(rd, rn);
215 case kTwoBytes:
216 return sxth(rd, rn);
217 case kUnsignedByte:
218 return uxtb(rd, rn);
219 case kByte:
220 return sxtb(rd, rn);
221 default:
222 UNREACHABLE();
223 break;
224 }
225}
226
227// Equivalent to left rotate of kSmiTagSize.
228static constexpr intptr_t kBFMTagRotate = kBitsPerInt64 - kSmiTagSize;
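// Illustration: with kSmiTagSize == 1 the rotate amount is 63, so the
// kFourBytes case below, sbfm(rd, rn, 63, 31), is the SBFIZ alias
// (sbfiz rd, rn, #1, #32): sign-extend the low 32 bits of rn and shift left
// by one, i.e. sign-extend and Smi-tag in a single instruction.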
229
230void Assembler::ExtendAndSmiTagValue(Register rd, Register rn, OperandSize sz) {
231 switch (sz) {
232 case kEightBytes:
233 return sbfm(rd, rn, kBFMTagRotate, target::kSmiBits + 1);
234 case kUnsignedFourBytes:
235 return ubfm(rd, rn, kBFMTagRotate, kBitsPerInt32 - 1);
236 case kFourBytes:
237 return sbfm(rd, rn, kBFMTagRotate, kBitsPerInt32 - 1);
238 case kUnsignedTwoBytes:
239 return ubfm(rd, rn, kBFMTagRotate, kBitsPerInt16 - 1);
240 case kTwoBytes:
241 return sbfm(rd, rn, kBFMTagRotate, kBitsPerInt16 - 1);
242 case kUnsignedByte:
243 return ubfm(rd, rn, kBFMTagRotate, kBitsPerInt8 - 1);
244 case kByte:
245 return sbfm(rd, rn, kBFMTagRotate, kBitsPerInt8 - 1);
246 default:
247 UNREACHABLE();
248 break;
249 }
250}
251
252void Assembler::Bind(Label* label) {
253 ASSERT(!label->IsBound());
254 const intptr_t bound_pc = buffer_.Size();
255
256 while (label->IsLinked()) {
257 const int64_t position = label->Position();
258 const int64_t dest = bound_pc - position;
259 const int32_t instr = buffer_.Load<int32_t>(position);
260 if (IsTestAndBranch(instr)) {
261 label->position_ = BindImm14Branch(position, dest);
262 } else if (IsConditionalBranch(instr) || IsCompareAndBranch(instr)) {
263 label->position_ = BindImm19Branch(position, dest);
264 } else if (IsUnconditionalBranch(instr)) {
265 label->position_ = BindImm26Branch(position, dest);
266 } else {
267 UNREACHABLE();
268 }
269 }
270 label->BindTo(bound_pc, lr_state());
271}
272
273void Assembler::Align(intptr_t alignment, intptr_t offset) {
274 ASSERT(Utils::IsPowerOfTwo(alignment));
275 intptr_t pos = offset + buffer_.GetPosition();
276 intptr_t mod = pos & (alignment - 1);
277 if (mod == 0) {
278 return;
279 }
280 intptr_t bytes_needed = alignment - mod;
281 ASSERT((bytes_needed % Instr::kInstrSize) == 0);
282 while (bytes_needed > 0) {
283 nop();
284 bytes_needed -= Instr::kInstrSize;
285 }
286 ASSERT(((offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
287}
288
289#if defined(TARGET_USES_THREAD_SANITIZER)
290void Assembler::TsanLoadAcquire(Register addr) {
291 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
292 MoveRegister(R0, addr);
293 rt.Call(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
294}
295
296void Assembler::TsanStoreRelease(Register addr) {
297 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
298 MoveRegister(R0, addr);
299 rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
300}
301#endif
302
303static int CountLeadingZeros(uint64_t value, int width) {
304 if (width == 64) return Utils::CountLeadingZeros64(value);
305 if (width == 32) return Utils::CountLeadingZeros32(value);
306 UNREACHABLE();
307 return 0;
308}
309
310static int CountOneBits(uint64_t value, int width) {
311 // Mask out unused bits to ensure that they are not counted.
312 value &= (0xffffffffffffffffULL >> (64 - width));
313
314 value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
315 value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
316 value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
317 value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
318 value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
319 value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
320
321 return value;
322}
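// Illustration: this is a branch-free SWAR population count, e.g.
// CountOneBits(0xf0f0, 16) == 8 and CountOneBits(0xffffffffULL, 32) == 32.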
323
324// Test if a given value can be encoded in the immediate field of a logical
325// instruction.
326// If it can be encoded, the function returns true, and values pointed to by n,
327// imm_s and imm_r are updated with immediates encoded in the format required
328// by the corresponding fields in the logical instruction.
329// If it can't be encoded, the function returns false, and the operand is
330// undefined.
331bool Operand::IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op) {
332 ASSERT(imm_op != nullptr);
333 ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
334 if (width == kWRegSizeInBits) {
335 value &= 0xffffffffUL;
336 }
337 uint8_t n = 0;
338 uint8_t imm_s = 0;
339 uint8_t imm_r = 0;
340
341 // Logical immediates are encoded using parameters n, imm_s and imm_r using
342 // the following table:
343 //
344 // N imms immr size S R
345 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
346 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
347 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
348 // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
349 // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
350 // 0 11110s xxxxxr 2 UInt(s) UInt(r)
351 // (s bits must not be all set)
352 //
353 // A pattern is constructed of size bits, where the least significant S+1
354 // bits are set. The pattern is rotated right by R, and repeated across a
355 // 32 or 64-bit value, depending on destination register width.
356 //
357 // To test if an arbitrary immediate can be encoded using this scheme, an
358 // iterative algorithm is used.
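 // Worked example (illustrative): 0x0f0f0f0f0f0f0f0f repeats an 8-bit
 // element whose low four bits are set, so it encodes as N = 0,
 // imm_s = 0b110011, imm_r = 0; the loop below halves width three times
 // (64 -> 32 -> 16 -> 8) before step 3 succeeds.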
359
360 // 1. If the value has all set or all clear bits, it can't be encoded.
361 if ((value == 0) || (value == 0xffffffffffffffffULL) ||
362 ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
363 return false;
364 }
365
366 int lead_zero = CountLeadingZeros(value, width);
367 int lead_one = CountLeadingZeros(~value, width);
368 int trail_zero = Utils::CountTrailingZerosWord(value);
369 int trail_one = Utils::CountTrailingZerosWord(~value);
370 int set_bits = CountOneBits(value, width);
371
372 // The fixed bits in the immediate s field.
373 // If width == 64 (X reg), start at 0xFFFFFF80.
374 // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
375 // widths won't be executed.
376 int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
377 int imm_s_mask = 0x3F;
378
379 for (;;) {
380 // 2. If the value is two bits wide, it can be encoded.
381 if (width == 2) {
382 n = 0;
383 imm_s = 0x3C;
384 imm_r = (value & 3) - 1;
385 *imm_op = Operand(n, imm_s, imm_r);
386 return true;
387 }
388
389 n = (width == 64) ? 1 : 0;
390 imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
391 if ((lead_zero + set_bits) == width) {
392 imm_r = 0;
393 } else {
394 imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
395 }
396
397 // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
398 // the bit width of the value, it can be encoded.
399 if (lead_zero + trail_zero + set_bits == width) {
400 *imm_op = Operand(n, imm_s, imm_r);
401 return true;
402 }
403
404 // 4. If the sum of leading ones, trailing ones and unset bits in the
405 // value is equal to the bit width of the value, it can be encoded.
406 if (lead_one + trail_one + (width - set_bits) == width) {
407 *imm_op = Operand(n, imm_s, imm_r);
408 return true;
409 }
410
411 // 5. If the most-significant half of the bitwise value is equal to the
412 // least-significant half, return to step 2 using the least-significant
413 // half of the value.
414 uint64_t mask = (1ULL << (width >> 1)) - 1;
415 if ((value & mask) == ((value >> (width >> 1)) & mask)) {
416 width >>= 1;
417 set_bits >>= 1;
418 imm_s_fixed >>= 1;
419 continue;
420 }
421
422 // 6. Otherwise, the value can't be encoded.
423 return false;
424 }
425}
426
427void Assembler::LoadPoolPointer(Register pp) {
428 CheckCodePointer();
429 ldr(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
430
431 // When in the PP register, the pool pointer is untagged. When we
432 // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
433 // then untags when restoring from the stack. This will make loading from the
434 // object pool only one instruction for the first 4096 entries. Otherwise,
435 // because the offset wouldn't be aligned, it would be only one instruction
436 // for the first 64 entries.
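 // (A 64-bit LDR with an unsigned, scaled 12-bit immediate reaches offsets up
 // to 4095 * 8 bytes, hence the 4096 entries above; a tagged, misaligned base
 // would only admit the 9-bit unscaled form.)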
437 sub(pp, pp, Operand(kHeapObjectTag));
438 set_constant_pool_allowed(pp == PP);
439}
440
441void Assembler::LoadWordFromPoolIndex(Register dst,
442 intptr_t index,
443 Register pp) {
444 ASSERT((pp != PP) || constant_pool_allowed());
445 ASSERT(dst != pp);
446 Operand op;
447 // PP is _un_tagged on ARM64.
448 const uint32_t offset = target::ObjectPool::element_offset(index);
449 const uint32_t upper20 = offset & 0xfffff000;
450 if (Address::CanHoldOffset(offset)) {
451 ldr(dst, Address(pp, offset));
452 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
453 Operand::Immediate) {
454 const uint32_t lower12 = offset & 0x00000fff;
455 ASSERT(Address::CanHoldOffset(lower12));
456 add(dst, pp, op);
457 ldr(dst, Address(dst, lower12));
458 } else {
459 const uint16_t offset_low = Utils::Low16Bits(offset);
460 const uint16_t offset_high = Utils::High16Bits(offset);
461 movz(dst, Immediate(offset_low), 0);
462 movk(dst, Immediate(offset_high), 1);
463 ldr(dst, Address(pp, dst));
464 }
465}
466
467void Assembler::StoreWordToPoolIndex(Register src,
468 intptr_t index,
469 Register pp) {
470 ASSERT((pp != PP) || constant_pool_allowed());
471 ASSERT(src != pp);
472 Operand op;
473 // PP is _un_tagged on ARM64.
474 const uint32_t offset = target::ObjectPool::element_offset(index);
475 const uint32_t upper20 = offset & 0xfffff000;
476 if (Address::CanHoldOffset(offset)) {
477 str(src, Address(pp, offset));
478 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
479 Operand::Immediate) {
480 const uint32_t lower12 = offset & 0x00000fff;
481 ASSERT(Address::CanHoldOffset(lower12));
482 add(TMP, pp, op);
483 str(src, Address(TMP, lower12));
484 } else {
485 const uint16_t offset_low = Utils::Low16Bits(offset);
486 const uint16_t offset_high = Utils::High16Bits(offset);
487 movz(TMP, Immediate(offset_low), 0);
488 movk(TMP, Immediate(offset_high), 1);
489 str(src, Address(pp, TMP));
490 }
491}
492
493void Assembler::LoadDoubleWordFromPoolIndex(Register lower,
494 Register upper,
495 intptr_t index) {
496 // This implementation needs to be kept in sync with
497 // [InstructionPattern::DecodeLoadDoubleWordFromPool].
498 ASSERT(constant_pool_allowed());
499 ASSERT(lower != PP && upper != PP);
500
501 Operand op;
502 // PP is _un_tagged on ARM64.
503 const uint32_t offset = target::ObjectPool::element_offset(index);
504 ASSERT(offset < (1 << 24));
505 const uint32_t upper20 = offset & 0xfffff000;
506 const uint32_t lower12 = offset & 0x00000fff;
507 if (Address::CanHoldOffset(offset, Address::PairOffset)) {
508 ldp(lower, upper, Address(PP, offset, Address::PairOffset));
509 } else if (Operand::CanHold(offset, kXRegSizeInBits, &op) ==
510 Operand::Immediate) {
511 add(TMP, PP, op);
512 ldp(lower, upper, Address(TMP, 0, Address::PairOffset));
513 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
514 Operand::Immediate &&
515 Address::CanHoldOffset(lower12, Address::PairOffset)) {
516 add(TMP, PP, op);
517 ldp(lower, upper, Address(TMP, lower12, Address::PairOffset));
518 } else {
519 const uint32_t lower12 = offset & 0xfff;
520 const uint32_t higher12 = offset & 0xfff000;
521
522 Operand op_high, op_low;
523 bool ok = Operand::CanHold(higher12, kXRegSizeInBits, &op_high) ==
524 Operand::Immediate &&
525 Operand::CanHold(lower12, kXRegSizeInBits, &op_low) ==
526 Operand::Immediate;
527 ASSERT(ok);
528
529 add(TMP, PP, op_high);
530 add(TMP, TMP, op_low);
531 ldp(lower, upper, Address(TMP, 0, Address::PairOffset));
532 }
533}
534
535bool Assembler::CanLoadFromObjectPool(const Object& object) const {
536 ASSERT(IsOriginalObject(object));
537 if (!constant_pool_allowed()) {
538 return false;
539 }
540
541 DEBUG_ASSERT(IsNotTemporaryScopedHandle(object));
542 ASSERT(IsInOldSpace(object));
543 return true;
544}
545
546void Assembler::LoadNativeEntry(
547 Register dst,
548 const ExternalLabel* label,
549 ObjectPoolBuilderEntry::Patchability patchable) {
550 const intptr_t index =
551 object_pool_builder().FindNativeFunction(label, patchable);
552 LoadWordFromPoolIndex(dst, index);
553}
554
555void Assembler::LoadIsolate(Register dst) {
556 ldr(dst, Address(THR, target::Thread::isolate_offset()));
557}
558
559void Assembler::LoadIsolateGroup(Register rd) {
560 ldr(rd, Address(THR, target::Thread::isolate_group_offset()));
561}
562
563void Assembler::LoadObjectHelper(Register dst,
564 const Object& object,
565 bool is_unique) {
566 ASSERT(IsOriginalObject(object));
567 // `is_unique == true` effectively means the object has to be patchable
568 // (even if the object is null).
569 if (!is_unique) {
570 if (IsSameObject(compiler::NullObject(), object)) {
571 mov(dst, NULL_REG);
572 return;
573 }
574 if (IsSameObject(CastHandle<Object>(compiler::TrueObject()), object)) {
575 AddImmediate(dst, NULL_REG, kTrueOffsetFromNull);
576 return;
577 }
578 if (IsSameObject(CastHandle<Object>(compiler::FalseObject()), object)) {
579 AddImmediate(dst, NULL_REG, kFalseOffsetFromNull);
580 return;
581 }
582 word offset = 0;
583 if (target::CanLoadFromThread(object, &offset)) {
584 ldr(dst, Address(THR, offset));
585 return;
586 }
587 if (target::IsSmi(object)) {
588 LoadImmediate(dst, target::ToRawSmi(object));
589 return;
590 }
591 }
592 RELEASE_ASSERT(CanLoadFromObjectPool(object));
593 const intptr_t index =
594 is_unique ? object_pool_builder().AddObject(
595 object, ObjectPoolBuilderEntry::kPatchable)
596 : object_pool_builder().FindObject(
597 object, ObjectPoolBuilderEntry::kNotPatchable);
598 LoadWordFromPoolIndex(dst, index);
599}
600
601void Assembler::LoadObject(Register dst, const Object& object) {
602 LoadObjectHelper(dst, object, false);
603}
604
605void Assembler::LoadUniqueObject(Register dst, const Object& object) {
606 LoadObjectHelper(dst, object, true);
607}
608
609void Assembler::LoadFromStack(Register dst, intptr_t depth) {
610 ASSERT(depth >= 0);
611 LoadFromOffset(dst, SPREG, depth * target::kWordSize);
612}
613
614void Assembler::StoreToStack(Register src, intptr_t depth) {
615 ASSERT(depth >= 0);
616 StoreToOffset(src, SPREG, depth * target::kWordSize);
617}
618
619void Assembler::CompareToStack(Register src, intptr_t depth) {
620 LoadFromStack(TMP, depth);
621 CompareRegisters(src, TMP);
622}
623
624void Assembler::CompareObject(Register reg, const Object& object) {
625 ASSERT(IsOriginalObject(object));
626 if (IsSameObject(compiler::NullObject(), object)) {
627 CompareObjectRegisters(reg, NULL_REG);
628 } else if (target::IsSmi(object)) {
629 CompareImmediate(reg, target::ToRawSmi(object), kObjectBytes);
630 } else {
631 LoadObject(TMP, object);
632 CompareObjectRegisters(reg, TMP);
633 }
634}
635
636void Assembler::LoadImmediate(Register reg, int64_t imm) {
637 // Is it 0?
638 if (imm == 0) {
639 movz(reg, Immediate(0), 0);
640 return;
641 }
642
643 // Can we use one orri operation?
644 Operand op;
645 Operand::OperandType ot;
646 ot = Operand::CanHold(imm, kXRegSizeInBits, &op);
647 if (ot == Operand::BitfieldImm) {
648 orri(reg, ZR, Immediate(imm));
649 return;
650 }
651
652 // We may fall back on movz, movk, movn.
653 const uint32_t w0 = Utils::Low32Bits(imm);
654 const uint32_t w1 = Utils::High32Bits(imm);
655 const uint16_t h0 = Utils::Low16Bits(w0);
656 const uint16_t h1 = Utils::High16Bits(w0);
657 const uint16_t h2 = Utils::Low16Bits(w1);
658 const uint16_t h3 = Utils::High16Bits(w1);
659
660 // Special case for w1 == 0xffffffff
661 if (w1 == 0xffffffff) {
662 if (h1 == 0xffff) {
663 movn(reg, Immediate(~h0), 0);
664 } else {
665 movn(reg, Immediate(~h1), 1);
666 movk(reg, Immediate(h0), 0);
667 }
668 return;
669 }
670
671 // Special case for h3 == 0xffff
672 if (h3 == 0xffff) {
673 // We know h2 != 0xffff.
674 movn(reg, Immediate(~h2), 2);
675 if (h1 != 0xffff) {
676 movk(reg, Immediate(h1), 1);
677 }
678 if (h0 != 0xffff) {
679 movk(reg, Immediate(h0), 0);
680 }
681 return;
682 }
683
684 // Use constant pool if allowed, unless we can load imm with 2 instructions.
685 if ((w1 != 0) && constant_pool_allowed()) {
686 const intptr_t index = object_pool_builder().FindImmediate(imm);
687 LoadWordFromPoolIndex(reg, index);
688 return;
689 }
690
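 // From here the immediate is materialized 16 bits at a time; for example,
 // imm = 0x12345678 (with the pool not used) becomes
 //   movz reg, #0x5678, lsl #0
 //   movk reg, #0x1234, lsl #16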
691 bool initialized = false;
692 if (h0 != 0) {
693 movz(reg, Immediate(h0), 0);
694 initialized = true;
695 }
696 if (h1 != 0) {
697 if (initialized) {
698 movk(reg, Immediate(h1), 1);
699 } else {
700 movz(reg, Immediate(h1), 1);
701 initialized = true;
702 }
703 }
704 if (h2 != 0) {
705 if (initialized) {
706 movk(reg, Immediate(h2), 2);
707 } else {
708 movz(reg, Immediate(h2), 2);
709 initialized = true;
710 }
711 }
712 if (h3 != 0) {
713 if (initialized) {
714 movk(reg, Immediate(h3), 3);
715 } else {
716 movz(reg, Immediate(h3), 3);
717 }
718 }
719}
720
721void Assembler::LoadSImmediate(VRegister vd, float imms) {
722 int32_t imm32 = bit_cast<int32_t, float>(imms);
723 if (imm32 == 0) {
724 veor(vd, vd, vd);
725 } else if (constant_pool_allowed()) {
726 intptr_t index = object_pool_builder().FindImmediate(imm32);
727 intptr_t offset = target::ObjectPool::element_offset(index);
728 LoadSFromOffset(vd, PP, offset);
729 } else {
730 LoadImmediate(TMP, imm32);
731 fmovsr(vd, TMP);
732 }
733}
734
735void Assembler::LoadDImmediate(VRegister vd, double immd) {
736 if (fmovdi(vd, immd)) return;
737
738 int64_t imm64 = bit_cast<int64_t, double>(immd);
739 if (imm64 == 0) {
740 veor(vd, vd, vd);
741 } else if (constant_pool_allowed()) {
742 intptr_t index = object_pool_builder().FindImmediate64(imm64);
743 intptr_t offset = target::ObjectPool::element_offset(index);
744 LoadDFromOffset(vd, PP, offset);
745 } else {
746 LoadImmediate(TMP, imm64);
747 fmovdr(vd, TMP);
748 }
749}
750
751void Assembler::LoadQImmediate(VRegister vd, simd128_value_t immq) {
752 ASSERT(constant_pool_allowed());
753 intptr_t index = object_pool_builder().FindImmediate128(immq);
754 intptr_t offset = target::ObjectPool::element_offset(index);
755 LoadQFromOffset(vd, PP, offset);
756}
757
758void Assembler::BranchLink(intptr_t target_code_pool_index,
759 CodeEntryKind entry_kind) {
760 CLOBBERS_LR({
761 // Avoid clobbering CODE_REG when invoking code in precompiled mode.
762 // We don't actually use CODE_REG in the callee and caller might
763 // be using CODE_REG for a live value (e.g. a value that is alive
764 // across invocation of a shared stub like the one we use for
765 // allocating Mint boxes).
766 const Register code_reg = FLAG_precompiled_mode ? LR : CODE_REG;
767 LoadWordFromPoolIndex(code_reg, target_code_pool_index);
768 Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
769 });
770}
771
772void Assembler::BranchLink(
773 const Code& target,
774 ObjectPoolBuilderEntry::Patchability patchable,
775 CodeEntryKind entry_kind,
776 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
777 const intptr_t index = object_pool_builder().FindObject(
778 ToObject(target), patchable, snapshot_behavior);
779 BranchLink(index, entry_kind);
780}
781
782void Assembler::BranchLinkWithEquivalence(const Code& target,
783 const Object& equivalence,
784 CodeEntryKind entry_kind) {
785 const intptr_t index =
786 object_pool_builder().FindObject(ToObject(target), equivalence);
787 BranchLink(index, entry_kind);
788}
789
790void Assembler::AddImmediate(Register dest,
791 Register rn,
792 int64_t imm,
793 OperandSize sz) {
794 ASSERT(sz == kEightBytes || sz == kFourBytes);
795 const int32_t width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
796 Operand op;
797 if (imm == 0) {
798 if (dest != rn) {
799 mov(dest, rn);
800 }
801 return;
802 }
803 if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
804 add(dest, rn, op, sz);
805 } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
806 Operand::Immediate) {
807 sub(dest, rn, op, sz);
808 } else {
809 // TODO(zra): Try adding top 12 bits, then bottom 12 bits.
810 ASSERT(rn != TMP2);
811 LoadImmediate(TMP2, imm);
812 add(dest, rn, Operand(TMP2), sz);
813 }
814}
815
816void Assembler::AddImmediateSetFlags(Register dest,
817 Register rn,
818 int64_t imm,
819 OperandSize sz) {
820 ASSERT(sz == kEightBytes || sz == kFourBytes);
821 const int32_t width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
822 Operand op;
823 if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
824 // Handles imm == kMinInt64.
825 adds(dest, rn, op, sz);
826 } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
827 Operand::Immediate) {
828 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection.
829 subs(dest, rn, op, sz);
830 } else {
831 // TODO(zra): Try adding top 12 bits, then bottom 12 bits.
832 ASSERT(rn != TMP2);
833 LoadImmediate(TMP2, imm);
834 adds(dest, rn, Operand(TMP2), sz);
835 }
836}
837
838void Assembler::SubImmediateSetFlags(Register dest,
839 Register rn,
840 int64_t imm,
841 OperandSize sz) {
842 ASSERT(sz == kEightBytes || sz == kFourBytes);
843 const int32_t width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
844 Operand op;
845 if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
846 // Handles imm == kMinInt64.
847 subs(dest, rn, op, sz);
848 } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
849 Operand::Immediate) {
850 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection.
851 adds(dest, rn, op, sz);
852 } else {
853 // TODO(zra): Try subtracting top 12 bits, then bottom 12 bits.
854 ASSERT(rn != TMP2);
855 LoadImmediate(TMP2, imm);
856 subs(dest, rn, Operand(TMP2), sz);
857 }
858}
859
860void Assembler::AndImmediate(Register rd,
861 Register rn,
862 int64_t imm,
863 OperandSize sz) {
864 ASSERT(sz == kEightBytes || sz == kFourBytes);
865 const int32_t width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
866 Operand imm_op;
867 if (Operand::IsImmLogical(imm, width, &imm_op)) {
868 andi(rd, rn, Immediate(imm), sz);
869 } else {
870 LoadImmediate(TMP, imm);
871 and_(rd, rn, Operand(TMP), sz);
872 }
873}
874
875void Assembler::OrImmediate(Register rd,
876 Register rn,
877 int64_t imm,
878 OperandSize sz) {
879 ASSERT(sz == kEightBytes || sz == kFourBytes);
880 const int32_t width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
881 Operand imm_op;
882 if (Operand::IsImmLogical(imm, width, &imm_op)) {
883 orri(rd, rn, Immediate(imm), sz);
884 } else {
885 LoadImmediate(TMP, imm);
886 orr(rd, rn, Operand(TMP), sz);
887 }
888}
889
890void Assembler::XorImmediate(Register rd,
891 Register rn,
892 int64_t imm,
893 OperandSize sz) {
894 ASSERT(sz == kEightBytes || sz == kFourBytes);
895 const int32_t width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
896 Operand imm_op;
897 if (Operand::IsImmLogical(imm, width, &imm_op)) {
898 eori(rd, rn, Immediate(imm), sz);
899 } else {
900 LoadImmediate(TMP, imm);
901 eor(rd, rn, Operand(TMP), sz);
902 }
903}
904
905void Assembler::TestImmediate(Register rn, int64_t imm, OperandSize sz) {
906 ASSERT(sz == kEightBytes || sz == kFourBytes);
907 const int32_t width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
908 Operand imm_op;
909 if (Operand::IsImmLogical(imm, width, &imm_op)) {
910 tsti(rn, Immediate(imm), sz);
911 } else {
912 LoadImmediate(TMP, imm);
913 tst(rn, Operand(TMP), sz);
914 }
915}
916
917void Assembler::CompareImmediate(Register rn, int64_t imm, OperandSize sz) {
918 ASSERT(sz == kEightBytes || sz == kFourBytes);
919 const int32_t width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
920 Operand op;
921 if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
922 cmp(rn, op, sz);
923 } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
924 Operand::Immediate) {
925 cmn(rn, op, sz);
926 } else {
927 ASSERT(rn != TMP2);
928 LoadImmediate(TMP2, imm);
929 cmp(rn, Operand(TMP2), sz);
930 }
931}
932
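// Summary of the helper below: an offset that does not fit the addressing
// mode is split as upper20 + lower12, so one ADD materializes the upper bits
// into TMP2 and the access uses the remaining 12-bit offset; if even that
// fails, the full offset is loaded into TMP2 and used as a register offset
// (or added to the base for pair accesses).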
933Address Assembler::PrepareLargeOffset(Register base,
934 int32_t offset,
935 OperandSize sz,
936 Address::AddressType addr_type) {
937 ASSERT(addr_type == Address::AddressType::Offset ||
938 addr_type == Address::AddressType::PairOffset);
939 if (Address::CanHoldOffset(offset, addr_type, sz)) {
940 return Address(base, offset, addr_type);
941 }
942 ASSERT(base != TMP2);
943 Operand op;
944 const uint32_t upper20 = offset & 0xfffff000;
945 const uint32_t lower12 = offset & 0x00000fff;
946 if ((base != CSP) &&
947 (Operand::CanHold(upper20, kXRegSizeInBits, &op) == Operand::Immediate) &&
948 Address::CanHoldOffset(lower12, addr_type, sz)) {
949 add(TMP2, base, op);
950 return Address(TMP2, lower12, addr_type);
951 }
952 LoadImmediate(TMP2, offset);
953 if (addr_type == Address::AddressType::Offset) {
954 return Address(base, TMP2);
955 } else {
956 add(TMP2, TMP2, Operand(base));
957 return Address(TMP2, 0, Address::AddressType::PairOffset);
958 }
959}
960
961void Assembler::Load(Register dst, const Address& addr, OperandSize sz) {
962 if (addr.type() == Address::AddressType::Offset ||
963 addr.type() == Address::AddressType::PairOffset) {
964 ldr(dst, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()),
965 sz);
966 } else {
967 // Pass the address through unchanged.
968 ldr(dst, addr, sz);
969 }
970}
971
972void Assembler::LoadSFromOffset(VRegister dest, Register base, int32_t offset) {
973 auto const type = Address::AddressType::Offset;
974 fldrs(dest, PrepareLargeOffset(base, offset, kSWord, type));
975}
976
977void Assembler::LoadDFromOffset(VRegister dest, Register base, int32_t offset) {
978 auto const type = Address::AddressType::Offset;
979 fldrd(dest, PrepareLargeOffset(base, offset, kDWord, type));
980}
981
982void Assembler::LoadQFromOffset(VRegister dest, Register base, int32_t offset) {
983 auto const type = Address::AddressType::Offset;
984 fldrq(dest, PrepareLargeOffset(base, offset, kQWord, type));
985}
986
987void Assembler::Store(Register src, const Address& addr, OperandSize sz) {
988 if (addr.type() == Address::AddressType::Offset ||
989 addr.type() == Address::AddressType::PairOffset) {
990 str(src, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()),
991 sz);
992 } else {
993 // Pass the address through unchanged.
994 str(src, addr, sz);
995 }
996}
997
998void Assembler::StorePairToOffset(Register low,
999 Register high,
1000 Register base,
1001 int32_t offset,
1002 OperandSize sz) {
1003 auto const type = Address::AddressType::PairOffset;
1004 stp(low, high, PrepareLargeOffset(base, offset, sz, type), sz);
1005}
1006
1007void Assembler::StoreSToOffset(VRegister src, Register base, int32_t offset) {
1008 auto const type = Address::AddressType::Offset;
1009 fstrs(src, PrepareLargeOffset(base, offset, kSWord, type));
1010}
1011
1012void Assembler::StoreDToOffset(VRegister src, Register base, int32_t offset) {
1013 auto const type = Address::AddressType::Offset;
1014 fstrd(src, PrepareLargeOffset(base, offset, kDWord, type));
1015}
1016
1017void Assembler::StoreQToOffset(VRegister src, Register base, int32_t offset) {
1018 auto const type = Address::AddressType::Offset;
1019 fstrq(src, PrepareLargeOffset(base, offset, kQWord, type));
1020}
1021
1022void Assembler::VRecps(VRegister vd, VRegister vn) {
1023 ASSERT(vn != VTMP);
1024 ASSERT(vd != VTMP);
1025
1026 // Reciprocal estimate.
1027 vrecpes(vd, vn);
1028 // 2 Newton-Raphson steps.
1029 vrecpss(VTMP, vn, vd);
1030 vmuls(vd, vd, VTMP);
1031 vrecpss(VTMP, vn, vd);
1032 vmuls(vd, vd, VTMP);
1033}
1034
1035void Assembler::VRSqrts(VRegister vd, VRegister vn) {
1036 ASSERT(vd != VTMP);
1037 ASSERT(vn != VTMP);
1038
1039 // Reciprocal square root estimate.
1040 vrsqrtes(vd, vn);
1041 // 2 Newton-Raphson steps. xn+1 = xn * (3 - V1*xn^2) / 2.
1042 // First step.
1043 vmuls(VTMP, vd, vd); // VTMP <- xn^2
1044 vrsqrtss(VTMP, vn, VTMP); // VTMP <- (3 - V1*VTMP) / 2.
1045 vmuls(vd, vd, VTMP); // xn+1 <- xn * VTMP
1046 // Second step.
1047 vmuls(VTMP, vd, vd);
1048 vrsqrtss(VTMP, vn, VTMP);
1049 vmuls(vd, vd, VTMP);
1050}
1051
1052#if defined(DART_COMPRESSED_POINTERS)
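// Note: RestorePinnedRegisters (below) leaves HEAP_BITS with the write-barrier
// mask in its upper 32 bits and (heap base >> 32) in its lower 32 bits, so
// Operand(HEAP_BITS, LSL, 32) reconstructs the heap base that is added to the
// zero-extended 32-bit compressed value here.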
1053void Assembler::LoadCompressed(Register dest, const Address& slot) {
1054 Load(dest, slot, kUnsignedFourBytes); // Zero-extension.
1055 add(dest, dest, Operand(HEAP_BITS, LSL, 32));
1056}
1057#endif
1058
1059void Assembler::StoreBarrier(Register object,
1060 Register value,
1061 CanBeSmi can_be_smi,
1062 Register scratch) {
1063 const bool spill_lr = lr_state().LRContainsReturnAddress();
1064 // x.slot = x. The barrier should have been removed at the IL level.
1065 ASSERT(object != value);
1066 ASSERT(object != scratch);
1067 ASSERT(value != scratch);
1068 ASSERT(object != LINK_REGISTER);
1069 ASSERT(value != LINK_REGISTER);
1070 ASSERT(scratch != LINK_REGISTER);
1071 ASSERT(object != TMP2);
1072 ASSERT(value != TMP2);
1073 ASSERT(scratch != TMP2);
1074 ASSERT(scratch != kNoRegister);
1075
1076 // In parallel, test whether
1077 // - object is old and not remembered and value is new, or
1078 // - object is old and value is old and not marked and concurrent marking is
1079 // in progress
1080 // If so, call the WriteBarrier stub, which will either add object to the
1081 // store buffer (case 1) or add value to the marking stack (case 2).
1082 // Compare UntaggedObject::StorePointer.
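 // The test below, roughly in pseudocode:
 //   tmp = value->tags() & (object->tags() >> kBarrierOverlapShift);
 //   if ((tmp & write_barrier_mask) == 0) goto done;  // mask is HEAP_BITS >> 32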
1083 Label done;
1084 if (can_be_smi == kValueCanBeSmi) {
1085 BranchIfSmi(value, &done);
1086 } else {
1087#if defined(DEBUG)
1088 Label passed_check;
1089 BranchIfNotSmi(value, &passed_check, kNearJump);
1090 Breakpoint();
1091 Bind(&passed_check);
1092#endif
1093 }
1094 ldr(scratch, FieldAddress(object, target::Object::tags_offset()),
1095 kUnsignedByte);
1096 ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
1097 and_(scratch, TMP2,
1098 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1099 tst(scratch, Operand(HEAP_BITS, LSR, 32));
1100 b(&done, ZERO);
1101
1102 if (spill_lr) {
1103 SPILLS_LR_TO_FRAME(Push(LR));
1104 }
1105 Register objectForCall = object;
1106 if (value != kWriteBarrierValueReg) {
1107 // Unlikely. Only non-graph intrinsics.
1108 // TODO(rmacnak): Shuffle registers in intrinsics.
1109 if (object != kWriteBarrierValueReg) {
1110 Push(kWriteBarrierValueReg);
1111 } else {
1112 COMPILE_ASSERT(R2 != kWriteBarrierValueReg);
1113 COMPILE_ASSERT(R3 != kWriteBarrierValueReg);
1114 objectForCall = (value == R2) ? R3 : R2;
1115 PushPair(kWriteBarrierValueReg, objectForCall);
1116 mov(objectForCall, object);
1117 }
1118 mov(kWriteBarrierValueReg, value);
1119 }
1120
1121 generate_invoke_write_barrier_wrapper_(objectForCall);
1122
1123 if (value != kWriteBarrierValueReg) {
1124 if (object != kWriteBarrierValueReg) {
1125 Pop(kWriteBarrierValueReg);
1126 } else {
1127 PopPair(kWriteBarrierValueReg, objectForCall);
1128 }
1129 }
1130 if (spill_lr) {
1131 RESTORES_LR_FROM_FRAME(Pop(LR));
1132 }
1133 Bind(&done);
1134}
1135
1136void Assembler::ArrayStoreBarrier(Register object,
1137 Register slot,
1138 Register value,
1139 CanBeSmi can_be_smi,
1140 Register scratch) {
1141 const bool spill_lr = lr_state().LRContainsReturnAddress();
1142 ASSERT(object != slot);
1143 ASSERT(object != value);
1144 ASSERT(object != scratch);
1145 ASSERT(slot != value);
1146 ASSERT(slot != scratch);
1147 ASSERT(value != scratch);
1148 ASSERT(object != LINK_REGISTER);
1149 ASSERT(slot != LINK_REGISTER);
1150 ASSERT(value != LINK_REGISTER);
1151 ASSERT(scratch != LINK_REGISTER);
1152 ASSERT(object != TMP2);
1153 ASSERT(slot != TMP2);
1154 ASSERT(value != TMP2);
1155 ASSERT(scratch != TMP2);
1156 ASSERT(scratch != kNoRegister);
1157
1158 // In parallel, test whether
1159 // - object is old and not remembered and value is new, or
1160 // - object is old and value is old and not marked and concurrent marking is
1161 // in progress
1162 // If so, call the WriteBarrier stub, which will either add object to the
1163 // store buffer (case 1) or add value to the marking stack (case 2).
1164 // Compare UntaggedObject::StorePointer.
1165 Label done;
1166 if (can_be_smi == kValueCanBeSmi) {
1167 BranchIfSmi(value, &done);
1168 } else {
1169#if defined(DEBUG)
1170 Label passed_check;
1171 BranchIfNotSmi(value, &passed_check, kNearJump);
1172 Breakpoint();
1173 Bind(&passed_check);
1174#endif
1175 }
1176 ldr(scratch, FieldAddress(object, target::Object::tags_offset()),
1177 kUnsignedByte);
1178 ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
1179 and_(scratch, TMP2,
1180 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1181 tst(scratch, Operand(HEAP_BITS, LSR, 32));
1182 b(&done, ZERO);
1183 if (spill_lr) {
1184 SPILLS_LR_TO_FRAME(Push(LR));
1185 }
1186 if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
1187 (slot != kWriteBarrierSlotReg)) {
1188 // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
1189 // from StoreIndexInstr, which gets these exact registers from the register
1190 // allocator.
1191 UNIMPLEMENTED();
1192 }
1193 generate_invoke_array_write_barrier_();
1194 if (spill_lr) {
1195 RESTORES_LR_FROM_FRAME(Pop(LR));
1196 }
1197 Bind(&done);
1198}
1199
1200void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
1201 const Address& address,
1202 const Object& value,
1203 MemoryOrder memory_order,
1204 OperandSize size) {
1205 ASSERT(IsOriginalObject(value));
1206 DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
1207 Register src;
1208 if (IsSameObject(compiler::NullObject(), value)) {
1209 src = NULL_REG;
1210 } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
1211 src = ZR;
1212 } else {
1213 // Store uses TMP2 when the address cannot be fully contained in the
1214 // instruction, so TMP is safe to use as a scratch register here.
1215 src = TMP;
1216 ASSERT(object != src);
1217 LoadObject(src, value);
1218 }
1219 if (memory_order == kRelease) {
1220 StoreRelease(src, address, size);
1221 } else {
1222 Store(src, address, size);
1223 }
1224}
1225
1226void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
1227 Register value) {
1228 // We can't assert the incremental barrier is not needed here, only the
1229 // generational barrier. We sometimes omit the write barrier when 'value' is
1230 // a constant, but we don't eagerly mark 'value' and instead assume it is also
1231 // reachable via a constant pool, so it doesn't matter if it is not traced via
1232 // 'object'.
1233 Label done;
1234 BranchIfSmi(value, &done, kNearJump);
1235 ldr(TMP, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
1236 tbz(&done, TMP, target::UntaggedObject::kNewBit);
1237 ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
1238 tbz(&done, TMP, target::UntaggedObject::kOldAndNotRememberedBit);
1239 Stop("Write barrier is required");
1240 Bind(&done);
1241}
1242
1243void Assembler::StoreInternalPointer(Register object,
1244 const Address& dest,
1245 Register value) {
1246 str(value, dest);
1247}
1248
1249void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
1250 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
1251 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
1252 ubfx(result, tags, target::UntaggedObject::kClassIdTagPos,
1253 target::UntaggedObject::kClassIdTagSize);
1254}
1255
1256void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
1257 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
1258 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
1259 ubfx(result, tags, target::UntaggedObject::kSizeTagPos,
1260 target::UntaggedObject::kSizeTagSize);
1261 LslImmediate(result, result, target::ObjectAlignment::kObjectAlignmentLog2);
1262}
1263
1264void Assembler::LoadClassId(Register result, Register object) {
1265 ldr(result, FieldAddress(object, target::Object::tags_offset()));
1266 ExtractClassIdFromTags(result, result);
1267}
1268
1269void Assembler::LoadClassById(Register result, Register class_id) {
1270 ASSERT(result != class_id);
1271
1272 const intptr_t table_offset =
1273 target::IsolateGroup::cached_class_table_table_offset();
1274
1275 LoadIsolateGroup(result);
1276 LoadFromOffset(result, result, table_offset);
1277 ldr(result, Address(result, class_id, UXTX, Address::Scaled));
1278}
1279
1280void Assembler::CompareClassId(Register object,
1281 intptr_t class_id,
1282 Register scratch) {
1283 LoadClassId(TMP, object);
1284 CompareImmediate(TMP, class_id);
1285}
1286
1287void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
1288 ASSERT(result != object);
1289 Label done;
1290 LoadImmediate(result, kSmiCid);
1291 BranchIfSmi(object, &done);
1292 LoadClassId(result, object);
1293 Bind(&done);
1294}
1295
1296void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
1297 if (result == object) {
1298 LoadClassIdMayBeSmi(TMP, object);
1299 SmiTag(result, TMP);
1300 } else {
1301 Label done;
1302 LoadImmediate(result, target::ToRawSmi(kSmiCid));
1303 BranchIfSmi(object, &done);
1304 LoadClassId(result, object);
1305 SmiTag(result);
1306 Bind(&done);
1307 }
1308}
1309
1310void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
1311 Register src,
1312 Register scratch,
1313 bool can_be_null) {
1314#if defined(DEBUG)
1315 Comment("Check that object in register has cid %" Pd "", cid);
1316 Label matches;
1317 LoadClassIdMayBeSmi(scratch, src);
1318 CompareImmediate(scratch, cid);
1319 BranchIf(EQUAL, &matches, Assembler::kNearJump);
1320 if (can_be_null) {
1321 CompareImmediate(scratch, kNullCid);
1322 BranchIf(EQUAL, &matches, Assembler::kNearJump);
1323 }
1324 Breakpoint();
1325 Bind(&matches);
1326#endif
1327}
1328
1329// Frame entry and exit.
1330void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
1331 // Reserve space for arguments and align frame before entering
1332 // the C++ world.
1333 if (frame_space != 0) {
1334 AddImmediate(SP, -frame_space);
1335 }
1336 if (OS::ActivationFrameAlignment() > 1) {
1337 andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
1338 }
1339}
1340
1341void Assembler::EmitEntryFrameVerification() {
1342#if defined(DEBUG)
1343 Label done;
1344 ASSERT(!constant_pool_allowed());
1345 LoadImmediate(TMP, target::frame_layout.exit_link_slot_from_entry_fp *
1346 target::kWordSize);
1347 add(TMP, TMP, Operand(FPREG));
1348 cmp(TMP, Operand(SPREG));
1349 b(&done, EQ);
1350
1351 Breakpoint();
1352
1353 Bind(&done);
1354#endif
1355}
1356
1357void Assembler::RestoreCodePointer() {
1358 ldr(CODE_REG,
1359 Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
1360 CheckCodePointer();
1361}
1362
1363void Assembler::RestorePinnedRegisters() {
1364 ldr(HEAP_BITS,
1365 compiler::Address(THR, target::Thread::write_barrier_mask_offset()));
1366 LslImmediate(HEAP_BITS, HEAP_BITS, 32);
1367 ldr(NULL_REG, compiler::Address(THR, target::Thread::object_null_offset()));
1368#if defined(DART_COMPRESSED_POINTERS)
1369 ldr(TMP, compiler::Address(THR, target::Thread::heap_base_offset()));
1370 orr(HEAP_BITS, HEAP_BITS, Operand(TMP, LSR, 32));
1371#endif
1372}
1373
1374void Assembler::SetupGlobalPoolAndDispatchTable() {
1375 ASSERT(FLAG_precompiled_mode);
1376 ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
1377 sub(PP, PP, Operand(kHeapObjectTag)); // Pool in PP is untagged!
1378 ldr(DISPATCH_TABLE_REG,
1379 Address(THR, target::Thread::dispatch_table_array_offset()));
1380}
1381
1382void Assembler::CheckCodePointer() {
1383#ifdef DEBUG
1384 if (!FLAG_check_code_pointer) {
1385 return;
1386 }
1387 Comment("CheckCodePointer");
1388 Label cid_ok, instructions_ok;
1389 Push(R0);
1390 CompareClassId(CODE_REG, kCodeCid);
1391 b(&cid_ok, EQ);
1392 brk(0);
1393 Bind(&cid_ok);
1394
1395 const intptr_t entry_offset =
1396 CodeSize() + target::Instructions::HeaderSize() - kHeapObjectTag;
1397 adr(R0, Immediate(-entry_offset));
1398 ldr(TMP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
1399 cmp(R0, Operand(TMP));
1400 b(&instructions_ok, EQ);
1401 brk(1);
1402 Bind(&instructions_ok);
1403 Pop(R0);
1404#endif
1405}
1406
1407// The ARM64 ABI requires at all times
1408// - stack limit < CSP <= stack base
1409// - CSP mod 16 = 0
1410// - we do not access stack memory below CSP
1411// Practically, this means we need to keep the C stack pointer ahead of the
1412// Dart stack pointer and 16-byte aligned for signal handlers. We set
1413// CSP to a value near the stack limit during SetupDartSP*, and use a different
1414// register within our generated code to avoid the alignment requirement.
1415// Note that Fuchsia does not have signal handlers.
1416
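// In this port the Dart stack pointer (SP) is an ordinary general-purpose
// register, so only CSP is subject to the architectural alignment rule above;
// SetupDartSP and SetupCSPFromThread below establish the arrangement where
// CSP is parked near the stack limit.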
1417void Assembler::SetupDartSP(intptr_t reserve /* = 4096 */) {
1418 mov(SP, CSP);
1419 // The caller doesn't have a Thread available. Just kick CSP forward a bit.
1420 AddImmediate(CSP, CSP, -Utils::RoundUp(reserve, 16));
1421}
1422
1423void Assembler::SetupCSPFromThread(Register thr) {
1424 // Thread::saved_stack_limit_ is OSThread::overflow_stack_limit(), which is
1425 // OSThread::stack_limit() with some headroom. Set CSP a bit below this value
1426 // so that signal handlers won't stomp on the stack of Dart code that pushs a
1427 // bit past overflow_stack_limit before its next overflow check. (We build
1428 // frames before doing an overflow check.)
1429 ldr(TMP, Address(thr, target::Thread::saved_stack_limit_offset()));
1430 AddImmediate(CSP, TMP, -4096);
1431
1432 // TODO(47824): This will probably cause signal handlers on Windows to crash.
1433 // Windows requires the stack to grow in order, one page at a time, but
1434 // pushing CSP to near the stack limit likely skips over many pages.
1435}
1436
1437void Assembler::RestoreCSP() {
1438 mov(CSP, SP);
1439}
1440
1441void Assembler::SetReturnAddress(Register value) {
1442 RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(LR, value));
1443}
1444
1445void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
1446 AsrImmediate(reg, reg, shift);
1447}
1448
1449void Assembler::CompareWords(Register reg1,
1450 Register reg2,
1451 intptr_t offset,
1452 Register count,
1453 Register temp,
1454 Label* equals) {
1455 Label loop;
1456
1457 AddImmediate(reg1, offset - kHeapObjectTag);
1458 AddImmediate(reg2, offset - kHeapObjectTag);
1459
1460 COMPILE_ASSERT(target::kWordSize == 8);
1461 Bind(&loop);
1462 BranchIfZero(count, equals, Assembler::kNearJump);
1463 AddImmediate(count, -1);
1464 ldr(temp, Address(reg1, 8, Address::PostIndex));
1465 ldr(TMP, Address(reg2, 8, Address::PostIndex));
1466 cmp(temp, Operand(TMP));
1467 BranchIf(EQUAL, &loop, Assembler::kNearJump);
1468}
1469
1470void Assembler::EnterFrame(intptr_t frame_size) {
1471 SPILLS_LR_TO_FRAME(PushPair(FP, LR)); // low: FP, high: LR.
1472 mov(FP, SP);
1473
1474 if (frame_size > 0) {
1475 sub(SP, SP, Operand(frame_size));
1476 }
1477}
1478
1479void Assembler::LeaveFrame() {
1480 mov(SP, FP);
1481 RESTORES_LR_FROM_FRAME(PopPair(FP, LR)); // low: FP, high: LR.
1482}
1483
1484void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
1485 ASSERT(!constant_pool_allowed());
1486 // Setup the frame.
1487 EnterFrame(0);
1488
1489 if (!FLAG_precompiled_mode) {
1490 TagAndPushPPAndPcMarker(); // Save PP and PC marker.
1491
1492 // Load the pool pointer.
1493 if (new_pp == kNoRegister) {
1494 LoadPoolPointer();
1495 } else {
1496 mov(PP, new_pp);
1497 }
1498 }
1499 set_constant_pool_allowed(true);
1500
1501 // Reserve space.
1502 if (frame_size > 0) {
1503 AddImmediate(SP, -frame_size);
1504 }
1505}
1506
1507// On entry to a function compiled for OSR, the caller's frame pointer, the
1508// stack locals, and any copied parameters are already in place. The frame
1509// pointer is already set up. The PC marker is not correct for the
1510// optimized function and there may be extra space for spill slots to
1511// allocate. We must also set up the pool pointer for the function.
1512void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
1513 ASSERT(!constant_pool_allowed());
1514 Comment("EnterOsrFrame");
1515 RestoreCodePointer();
1516 LoadPoolPointer();
1517
1518 if (extra_size > 0) {
1519 AddImmediate(SP, -extra_size);
1520 }
1521}
1522
1523void Assembler::LeaveDartFrame() {
1524 if (!FLAG_precompiled_mode) {
1525 // Restore and untag PP.
1526 LoadFromOffset(
1527 PP, FP,
1528 target::frame_layout.saved_caller_pp_from_fp * target::kWordSize);
1529 sub(PP, PP, Operand(kHeapObjectTag));
1530 }
1531 set_constant_pool_allowed(false);
1532 LeaveFrame();
1533}
1534
1535void Assembler::EnterFullSafepoint(Register state) {
1536 // We generate the same number of instructions whether or not the slow-path is
1537 // forced. This simplifies GenerateJitCallbackTrampolines.
1538 // For TSAN, we always go to the runtime so TSAN is aware of the release
1539 // semantics of entering the safepoint.
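 // The fast path below is a load-exclusive/store-exclusive compare-and-swap
 // on the thread's safepoint state: it swaps "unacquired" for "acquired",
 // retries if the store-exclusive fails, and takes the slow-path stub when
 // the state is not "unacquired".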
1540
1541 Register addr = TMP2;
1542 ASSERT(addr != state);
1543
1544 Label slow_path, done, retry;
1545 if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
1546 b(&slow_path);
1547 }
1548
1549 movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
1550 add(addr, THR, Operand(addr));
1551 Bind(&retry);
1552 ldxr(state, addr);
1553 cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));
1554 b(&slow_path, NE);
1555
1556 movz(state, Immediate(target::Thread::full_safepoint_state_acquired()), 0);
1557 stxr(TMP, state, addr);
1558 cbz(&done, TMP); // 0 means stxr was successful.
1559
1560 if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
1561 b(&retry);
1562 }
1563
1564 Bind(&slow_path);
1565 ldr(addr, Address(THR, target::Thread::enter_safepoint_stub_offset()));
1566 ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));
1567 blr(addr);
1568
1569 Bind(&done);
1570}
1571
1572void Assembler::TransitionGeneratedToNative(Register destination,
1573 Register new_exit_frame,
1574 Register new_exit_through_ffi,
1575 bool enter_safepoint) {
1576 // Save exit frame information to enable stack walking.
1577 StoreToOffset(new_exit_frame, THR,
1578 target::Thread::top_exit_frame_info_offset());
1579
1580 StoreToOffset(new_exit_through_ffi, THR,
1581 target::Thread::exit_through_ffi_offset());
1582 Register tmp = new_exit_through_ffi;
1583
1584 // Mark that the thread is executing native code.
1585 StoreToOffset(destination, THR, target::Thread::vm_tag_offset());
1586 LoadImmediate(tmp, target::Thread::native_execution_state());
1587 StoreToOffset(tmp, THR, target::Thread::execution_state_offset());
1588
1589 if (enter_safepoint) {
1590 EnterFullSafepoint(tmp);
1591 }
1592}
1593
1594void Assembler::ExitFullSafepoint(Register state,
1595 bool ignore_unwind_in_progress) {
1596 // We generate the same number of instructions whether or not the slow-path is
1597 // forced, for consistency with EnterFullSafepoint.
1598 // For TSAN, we always go to the runtime so TSAN is aware of the acquire
1599 // semantics of leaving the safepoint.
1600 Register addr = TMP2;
1601 ASSERT(addr != state);
1602
1603 Label slow_path, done, retry;
1604 if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
1605 b(&slow_path);
1606 }
1607
1608 movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
1609 add(addr, THR, Operand(addr));
1610 Bind(&retry);
1611 ldxr(state, addr);
1612 cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));
1613 b(&slow_path, NE);
1614
1615 movz(state, Immediate(target::Thread::full_safepoint_state_unacquired()), 0);
1616 stxr(TMP, state, addr);
1617 cbz(&done, TMP); // 0 means stxr was successful.
1618
1619 if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
1620 b(&retry);
1621 }
1622
1623 Bind(&slow_path);
1624 if (ignore_unwind_in_progress) {
1625 ldr(addr,
1626 Address(THR,
1627 target::Thread::
1628 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
1629 } else {
1630 ldr(addr, Address(THR, target::Thread::exit_safepoint_stub_offset()));
1631 }
1632 ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));
1633 blr(addr);
1634
1635 Bind(&done);
1636}
1637
1638void Assembler::TransitionNativeToGenerated(Register state,
1639 bool exit_safepoint,
1640 bool ignore_unwind_in_progress) {
1641 if (exit_safepoint) {
1642 ExitFullSafepoint(state, ignore_unwind_in_progress);
1643 } else {
1644 // This flag only makes sense if we are leaving the safepoint.
1645 ASSERT(!ignore_unwind_in_progress);
1646#if defined(DEBUG)
1647 // Ensure we've already left the safepoint.
1648 ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
1649 LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
1650 ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
1651 and_(TMP, TMP, Operand(state));
1652 Label ok;
1653 cbz(&ok, TMP);
1654 Breakpoint();
1655 Bind(&ok);
1656#endif
1657 }
1658
1659 // Mark that the thread is executing Dart code.
1660 LoadImmediate(state, target::Thread::vm_tag_dart_id());
1661 StoreToOffset(state, THR, target::Thread::vm_tag_offset());
1662 LoadImmediate(state, target::Thread::generated_execution_state());
1663 StoreToOffset(state, THR, target::Thread::execution_state_offset());
1664
1665 // Reset exit frame information in Isolate's mutator thread structure.
1666 StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
1667 LoadImmediate(state, 0);
1668 StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
1669}
1670
1671void Assembler::CallRuntime(const RuntimeEntry& entry,
1672 intptr_t argument_count) {
1673 ASSERT(!entry.is_leaf());
1674 // Argument count is not checked here, but in the runtime entry for a more
1675 // informative error message.
1676 ldr(R5, compiler::Address(THR, entry.OffsetFromThread()));
1677 LoadImmediate(R4, argument_count);
1678 Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
1679}
1680
1681// FPU: only the bottom 64 bits of v8-v15 are preserved across a C call (callee-saved).
1682// The upper bits might be in use by Dart, so we save the whole register.
1683static const RegisterSet kRuntimeCallSavedRegisters(kDartVolatileCpuRegs,
1684 kAllFpuRegistersList);
1685
1686#undef __
1687#define __ assembler_->
1688
1689LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
1690 intptr_t frame_size,
1691 bool preserve_registers)
1692 : assembler_(assembler), preserve_registers_(preserve_registers) {
1693 __ Comment("EnterCallRuntimeFrame");
1694 __ EnterFrame(0);
1695
1696 if (preserve_registers) {
1697 __ PushRegisters(kRuntimeCallSavedRegisters);
1698 } else {
1699 // These registers must always be preserved.
1700 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
1701 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
1702 COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
1703 COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
1704 COMPILE_ASSERT(IsCalleeSavedRegister(HEAP_BITS));
1705 COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
1706 }
1707
1708 __ ReserveAlignedFrameSpace(frame_size);
1709}
1710
1711void LeafRuntimeScope::Call(const RuntimeEntry& entry,
1712 intptr_t argument_count) {
1713 ASSERT(argument_count == entry.argument_count());
1714 // Since we are entering C++ code, we must restore the C stack pointer from
1715 // the stack limit to an aligned value nearer to the top of the stack.
1716 // We cache the stack limit in callee-saved registers, then align and call,
1717 // restoring CSP and SP on return from the call.
1718 // This sequence may occur in an intrinsic, so don't use registers an
1719 // intrinsic must preserve.
1720 __ mov(CSP, SP);
1721 __ ldr(TMP, compiler::Address(THR, entry.OffsetFromThread()));
1722 __ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
1723 __ blr(TMP);
1724 __ LoadImmediate(TMP, VMTag::kDartTagId);
1725 __ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
1726 __ SetupCSPFromThread(THR);
1727}
1728
1729LeafRuntimeScope::~LeafRuntimeScope() {
1730 if (preserve_registers_) {
1731 // SP might have been modified to reserve space for arguments
1732 // and ensure proper alignment of the stack frame.
1733 // We need to restore it before restoring registers.
1734 const intptr_t kPushedRegistersSize =
1735 kRuntimeCallSavedRegisters.CpuRegisterCount() * target::kWordSize +
1736 kRuntimeCallSavedRegisters.FpuRegisterCount() * kFpuRegisterSize;
1737 __ AddImmediate(SP, FP, -kPushedRegistersSize);
1738 __ PopRegisters(kRuntimeCallSavedRegisters);
1739 }
1740
1741 __ LeaveFrame();
1742}
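
// Typical usage of LeafRuntimeScope (illustrative sketch; the runtime entry
// name is a placeholder): the constructor sets up and aligns the frame,
// Call() invokes the leaf C function, and the destructor tears the frame down.
//
//   {
//     LeafRuntimeScope rt(assembler, /*frame_size=*/0,
//                         /*preserve_registers=*/true);
//     // ... move the C arguments into their ABI registers ...
//     rt.Call(kExampleLeafRuntimeEntry, /*argument_count=*/2);
//   }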
1743
1744// For use by LR related macros (e.g. CLOBBERS_LR).
1745#undef __
1746#define __ this->
1747
1748void Assembler::EnterStubFrame() {
1749 EnterDartFrame(0);
1750}
1751
1752void Assembler::LeaveStubFrame() {
1753 LeaveDartFrame();
1754}
1755
1756void Assembler::EnterCFrame(intptr_t frame_space) {
1757 // Already saved.
1758 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
1759 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
1760 COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
1761 COMPILE_ASSERT(IsCalleeSavedRegister(HEAP_BITS));
1762 COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
1763
1764 Push(FP);
1765 mov(FP, SP);
1766 ReserveAlignedFrameSpace(frame_space);
1767}
1768
1769void Assembler::LeaveCFrame() {
1770 mov(SP, FP);
1771 Pop(FP);
1772}
1773
1774// R0 receiver, R5 ICData entries array
1775// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
1776void Assembler::MonomorphicCheckedEntryJIT() {
1777 has_monomorphic_entry_ = true;
1778 const bool saved_use_far_branches = use_far_branches();
1779 set_use_far_branches(false);
1780 const intptr_t start = CodeSize();
1781
1782 Label immediate, miss;
1783 Bind(&miss);
1784 ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
1785 br(IP0);
1786
1787 Comment("MonomorphicCheckedEntry");
1788 ASSERT_EQUAL(CodeSize() - start,
1789 target::Instructions::kMonomorphicEntryOffsetJIT);
1790
1791 const intptr_t cid_offset = target::Array::element_offset(0);
1792 const intptr_t count_offset = target::Array::element_offset(1);
1793
1794 // Sadly this cannot use ldp because ldp requires aligned offsets.
1795 ldr(R1, FieldAddress(R5, cid_offset), kObjectBytes);
1796 ldr(R2, FieldAddress(R5, count_offset), kObjectBytes);
1797 LoadClassIdMayBeSmi(IP0, R0);
1798 add(R2, R2, Operand(target::ToRawSmi(1)), kObjectBytes);
1799 cmp(R1, Operand(IP0, LSL, 1), kObjectBytes);
1800 b(&miss, NE);
1801 str(R2, FieldAddress(R5, count_offset), kObjectBytes);
1802 LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction
1803
1804 // Fall through to unchecked entry.
1805 ASSERT_EQUAL(CodeSize() - start,
1806 target::Instructions::kPolymorphicEntryOffsetJIT);
1807
1808 set_use_far_branches(saved_use_far_branches);
1809}
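
// The cmp above matches the cached Smi-encoded cid (R1) against the
// receiver's raw class id (IP0) re-tagged via "LSL 1". For example
// (illustrative numbers): a receiver with class id 42 matches a cache slot
// holding the Smi 84, i.e. 42 << kSmiTagShift.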
1810
1811// R0 receiver, R5 guarded cid as Smi.
1812// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
1813void Assembler::MonomorphicCheckedEntryAOT() {
1814 has_monomorphic_entry_ = true;
1815 bool saved_use_far_branches = use_far_branches();
1816 set_use_far_branches(false);
1817
1818 const intptr_t start = CodeSize();
1819
1820 Label immediate, miss;
1821 Bind(&miss);
1822 ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
1823 br(IP0);
1824
1825 Comment("MonomorphicCheckedEntry");
1826 ASSERT_EQUAL(CodeSize() - start,
1827 target::Instructions::kMonomorphicEntryOffsetAOT);
1828 LoadClassId(IP0, R0);
1829 cmp(R5, Operand(IP0, LSL, 1), kObjectBytes);
1830 b(&miss, NE);
1831
1832 // Fall through to unchecked entry.
1833 ASSERT_EQUAL(CodeSize() - start,
1834 target::Instructions::kPolymorphicEntryOffsetAOT);
1835
1836 set_use_far_branches(saved_use_far_branches);
1837}
1838
1839void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
1840 has_monomorphic_entry_ = true;
1841 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
1842 brk(0);
1843 }
1844 b(label);
1845 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
1846 brk(0);
1847 }
1848}
1849
1850void Assembler::CombineHashes(Register hash, Register other) {
1851 // hash += other_hash
1852 add(hash, hash, Operand(other), kFourBytes);
1853 // hash += hash << 10
1854 add(hash, hash, Operand(hash, LSL, 10), kFourBytes);
1855 // hash ^= hash >> 6
1856 eor(hash, hash, Operand(hash, LSR, 6), kFourBytes);
1857}
1858
1859void Assembler::FinalizeHashForSize(intptr_t bit_size,
1860 Register hash,
1861 Register scratch) {
1862 ASSERT(bit_size > 0); // Can't avoid returning 0 if there are no hash bits!
1863 // While any 32-bit hash value fits in X bits, where X > 32, the caller may
1864 // reasonably expect that the returned values fill the entire bit space.
1865 ASSERT(bit_size <= kBitsPerInt32);
1866 // hash += hash << 3;
1867 add(hash, hash, Operand(hash, LSL, 3), kFourBytes);
1868 // hash ^= hash >> 11; // Logical shift, unsigned hash.
1869 eor(hash, hash, Operand(hash, LSR, 11), kFourBytes);
1870 // hash += hash << 15;
1871 if (bit_size < kBitsPerInt32) {
1872 add(hash, hash, Operand(hash, LSL, 15), kFourBytes);
1873 // Size to fit.
1874 andis(hash, hash, Immediate(Utils::NBitMask(bit_size)));
1875 } else {
1876 adds(hash, hash, Operand(hash, LSL, 15), kFourBytes);
1877 }
1878 // return (hash == 0) ? 1 : hash;
1879 cinc(hash, hash, ZERO);
1880}
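
// A plain C sketch of CombineHashes and FinalizeHashForSize above, assuming
// 32-bit unsigned arithmetic (function and parameter names are illustrative):
//
//   uint32_t CombineHashes(uint32_t hash, uint32_t other) {
//     hash += other;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//     return hash;
//   }
//   uint32_t FinalizeHash(uint32_t hash, int bit_size) {
//     hash += hash << 3;
//     hash ^= hash >> 11;
//     hash += hash << 15;
//     if (bit_size < 32) hash &= (1u << bit_size) - 1;
//     return (hash == 0) ? 1 : hash;  // never return 0
//   }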
1881
1882#ifndef PRODUCT
1883void Assembler::MaybeTraceAllocation(intptr_t cid,
1884 Label* trace,
1885 Register temp_reg,
1886 JumpDistance distance) {
1887 ASSERT(cid > 0);
1888
1889 LoadIsolateGroup(temp_reg);
1890 ldr(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
1891 ldr(temp_reg,
1892 Address(temp_reg,
1893 target::ClassTable::allocation_tracing_state_table_offset()));
1894 LoadFromOffset(temp_reg, temp_reg,
1895 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid),
1896 kUnsignedByte);
1897 cbnz(trace, temp_reg);
1898}
1899
1900void Assembler::MaybeTraceAllocation(Register cid,
1901 Label* trace,
1902 Register temp_reg,
1903 JumpDistance distance) {
1904 ASSERT(temp_reg != cid);
1905 LoadIsolateGroup(temp_reg);
1906 ldr(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
1907 ldr(temp_reg,
1908 Address(temp_reg,
1909 target::ClassTable::allocation_tracing_state_table_offset()));
1910 AddRegisters(temp_reg, cid);
1911 LoadFromOffset(temp_reg, temp_reg,
1912 target::ClassTable::AllocationTracingStateSlotOffsetFor(0),
1913 kUnsignedByte);
1914 cbnz(trace, temp_reg);
1915}
1916#endif // !PRODUCT
1917
1918void Assembler::TryAllocateObject(intptr_t cid,
1919 intptr_t instance_size,
1920 Label* failure,
1921 JumpDistance distance,
1922 Register instance_reg,
1923 Register temp_reg) {
1924 ASSERT(failure != nullptr);
1925 ASSERT(instance_size != 0);
1926 ASSERT(instance_reg != temp_reg);
1927 ASSERT(temp_reg != kNoRegister);
1928 ASSERT(Utils::IsAligned(instance_size,
1929 target::ObjectAlignment::kObjectAlignment));
1930 if (FLAG_inline_alloc &&
1931 target::Heap::IsAllocatableInNewSpace(instance_size)) {
1932 // If this allocation is traced, the program will jump to the failure path
1933 // (i.e. the allocation stub), which will allocate the object and trace the
1934 // allocation call site.
1935 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg));
1936 RELEASE_ASSERT((target::Thread::top_offset() + target::kWordSize) ==
1937 target::Thread::end_offset());
1938 ldp(instance_reg, temp_reg,
1939 Address(THR, target::Thread::top_offset(), Address::PairOffset));
1940 // instance_reg: current top (next object start).
1941 // temp_reg: heap end
1942
1943 // TODO(koda): Protect against unsigned overflow here.
1944 AddImmediate(instance_reg, instance_size);
1945 // instance_reg: potential top (next object start).
1946 // Fail if the heap end is unsigned less than or equal to the new heap top.
1947 cmp(temp_reg, Operand(instance_reg));
1948 b(failure, LS);
1949 CheckAllocationCanary(instance_reg, temp_reg);
1950
1951 // Successfully allocated the object, now update top to point to the
1952 // next object start and store the tags in the object's header.
1953 str(instance_reg, Address(THR, target::Thread::top_offset()));
1954 // Move instance_reg back to the start of the object and tag it.
1955 AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
1956
1957 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
1958 LoadImmediate(temp_reg, tags);
1959 Store(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
1960 } else {
1961 b(failure);
1962 }
1963}
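
// The fast path above is a bump-pointer allocation against the thread-local
// top/end pair. A rough sketch of the logic (accessors are illustrative):
//
//   uword top = thread->top();
//   uword new_top = top + instance_size;
//   if (thread->end() <= new_top) goto failure;  // the LS branch above
//   thread->set_top(new_top);
//   // the result is the old top, tagged with kHeapObjectTag; its header word
//   // becomes MakeTagWordForNewSpaceObject(cid, instance_size)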
1964
1965void Assembler::TryAllocateArray(intptr_t cid,
1966 intptr_t instance_size,
1967 Label* failure,
1968 Register instance,
1969 Register end_address,
1970 Register temp1,
1971 Register temp2) {
1972 if (FLAG_inline_alloc &&
1973 target::Heap::IsAllocatableInNewSpace(instance_size)) {
1974 // If this allocation is traced, the program will jump to the failure path
1975 // (i.e. the allocation stub), which will allocate the object and trace the
1976 // allocation call site.
1977 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp1));
1978 // Potential new object start.
1979 ldr(instance, Address(THR, target::Thread::top_offset()));
1980 AddImmediateSetFlags(end_address, instance, instance_size);
1981 b(failure, CS); // Fail on unsigned overflow.
1982
1983 // Check if the allocation fits into the remaining space.
1984 // instance: potential new object start.
1985 // end_address: potential next object start.
1986 ldr(temp2, Address(THR, target::Thread::end_offset()));
1987 cmp(end_address, Operand(temp2));
1988 b(failure, CS);
1989 CheckAllocationCanary(instance, temp2);
1990
1991 // Successfully allocated the object(s), now update top to point to
1992 // next object start and initialize the object.
1993 str(end_address, Address(THR, target::Thread::top_offset()));
1994 add(instance, instance, Operand(kHeapObjectTag));
1995 NOT_IN_PRODUCT(LoadImmediate(temp2, instance_size));
1996
1997 // Initialize the tags.
1998 // instance: new object start as a tagged pointer.
1999 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
2000 LoadImmediate(temp2, tags);
2001 str(temp2, FieldAddress(instance, target::Object::tags_offset()));
2002 } else {
2003 b(failure);
2004 }
2005}
2006
2007void Assembler::CopyMemoryWords(Register src,
2008 Register dst,
2009 Register size,
2010 Register temp) {
2011 Label loop, done;
2012 __ cbz(&done, size);
2013 __ Bind(&loop);
2014 __ ldr(temp, Address(src, target::kWordSize, Address::PostIndex));
2015 __ str(temp, Address(dst, target::kWordSize, Address::PostIndex));
2016 __ subs(size, size, Operand(target::kWordSize));
2017 __ b(&loop, NOT_ZERO);
2018 __ Bind(&done);
2019}
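
// CopyMemoryWords above is equivalent to the following word-by-word copy,
// where src and dst are word pointers and size is a byte count that must be
// a multiple of the word size; both pointers advance past the copied region
// and size ends at zero:
//
//   while (size != 0) {
//     *dst++ = *src++;            // one target word per iteration
//     size -= target::kWordSize;
//   }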
2020
2021void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
2022 // Emit "bl <offset>".
2023 EmitUnconditionalBranchOp(BL, 0);
2024
2025 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
2026 PcRelativeCallPattern::kLengthInBytes);
2027 pattern.set_distance(offset_into_target);
2028}
2029
2030void Assembler::GenerateUnRelocatedPcRelativeTailCall(
2031 intptr_t offset_into_target) {
2032 // Emit "b <offset>".
2033 EmitUnconditionalBranchOp(B, 0);
2034 PcRelativeTailCallPattern pattern(buffer_.contents() + buffer_.Size() -
2035 PcRelativeTailCallPattern::kLengthInBytes);
2036 pattern.set_distance(offset_into_target);
2037}
2038
2039bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
2040 bool is_external,
2041 intptr_t cid,
2042 intptr_t index_scale) {
2043 if (!IsSafeSmi(constant)) return false;
2044 const int64_t index = target::SmiValue(constant);
2045 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
2046 if (!Utils::IsInt(32, offset)) {
2047 return false;
2048 }
2049 return Address::CanHoldOffset(static_cast<int32_t>(offset), Address::Offset,
2050 Address::OperandSizeFor(cid));
2051}
2052
2053Address Assembler::ElementAddressForIntIndex(bool is_external,
2054 intptr_t cid,
2055 intptr_t index_scale,
2056 Register array,
2057 intptr_t index) const {
2058 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
2059 ASSERT(Utils::IsInt(32, offset));
2060 const OperandSize size = Address::OperandSizeFor(cid);
2061 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
2062 return Address(array, static_cast<int32_t>(offset));
2063}
2064
2065void Assembler::ComputeElementAddressForIntIndex(Register address,
2066 bool is_external,
2067 intptr_t cid,
2068 intptr_t index_scale,
2069 Register array,
2070 intptr_t index) {
2071 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
2072 AddImmediate(address, array, offset);
2073}
2074
2075Address Assembler::ElementAddressForRegIndex(bool is_external,
2076 intptr_t cid,
2077 intptr_t index_scale,
2078 bool index_unboxed,
2079 Register array,
2080 Register index,
2081 Register temp) {
2082 return ElementAddressForRegIndexWithSize(
2083 is_external, cid, Address::OperandSizeFor(cid), index_scale,
2084 index_unboxed, array, index, temp);
2085}
2086
2087Address Assembler::ElementAddressForRegIndexWithSize(bool is_external,
2088 intptr_t cid,
2089 OperandSize size,
2090 intptr_t index_scale,
2091 bool index_unboxed,
2092 Register array,
2093 Register index,
2094 Register temp) {
2095 // Unless unboxed, the index is expected to be smi-tagged (i.e. LSL 1) for all arrays.
2096 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
2097 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
2098 const int32_t offset = HeapDataOffset(is_external, cid);
2099#if !defined(DART_COMPRESSED_POINTERS)
2100 const bool index_is_32bit = false;
2101#else
2102 const bool index_is_32bit = !index_unboxed;
2103#endif
2104 ASSERT(array != temp);
2105 ASSERT(index != temp);
2106 if ((offset == 0) && (shift == 0)) {
2107 if (index_is_32bit) {
2108 return Address(array, index, SXTW, Address::Unscaled);
2109 } else {
2110 return Address(array, index, UXTX, Address::Unscaled);
2111 }
2112 } else if (shift < 0) {
2113 ASSERT(shift == -1);
2114 if (index_is_32bit) {
2115 AsrImmediate(temp, index, 1, kFourBytes);
2116 add(temp, array, Operand(temp, SXTW, 0));
2117 } else {
2118 add(temp, array, Operand(index, ASR, 1));
2119 }
2120 } else {
2121 if (index_is_32bit) {
2122 add(temp, array, Operand(index, SXTW, shift));
2123 } else {
2124 add(temp, array, Operand(index, LSL, shift));
2125 }
2126 }
2127 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
2128 return Address(temp, offset);
2129}
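
// Worked example of the shift arithmetic above (illustrative values): for a
// boxed Smi index and index_scale == 8, boxing_shift == -1 and
// ShiftForPowerOfTwo(8) == 3, so shift == 2. The tagged index already carries
// a factor of 2, so scaling it by 4 (LSL 2) yields the intended stride of
// 8 bytes per element.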
2130
2131void Assembler::ComputeElementAddressForRegIndex(Register address,
2132 bool is_external,
2133 intptr_t cid,
2134 intptr_t index_scale,
2135 bool index_unboxed,
2136 Register array,
2137 Register index) {
2138 // Unless unboxed, the index is expected to be smi-tagged (i.e. LSL 1) for all arrays.
2139 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
2140 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
2141 const int32_t offset = HeapDataOffset(is_external, cid);
2142#if !defined(DART_COMPRESSED_POINTERS)
2143 const bool index_is_32bit = false;
2144#else
2145 const bool index_is_32bit = !index_unboxed;
2146#endif
2147 if (shift == 0) {
2148 if (index_is_32bit) {
2149 add(address, array, Operand(index, SXTW, 0));
2150 } else {
2151 add(address, array, Operand(index));
2152 }
2153 } else if (shift < 0) {
2154 ASSERT(shift == -1);
2155 if (index_is_32bit) {
2156 sxtw(index, index);
2157 add(address, array, Operand(index, ASR, 1));
2158 } else {
2159 add(address, array, Operand(index, ASR, 1));
2160 }
2161 } else {
2162 if (index_is_32bit) {
2163 add(address, array, Operand(index, SXTW, shift));
2164 } else {
2165 add(address, array, Operand(index, LSL, shift));
2166 }
2167 }
2168 if (offset != 0) {
2169 AddImmediate(address, offset);
2170 }
2171}
2172
2173void Assembler::LoadStaticFieldAddress(Register address,
2174 Register field,
2175 Register scratch) {
2176 LoadCompressedSmiFieldFromOffset(
2177 scratch, field, target::Field::host_offset_or_field_id_offset());
2178 const intptr_t field_table_offset =
2179 compiler::target::Thread::field_table_values_offset();
2180 LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
2181 add(address, address,
2182 Operand(scratch, LSL, target::kWordSizeLog2 - kSmiTagShift));
2183}
2184
2185#if defined(DART_COMPRESSED_POINTERS)
2186void Assembler::LoadCompressedFieldAddressForRegOffset(
2187 Register address,
2188 Register instance,
2189 Register offset_in_compressed_words_as_smi) {
2190 add(address, instance,
2191 Operand(offset_in_compressed_words_as_smi, LSL,
2192 target::kCompressedWordSizeLog2 - kSmiTagShift));
2193 AddImmediate(address, -kHeapObjectTag);
2194}
2195#endif
2196
2197void Assembler::LoadFieldAddressForRegOffset(Register address,
2198 Register instance,
2199 Register offset_in_words_as_smi) {
2200 add(address, instance,
2201 Operand(offset_in_words_as_smi, LSL,
2202 target::kWordSizeLog2 - kSmiTagShift));
2203 AddImmediate(address, -kHeapObjectTag);
2204}
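
// Example of the address computation above (illustrative): a field offset of
// 3 words arrives as the Smi 3, i.e. 3 << kSmiTagShift. Shifting that left by
// (kWordSizeLog2 - kSmiTagShift) yields 3 * kWordSize bytes, and subtracting
// kHeapObjectTag removes the object tag from the instance pointer.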
2205
2206void Assembler::PushRegisters(const RegisterSet& regs) {
2207 VRegister vprev = kNoVRegister;
2208 // Store fpu registers with the lowest register number at the lowest
2209 // address.
2210 for (intptr_t i = kNumberOfVRegisters - 1; i >= 0; --i) {
2211 VRegister fpu_reg = static_cast<VRegister>(i);
2212 if (regs.ContainsFpuRegister(fpu_reg)) {
2213 if (vprev != kNoVRegister) {
2214 PushQuadPair(/*low=*/fpu_reg, /*high=*/vprev);
2215 vprev = kNoVRegister;
2216 } else {
2217 vprev = fpu_reg;
2218 }
2219 }
2220 }
2221 if (vprev != kNoVRegister) {
2222 PushQuad(vprev);
2223 }
2224
2225 // The order in which the registers are pushed must match the order
2226 // in which the registers are encoded in the safe point's stack map.
2227 Register prev = kNoRegister;
2228 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
2229 Register reg = static_cast<Register>(i);
2230 if (regs.ContainsRegister(reg)) {
2231 if (prev != kNoRegister) {
2232 PushPair(/*low=*/reg, /*high=*/prev);
2233 prev = kNoRegister;
2234 } else {
2235 prev = reg;
2236 }
2237 }
2238 }
2239 if (prev != kNoRegister) {
2240 Push(prev);
2241 }
2242}
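
// Example (illustrative): pushing {R2, R5, R7} walks the registers from high
// to low, so R7 and R5 are stored as one stp pair and R2 is pushed on its
// own. PopRegisters below walks from low to high and pops the single
// leftover register first, mirroring this layout exactly.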
2243
2244void Assembler::PopRegisters(const RegisterSet& regs) {
2245 bool pop_single = (regs.CpuRegisterCount() & 1) == 1;
2246 Register prev = kNoRegister;
2247 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
2248 Register reg = static_cast<Register>(i);
2249 if (regs.ContainsRegister(reg)) {
2250 if (pop_single) {
2251 // Emit the leftover pop at the beginning instead of the end to
2252 // mirror PushRegisters.
2253 Pop(reg);
2254 pop_single = false;
2255 } else if (prev != kNoRegister) {
2256 PopPair(/*low=*/prev, /*high=*/reg);
2257 prev = kNoRegister;
2258 } else {
2259 prev = reg;
2260 }
2261 }
2262 }
2263 ASSERT(prev == kNoRegister);
2264
2265 pop_single = (regs.FpuRegisterCount() & 1) == 1;
2266 VRegister vprev = kNoVRegister;
2267 // Fpu registers have the lowest register number at the lowest address.
2268 for (intptr_t i = 0; i < kNumberOfVRegisters; ++i) {
2269 VRegister fpu_reg = static_cast<VRegister>(i);
2270 if (regs.ContainsFpuRegister(fpu_reg)) {
2271 if (pop_single) {
2272 PopQuad(fpu_reg);
2273 pop_single = false;
2274 } else if (vprev != kNoVRegister) {
2275 PopQuadPair(/*low=*/vprev, /*high=*/fpu_reg);
2276 vprev = kNoVRegister;
2277 } else {
2278 vprev = fpu_reg;
2279 }
2280 }
2281 }
2282 ASSERT(vprev == kNoVRegister);
2283}
2284
2285void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
2286 // Use STP to push registers in pairs.
2287 Register pending_reg = kNoRegister;
2288 for (Register reg : regs) {
2289 if (pending_reg != kNoRegister) {
2290 PushPair(reg, pending_reg);
2291 pending_reg = kNoRegister;
2292 } else {
2293 pending_reg = reg;
2294 }
2295 }
2296 if (pending_reg != kNoRegister) {
2297 Push(pending_reg);
2298 }
2299}
2300
2301void Assembler::PushNativeCalleeSavedRegisters() {
2302 // Save the callee-saved registers.
2303 // We use str instead of the Push macro because we will be pushing the PP
2304 // register while it is not holding a pool pointer, since we are coming from
2305 // C++ code.
2306 Register prev = kNoRegister;
2307 for (int i = kAbiFirstPreservedCpuReg; i <= kAbiLastPreservedCpuReg; i++) {
2308 const Register r = static_cast<Register>(i);
2309 if (prev != kNoRegister) {
2310 stp(/*low=*/r, /*high=*/prev,
2311 Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
2312 prev = kNoRegister;
2313 } else {
2314 prev = r;
2315 }
2316 }
2317 if (prev != kNoRegister) {
2318 str(prev, Address(SP, -1 * target::kWordSize, Address::PreIndex));
2319 }
2320
2321 // Save the bottom 64-bits of callee-saved V registers.
2322 VRegister vprev = kNoVRegister;
2323 for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) {
2324 const VRegister r = static_cast<VRegister>(i);
2325 if (vprev != kNoVRegister) {
2326 PushDoublePair(/*low=*/r, /*high=*/vprev);
2327 vprev = kNoVRegister;
2328 } else {
2329 vprev = r;
2330 }
2331 }
2332 if (vprev != kNoVRegister) {
2333 PushDouble(vprev);
2334 }
2335}
2336
2337void Assembler::PopNativeCalleeSavedRegisters() {
2338 // Restore the bottom 64-bits of callee-saved V registers.
2339 bool pop_single = (kAbiPreservedFpuRegCount & 1) != 0;
2340 VRegister vprev = kNoVRegister;
2341 for (int i = kAbiLastPreservedFpuReg; i >= kAbiFirstPreservedFpuReg; i--) {
2342 const VRegister r = static_cast<VRegister>(i);
2343 if (pop_single) {
2344 PopDouble(r);
2345 pop_single = false;
2346 } else if (vprev != kNoVRegister) {
2347 PopDoublePair(/*low=*/vprev, /*high=*/r);
2348 vprev = kNoVRegister;
2349 } else {
2350 vprev = r;
2351 }
2352 }
2353
2354 // Restore C++ ABI callee-saved registers.
2355 // We use ldr instead of the Pop macro because we will be popping the PP
2356 // register while it is not holding a pool pointer, since we are returning to
2357 // C++ code. We also skip the Dart stack pointer SP, since we are still
2358 // using it as the stack pointer.
2359 pop_single = (kAbiPreservedCpuRegCount & 1) != 0;
2360 Register prev = kNoRegister;
2361 for (int i = kAbiLastPreservedCpuReg; i >= kAbiFirstPreservedCpuReg; i--) {
2362 Register r = static_cast<Register>(i);
2363 if (pop_single) {
2364 ldr(r, Address(SP, 1 * target::kWordSize, Address::PostIndex));
2365 pop_single = false;
2366 } else if (prev != kNoRegister) {
2367 ldp(/*low=*/prev, /*high=*/r,
2368 Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
2369 prev = kNoRegister;
2370 } else {
2371 prev = r;
2372 }
2373 }
2374}
2375
2376bool Assembler::CanGenerateCbzTbz(Register rn, Condition cond) {
2377 if (rn == CSP) {
2378 return false;
2379 }
2380 switch (cond) {
2381 case EQ: // equal
2382 case NE: // not equal
2383 case MI: // minus/negative
2384 case LT: // signed less than
2385 case PL: // plus/positive or zero
2386 case GE: // signed greater than or equal
2387 return true;
2388 default:
2389 return false;
2390 }
2391}
2392
2393void Assembler::GenerateCbzTbz(Register rn,
2394 Condition cond,
2395 Label* label,
2396 OperandSize sz) {
2397 ASSERT((sz == kEightBytes) || (sz == kFourBytes));
2398 const int32_t sign_bit = sz == kEightBytes ? 63 : 31;
2399 ASSERT(rn != CSP);
2400 switch (cond) {
2401 case EQ: // equal
2402 cbz(label, rn, sz);
2403 return;
2404 case NE: // not equal
2405 cbnz(label, rn, sz);
2406 return;
2407 case MI: // minus/negative
2408 case LT: // signed less than
2409 tbnz(label, rn, sign_bit);
2410 return;
2411 case PL: // plus/positive or zero
2412 case GE: // signed greater than or equal
2413 tbz(label, rn, sign_bit);
2414 return;
2415 default:
2416 // Only conditions above allow single instruction emission.
2417 UNREACHABLE();
2418 }
2419}
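
// Example (illustrative): GenerateCbzTbz(R0, LT, &label, kEightBytes) emits a
// single "tbnz &label, R0, #63" instead of a compare-against-zero followed by
// a conditional branch, since for a comparison with zero LT holds exactly
// when the sign bit is set.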
2420
2421void Assembler::RangeCheck(Register value,
2422 Register temp,
2423 intptr_t low,
2424 intptr_t high,
2425 RangeCheckCondition condition,
2426 Label* target) {
2427 auto cc = condition == kIfInRange ? LS : HI;
2428 Register to_check = temp != kNoRegister ? temp : value;
2429 AddImmediate(to_check, value, -low);
2430 CompareImmediate(to_check, high - low);
2431 b(target, cc);
2432}
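
// The single unsigned comparison above uses the standard range-check trick:
// low <= value && value <= high is equivalent to
// (uword)(value - low) <= (uword)(high - low), so kIfInRange branches to
// target on LS (unsigned <=) and kIfNotInRange on HI (unsigned >).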
2433
2434} // namespace compiler
2435
2436} // namespace dart
2437
2438#endif // defined(TARGET_ARCH_ARM64)