assembler_arm64.cc
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // NOLINT
6#if defined(TARGET_ARCH_ARM64)
7
8#define SHOULD_NOT_INCLUDE_RUNTIME
9
12#include "vm/cpu.h"
13#include "vm/instructions.h"
14#include "vm/simulator.h"
15#include "vm/tags.h"
16
17namespace dart {
18
19DECLARE_FLAG(bool, check_code_pointer);
20DECLARE_FLAG(bool, precompiled_mode);
21
22DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");
23
24// For use by LR related macros (e.g. CLOBBERS_LR).
25#define __ this->
26
27namespace compiler {
28
31 switch (rep) {
32 case kUnboxedFloat:
33 return kSWord;
34 case kUnboxedDouble:
35 return kDWord;
36 case kUnboxedInt32x4:
37 case kUnboxedFloat32x4:
38 case kUnboxedFloat64x2:
39 return kQWord;
40 default:
 41 UNREACHABLE();
 42 }
43}
44
45Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
46 intptr_t far_branch_level)
47 : AssemblerBase(object_pool_builder),
48 use_far_branches_(far_branch_level != 0),
49 constant_pool_allowed_(false) {
50 generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
51 Call(Address(THR,
52 target::Thread::write_barrier_wrappers_thread_offset(reg)));
53 };
54 generate_invoke_array_write_barrier_ = [&]() {
55 Call(
56 Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
57 };
58}
59
60void Assembler::Emit(int32_t value) {
61 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
62 buffer_.Emit<int32_t>(value);
63}
64
65void Assembler::Emit64(int64_t value) {
66 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
67 buffer_.Emit<int64_t>(value);
68}
69
70int32_t Assembler::BindImm26Branch(int64_t position, int64_t dest) {
71 ASSERT(CanEncodeImm26BranchOffset(dest));
72 const int32_t next = buffer_.Load<int32_t>(position);
73 const int32_t encoded = EncodeImm26BranchOffset(dest, next);
74 buffer_.Store<int32_t>(position, encoded);
75 return DecodeImm26BranchOffset(next);
76}
77
78int32_t Assembler::BindImm19Branch(int64_t position, int64_t dest) {
79 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) {
80 // Far branches are enabled, and we can't encode the branch offset in
81 // 19 bits.
82
83 // Grab the guarding branch instruction.
84 const int32_t guard_branch =
85 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
86
87 // Grab the far branch instruction.
88 const int32_t far_branch =
89 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
90 const Condition c = DecodeImm19BranchCondition(guard_branch);
91 ASSERT(c != NV);
92
93 // Grab the link to the next branch.
94 const int32_t next = DecodeImm26BranchOffset(far_branch);
95
 96 // dest is the offset from the guarding branch instruction.
97 // Correct it to be from the following instruction.
98 const int64_t offset = dest - Instr::kInstrSize;
99
100 // Encode the branch.
101 const int32_t encoded_branch = EncodeImm26BranchOffset(offset, far_branch);
102
103 // Write the far branch into the buffer and link to the next branch.
104 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, encoded_branch);
105 return next;
106 } else if (use_far_branches() && CanEncodeImm19BranchOffset(dest)) {
107 // We assembled a far branch, but we don't need it. Replace it with a near
108 // branch.
109
110 // Grab the guarding branch instruction.
111 const int32_t guard_branch =
112 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
113
114 // Grab the far branch instruction.
115 const int32_t far_branch =
116 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
117
118 // Grab the link to the next branch.
119 const int32_t next = DecodeImm26BranchOffset(far_branch);
120
121 // Re-target the guarding branch and flip the conditional sense.
122 int32_t encoded_guard_branch = EncodeImm19BranchOffset(dest, guard_branch);
123 const Condition c = DecodeImm19BranchCondition(encoded_guard_branch);
124 encoded_guard_branch =
125 EncodeImm19BranchCondition(InvertCondition(c), encoded_guard_branch);
126
127 // Write back the re-encoded instructions. The far branch becomes a nop.
128 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
129 encoded_guard_branch);
130 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
131 Instr::kNopInstruction);
132 return next;
133 } else {
134 const int32_t next = buffer_.Load<int32_t>(position);
135 const int32_t encoded = EncodeImm19BranchOffset(dest, next);
136 buffer_.Store<int32_t>(position, encoded);
137 return DecodeImm19BranchOffset(next);
138 }
139}
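// Illustrative sketch of the instruction pair the two far-branch cases above
// operate on, assuming the emitter pairs each conditional branch with a
// trailing unconditional branch when far branches are enabled:
//   b.<inverted cond>  #8        ; guard: skip over the far branch
//   b                  <imm26>   ; far branch with 26-bit range
// When the final offset turns out to fit in 19 bits, Bind rewrites the pair
// into a direct b.<cond> <imm19> followed by a nop, as in the second case.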
140
141int32_t Assembler::BindImm14Branch(int64_t position, int64_t dest) {
142 if (use_far_branches() && !CanEncodeImm14BranchOffset(dest)) {
143 // Far branches are enabled, and we can't encode the branch offset in
144 // 14 bits.
145
146 // Grab the guarding branch instruction.
147 const int32_t guard_branch =
148 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
149
150 // Grab the far branch instruction.
151 const int32_t far_branch =
152 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
153 const Condition c = DecodeImm14BranchCondition(guard_branch);
154 ASSERT(c != NV);
155
156 // Grab the link to the next branch.
157 const int32_t next = DecodeImm26BranchOffset(far_branch);
158
 159 // dest is the offset from the guarding branch instruction.
160 // Correct it to be from the following instruction.
161 const int64_t offset = dest - Instr::kInstrSize;
162
163 // Encode the branch.
164 const int32_t encoded_branch = EncodeImm26BranchOffset(offset, far_branch);
165
166 // Write the far branch into the buffer and link to the next branch.
167 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, encoded_branch);
168 return next;
169 } else if (use_far_branches() && CanEncodeImm14BranchOffset(dest)) {
170 // We assembled a far branch, but we don't need it. Replace it with a near
171 // branch.
172
173 // Grab the guarding branch instruction.
174 const int32_t guard_branch =
175 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
176
177 // Grab the far branch instruction.
178 const int32_t far_branch =
179 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
180
181 // Grab the link to the next branch.
182 const int32_t next = DecodeImm26BranchOffset(far_branch);
183
184 // Re-target the guarding branch and flip the conditional sense.
185 int32_t encoded_guard_branch = EncodeImm14BranchOffset(dest, guard_branch);
186 const Condition c = DecodeImm14BranchCondition(encoded_guard_branch);
187 encoded_guard_branch =
188 EncodeImm14BranchCondition(InvertCondition(c), encoded_guard_branch);
189
190 // Write back the re-encoded instructions. The far branch becomes a nop.
191 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
192 encoded_guard_branch);
193 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
194 Instr::kNopInstruction);
195 return next;
196 } else {
197 const int32_t next = buffer_.Load<int32_t>(position);
198 const int32_t encoded = EncodeImm14BranchOffset(dest, next);
199 buffer_.Store<int32_t>(position, encoded);
200 return DecodeImm14BranchOffset(next);
201 }
202}
203
204void Assembler::ExtendValue(Register rd, Register rn, OperandSize sz) {
205 switch (sz) {
206 case kEightBytes:
207 if (rd == rn) return; // No operation needed.
208 return mov(rd, rn);
 209 case kUnsignedFourBytes:
 210 return uxtw(rd, rn);
211 case kFourBytes:
212 return sxtw(rd, rn);
 213 case kUnsignedTwoBytes:
 214 return uxth(rd, rn);
215 case kTwoBytes:
216 return sxth(rd, rn);
217 case kUnsignedByte:
218 return uxtb(rd, rn);
219 case kByte:
220 return sxtb(rd, rn);
221 default:
 222 UNREACHABLE();
 223 break;
224 }
225}
226
227// Equivalent to left rotate of kSmiTagSize.
228static constexpr intptr_t kBFMTagRotate = kBitsPerInt64 - kSmiTagSize;
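// With kSmiTagSize == 1 this makes kBFMTagRotate == 63, so for s < 63
// sbfm(rd, rn, 63, s) behaves as SBFIZ rd, rn, #1, #(s + 1): the low s + 1
// bits of rn are shifted left by the Smi tag bit and sign-extended, i.e.
// "extend and Smi-tag" in one instruction (ubfm gives the zero-extending
// counterpart).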
229
230void Assembler::ExtendAndSmiTagValue(Register rd, Register rn, OperandSize sz) {
231 switch (sz) {
232 case kEightBytes:
233 return sbfm(rd, rn, kBFMTagRotate, target::kSmiBits + 1);
 234 case kUnsignedFourBytes:
 235 return ubfm(rd, rn, kBFMTagRotate, kBitsPerInt32 - 1);
236 case kFourBytes:
237 return sbfm(rd, rn, kBFMTagRotate, kBitsPerInt32 - 1);
 238 case kUnsignedTwoBytes:
 239 return ubfm(rd, rn, kBFMTagRotate, kBitsPerInt16 - 1);
240 case kTwoBytes:
241 return sbfm(rd, rn, kBFMTagRotate, kBitsPerInt16 - 1);
242 case kUnsignedByte:
243 return ubfm(rd, rn, kBFMTagRotate, kBitsPerInt8 - 1);
244 case kByte:
245 return sbfm(rd, rn, kBFMTagRotate, kBitsPerInt8 - 1);
246 default:
 247 UNREACHABLE();
 248 break;
249 }
250}
251
252void Assembler::Bind(Label* label) {
253 ASSERT(!label->IsBound());
254 const intptr_t bound_pc = buffer_.Size();
255
256 while (label->IsLinked()) {
257 const int64_t position = label->Position();
258 const int64_t dest = bound_pc - position;
259 const int32_t instr = buffer_.Load<int32_t>(position);
260 if (IsTestAndBranch(instr)) {
261 label->position_ = BindImm14Branch(position, dest);
262 } else if (IsConditionalBranch(instr) || IsCompareAndBranch(instr)) {
263 label->position_ = BindImm19Branch(position, dest);
264 } else if (IsUnconditionalBranch(instr)) {
265 label->position_ = BindImm26Branch(position, dest);
266 } else {
267 UNREACHABLE();
268 }
269 }
270 label->BindTo(bound_pc, lr_state());
271}
272
273void Assembler::Align(intptr_t alignment, intptr_t offset) {
274 ASSERT(Utils::IsPowerOfTwo(alignment));
275 intptr_t pos = offset + buffer_.GetPosition();
276 intptr_t mod = pos & (alignment - 1);
277 if (mod == 0) {
278 return;
279 }
280 intptr_t bytes_needed = alignment - mod;
281 ASSERT((bytes_needed % Instr::kInstrSize) == 0);
282 while (bytes_needed > 0) {
283 nop();
284 bytes_needed -= Instr::kInstrSize;
285 }
286 ASSERT(((offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
287}
288
289void Assembler::TsanLoadAcquire(Register addr) {
290 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
291 MoveRegister(R0, addr);
292 rt.Call(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
293}
294
295void Assembler::TsanStoreRelease(Register addr) {
296 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
297 MoveRegister(R0, addr);
298 rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
299}
300
301static int CountLeadingZeros(uint64_t value, int width) {
302 if (width == 64) return Utils::CountLeadingZeros64(value);
303 if (width == 32) return Utils::CountLeadingZeros32(value);
304 UNREACHABLE();
305 return 0;
306}
307
308static int CountOneBits(uint64_t value, int width) {
309 // Mask out unused bits to ensure that they are not counted.
310 value &= (0xffffffffffffffffULL >> (64 - width));
311
312 value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
313 value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
314 value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
315 value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
316 value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
317 value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
318
319 return value;
320}
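// This is the usual SWAR popcount: each line folds adjacent bit groups, so
// e.g. CountOneBits(0x00ff00ff00ff00ffULL, 64) == 32.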
321
322// Test if a given value can be encoded in the immediate field of a logical
323// instruction.
324// If it can be encoded, the function returns true, and values pointed to by n,
325// imm_s and imm_r are updated with immediates encoded in the format required
326// by the corresponding fields in the logical instruction.
327// If it can't be encoded, the function returns false, and the operand is
328// undefined.
329bool Operand::IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op) {
330 ASSERT(imm_op != nullptr);
 331 ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
 332 if (width == kWRegSizeInBits) {
333 value &= 0xffffffffUL;
334 }
335 uint8_t n = 0;
336 uint8_t imm_s = 0;
337 uint8_t imm_r = 0;
338
339 // Logical immediates are encoded using parameters n, imm_s and imm_r using
340 // the following table:
341 //
342 // N imms immr size S R
343 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
344 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
345 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
346 // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
347 // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
348 // 0 11110s xxxxxr 2 UInt(s) UInt(r)
349 // (s bits must not be all set)
350 //
351 // A pattern is constructed of size bits, where the least significant S+1
352 // bits are set. The pattern is rotated right by R, and repeated across a
353 // 32 or 64-bit value, depending on destination register width.
354 //
355 // To test if an arbitrary immediate can be encoded using this scheme, an
356 // iterative algorithm is used.
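// Worked example (illustrative): value = 0x0000000000000ff0, width = 64.
// This is a run of 8 ones rotated left by 4, so it is encodable. With
// lead_zero = 52, trail_zero = 4 and set_bits = 8, step 3 below holds
// (52 + 4 + 8 == 64), giving n = 1, imm_s = 0b000111 (S = 7, i.e. 8 set
// bits) and imm_r = 64 - 4 = 60 (rotate right by 60 == rotate left by 4).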
357
358 // 1. If the value has all set or all clear bits, it can't be encoded.
359 if ((value == 0) || (value == 0xffffffffffffffffULL) ||
360 ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
361 return false;
362 }
363
364 int lead_zero = CountLeadingZeros(value, width);
365 int lead_one = CountLeadingZeros(~value, width);
366 int trail_zero = Utils::CountTrailingZerosWord(value);
367 int trail_one = Utils::CountTrailingZerosWord(~value);
368 int set_bits = CountOneBits(value, width);
369
370 // The fixed bits in the immediate s field.
371 // If width == 64 (X reg), start at 0xFFFFFF80.
372 // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
373 // widths won't be executed.
374 int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
375 int imm_s_mask = 0x3F;
376
377 for (;;) {
378 // 2. If the value is two bits wide, it can be encoded.
379 if (width == 2) {
380 n = 0;
381 imm_s = 0x3C;
382 imm_r = (value & 3) - 1;
383 *imm_op = Operand(n, imm_s, imm_r);
384 return true;
385 }
386
387 n = (width == 64) ? 1 : 0;
388 imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
389 if ((lead_zero + set_bits) == width) {
390 imm_r = 0;
391 } else {
392 imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
393 }
394
395 // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
396 // the bit width of the value, it can be encoded.
397 if (lead_zero + trail_zero + set_bits == width) {
398 *imm_op = Operand(n, imm_s, imm_r);
399 return true;
400 }
401
402 // 4. If the sum of leading ones, trailing ones and unset bits in the
403 // value is equal to the bit width of the value, it can be encoded.
404 if (lead_one + trail_one + (width - set_bits) == width) {
405 *imm_op = Operand(n, imm_s, imm_r);
406 return true;
407 }
408
409 // 5. If the most-significant half of the bitwise value is equal to the
410 // least-significant half, return to step 2 using the least-significant
411 // half of the value.
412 uint64_t mask = (1ULL << (width >> 1)) - 1;
413 if ((value & mask) == ((value >> (width >> 1)) & mask)) {
414 width >>= 1;
415 set_bits >>= 1;
416 imm_s_fixed >>= 1;
417 continue;
418 }
419
420 // 6. Otherwise, the value can't be encoded.
421 return false;
422 }
423}
424
425void Assembler::LoadPoolPointer(Register pp) {
426 CheckCodePointer();
427 ldr(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
428
429 // When in the PP register, the pool pointer is untagged. When we
430 // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
431 // then untags when restoring from the stack. This will make loading from the
432 // object pool only one instruction for the first 4096 entries. Otherwise,
433 // because the offset wouldn't be aligned, it would be only one instruction
434 // for the first 64 entries.
435 sub(pp, pp, Operand(kHeapObjectTag));
436 set_constant_pool_allowed(pp == PP);
437}
438
439void Assembler::LoadWordFromPoolIndex(Register dst,
440 intptr_t index,
441 Register pp) {
442 ASSERT((pp != PP) || constant_pool_allowed());
443 ASSERT(dst != pp);
444 Operand op;
445 // PP is _un_tagged on ARM64.
446 const uint32_t offset = target::ObjectPool::element_offset(index);
447 const uint32_t upper20 = offset & 0xfffff000;
448 if (Address::CanHoldOffset(offset)) {
449 ldr(dst, Address(pp, offset));
450 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
451 Operand::Immediate) {
452 const uint32_t lower12 = offset & 0x00000fff;
453 ASSERT(Address::CanHoldOffset(lower12));
454 add(dst, pp, op);
455 ldr(dst, Address(dst, lower12));
456 } else {
457 const uint16_t offset_low = Utils::Low16Bits(offset);
458 const uint16_t offset_high = Utils::High16Bits(offset);
459 movz(dst, Immediate(offset_low), 0);
460 movk(dst, Immediate(offset_high), 1);
461 ldr(dst, Address(pp, dst));
462 }
463}
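// Illustrative offsets for the three cases above (pool offsets are 8-byte
// aligned):
//   0x40      -> ldr dst, [pp, #0x40]
//   0x12340   -> add dst, pp, #0x12000 ; ldr dst, [dst, #0x340]
//   0x1234568 -> movz dst, #0x4568 ; movk dst, #0x123, lsl #16 ;
//                ldr dst, [pp, dst]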
464
465void Assembler::StoreWordToPoolIndex(Register src,
466 intptr_t index,
467 Register pp) {
468 ASSERT((pp != PP) || constant_pool_allowed());
469 ASSERT(src != pp);
470 Operand op;
471 // PP is _un_tagged on ARM64.
472 const uint32_t offset = target::ObjectPool::element_offset(index);
473 const uint32_t upper20 = offset & 0xfffff000;
474 if (Address::CanHoldOffset(offset)) {
475 str(src, Address(pp, offset));
476 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
477 Operand::Immediate) {
478 const uint32_t lower12 = offset & 0x00000fff;
479 ASSERT(Address::CanHoldOffset(lower12));
480 add(TMP, pp, op);
481 str(src, Address(TMP, lower12));
482 } else {
483 const uint16_t offset_low = Utils::Low16Bits(offset);
484 const uint16_t offset_high = Utils::High16Bits(offset);
485 movz(TMP, Immediate(offset_low), 0);
486 movk(TMP, Immediate(offset_high), 1);
487 str(src, Address(pp, TMP));
488 }
489}
490
491void Assembler::LoadDoubleWordFromPoolIndex(Register lower,
492 Register upper,
493 intptr_t index) {
494 // This implementation needs to be kept in sync with
495 // [InstructionPattern::DecodeLoadDoubleWordFromPool].
496 ASSERT(constant_pool_allowed());
497 ASSERT(lower != PP && upper != PP);
498
499 Operand op;
500 // PP is _un_tagged on ARM64.
501 const uint32_t offset = target::ObjectPool::element_offset(index);
502 ASSERT(offset < (1 << 24));
503 const uint32_t upper20 = offset & 0xfffff000;
504 const uint32_t lower12 = offset & 0x00000fff;
505 if (Address::CanHoldOffset(offset, Address::PairOffset)) {
506 ldp(lower, upper, Address(PP, offset, Address::PairOffset));
507 } else if (Operand::CanHold(offset, kXRegSizeInBits, &op) ==
508 Operand::Immediate) {
509 add(TMP, PP, op);
510 ldp(lower, upper, Address(TMP, 0, Address::PairOffset));
511 } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
512 Operand::Immediate &&
513 Address::CanHoldOffset(lower12, Address::PairOffset)) {
514 add(TMP, PP, op);
515 ldp(lower, upper, Address(TMP, lower12, Address::PairOffset));
516 } else {
517 const uint32_t lower12 = offset & 0xfff;
518 const uint32_t higher12 = offset & 0xfff000;
519
520 Operand op_high, op_low;
521 bool ok = Operand::CanHold(higher12, kXRegSizeInBits, &op_high) ==
522 Operand::Immediate &&
523 Operand::CanHold(lower12, kXRegSizeInBits, &op_low) ==
524 Operand::Immediate;
526
527 add(TMP, PP, op_high);
528 add(TMP, TMP, op_low);
529 ldp(lower, upper, Address(TMP, 0, Address::PairOffset));
530 }
531}
532
533bool Assembler::CanLoadFromObjectPool(const Object& object) const {
534 ASSERT(IsOriginalObject(object));
535 if (!constant_pool_allowed()) {
536 return false;
537 }
538
539 DEBUG_ASSERT(IsNotTemporaryScopedHandle(object));
540 ASSERT(IsInOldSpace(object));
541 return true;
542}
543
544void Assembler::LoadNativeEntry(
 545 Register dst,
 546 const ExternalLabel* label,
547 ObjectPoolBuilderEntry::Patchability patchable) {
548 const intptr_t index =
549 object_pool_builder().FindNativeFunction(label, patchable);
550 LoadWordFromPoolIndex(dst, index);
551}
552
553void Assembler::LoadIsolate(Register dst) {
554 ldr(dst, Address(THR, target::Thread::isolate_offset()));
555}
556
557void Assembler::LoadIsolateGroup(Register rd) {
558 ldr(rd, Address(THR, target::Thread::isolate_group_offset()));
559}
560
561void Assembler::LoadObjectHelper(Register dst,
562 const Object& object,
563 bool is_unique) {
564 ASSERT(IsOriginalObject(object));
565 // `is_unique == true` effectively means object has to be patchable.
566 // (even if the object is null)
567 if (!is_unique) {
568 if (IsSameObject(compiler::NullObject(), object)) {
569 mov(dst, NULL_REG);
570 return;
571 }
572 if (IsSameObject(CastHandle<Object>(compiler::TrueObject()), object)) {
573 AddImmediate(dst, NULL_REG, kTrueOffsetFromNull);
574 return;
575 }
576 if (IsSameObject(CastHandle<Object>(compiler::FalseObject()), object)) {
577 AddImmediate(dst, NULL_REG, kFalseOffsetFromNull);
578 return;
579 }
580 word offset = 0;
581 if (target::CanLoadFromThread(object, &offset)) {
582 ldr(dst, Address(THR, offset));
583 return;
584 }
585 if (target::IsSmi(object)) {
586 LoadImmediate(dst, target::ToRawSmi(object));
587 return;
588 }
589 }
590 RELEASE_ASSERT(CanLoadFromObjectPool(object));
591 const intptr_t index =
592 is_unique ? object_pool_builder().AddObject(
593 object, ObjectPoolBuilderEntry::kPatchable)
594 : object_pool_builder().FindObject(
595 object, ObjectPoolBuilderEntry::kNotPatchable);
596 LoadWordFromPoolIndex(dst, index);
597}
598
599void Assembler::LoadObject(Register dst, const Object& object) {
600 LoadObjectHelper(dst, object, false);
601}
602
603void Assembler::LoadUniqueObject(Register dst, const Object& object) {
604 LoadObjectHelper(dst, object, true);
605}
606
607void Assembler::LoadFromStack(Register dst, intptr_t depth) {
608 ASSERT(depth >= 0);
609 LoadFromOffset(dst, SPREG, depth * target::kWordSize);
610}
611
612void Assembler::StoreToStack(Register src, intptr_t depth) {
613 ASSERT(depth >= 0);
614 StoreToOffset(src, SPREG, depth * target::kWordSize);
615}
616
617void Assembler::CompareToStack(Register src, intptr_t depth) {
618 LoadFromStack(TMP, depth);
619 CompareRegisters(src, TMP);
620}
621
622void Assembler::CompareObject(Register reg, const Object& object) {
623 ASSERT(IsOriginalObject(object));
624 if (IsSameObject(compiler::NullObject(), object)) {
625 CompareObjectRegisters(reg, NULL_REG);
626 } else if (target::IsSmi(object)) {
627 CompareImmediate(reg, target::ToRawSmi(object), kObjectBytes);
628 } else {
629 LoadObject(TMP, object);
630 CompareObjectRegisters(reg, TMP);
631 }
632}
633
634void Assembler::LoadImmediate(Register reg, int64_t imm) {
635 // Is it 0?
636 if (imm == 0) {
637 movz(reg, Immediate(0), 0);
638 return;
639 }
640
641 // Can we use one orri operation?
642 Operand op;
643 Operand::OperandType ot;
644 ot = Operand::CanHold(imm, kXRegSizeInBits, &op);
645 if (ot == Operand::BitfieldImm) {
646 orri(reg, ZR, Immediate(imm));
647 return;
648 }
649
650 // We may fall back on movz, movk, movn.
651 const uint32_t w0 = Utils::Low32Bits(imm);
652 const uint32_t w1 = Utils::High32Bits(imm);
653 const uint16_t h0 = Utils::Low16Bits(w0);
654 const uint16_t h1 = Utils::High16Bits(w0);
655 const uint16_t h2 = Utils::Low16Bits(w1);
656 const uint16_t h3 = Utils::High16Bits(w1);
657
658 // Special case for w1 == 0xffffffff
659 if (w1 == 0xffffffff) {
660 if (h1 == 0xffff) {
661 movn(reg, Immediate(~h0), 0);
662 } else {
663 movn(reg, Immediate(~h1), 1);
664 movk(reg, Immediate(h0), 0);
665 }
666 return;
667 }
668
669 // Special case for h3 == 0xffff
670 if (h3 == 0xffff) {
671 // We know h2 != 0xffff.
672 movn(reg, Immediate(~h2), 2);
673 if (h1 != 0xffff) {
674 movk(reg, Immediate(h1), 1);
675 }
676 if (h0 != 0xffff) {
677 movk(reg, Immediate(h0), 0);
678 }
679 return;
680 }
681
682 // Use constant pool if allowed, unless we can load imm with 2 instructions.
683 if ((w1 != 0) && constant_pool_allowed()) {
684 const intptr_t index = object_pool_builder().FindImmediate(imm);
685 LoadWordFromPoolIndex(reg, index);
686 return;
687 }
688
689 bool initialized = false;
690 if (h0 != 0) {
691 movz(reg, Immediate(h0), 0);
692 initialized = true;
693 }
694 if (h1 != 0) {
695 if (initialized) {
696 movk(reg, Immediate(h1), 1);
697 } else {
698 movz(reg, Immediate(h1), 1);
699 initialized = true;
700 }
701 }
702 if (h2 != 0) {
703 if (initialized) {
704 movk(reg, Immediate(h2), 2);
705 } else {
706 movz(reg, Immediate(h2), 2);
707 initialized = true;
708 }
709 }
710 if (h3 != 0) {
711 if (initialized) {
712 movk(reg, Immediate(h3), 3);
713 } else {
714 movz(reg, Immediate(h3), 3);
715 }
716 }
717}
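// For example, LoadImmediate(R1, 0x12345678) falls through to the movz/movk
// path (the upper 32 bits are zero) and emits:
//   movz r1, #0x5678           ; h0
//   movk r1, #0x1234, lsl #16  ; h1
// while a value such as 0xffffffffffff1234 is produced by the single-movn
// special case above.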
718
719void Assembler::LoadSImmediate(VRegister vd, float imms) {
720 int32_t imm32 = bit_cast<int32_t, float>(imms);
721 if (imm32 == 0) {
722 veor(vd, vd, vd);
723 } else if (constant_pool_allowed()) {
724 intptr_t index = object_pool_builder().FindImmediate(imm32);
725 intptr_t offset = target::ObjectPool::element_offset(index);
726 LoadSFromOffset(vd, PP, offset);
727 } else {
728 LoadImmediate(TMP, imm32);
729 fmovsr(vd, TMP);
730 }
731}
732
733void Assembler::LoadDImmediate(VRegister vd, double immd) {
734 if (fmovdi(vd, immd)) return;
735
736 int64_t imm64 = bit_cast<int64_t, double>(immd);
737 if (imm64 == 0) {
738 veor(vd, vd, vd);
739 } else if (constant_pool_allowed()) {
740 intptr_t index = object_pool_builder().FindImmediate64(imm64);
741 intptr_t offset = target::ObjectPool::element_offset(index);
742 LoadDFromOffset(vd, PP, offset);
743 } else {
744 LoadImmediate(TMP, imm64);
745 fmovdr(vd, TMP);
746 }
747}
748
749void Assembler::LoadQImmediate(VRegister vd, simd128_value_t immq) {
750 ASSERT(constant_pool_allowed());
751 intptr_t index = object_pool_builder().FindImmediate128(immq);
752 intptr_t offset = target::ObjectPool::element_offset(index);
753 LoadQFromOffset(vd, PP, offset);
754}
755
756void Assembler::BranchLink(intptr_t target_code_pool_index,
757 CodeEntryKind entry_kind) {
758 CLOBBERS_LR({
759 // Avoid clobbering CODE_REG when invoking code in precompiled mode.
760 // We don't actually use CODE_REG in the callee and caller might
761 // be using CODE_REG for a live value (e.g. a value that is alive
762 // across invocation of a shared stub like the one we use for
763 // allocating Mint boxes).
764 const Register code_reg = FLAG_precompiled_mode ? LR : CODE_REG;
765 LoadWordFromPoolIndex(code_reg, target_code_pool_index);
766 Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
767 });
768}
769
770void Assembler::BranchLink(
771 const Code& target,
772 ObjectPoolBuilderEntry::Patchability patchable,
773 CodeEntryKind entry_kind,
774 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
775 const intptr_t index = object_pool_builder().FindObject(
776 ToObject(target), patchable, snapshot_behavior);
777 BranchLink(index, entry_kind);
778}
779
780void Assembler::BranchLinkWithEquivalence(const Code& target,
781 const Object& equivalence,
782 CodeEntryKind entry_kind) {
783 const intptr_t index =
784 object_pool_builder().FindObject(ToObject(target), equivalence);
785 BranchLink(index, entry_kind);
786}
787
788void Assembler::AddImmediate(Register dest,
789 Register rn,
790 int64_t imm,
791 OperandSize sz) {
792 ASSERT(sz == kEightBytes || sz == kFourBytes);
 793 const int width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
 794 Operand op;
795 if (imm == 0) {
796 if (dest != rn) {
797 mov(dest, rn);
798 }
799 return;
800 }
801 if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
802 add(dest, rn, op, sz);
803 } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
804 Operand::Immediate) {
805 sub(dest, rn, op, sz);
806 } else {
807 // TODO(zra): Try adding top 12 bits, then bottom 12 bits.
808 ASSERT(rn != TMP2);
809 LoadImmediate(TMP2, imm);
810 add(dest, rn, Operand(TMP2), sz);
811 }
812}
813
814void Assembler::AddImmediateSetFlags(Register dest,
815 Register rn,
816 int64_t imm,
817 OperandSize sz) {
818 ASSERT(sz == kEightBytes || sz == kFourBytes);
 819 const int width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
 820 Operand op;
821 if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
822 // Handles imm == kMinInt64.
823 adds(dest, rn, op, sz);
824 } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
825 Operand::Immediate) {
826 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection.
827 subs(dest, rn, op, sz);
828 } else {
829 // TODO(zra): Try adding top 12 bits, then bottom 12 bits.
830 ASSERT(rn != TMP2);
831 LoadImmediate(TMP2, imm);
832 adds(dest, rn, Operand(TMP2), sz);
833 }
834}
835
836void Assembler::SubImmediateSetFlags(Register dest,
837 Register rn,
838 int64_t imm,
839 OperandSize sz) {
840 ASSERT(sz == kEightBytes || sz == kFourBytes);
 841 const int width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
 842 Operand op;
843 if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
844 // Handles imm == kMinInt64.
845 subs(dest, rn, op, sz);
846 } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
847 Operand::Immediate) {
848 ASSERT(imm != kMinInt64); // Would cause erroneous overflow detection.
849 adds(dest, rn, op, sz);
850 } else {
851 // TODO(zra): Try subtracting top 12 bits, then bottom 12 bits.
852 ASSERT(rn != TMP2);
853 LoadImmediate(TMP2, imm);
854 subs(dest, rn, Operand(TMP2), sz);
855 }
856}
857
858void Assembler::AndImmediate(Register rd,
859 Register rn,
860 int64_t imm,
861 OperandSize sz) {
862 ASSERT(sz == kEightBytes || sz == kFourBytes);
 863 const int width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
 864 Operand imm_op;
865 if (Operand::IsImmLogical(imm, width, &imm_op)) {
866 andi(rd, rn, Immediate(imm), sz);
867 } else {
868 LoadImmediate(TMP, imm);
869 and_(rd, rn, Operand(TMP), sz);
870 }
871}
872
873void Assembler::OrImmediate(Register rd,
874 Register rn,
875 int64_t imm,
876 OperandSize sz) {
877 ASSERT(sz == kEightBytes || sz == kFourBytes);
 878 const int width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
 879 Operand imm_op;
880 if (Operand::IsImmLogical(imm, width, &imm_op)) {
881 orri(rd, rn, Immediate(imm), sz);
882 } else {
883 LoadImmediate(TMP, imm);
884 orr(rd, rn, Operand(TMP), sz);
885 }
886}
887
888void Assembler::XorImmediate(Register rd,
889 Register rn,
890 int64_t imm,
891 OperandSize sz) {
892 ASSERT(sz == kEightBytes || sz == kFourBytes);
 893 const int width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
 894 Operand imm_op;
895 if (Operand::IsImmLogical(imm, width, &imm_op)) {
896 eori(rd, rn, Immediate(imm), sz);
897 } else {
898 LoadImmediate(TMP, imm);
899 eor(rd, rn, Operand(TMP), sz);
900 }
901}
902
903void Assembler::TestImmediate(Register rn, int64_t imm, OperandSize sz) {
904 ASSERT(sz == kEightBytes || sz == kFourBytes);
 905 const int width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
 906 Operand imm_op;
907 if (Operand::IsImmLogical(imm, width, &imm_op)) {
908 tsti(rn, Immediate(imm), sz);
909 } else {
910 LoadImmediate(TMP, imm);
911 tst(rn, Operand(TMP), sz);
912 }
913}
914
915void Assembler::CompareImmediate(Register rn, int64_t imm, OperandSize sz) {
916 ASSERT(sz == kEightBytes || sz == kFourBytes);
 917 const int width = (sz == kFourBytes) ? kWRegSizeInBits : kXRegSizeInBits;
 918 Operand op;
919 if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
920 cmp(rn, op, sz);
921 } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
922 Operand::Immediate) {
923 cmn(rn, op, sz);
924 } else {
925 ASSERT(rn != TMP2);
926 LoadImmediate(TMP2, imm);
927 cmp(rn, Operand(TMP2), sz);
928 }
929}
930
931Address Assembler::PrepareLargeOffset(Register base,
932 int32_t offset,
933 OperandSize sz,
934 Address::AddressType addr_type) {
935 ASSERT(addr_type == Address::AddressType::Offset ||
936 addr_type == Address::AddressType::PairOffset);
937 if (Address::CanHoldOffset(offset, addr_type, sz)) {
938 return Address(base, offset, addr_type);
939 }
940 ASSERT(base != TMP2);
941 Operand op;
942 const uint32_t upper20 = offset & 0xfffff000;
943 const uint32_t lower12 = offset & 0x00000fff;
944 if ((base != CSP) &&
945 (Operand::CanHold(upper20, kXRegSizeInBits, &op) == Operand::Immediate) &&
946 Address::CanHoldOffset(lower12, addr_type, sz)) {
947 add(TMP2, base, op);
948 return Address(TMP2, lower12, addr_type);
949 }
950 LoadImmediate(TMP2, offset);
951 if (addr_type == Address::AddressType::Offset) {
952 return Address(base, TMP2);
953 } else {
954 add(TMP2, TMP2, Operand(base));
955 return Address(TMP2, 0, Address::AddressType::PairOffset);
956 }
957}
958
959void Assembler::Load(Register dst, const Address& addr, OperandSize sz) {
960 if (addr.type() == Address::AddressType::Offset ||
961 addr.type() == Address::AddressType::PairOffset) {
962 ldr(dst, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()),
963 sz);
964 } else {
965 // Pass the address through unchanged.
966 ldr(dst, addr, sz);
967 }
968}
969
970void Assembler::LoadSFromOffset(VRegister dest, Register base, int32_t offset) {
971 auto const type = Address::AddressType::Offset;
972 fldrs(dest, PrepareLargeOffset(base, offset, kSWord, type));
973}
974
975void Assembler::LoadDFromOffset(VRegister dest, Register base, int32_t offset) {
976 auto const type = Address::AddressType::Offset;
977 fldrd(dest, PrepareLargeOffset(base, offset, kDWord, type));
978}
979
980void Assembler::LoadQFromOffset(VRegister dest, Register base, int32_t offset) {
981 auto const type = Address::AddressType::Offset;
982 fldrq(dest, PrepareLargeOffset(base, offset, kQWord, type));
983}
984
985void Assembler::Store(Register src, const Address& addr, OperandSize sz) {
986 if (addr.type() == Address::AddressType::Offset ||
987 addr.type() == Address::AddressType::PairOffset) {
988 str(src, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()),
989 sz);
990 } else {
991 // Pass the address through unchanged.
992 str(src, addr, sz);
993 }
994}
995
996void Assembler::StorePairToOffset(Register low,
997 Register high,
 998 Register base,
 999 int32_t offset,
1000 OperandSize sz) {
1001 auto const type = Address::AddressType::PairOffset;
1002 stp(low, high, PrepareLargeOffset(base, offset, sz, type), sz);
1003}
1004
1005void Assembler::StoreSToOffset(VRegister src, Register base, int32_t offset) {
1006 auto const type = Address::AddressType::Offset;
1007 fstrs(src, PrepareLargeOffset(base, offset, kSWord, type));
1008}
1009
1010void Assembler::StoreDToOffset(VRegister src, Register base, int32_t offset) {
1011 auto const type = Address::AddressType::Offset;
1012 fstrd(src, PrepareLargeOffset(base, offset, kDWord, type));
1013}
1014
1015void Assembler::StoreQToOffset(VRegister src, Register base, int32_t offset) {
1016 auto const type = Address::AddressType::Offset;
1017 fstrq(src, PrepareLargeOffset(base, offset, kQWord, type));
1018}
1019
1020void Assembler::VRecps(VRegister vd, VRegister vn) {
1021 ASSERT(vn != VTMP);
1022 ASSERT(vd != VTMP);
1023
1024 // Reciprocal estimate.
1025 vrecpes(vd, vn);
1026 // 2 Newton-Raphson steps.
1027 vrecpss(VTMP, vn, vd);
1028 vmuls(vd, vd, VTMP);
1029 vrecpss(VTMP, vn, vd);
1030 vmuls(vd, vd, VTMP);
1031}
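// FRECPS (vrecpss) computes 2.0 - (vn * vd), so each vrecpss/vmuls pair above
// is one Newton-Raphson step x_{n+1} = x_n * (2 - vn * x_n) converging to
// 1 / vn.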
1032
1033void Assembler::VRSqrts(VRegister vd, VRegister vn) {
1034 ASSERT(vd != VTMP);
1035 ASSERT(vn != VTMP);
1036
1037 // Reciprocal square root estimate.
1038 vrsqrtes(vd, vn);
1039 // 2 Newton-Raphson steps. xn+1 = xn * (3 - V1*xn^2) / 2.
1040 // First step.
1041 vmuls(VTMP, vd, vd); // VTMP <- xn^2
1042 vrsqrtss(VTMP, vn, VTMP); // VTMP <- (3 - V1*VTMP) / 2.
1043 vmuls(vd, vd, VTMP); // xn+1 <- xn * VTMP
1044 // Second step.
1045 vmuls(VTMP, vd, vd);
1046 vrsqrtss(VTMP, vn, VTMP);
1047 vmuls(vd, vd, VTMP);
1048}
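// FRSQRTS (vrsqrtss) computes (3.0 - (vn * vd)) / 2.0, so each
// vmuls/vrsqrtss/vmuls triple implements the step quoted above:
// x_{n+1} = x_n * (3 - vn * x_n^2) / 2, converging to 1 / sqrt(vn).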
1049
1050#if defined(DART_COMPRESSED_POINTERS)
1051void Assembler::LoadCompressed(Register dest, const Address& slot) {
1052 Load(dest, slot, kUnsignedFourBytes); // Zero-extension.
1053 add(dest, dest, Operand(HEAP_BITS, LSL, 32));
1054}
1055#endif
1056
1057void Assembler::StoreBarrier(Register object,
 1058 Register value,
 1059 CanBeSmi can_be_smi,
1060 Register scratch) {
1061 const bool spill_lr = lr_state().LRContainsReturnAddress();
 1062 // x.slot = x. Barrier should have been removed at the IL level.
1063 ASSERT(object != value);
1064 ASSERT(object != scratch);
1065 ASSERT(value != scratch);
1066 ASSERT(object != LINK_REGISTER);
 1067 ASSERT(value != LINK_REGISTER);
 1068 ASSERT(scratch != LINK_REGISTER);
1069 ASSERT(object != TMP2);
1070 ASSERT(value != TMP2);
1071 ASSERT(scratch != TMP2);
1072 ASSERT(scratch != kNoRegister);
1073
1074 // In parallel, test whether
1075 // - object is old and not remembered and value is new, or
1076 // - object is old and value is old and not marked and concurrent marking is
1077 // in progress
1078 // If so, call the WriteBarrier stub, which will either add object to the
1079 // store buffer (case 1) or add value to the marking stack (case 2).
1080 // Compare UntaggedObject::StorePointer.
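// The test below relies on HEAP_BITS holding the write barrier mask in its
// upper 32 bits (see RestorePinnedRegisters): value's tag byte is ANDed with
// object's tag byte shifted right by kBarrierOverlapShift, and the result is
// tested against that mask, so both conditions are checked with one and_/tst.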
1081 Label done;
1082 if (can_be_smi == kValueCanBeSmi) {
1083 BranchIfSmi(value, &done);
1084 } else {
1085#if defined(DEBUG)
1086 Label passed_check;
1087 BranchIfNotSmi(value, &passed_check, kNearJump);
1088 Breakpoint();
1089 Bind(&passed_check);
1090#endif
1091 }
1092 ldr(scratch, FieldAddress(object, target::Object::tags_offset()),
 1093 kUnsignedByte);
 1094 ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
1095 and_(scratch, TMP2,
1096 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1097 tst(scratch, Operand(HEAP_BITS, LSR, 32));
1098 b(&done, ZERO);
1099
1100 if (spill_lr) {
1101 SPILLS_LR_TO_FRAME(Push(LR));
1102 }
 1103 Register objectForCall = object;
 1104 if (value != kWriteBarrierValueReg) {
 1105 // Unlikely. Only non-graph intrinsics.
 1106 // TODO(rmacnak): Shuffle registers in intrinsics.
 1107 if (object != kWriteBarrierValueReg) {
 1108 Push(kWriteBarrierValueReg);
 1109 } else {
 1110 COMPILE_ASSERT(R2 != kWriteBarrierValueReg);
 1111 COMPILE_ASSERT(R3 != kWriteBarrierValueReg);
 1112 objectForCall = (value == R2) ? R3 : R2;
 1113 PushPair(kWriteBarrierValueReg, objectForCall);
 1114 mov(objectForCall, object);
 1115 }
 1116 mov(kWriteBarrierValueReg, value);
 1117 }
 1118
 1119 generate_invoke_write_barrier_wrapper_(objectForCall);
 1120
 1121 if (value != kWriteBarrierValueReg) {
 1122 if (object != kWriteBarrierValueReg) {
 1123 Pop(kWriteBarrierValueReg);
 1124 } else {
 1125 PopPair(kWriteBarrierValueReg, objectForCall);
 1126 }
1127 }
1128 if (spill_lr) {
1129 RESTORES_LR_FROM_FRAME(Pop(LR));
1130 }
1131 Bind(&done);
1132}
1133
1134void Assembler::ArrayStoreBarrier(Register object,
1135 Register slot,
 1136 Register value,
 1137 CanBeSmi can_be_smi,
1138 Register scratch) {
1139 const bool spill_lr = lr_state().LRContainsReturnAddress();
1140 ASSERT(object != slot);
1141 ASSERT(object != value);
1142 ASSERT(object != scratch);
1143 ASSERT(slot != value);
1144 ASSERT(slot != scratch);
1145 ASSERT(value != scratch);
1146 ASSERT(object != LINK_REGISTER);
1147 ASSERT(slot != LINK_REGISTER);
 1148 ASSERT(value != LINK_REGISTER);
 1149 ASSERT(scratch != LINK_REGISTER);
1150 ASSERT(object != TMP2);
1151 ASSERT(slot != TMP2);
1152 ASSERT(value != TMP2);
1153 ASSERT(scratch != TMP2);
1154 ASSERT(scratch != kNoRegister);
1155
1156 // In parallel, test whether
1157 // - object is old and not remembered and value is new, or
1158 // - object is old and value is old and not marked and concurrent marking is
1159 // in progress
1160 // If so, call the WriteBarrier stub, which will either add object to the
1161 // store buffer (case 1) or add value to the marking stack (case 2).
1162 // Compare UntaggedObject::StorePointer.
1163 Label done;
1164 if (can_be_smi == kValueCanBeSmi) {
1165 BranchIfSmi(value, &done);
1166 } else {
1167#if defined(DEBUG)
1168 Label passed_check;
1169 BranchIfNotSmi(value, &passed_check, kNearJump);
1170 Breakpoint();
1171 Bind(&passed_check);
1172#endif
1173 }
1174 ldr(scratch, FieldAddress(object, target::Object::tags_offset()),
 1175 kUnsignedByte);
 1176 ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
1177 and_(scratch, TMP2,
1178 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1179 tst(scratch, Operand(HEAP_BITS, LSR, 32));
1180 b(&done, ZERO);
1181 if (spill_lr) {
1182 SPILLS_LR_TO_FRAME(Push(LR));
1183 }
1184 if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
1185 (slot != kWriteBarrierSlotReg)) {
1186 // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
1187 // from StoreIndexInstr, which gets these exact registers from the register
1188 // allocator.
1189 UNIMPLEMENTED();
1190 }
1191 generate_invoke_array_write_barrier_();
1192 if (spill_lr) {
1193 RESTORES_LR_FROM_FRAME(Pop(LR));
1194 }
1195 Bind(&done);
1196}
1197
1198void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
1199 const Address& address,
1200 const Object& value,
1201 MemoryOrder memory_order,
1202 OperandSize size) {
1204 DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
 1205 Register src = kNoRegister;
 1206 if (IsSameObject(compiler::NullObject(), value)) {
 1207 src = NULL_REG;
1208 } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
1209 src = ZR;
1210 } else {
1211 // Store uses TMP2 when the address cannot be fully contained in the
1212 // instruction, so TMP is safe to use as a scratch register here.
1213 src = TMP;
1214 ASSERT(object != src);
1215 LoadObject(src, value);
1216 }
1217 if (memory_order == kRelease) {
1218 StoreRelease(src, address, size);
1219 } else {
1220 Store(src, address, size);
1221 }
1222}
1223
1224void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
1225 Register value) {
1226 // We can't assert the incremental barrier is not needed here, only the
1227 // generational barrier. We sometimes omit the write barrier when 'value' is
1228 // a constant, but we don't eagerly mark 'value' and instead assume it is also
1229 // reachable via a constant pool, so it doesn't matter if it is not traced via
1230 // 'object'.
1231 Label done;
1232 BranchIfSmi(value, &done, kNearJump);
1233 ldr(TMP, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
1234 tbz(&done, TMP, target::UntaggedObject::kNewOrEvacuationCandidateBit);
1235 ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
1236 tbz(&done, TMP, target::UntaggedObject::kOldAndNotRememberedBit);
1237 Stop("Write barrier is required");
1238 Bind(&done);
1239}
1240
1241void Assembler::StoreInternalPointer(Register object,
1242 const Address& dest,
1243 Register value) {
1244 str(value, dest);
1245}
1246
1247void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
1248 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
1249 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
1250 ubfx(result, tags, target::UntaggedObject::kClassIdTagPos,
1251 target::UntaggedObject::kClassIdTagSize);
1252}
1253
1254void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
1255 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
1256 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
1257 ubfx(result, tags, target::UntaggedObject::kSizeTagPos,
1258 target::UntaggedObject::kSizeTagSize);
 1259 LslImmediate(result, result, target::ObjectAlignment::kObjectAlignmentLog2);
 1260}
1261
1262void Assembler::LoadClassId(Register result, Register object) {
1263 ldr(result, FieldAddress(object, target::Object::tags_offset()));
1264 ExtractClassIdFromTags(result, result);
1265}
1266
1267void Assembler::LoadClassById(Register result, Register class_id) {
1268 ASSERT(result != class_id);
1269
1270 const intptr_t table_offset =
1271 target::IsolateGroup::cached_class_table_table_offset();
1272
1273 LoadIsolateGroup(result);
1274 LoadFromOffset(result, result, table_offset);
1275 ldr(result, Address(result, class_id, UXTX, Address::Scaled));
1276}
1277
1278void Assembler::CompareClassId(Register object,
1279 intptr_t class_id,
1280 Register scratch) {
1281 LoadClassId(TMP, object);
1282 CompareImmediate(TMP, class_id);
1283}
1284
1285void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
1286 ASSERT(result != object);
1287 Label done;
1288 LoadImmediate(result, kSmiCid);
1289 BranchIfSmi(object, &done);
1290 LoadClassId(result, object);
1291 Bind(&done);
1292}
1293
1294void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
1295 if (result == object) {
1296 LoadClassIdMayBeSmi(TMP, object);
1297 SmiTag(result, TMP);
1298 } else {
1299 Label done;
1300 LoadImmediate(result, target::ToRawSmi(kSmiCid));
1301 BranchIfSmi(object, &done);
1302 LoadClassId(result, object);
1303 SmiTag(result);
1304 Bind(&done);
1305 }
1306}
1307
1308void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
1309 Register src,
1310 Register scratch,
1311 bool can_be_null) {
1312#if defined(DEBUG)
1313 Comment("Check that object in register has cid %" Pd "", cid);
1314 Label matches;
1315 LoadClassIdMayBeSmi(scratch, src);
1316 CompareImmediate(scratch, cid);
1317 BranchIf(EQUAL, &matches, Assembler::kNearJump);
1318 if (can_be_null) {
1319 CompareImmediate(scratch, kNullCid);
1320 BranchIf(EQUAL, &matches, Assembler::kNearJump);
1321 }
1322 Breakpoint();
1323 Bind(&matches);
1324#endif
1325}
1326
1327// Frame entry and exit.
1328void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
1329 // Reserve space for arguments and align frame before entering
1330 // the C++ world.
1331 if (frame_space != 0) {
1332 AddImmediate(SP, -frame_space);
1333 }
1334 if (OS::ActivationFrameAlignment() > 1) {
1335 andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
1336 }
1337}
1338
1339void Assembler::EmitEntryFrameVerification() {
1340#if defined(DEBUG)
1341 Label done;
1342 ASSERT(!constant_pool_allowed());
1343 LoadImmediate(TMP, target::frame_layout.exit_link_slot_from_entry_fp *
 1344 target::kWordSize);
 1345 add(TMP, TMP, Operand(FPREG));
1346 cmp(TMP, Operand(SPREG));
1347 b(&done, EQ);
1348
1349 Breakpoint();
1350
1351 Bind(&done);
1352#endif
1353}
1354
1355void Assembler::RestoreCodePointer() {
1356 ldr(CODE_REG,
1357 Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
1358 CheckCodePointer();
1359}
1360
1361void Assembler::RestorePinnedRegisters() {
1362 ldr(HEAP_BITS,
1363 compiler::Address(THR, target::Thread::write_barrier_mask_offset()));
1364 LslImmediate(HEAP_BITS, HEAP_BITS, 32);
1365 ldr(NULL_REG, compiler::Address(THR, target::Thread::object_null_offset()));
1366#if defined(DART_COMPRESSED_POINTERS)
1367 ldr(TMP, compiler::Address(THR, target::Thread::heap_base_offset()));
1368 orr(HEAP_BITS, HEAP_BITS, Operand(TMP, LSR, 32));
1369#endif
1370}
1371
1372void Assembler::SetupGlobalPoolAndDispatchTable() {
1373 ASSERT(FLAG_precompiled_mode);
1374 ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
1375 sub(PP, PP, Operand(kHeapObjectTag)); // Pool in PP is untagged!
 1376 ldr(DISPATCH_TABLE_REG,
 1377 Address(THR, target::Thread::dispatch_table_array_offset()));
1378}
1379
1380void Assembler::CheckCodePointer() {
1381#ifdef DEBUG
1382 if (!FLAG_check_code_pointer) {
1383 return;
1384 }
1385 Comment("CheckCodePointer");
1386 Label cid_ok, instructions_ok;
1387 Push(R0);
1388 CompareClassId(CODE_REG, kCodeCid);
1389 b(&cid_ok, EQ);
1390 brk(0);
1391 Bind(&cid_ok);
1392
1393 const intptr_t entry_offset =
 1394 CodeSize() + target::Instructions::HeaderSize() - kHeapObjectTag;
 1395 adr(R0, Immediate(-entry_offset));
1396 ldr(TMP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
1397 cmp(R0, Operand(TMP));
1398 b(&instructions_ok, EQ);
1399 brk(1);
1400 Bind(&instructions_ok);
1401 Pop(R0);
1402#endif
1403}
1404
1405// The ARM64 ABI requires at all times
1406// - stack limit < CSP <= stack base
1407// - CSP mod 16 = 0
1408// - we do not access stack memory below CSP
1409// Practically, this means we need to keep the C stack pointer ahead of the
1410// Dart stack pointer and 16-byte aligned for signal handlers. We set
1411// CSP to a value near the stack limit during SetupDartSP*, and use a different
1412// register within our generated code to avoid the alignment requirement.
1413// Note that Fuchsia does not have signal handlers.
1414
1415void Assembler::SetupDartSP(intptr_t reserve /* = 4096 */) {
1416 mov(SP, CSP);
1417 // The caller doesn't have a Thread available. Just kick CSP forward a bit.
1418 AddImmediate(CSP, CSP, -Utils::RoundUp(reserve, 16));
1419}
1420
1421void Assembler::SetupCSPFromThread(Register thr) {
1422 // Thread::saved_stack_limit_ is OSThread::overflow_stack_limit(), which is
1423 // OSThread::stack_limit() with some headroom. Set CSP a bit below this value
 1424 // so that signal handlers won't stomp on the stack of Dart code that pushes a
1425 // bit past overflow_stack_limit before its next overflow check. (We build
1426 // frames before doing an overflow check.)
1427 ldr(TMP, Address(thr, target::Thread::saved_stack_limit_offset()));
1428 AddImmediate(CSP, TMP, -4096);
1429
1430 // TODO(47824): This will probably cause signal handlers on Windows to crash.
1431 // Windows requires the stack to grow in order, one page at a time, but
1432 // pushing CSP to near the stack limit likely skips over many pages.
1433}
1434
1435void Assembler::RestoreCSP() {
1436 mov(CSP, SP);
1437}
1438
1439void Assembler::SetReturnAddress(Register value) {
1440 RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(LR, value));
1441}
1442
1443void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
1444 AsrImmediate(reg, reg, shift);
1445}
1446
1447void Assembler::CompareWords(Register reg1,
1448 Register reg2,
1449 intptr_t offset,
 1450 Register count,
 1451 Register temp,
1452 Label* equals) {
1453 Label loop;
1454
1455 AddImmediate(reg1, offset - kHeapObjectTag);
1456 AddImmediate(reg2, offset - kHeapObjectTag);
1457
1459 Bind(&loop);
1460 BranchIfZero(count, equals, Assembler::kNearJump);
1461 AddImmediate(count, -1);
1462 ldr(temp, Address(reg1, 8, Address::PostIndex));
1463 ldr(TMP, Address(reg2, 8, Address::PostIndex));
1464 cmp(temp, Operand(TMP));
1465 BranchIf(EQUAL, &loop, Assembler::kNearJump);
1466}
1467
1468void Assembler::EnterFrame(intptr_t frame_size) {
1469 SPILLS_LR_TO_FRAME(PushPair(FP, LR)); // low: FP, high: LR.
1470 mov(FP, SP);
1471
1472 if (frame_size > 0) {
1473 sub(SP, SP, Operand(frame_size));
1474 }
1475}
1476
1477void Assembler::LeaveFrame() {
1478 mov(SP, FP);
1479 RESTORES_LR_FROM_FRAME(PopPair(FP, LR)); // low: FP, high: LR.
1480}
1481
1482void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
1483 ASSERT(!constant_pool_allowed());
1484 // Setup the frame.
1485 EnterFrame(0);
1486
1487 if (!FLAG_precompiled_mode) {
1488 TagAndPushPPAndPcMarker(); // Save PP and PC marker.
1489
1490 // Load the pool pointer.
1491 if (new_pp == kNoRegister) {
1492 LoadPoolPointer();
1493 } else {
1494 mov(PP, new_pp);
1495 }
1496 }
1497 set_constant_pool_allowed(true);
1498
1499 // Reserve space.
1500 if (frame_size > 0) {
1501 AddImmediate(SP, -frame_size);
1502 }
1503}
1504
1505// On entry to a function compiled for OSR, the caller's frame pointer, the
1506// stack locals, and any copied parameters are already in place. The frame
1507// pointer is already set up. The PC marker is not correct for the
1508// optimized function and there may be extra space for spill slots to
1509// allocate. We must also set up the pool pointer for the function.
1510void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
1511 ASSERT(!constant_pool_allowed());
1512 Comment("EnterOsrFrame");
1513 RestoreCodePointer();
1514 LoadPoolPointer();
1515
1516 if (extra_size > 0) {
1517 AddImmediate(SP, -extra_size);
1518 }
1519}
1520
1521void Assembler::LeaveDartFrame() {
1522 if (!FLAG_precompiled_mode) {
1523 // Restore and untag PP.
1524 LoadFromOffset(
1525 PP, FP,
1526 target::frame_layout.saved_caller_pp_from_fp * target::kWordSize);
1527 sub(PP, PP, Operand(kHeapObjectTag));
1528 }
1529 set_constant_pool_allowed(false);
1530 LeaveFrame();
1531}
1532
1533void Assembler::EnterFullSafepoint(Register state) {
1534 // We generate the same number of instructions whether or not the slow-path is
1535 // forced. This simplifies GenerateJitCallbackTrampolines.
1536 // For TSAN, we always go to the runtime so TSAN is aware of the release
1537 // semantics of entering the safepoint.
1538
1539 Register addr = TMP2;
1540 ASSERT(addr != state);
1541
1542 Label slow_path, done, retry;
1543 if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
1544 b(&slow_path);
1545 }
1546
1547 movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
1548 add(addr, THR, Operand(addr));
1549 Bind(&retry);
1550 ldxr(state, addr);
1551 cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));
1552 b(&slow_path, NE);
1553
1554 movz(state, Immediate(target::Thread::full_safepoint_state_acquired()), 0);
1555 stxr(TMP, state, addr);
1556 cbz(&done, TMP); // 0 means stxr was successful.
1557
1558 if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
1559 b(&retry);
1560 }
1561
1562 Bind(&slow_path);
1563 ldr(addr, Address(THR, target::Thread::enter_safepoint_stub_offset()));
1564 ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));
1565 blr(addr);
1566
1567 Bind(&done);
1568}
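// The ldxr/stxr pair above is a load-exclusive/store-exclusive retry loop:
// stxr writes 0 into TMP on success and non-zero if the exclusive monitor was
// lost, in which case (unless the slow path is forced) the code branches back
// to retry.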
1569
1570void Assembler::TransitionGeneratedToNative(Register destination,
1571 Register new_exit_frame,
1572 Register new_exit_through_ffi,
1573 bool enter_safepoint) {
1574 // Save exit frame information to enable stack walking.
1575 StoreToOffset(new_exit_frame, THR,
1576 target::Thread::top_exit_frame_info_offset());
1577
1578 StoreToOffset(new_exit_through_ffi, THR,
1579 target::Thread::exit_through_ffi_offset());
1580 Register tmp = new_exit_through_ffi;
1581
1582 // Mark that the thread is executing native code.
1583 StoreToOffset(destination, THR, target::Thread::vm_tag_offset());
1584 LoadImmediate(tmp, target::Thread::native_execution_state());
1585 StoreToOffset(tmp, THR, target::Thread::execution_state_offset());
1586
1587 if (enter_safepoint) {
1588 EnterFullSafepoint(tmp);
1589 }
1590}
1591
1592void Assembler::ExitFullSafepoint(Register state,
1593 bool ignore_unwind_in_progress) {
1594 // We generate the same number of instructions whether or not the slow-path is
1595 // forced, for consistency with EnterFullSafepoint.
1596 // For TSAN, we always go to the runtime so TSAN is aware of the acquire
1597 // semantics of leaving the safepoint.
1598 Register addr = TMP2;
1599 ASSERT(addr != state);
1600
1601 Label slow_path, done, retry;
1602 if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
1603 b(&slow_path);
1604 }
1605
1606 movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
1607 add(addr, THR, Operand(addr));
1608 Bind(&retry);
1609 ldxr(state, addr);
1610 cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));
1611 b(&slow_path, NE);
1612
1613 movz(state, Immediate(target::Thread::full_safepoint_state_unacquired()), 0);
1614 stxr(TMP, state, addr);
1615 cbz(&done, TMP); // 0 means stxr was successful.
1616
1617 if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
1618 b(&retry);
1619 }
1620
1621 Bind(&slow_path);
1622 if (ignore_unwind_in_progress) {
1623 ldr(addr,
1624 Address(THR,
1625 target::Thread::
1626 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
1627 } else {
1628 ldr(addr, Address(THR, target::Thread::exit_safepoint_stub_offset()));
1629 }
1630 ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));
1631 blr(addr);
1632
1633 Bind(&done);
1634}
1635
1636void Assembler::TransitionNativeToGenerated(Register state,
1637 bool exit_safepoint,
1638 bool ignore_unwind_in_progress,
1639 bool set_tag) {
1640 if (exit_safepoint) {
1641 ExitFullSafepoint(state, ignore_unwind_in_progress);
1642 } else {
1643 // flag only makes sense if we are leaving safepoint
1644 ASSERT(!ignore_unwind_in_progress);
1645#if defined(DEBUG)
1646 // Ensure we've already left the safepoint.
1647 ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
1648 LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
1649 ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
1650 and_(TMP, TMP, Operand(state));
1651 Label ok;
1652 cbz(&ok, TMP);
1653 Breakpoint();
1654 Bind(&ok);
1655#endif
1656 }
1657
1658 // Mark that the thread is executing Dart code.
1659 if (set_tag) {
1660 LoadImmediate(state, target::Thread::vm_tag_dart_id());
1661 StoreToOffset(state, THR, target::Thread::vm_tag_offset());
1662 }
1663 LoadImmediate(state, target::Thread::generated_execution_state());
1664 StoreToOffset(state, THR, target::Thread::execution_state_offset());
1665
1666 // Reset exit frame information in Isolate's mutator thread structure.
1667 StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
1668 LoadImmediate(state, 0);
1669 StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
1670}
1671
1672void Assembler::CallRuntime(const RuntimeEntry& entry,
1673 intptr_t argument_count) {
1674 ASSERT(!entry.is_leaf());
1675 // Argument count is not checked here, but in the runtime entry for a more
1676 // informative error message.
1677 ldr(R5, compiler::Address(THR, entry.OffsetFromThread()));
1678 LoadImmediate(R4, argument_count);
1679 Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
1680}
1681
1682// FPU: Only the bottom 64-bits of v8-v15 are preserved by the caller. The upper
1683// bits might be in use by Dart, so we save the whole register.
1684static const RegisterSet kRuntimeCallSavedRegisters(kDartVolatileCpuRegs,
 1685 kAllFpuRegistersList);
 1686
1687#undef __
1688#define __ assembler_->
1689
1690LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
1691 intptr_t frame_size,
1692 bool preserve_registers)
1693 : assembler_(assembler), preserve_registers_(preserve_registers) {
1694 __ Comment("EnterCallRuntimeFrame");
1695 __ EnterFrame(0);
1696
1697 if (preserve_registers) {
1698 __ PushRegisters(kRuntimeCallSavedRegisters);
1699 } else {
1700 // These registers must always be preserved.
1707 }
1708
1709 __ ReserveAlignedFrameSpace(frame_size);
1710}
1711
1712void LeafRuntimeScope::Call(const RuntimeEntry& entry,
1713 intptr_t argument_count) {
1714 ASSERT(argument_count == entry.argument_count());
1715 // Since we are entering C++ code, we must restore the C stack pointer from
1716 // the stack limit to an aligned value nearer to the top of the stack.
1717 // We cache the stack limit in callee-saved registers, then align and call,
1718 // restoring CSP and SP on return from the call.
1719 // This sequence may occur in an intrinsic, so don't use registers an
1720 // intrinsic must preserve.
1721 __ mov(CSP, SP);
1722 __ ldr(TMP, compiler::Address(THR, entry.OffsetFromThread()));
1723 __ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
1724 __ blr(TMP);
1725 __ LoadImmediate(TMP, VMTag::kDartTagId);
1726 __ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
1727 __ SetupCSPFromThread(THR);
1728}
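// Note on the sequence above: generated Dart code uses SP as its stack pointer
// while CSP is kept at the stack limit, so before branching into C++ we move
// CSP up to the current (aligned) SP, and after the call SetupCSPFromThread
// parks CSP at the limit again. A condensed, illustrative view:
//
//   mov csp, sp                      ; give C++ an aligned stack pointer
//   ldr tmp, [thr, #entry_offset]    ; entry point doubles as the VM tag
//   str tmp, [thr, #vm_tag]
//   blr tmp                          ; call the leaf runtime entry
//   mov tmp, #kDartTagId             ; restore the Dart VM tag
//   str tmp, [thr, #vm_tag]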
1729
1730LeafRuntimeScope::~LeafRuntimeScope() {
1731 if (preserve_registers_) {
1732 // SP might have been modified to reserve space for arguments
1733 // and ensure proper alignment of the stack frame.
1734 // We need to restore it before restoring registers.
1735 const intptr_t kPushedRegistersSize =
1736 kRuntimeCallSavedRegisters.CpuRegisterCount() * target::kWordSize +
1737 kRuntimeCallSavedRegisters.FpuRegisterCount() * kFpuRegisterSize;
1738 __ AddImmediate(SP, FP, -kPushedRegistersSize);
1739 __ PopRegisters(kRuntimeCallSavedRegisters);
1740 }
1741
1742 __ LeaveFrame();
1743}
1744
1745// For use by LR related macros (e.g. CLOBBERS_LR).
1746#undef __
1747#define __ this->
1748
1749void Assembler::EnterStubFrame() {
1750 EnterDartFrame(0);
1751}
1752
1753void Assembler::LeaveStubFrame() {
1754 LeaveDartFrame();
1755}
1756
1757void Assembler::EnterCFrame(intptr_t frame_space) {
1758 // Already saved.
1759 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
1760 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
1761 COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
1762 COMPILE_ASSERT(IsCalleeSavedRegister(HEAP_BITS));
1763 COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
1764
1765 Push(FP);
1766 mov(FP, SP);
1767 ReserveAlignedFrameSpace(frame_space);
1768}
1769
1770void Assembler::LeaveCFrame() {
1771 mov(SP, FP);
1772 Pop(FP);
1773}
1774
1775// R0 receiver, R5 ICData entries array
1776// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
1777void Assembler::MonomorphicCheckedEntryJIT() {
1778 has_monomorphic_entry_ = true;
1779 const bool saved_use_far_branches = use_far_branches();
1780 set_use_far_branches(false);
1781 const intptr_t start = CodeSize();
1782
1783 Label immediate, miss;
1784 Bind(&miss);
1785 ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
1786 br(IP0);
1787
1788 Comment("MonomorphicCheckedEntry");
1789 ASSERT_EQUAL(CodeSize() - start,
1790 target::Instructions::kMonomorphicEntryOffsetJIT);
1791
1792 const intptr_t cid_offset = target::Array::element_offset(0);
1793 const intptr_t count_offset = target::Array::element_offset(1);
1794
1795 // Sadly this cannot use ldp because ldp requires aligned offsets.
1796 ldr(R1, FieldAddress(R5, cid_offset), kObjectBytes);
1797 ldr(R2, FieldAddress(R5, count_offset), kObjectBytes);
1798 LoadClassIdMayBeSmi(IP0, R0);
1799 add(R2, R2, Operand(target::ToRawSmi(1)), kObjectBytes);
1800 cmp(R1, Operand(IP0, LSL, 1), kObjectBytes);
1801 b(&miss, NE);
1802 str(R2, FieldAddress(R5, count_offset), kObjectBytes);
1803 LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction
1804
1805 // Fall through to unchecked entry.
1806 ASSERT_EQUAL(CodeSize() - start,
1807 target::Instructions::kPolymorphicEntryOffsetJIT);
1808
1809 set_use_far_branches(saved_use_far_branches);
1810}
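// The prologue generated above is laid out at fixed offsets so callers can
// enter either before or after the receiver check. Rough pseudocode
// (illustrative only):
//
//   miss:          goto Thread::switchable_call_miss_entry;
//   monomorphic:   // at kMonomorphicEntryOffsetJIT
//     if (data[0] != Smi(ClassIdOf(R0))) goto miss;   // data = R5 entries
//     data[1] += Smi(1);                              // bump the call count
//   unchecked:     // at kPolymorphicEntryOffsetJIT, falls through to the body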
1811
1812// R0 receiver, R5 guarded cid as Smi.
1813// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
1814void Assembler::MonomorphicCheckedEntryAOT() {
1815 has_monomorphic_entry_ = true;
1816 bool saved_use_far_branches = use_far_branches();
1817 set_use_far_branches(false);
1818
1819 const intptr_t start = CodeSize();
1820
1821 Label immediate, miss;
1822 Bind(&miss);
1823 ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
1824 br(IP0);
1825
1826 Comment("MonomorphicCheckedEntry");
1827 ASSERT_EQUAL(CodeSize() - start,
1828 target::Instructions::kMonomorphicEntryOffsetAOT);
1829 LoadClassId(IP0, R0);
1830 cmp(R5, Operand(IP0, LSL, 1), kObjectBytes);
1831 b(&miss, NE);
1832
1833 // Fall through to unchecked entry.
1834 ASSERT_EQUAL(CodeSize() - start,
1835 target::Instructions::kPolymorphicEntryOffsetAOT);
1836
1837 set_use_far_branches(saved_use_far_branches);
1838}
1839
1840void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
1841 has_monomorphic_entry_ = true;
1842 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
1843 brk(0);
1844 }
1845 b(label);
1846 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
1847 brk(0);
1848 }
1849}
1850
1851 void Assembler::CombineHashes(Register hash, Register other) {
1852 // hash += other_hash
1853 add(hash, hash, Operand(other), kFourBytes);
1854 // hash += hash << 10
1855 add(hash, hash, Operand(hash, LSL, 10), kFourBytes);
1856 // hash ^= hash >> 6
1857 eor(hash, hash, Operand(hash, LSR, 6), kFourBytes);
1858}
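// Scalar equivalent of the register mix above (cf. CombineHashes in hash.h;
// illustrative sketch):
//
//   uint32_t CombineHashes(uint32_t hash, uint32_t other_hash) {
//     hash += other_hash;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//     return hash;
//   }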
1859
1860void Assembler::FinalizeHashForSize(intptr_t bit_size,
1861 Register hash,
1862 Register scratch) {
1863 ASSERT(bit_size > 0); // Can't avoid returning 0 if there are no hash bits!
1864 // While any 32-bit hash value fits in X bits, where X > 32, the caller may
1865 // reasonably expect that the returned values fill the entire bit space.
1866 ASSERT(bit_size <= kBitsPerInt32);
1867 // hash += hash << 3;
1868 add(hash, hash, Operand(hash, LSL, 3), kFourBytes);
1869 // hash ^= hash >> 11; // Logical shift, unsigned hash.
1870 eor(hash, hash, Operand(hash, LSR, 11), kFourBytes);
1871 // hash += hash << 15;
1872 if (bit_size < kBitsPerInt32) {
1873 add(hash, hash, Operand(hash, LSL, 15), kFourBytes);
1874 // Size to fit.
1875 andis(hash, hash, Immediate(Utils::NBitMask(bit_size)));
1876 } else {
1877 adds(hash, hash, Operand(hash, LSL, 15), kFourBytes);
1878 }
1879 // return (hash == 0) ? 1 : hash;
1880 cinc(hash, hash, ZERO);
1881}
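// Scalar equivalent of the finalization above (illustrative sketch):
//
//   uint32_t FinalizeHashForSize(int bit_size, uint32_t hash) {
//     hash += hash << 3;
//     hash ^= hash >> 11;                               // logical shift
//     hash += hash << 15;
//     if (bit_size < 32) hash &= (1u << bit_size) - 1;  // size to fit
//     return (hash == 0) ? 1 : hash;                    // never return 0
//   }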
1882
1883#ifndef PRODUCT
1884void Assembler::MaybeTraceAllocation(intptr_t cid,
1885 Label* trace,
1886 Register temp_reg,
1887 JumpDistance distance) {
1888 ASSERT(cid > 0);
1889
1890 LoadIsolateGroup(temp_reg);
1891 ldr(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
1892 ldr(temp_reg,
1893 Address(temp_reg,
1894 target::ClassTable::allocation_tracing_state_table_offset()));
1895 LoadFromOffset(temp_reg, temp_reg,
1896 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid),
1897 kUnsignedByte);
1898 cbnz(trace, temp_reg);
1899}
1900
1901void Assembler::MaybeTraceAllocation(Register cid,
1902 Label* trace,
1903 Register temp_reg,
1904 JumpDistance distance) {
1905 ASSERT(temp_reg != cid);
1906 LoadIsolateGroup(temp_reg);
1907 ldr(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
1908 ldr(temp_reg,
1909 Address(temp_reg,
1910 target::ClassTable::allocation_tracing_state_table_offset()));
1911 AddRegisters(temp_reg, cid);
1912 LoadFromOffset(temp_reg, temp_reg,
1913 target::ClassTable::AllocationTracingStateSlotOffsetFor(0),
1914 kUnsignedByte);
1915 cbnz(trace, temp_reg);
1916}
1917#endif // !PRODUCT
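// Both MaybeTraceAllocation overloads above reduce to the same lookup
// (illustrative sketch; the accessor name below is hypothetical): one byte per
// class id in the class table's allocation tracing state table, with a branch
// to the trace label when it is non-zero:
//
//   uint8_t* table = class_table->allocation_tracing_state_table();
//   if (table[cid] != 0) goto trace;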
1918
1919void Assembler::TryAllocateObject(intptr_t cid,
1920 intptr_t instance_size,
1921 Label* failure,
1922 JumpDistance distance,
1923 Register instance_reg,
1924 Register temp_reg) {
1925 ASSERT(failure != nullptr);
1926 ASSERT(instance_size != 0);
1927 ASSERT(instance_reg != temp_reg);
1928 ASSERT(temp_reg != kNoRegister);
1929 ASSERT(Utils::IsAligned(instance_size,
1930                         target::ObjectAlignment::kObjectAlignment));
1931 if (FLAG_inline_alloc &&
1932     target::Heap::IsAllocatableInNewSpace(instance_size)) {
1933 // If this allocation is traced, the program will jump to the failure
1934 // path (i.e. the allocation stub), which will allocate the object and
1935 // trace the allocation call site.
1936 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg));
1937 RELEASE_ASSERT((target::Thread::top_offset() + target::kWordSize) ==
1938 target::Thread::end_offset());
1939 ldp(instance_reg, temp_reg,
1940 Address(THR, target::Thread::top_offset(), Address::PairOffset));
1941 // instance_reg: current top (next object start).
1942 // temp_reg: heap end
1943
1944 // TODO(koda): Protect against unsigned overflow here.
1945 AddImmediate(instance_reg, instance_size);
1946 // instance_reg: potential top (next object start).
1947 // Fail if the heap end is unsigned less than or equal to the new heap top.
1948 cmp(temp_reg, Operand(instance_reg));
1949 b(failure, LS);
1950 CheckAllocationCanary(instance_reg, temp_reg);
1951
1952 // Successfully allocated the object: update top to the next object
1953 // start and store the tags word (which encodes the class id).
1954 str(instance_reg, Address(THR, target::Thread::top_offset()));
1955 // Move instance_reg back to the start of the object and tag it.
1956 AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
1957
1958 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
1959 LoadImmediate(temp_reg, tags);
1960 Store(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
1961 } else {
1962 b(failure);
1963 }
1964}
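// The fast path above is a bump allocation against the thread-local new-space
// bounds (illustrative sketch; TryAllocateArray below follows the same pattern
// with a caller-computed size):
//
//   uword top = thread->top, end = thread->end;
//   uword new_top = top + instance_size;
//   if (end <= new_top) goto failure;            // not enough space left
//   thread->top = new_top;
//   instance = top + kHeapObjectTag;             // tagged pointer
//   header(instance) = MakeTagWordForNewSpaceObject(cid, instance_size);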
1965
1966void Assembler::TryAllocateArray(intptr_t cid,
1967 intptr_t instance_size,
1968 Label* failure,
1969 Register instance,
1970 Register end_address,
1971 Register temp1,
1972 Register temp2) {
1973 if (FLAG_inline_alloc &&
1974     target::Heap::IsAllocatableInNewSpace(instance_size)) {
1975 // If this allocation is traced, the program will jump to the failure
1976 // path (i.e. the allocation stub), which will allocate the object and
1977 // trace the allocation call site.
1978 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp1));
1979 // Potential new object start.
1980 ldr(instance, Address(THR, target::Thread::top_offset()));
1981 AddImmediateSetFlags(end_address, instance, instance_size);
1982 b(failure, CS); // Fail on unsigned overflow.
1983
1984 // Check if the allocation fits into the remaining space.
1985 // instance: potential new object start.
1986 // end_address: potential next object start.
1987 ldr(temp2, Address(THR, target::Thread::end_offset()));
1988 cmp(end_address, Operand(temp2));
1989 b(failure, CS);
1990 CheckAllocationCanary(instance, temp2);
1991
1992 // Successfully allocated the object(s), now update top to point to
1993 // next object start and initialize the object.
1994 str(end_address, Address(THR, target::Thread::top_offset()));
1995 add(instance, instance, Operand(kHeapObjectTag));
1996 NOT_IN_PRODUCT(LoadImmediate(temp2, instance_size));
1997
1998 // Initialize the tags.
1999 // instance: new object start as a tagged pointer.
2000 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
2001 LoadImmediate(temp2, tags);
2002 str(temp2, FieldAddress(instance, target::Object::tags_offset()));
2003 } else {
2004 b(failure);
2005 }
2006}
2007
2008void Assembler::CopyMemoryWords(Register src,
2009 Register dst,
2010 Register size,
2011 Register temp) {
2012 Label loop, done;
2013 __ cbz(&done, size);
2014 __ Bind(&loop);
2015 __ ldr(temp, Address(src, target::kWordSize, Address::PostIndex));
2016 __ str(temp, Address(dst, target::kWordSize, Address::PostIndex));
2017 __ subs(size, size, Operand(target::kWordSize));
2018 __ b(&loop, NOT_ZERO);
2019 __ Bind(&done);
2020}
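// C equivalent of the loop above (illustrative; size is a byte count that must
// be a multiple of the word size and may be zero):
//
//   while (size != 0) {
//     *dst++ = *src++;                 // word-sized, post-indexed
//     size -= sizeof(uword);
//   }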
2021
2022void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
2023 // Emit "bl <offset>".
2024 EmitUnconditionalBranchOp(BL, 0);
2025
2026 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
2027 PcRelativeCallPattern::kLengthInBytes);
2028 pattern.set_distance(offset_into_target);
2029}
2030
2031void Assembler::GenerateUnRelocatedPcRelativeTailCall(
2032 intptr_t offset_into_target) {
2033 // Emit "b <offset>".
2034 EmitUnconditionalBranchOp(B, 0);
2035 PcRelativeTailCallPattern pattern(buffer_.contents() + buffer_.Size() -
2036 PcRelativeTailCallPattern::kLengthInBytes);
2037 pattern.set_distance(offset_into_target);
2038}
2039
2040bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
2041 bool is_external,
2042 intptr_t cid,
2043 intptr_t index_scale) {
2044 if (!IsSafeSmi(constant)) return false;
2045 const int64_t index = target::SmiValue(constant);
2046 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
2047 if (!Utils::IsInt(32, offset)) {
2048 return false;
2049 }
2050 return Address::CanHoldOffset(static_cast<int32_t>(offset), Address::Offset,
2051 Address::OperandSizeFor(cid));
2052}
2053
2054Address Assembler::ElementAddressForIntIndex(bool is_external,
2055 intptr_t cid,
2056 intptr_t index_scale,
2057 Register array,
2058 intptr_t index) const {
2059 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
2060 ASSERT(Utils::IsInt(32, offset));
2061 const OperandSize size = Address::OperandSizeFor(cid);
2062 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
2063 return Address(array, static_cast<int32_t>(offset));
2064}
2065
2066void Assembler::ComputeElementAddressForIntIndex(Register address,
2067 bool is_external,
2068 intptr_t cid,
2069 intptr_t index_scale,
2070 Register array,
2071 intptr_t index) {
2072 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
2073 AddImmediate(address, array, offset);
2074}
2075
2076Address Assembler::ElementAddressForRegIndex(bool is_external,
2077 intptr_t cid,
2078 intptr_t index_scale,
2079 bool index_unboxed,
2080 Register array,
2081 Register index,
2082 Register temp) {
2083 return ElementAddressForRegIndexWithSize(
2084 is_external, cid, Address::OperandSizeFor(cid), index_scale,
2085 index_unboxed, array, index, temp);
2086}
2087
2088Address Assembler::ElementAddressForRegIndexWithSize(bool is_external,
2089 intptr_t cid,
2090 OperandSize size,
2091 intptr_t index_scale,
2092 bool index_unboxed,
2093 Register array,
2094 Register index,
2095 Register temp) {
2096 // Unless unboxed, the index is expected to be smi-tagged (i.e. LSL 1) for all arrays.
2097 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
2098 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
2099 const int32_t offset = HeapDataOffset(is_external, cid);
2100#if !defined(DART_COMPRESSED_POINTERS)
2101 const bool index_is_32bit = false;
2102#else
2103 const bool index_is_32bit = !index_unboxed;
2104#endif
2105 ASSERT(array != temp);
2106 ASSERT(index != temp);
2107 if ((offset == 0) && (shift == 0)) {
2108 if (index_is_32bit) {
2109 return Address(array, index, SXTW, Address::Unscaled);
2110 } else {
2111 return Address(array, index, UXTX, Address::Unscaled);
2112 }
2113 } else if (shift < 0) {
2114 ASSERT(shift == -1);
2115 if (index_is_32bit) {
2116 AsrImmediate(temp, index, 1, kFourBytes);
2117 add(temp, array, Operand(temp, SXTW, 0));
2118 } else {
2119 add(temp, array, Operand(index, ASR, 1));
2120 }
2121 } else {
2122 if (index_is_32bit) {
2123 add(temp, array, Operand(index, SXTW, shift));
2124 } else {
2125 add(temp, array, Operand(index, LSL, shift));
2126 }
2127 }
2128 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
2129 return Address(temp, offset);
2130}
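// Worked example for the shift arithmetic above (illustrative): with a boxed
// Smi index and index_scale == 4, the register holds index * 2, and
//   shift = log2(4) + (-kSmiTagShift) = 2 - 1 = 1,
// so array + (smi_index << 1) + offset == array + index * 4 + offset; the Smi
// tag is folded into the scaling rather than stripped first. A total shift of
// -1 (1-byte elements with a Smi index) is handled by the ASR #1 branch.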
2131
2132void Assembler::ComputeElementAddressForRegIndex(Register address,
2133 bool is_external,
2134 intptr_t cid,
2135 intptr_t index_scale,
2136 bool index_unboxed,
2137 Register array,
2138 Register index) {
2139 // Unless unboxed, the index is expected to be smi-tagged (i.e. LSL 1) for all arrays.
2140 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
2141 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
2142 const int32_t offset = HeapDataOffset(is_external, cid);
2143#if !defined(DART_COMPRESSED_POINTERS)
2144 const bool index_is_32bit = false;
2145#else
2146 const bool index_is_32bit = !index_unboxed;
2147#endif
2148 if (shift == 0) {
2149 if (index_is_32bit) {
2150 add(address, array, Operand(index, SXTW, 0));
2151 } else {
2152 add(address, array, Operand(index));
2153 }
2154 } else if (shift < 0) {
2155 ASSERT(shift == -1);
2156 if (index_is_32bit) {
2157 sxtw(index, index);
2158 add(address, array, Operand(index, ASR, 1));
2159 } else {
2160 add(address, array, Operand(index, ASR, 1));
2161 }
2162 } else {
2163 if (index_is_32bit) {
2164 add(address, array, Operand(index, SXTW, shift));
2165 } else {
2166 add(address, array, Operand(index, LSL, shift));
2167 }
2168 }
2169 if (offset != 0) {
2170 AddImmediate(address, offset);
2171 }
2172}
2173
2174void Assembler::LoadStaticFieldAddress(Register address,
2175 Register field,
2176 Register scratch,
2177 bool is_shared) {
2178 LoadCompressedSmiFieldFromOffset(
2179 scratch, field, target::Field::host_offset_or_field_id_offset());
2180 const intptr_t field_table_offset =
2181 is_shared ? compiler::target::Thread::shared_field_table_values_offset()
2182 : compiler::target::Thread::field_table_values_offset();
2183 LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
2184 add(address, address,
2185 Operand(scratch, LSL, target::kWordSizeLog2 - kSmiTagShift));
2186}
2187
2188#if defined(DART_COMPRESSED_POINTERS)
2189void Assembler::LoadCompressedFieldAddressForRegOffset(
2190 Register address,
2191 Register instance,
2192 Register offset_in_compressed_words_as_smi) {
2193 add(address, instance,
2194     Operand(offset_in_compressed_words_as_smi, LSL,
2195             target::kCompressedWordSizeLog2 - kSmiTagShift));
2196 AddImmediate(address, -kHeapObjectTag);
2197}
2198#endif
2199
2200 void Assembler::LoadFieldAddressForRegOffset(Register address,
2201                                              Register instance,
2202                                              Register offset_in_words_as_smi) {
2203 add(address, instance,
2204     Operand(offset_in_words_as_smi, LSL,
2205             target::kWordSizeLog2 - kSmiTagShift));
2206 AddImmediate(address, -kHeapObjectTag);
2207}
2208
2209void Assembler::PushRegisters(const RegisterSet& regs) {
2210 VRegister vprev = kNoVRegister;
2211 // Store fpu registers with the lowest register number at the lowest
2212 // address.
2213 for (intptr_t i = kNumberOfVRegisters - 1; i >= 0; --i) {
2214 VRegister fpu_reg = static_cast<VRegister>(i);
2215 if (regs.ContainsFpuRegister(fpu_reg)) {
2216 if (vprev != kNoVRegister) {
2217 PushQuadPair(/*low=*/fpu_reg, /*high=*/vprev);
2218 vprev = kNoVRegister;
2219 } else {
2220 vprev = fpu_reg;
2221 }
2222 }
2223 }
2224 if (vprev != kNoVRegister) {
2225 PushQuad(vprev);
2226 }
2227
2228 // The order in which the registers are pushed must match the order
2229 // in which the registers are encoded in the safe point's stack map.
2230 Register prev = kNoRegister;
2231 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
2232 Register reg = static_cast<Register>(i);
2233 if (regs.ContainsRegister(reg)) {
2234 if (prev != kNoRegister) {
2235 PushPair(/*low=*/reg, /*high=*/prev);
2236 prev = kNoRegister;
2237 } else {
2238 prev = reg;
2239 }
2240 }
2241 }
2242 if (prev != kNoRegister) {
2243 Push(prev);
2244 }
2245}
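// Example of the pairing above (illustrative): for the set {R2, R3, R5} the
// descending walk emits
//   PushPair(/*low=*/R3, /*high=*/R5);   // R3 below R5
//   Push(R2);                            // leftover single register
// leaving the lowest-numbered register at the lowest address, which is the
// order the safepoint stack map expects; PopRegisters mirrors this by popping
// the leftover single register first.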
2246
2247void Assembler::PopRegisters(const RegisterSet& regs) {
2248 bool pop_single = (regs.CpuRegisterCount() & 1) == 1;
2249 Register prev = kNoRegister;
2250 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
2251 Register reg = static_cast<Register>(i);
2252 if (regs.ContainsRegister(reg)) {
2253 if (pop_single) {
2254 // Emit the leftover pop at the beginning instead of the end to
2255 // mirror PushRegisters.
2256 Pop(reg);
2257 pop_single = false;
2258 } else if (prev != kNoRegister) {
2259 PopPair(/*low=*/prev, /*high=*/reg);
2260 prev = kNoRegister;
2261 } else {
2262 prev = reg;
2263 }
2264 }
2265 }
2266 ASSERT(prev == kNoRegister);
2267
2268 pop_single = (regs.FpuRegisterCount() & 1) == 1;
2269 VRegister vprev = kNoVRegister;
2270 // Fpu registers have the lowest register number at the lowest address.
2271 for (intptr_t i = 0; i < kNumberOfVRegisters; ++i) {
2272 VRegister fpu_reg = static_cast<VRegister>(i);
2273 if (regs.ContainsFpuRegister(fpu_reg)) {
2274 if (pop_single) {
2275 PopQuad(fpu_reg);
2276 pop_single = false;
2277 } else if (vprev != kNoVRegister) {
2278 PopQuadPair(/*low=*/vprev, /*high=*/fpu_reg);
2279 vprev = kNoVRegister;
2280 } else {
2281 vprev = fpu_reg;
2282 }
2283 }
2284 }
2285 ASSERT(vprev == kNoVRegister);
2286}
2287
2288void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
2289 // Use STP to push registers in pairs.
2290 Register pending_reg = kNoRegister;
2291 for (Register reg : regs) {
2292 if (pending_reg != kNoRegister) {
2293 PushPair(reg, pending_reg);
2294 pending_reg = kNoRegister;
2295 } else {
2296 pending_reg = reg;
2297 }
2298 }
2299 if (pending_reg != kNoRegister) {
2300 Push(pending_reg);
2301 }
2302}
2303
2304void Assembler::PushNativeCalleeSavedRegisters() {
2305 // Save the callee-saved registers.
2306 // We use str instead of the Push macro because we will be pushing the PP
2307 // register when it is not holding a pool-pointer since we are coming from
2308 // C++ code.
2309 Register prev = kNoRegister;
2310 for (int i = kAbiFirstPreservedCpuReg; i <= kAbiLastPreservedCpuReg; i++) {
2311 const Register r = static_cast<Register>(i);
2312 if (prev != kNoRegister) {
2313 stp(/*low=*/r, /*high=*/prev,
2314 Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
2315 prev = kNoRegister;
2316 } else {
2317 prev = r;
2318 }
2319 }
2320 if (prev != kNoRegister) {
2321 str(prev, Address(SP, -1 * target::kWordSize, Address::PreIndex));
2322 }
2323
2324 // Save the bottom 64-bits of callee-saved V registers.
2325 VRegister vprev = kNoVRegister;
2326 for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) {
2327 const VRegister r = static_cast<VRegister>(i);
2328 if (vprev != kNoVRegister) {
2329 PushDoublePair(/*low=*/r, /*high=*/vprev);
2330 vprev = kNoVRegister;
2331 } else {
2332 vprev = r;
2333 }
2334 }
2335 if (vprev != kNoVRegister) {
2336 PushDouble(vprev);
2337 }
2338}
2339
2340void Assembler::PopNativeCalleeSavedRegisters() {
2341 // Restore the bottom 64-bits of callee-saved V registers.
2342 bool pop_single = (kAbiPreservedFpuRegCount & 1) != 0;
2343 VRegister vprev = kNoVRegister;
2344 for (int i = kAbiLastPreservedFpuReg; i >= kAbiFirstPreservedFpuReg; i--) {
2345 const VRegister r = static_cast<VRegister>(i);
2346 if (pop_single) {
2347 PopDouble(r);
2348 pop_single = false;
2349 } else if (vprev != kNoVRegister) {
2350 PopDoublePair(/*low=*/vprev, /*high=*/r);
2351 vprev = kNoVRegister;
2352 } else {
2353 vprev = r;
2354 }
2355 }
2356
2357 // Restore C++ ABI callee-saved registers.
2358 // We use ldr instead of the Pop macro because we will be popping the PP
2359 // register when it is not holding a pool-pointer since we are returning to
2360 // C++ code. We also skip the dart stack pointer SP, since we are still
2361 // using it as the stack pointer.
2362 pop_single = (kAbiPreservedCpuRegCount & 1) != 0;
2363 Register prev = kNoRegister;
2364 for (int i = kAbiLastPreservedCpuReg; i >= kAbiFirstPreservedCpuReg; i--) {
2365 Register r = static_cast<Register>(i);
2366 if (pop_single) {
2367 ldr(r, Address(SP, 1 * target::kWordSize, Address::PostIndex));
2368 pop_single = false;
2369 } else if (prev != kNoRegister) {
2370 ldp(/*low=*/prev, /*high=*/r,
2371 Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
2372 prev = kNoRegister;
2373 } else {
2374 prev = r;
2375 }
2376 }
2377}
2378
2379bool Assembler::CanGenerateCbzTbz(Register rn, Condition cond) {
2380 if (rn == CSP) {
2381 return false;
2382 }
2383 switch (cond) {
2384 case EQ: // equal
2385 case NE: // not equal
2386 case MI: // minus/negative
2387 case LT: // signed less than
2388 case PL: // plus/positive or zero
2389 case GE: // signed greater than or equal
2390 return true;
2391 default:
2392 return false;
2393 }
2394}
2395
2396void Assembler::GenerateCbzTbz(Register rn,
2397 Condition cond,
2398 Label* label,
2399 OperandSize sz) {
2400 ASSERT((sz == kEightBytes) || (sz == kFourBytes));
2401 const int32_t sign_bit = sz == kEightBytes ? 63 : 31;
2402 ASSERT(rn != CSP);
2403 switch (cond) {
2404 case EQ: // equal
2405 cbz(label, rn, sz);
2406 return;
2407 case NE: // not equal
2408 cbnz(label, rn, sz);
2409 return;
2410 case MI: // minus/negative
2411 case LT: // signed less than
2412 tbnz(label, rn, sign_bit);
2413 return;
2414 case PL: // plus/positive or zero
2415 case GE: // signed greater than or equal
2416 tbz(label, rn, sign_bit);
2417 return;
2418 default:
2419 // Only conditions above allow single instruction emission.
2420 UNREACHABLE();
2421 }
2422}
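// Mapping used above for a comparison against zero (illustrative summary):
//   EQ -> cbz, NE -> cbnz (branch on the whole register),
//   MI/LT -> tbnz on the sign bit (value < 0),
//   PL/GE -> tbz on the sign bit (value >= 0).
// CanGenerateCbzTbz() is the matching predicate; any other condition still
// needs cmp followed by b.cond.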
2423
2424void Assembler::RangeCheck(Register value,
2425 Register temp,
2426 intptr_t low,
2427 intptr_t high,
2428 RangeCheckCondition condition,
2429 Label* target) {
2430 auto cc = condition == kIfInRange ? LS : HI;
2431 Register to_check = temp != kNoRegister ? temp : value;
2432 AddImmediate(to_check, value, -low);
2433 CompareImmediate(to_check, high - low);
2434 b(target, cc);
2435}
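// The check above relies on the standard unsigned-compare range trick
// (illustrative): value is in [low, high] exactly when
//   (uword)(value - low) <= (uword)(high - low),
// so kIfInRange branches on LS (unsigned <=) and kIfNotInRange on HI.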
2436
2437} // namespace compiler
2438
2439} // namespace dart
2440
2441#endif // defined(TARGET_ARCH_ARM64)