assembler_arm.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // NOLINT
6#if defined(TARGET_ARCH_ARM)
7
8#define SHOULD_NOT_INCLUDE_RUNTIME
9
10#include "vm/class_id.h"
11#include "vm/compiler/assembler/assembler.h"
12#include "vm/compiler/backend/locations.h"
13#include "vm/cpu.h"
14#include "vm/instructions.h"
15#include "vm/tags.h"
16
17// An extra check since we are assuming the existence of /proc/cpuinfo below.
18#if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) && \
19 !defined(DART_HOST_OS_IOS) && !defined(DART_HOST_OS_MACOS)
20#error ARM cross-compile only supported on Linux, Android, iOS, and Mac
21#endif
22
23// For use by LR-related macros (e.g. CLOBBERS_LR).
24#define __ this->
25
26namespace dart {
27
28DECLARE_FLAG(bool, check_code_pointer);
29DECLARE_FLAG(bool, precompiled_mode);
30
31namespace compiler {
32
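// Note: the lambdas installed in the constructor below call the write-barrier
// wrappers indirectly, through per-register entry points stored on the current
// Thread (THR), rather than through an address materialized in a register.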
33Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
34 intptr_t far_branch_level)
35 : AssemblerBase(object_pool_builder),
36 use_far_branches_(far_branch_level != 0),
37 constant_pool_allowed_(false) {
38 generate_invoke_write_barrier_wrapper_ = [&](Condition cond, Register reg) {
39 Call(
40 Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)),
41 cond);
42 };
43 generate_invoke_array_write_barrier_ = [&](Condition cond) {
44 Call(Address(THR, target::Thread::array_write_barrier_entry_point_offset()),
45 cond);
46 };
47}
48
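// encoding3() converts a 12-bit-immediate address into ARM addressing mode 3
// form (used by halfword, signed-byte, and doubleword loads/stores): the 8-bit
// offset is split into a high nibble at bits 11:8 and a low nibble at bits
// 3:0, and B22 marks the immediate (rather than register-index) form.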
49uint32_t Address::encoding3() const {
50 if (kind_ == Immediate) {
51 uint32_t offset = encoding_ & kOffset12Mask;
52 ASSERT(offset < 256);
53 return (encoding_ & ~kOffset12Mask) | B22 | ((offset & 0xf0) << 4) |
54 (offset & 0xf);
55 }
56 ASSERT(kind_ == IndexRegister);
57 return encoding_;
58}
59
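// vencoding() produces the VFP load/store (addressing mode 5) form: the byte
// offset must be a multiple of 4 in [0, 1020] and is stored as a word count in
// the low 8 bits, with bit 23 (the U bit) set for positive offsets.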
60uint32_t Address::vencoding() const {
61 ASSERT(kind_ == Immediate);
62 uint32_t offset = encoding_ & kOffset12Mask;
63 ASSERT(offset < (1 << 10)); // In the range 0 to +1020.
64 ASSERT(Utils::IsAligned(offset, 4)); // Multiple of 4.
65 int mode = encoding_ & ((8 | 4 | 1) << 21);
66 ASSERT((mode == Offset) || (mode == NegOffset));
67 uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
68 if (mode == Offset) {
69 vencoding |= 1 << 23;
70 }
71 return vencoding;
72}
73
74void Assembler::Emit(int32_t value) {
75 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
76 buffer_.Emit<int32_t>(value);
77}
78
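// EmitType01 assembles an ARM data-processing instruction: condition in bits
// 31:28, the operand type (immediate vs. shifted register) at bit 25, the
// opcode in bits 24:21, the S (set-condition-flags) bit at bit 20, Rn in
// 19:16, Rd in 15:12, and the shifter operand in the low 12 bits.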
79void Assembler::EmitType01(Condition cond,
80 int type,
81 Opcode opcode,
82 int set_cc,
83 Register rn,
84 Register rd,
85 Operand o) {
86 ASSERT(rd != kNoRegister);
87 ASSERT(cond != kNoCondition);
88 int32_t encoding =
89 static_cast<int32_t>(cond) << kConditionShift | type << kTypeShift |
90 static_cast<int32_t>(opcode) << kOpcodeShift | set_cc << kSShift |
91 ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | o.encoding();
92 Emit(encoding);
93}
94
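// EmitType5 assembles a branch (B/BL): the link bit selects BL, and the target
// is encoded as a signed 24-bit word offset relative to PC, which on ARM reads
// as the address of the branch plus 8.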
95void Assembler::EmitType5(Condition cond, int32_t offset, bool link) {
96 ASSERT(cond != kNoCondition);
97 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
98 5 << kTypeShift | (link ? 1 : 0) << kLinkShift;
99 BailoutIfInvalidBranchOffset(offset);
100 Emit(Assembler::EncodeBranchOffset(offset, encoding));
101}
102
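// EmitMemOp assembles single-register word/byte loads and stores (LDR, STR,
// LDRB, STRB): B26 selects the load/store class, B25 distinguishes a
// register-offset address from a 12-bit immediate one, L selects load, and B
// selects byte-sized access.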
103void Assembler::EmitMemOp(Condition cond,
104 bool load,
105 bool byte,
106 Register rd,
107 Address ad) {
108 ASSERT(rd != kNoRegister);
109 ASSERT(cond != kNoCondition);
110 // Unpredictable, illegal on some microarchitectures.
111 ASSERT(!ad.has_writeback() || (ad.rn() != rd));
112
113 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B26 |
114 (ad.kind() == Address::Immediate ? 0 : B25) |
115 (load ? L : 0) | (byte ? B : 0) | ArmEncode::Rd(rd) |
116 ad.encoding();
117 Emit(encoding);
118}
119
120void Assembler::EmitMemOpAddressMode3(Condition cond,
121 int32_t mode,
122 Register rd,
123 Address ad) {
124 ASSERT(rd != kNoRegister);
125 ASSERT(cond != kNoCondition);
126 // Unpredictable, illegal on some microarchitectures.
127 ASSERT(!ad.has_writeback() || (ad.rn() != rd));
128
129 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | mode |
130 ArmEncode::Rd(rd) | ad.encoding3();
131 Emit(encoding);
132}
133
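// EmitMultiMemOp assembles block transfers (LDM/STM): `regs` is a bitmask of
// the registers to move, and `am` supplies the increment/decrement,
// before/after, and writeback bits. Writeback with the base register in the
// list is unpredictable, hence the assertion below.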
134void Assembler::EmitMultiMemOp(Condition cond,
135 BlockAddressMode am,
136 bool load,
137 Register base,
138 RegList regs) {
139 ASSERT(base != kNoRegister);
140 ASSERT(cond != kNoCondition);
141 // Unpredictable, illegal on some microarchitectures.
142 ASSERT(!Address::has_writeback(am) || !(regs & (1 << base)));
143 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
144 am | (load ? L : 0) | ArmEncode::Rn(base) | regs;
145 Emit(encoding);
146}
147
148void Assembler::EmitShiftImmediate(Condition cond,
149 Shift opcode,
150 Register rd,
151 Register rm,
152 Operand o) {
153 ASSERT(cond != kNoCondition);
154 ASSERT(o.type() == 1);
155 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
156 static_cast<int32_t>(MOV) << kOpcodeShift |
157 ArmEncode::Rd(rd) | o.encoding() << kShiftImmShift |
158 static_cast<int32_t>(opcode) << kShiftShift |
159 static_cast<int32_t>(rm);
160 Emit(encoding);
161}
162
163void Assembler::EmitShiftRegister(Condition cond,
164 Shift opcode,
165 Register rd,
166 Register rm,
167 Operand o) {
168 ASSERT(cond != kNoCondition);
169 ASSERT(o.type() == 0);
170 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
171 static_cast<int32_t>(MOV) << kOpcodeShift |
172 ArmEncode::Rd(rd) | o.encoding() << kShiftRegisterShift |
173 static_cast<int32_t>(opcode) << kShiftShift | B4 |
174 static_cast<int32_t>(rm);
175 Emit(encoding);
176}
177
178void Assembler::and_(Register rd, Register rn, Operand o, Condition cond) {
179 EmitType01(cond, o.type(), AND, 0, rn, rd, o);
180}
181
182void Assembler::ands(Register rd, Register rn, Operand o, Condition cond) {
183 EmitType01(cond, o.type(), AND, 1, rn, rd, o);
184}
185
186void Assembler::eor(Register rd, Register rn, Operand o, Condition cond) {
187 EmitType01(cond, o.type(), EOR, 0, rn, rd, o);
188}
189
190void Assembler::sub(Register rd, Register rn, Operand o, Condition cond) {
191 EmitType01(cond, o.type(), SUB, 0, rn, rd, o);
192}
193
194void Assembler::rsb(Register rd, Register rn, Operand o, Condition cond) {
195 EmitType01(cond, o.type(), RSB, 0, rn, rd, o);
196}
197
198void Assembler::rsbs(Register rd, Register rn, Operand o, Condition cond) {
199 EmitType01(cond, o.type(), RSB, 1, rn, rd, o);
200}
201
202void Assembler::add(Register rd, Register rn, Operand o, Condition cond) {
203 EmitType01(cond, o.type(), ADD, 0, rn, rd, o);
204}
205
206void Assembler::adds(Register rd, Register rn, Operand o, Condition cond) {
207 EmitType01(cond, o.type(), ADD, 1, rn, rd, o);
208}
209
210void Assembler::subs(Register rd, Register rn, Operand o, Condition cond) {
211 EmitType01(cond, o.type(), SUB, 1, rn, rd, o);
212}
213
214void Assembler::adc(Register rd, Register rn, Operand o, Condition cond) {
215 EmitType01(cond, o.type(), ADC, 0, rn, rd, o);
216}
217
218void Assembler::adcs(Register rd, Register rn, Operand o, Condition cond) {
219 EmitType01(cond, o.type(), ADC, 1, rn, rd, o);
220}
221
222void Assembler::sbc(Register rd, Register rn, Operand o, Condition cond) {
223 EmitType01(cond, o.type(), SBC, 0, rn, rd, o);
224}
225
226void Assembler::sbcs(Register rd, Register rn, Operand o, Condition cond) {
227 EmitType01(cond, o.type(), SBC, 1, rn, rd, o);
228}
229
230void Assembler::rsc(Register rd, Register rn, Operand o, Condition cond) {
231 EmitType01(cond, o.type(), RSC, 0, rn, rd, o);
232}
233
234void Assembler::tst(Register rn, Operand o, Condition cond) {
235 EmitType01(cond, o.type(), TST, 1, rn, R0, o);
236}
237
238void Assembler::teq(Register rn, Operand o, Condition cond) {
239 EmitType01(cond, o.type(), TEQ, 1, rn, R0, o);
240}
241
242void Assembler::cmp(Register rn, Operand o, Condition cond) {
243 EmitType01(cond, o.type(), CMP, 1, rn, R0, o);
244}
245
246void Assembler::cmn(Register rn, Operand o, Condition cond) {
247 EmitType01(cond, o.type(), CMN, 1, rn, R0, o);
248}
249
250void Assembler::orr(Register rd, Register rn, Operand o, Condition cond) {
251 EmitType01(cond, o.type(), ORR, 0, rn, rd, o);
252}
253
254void Assembler::orrs(Register rd, Register rn, Operand o, Condition cond) {
255 EmitType01(cond, o.type(), ORR, 1, rn, rd, o);
256}
257
258void Assembler::mov(Register rd, Operand o, Condition cond) {
259 EmitType01(cond, o.type(), MOV, 0, R0, rd, o);
260}
261
262void Assembler::movs(Register rd, Operand o, Condition cond) {
263 EmitType01(cond, o.type(), MOV, 1, R0, rd, o);
264}
265
266void Assembler::bic(Register rd, Register rn, Operand o, Condition cond) {
267 EmitType01(cond, o.type(), BIC, 0, rn, rd, o);
268}
269
270void Assembler::bics(Register rd, Register rn, Operand o, Condition cond) {
271 EmitType01(cond, o.type(), BIC, 1, rn, rd, o);
272}
273
274void Assembler::mvn_(Register rd, Operand o, Condition cond) {
275 EmitType01(cond, o.type(), MVN, 0, R0, rd, o);
276}
277
278void Assembler::mvns(Register rd, Operand o, Condition cond) {
279 EmitType01(cond, o.type(), MVN, 1, R0, rd, o);
280}
281
282void Assembler::clz(Register rd, Register rm, Condition cond) {
283 ASSERT(rd != kNoRegister);
284 ASSERT(rm != kNoRegister);
285 ASSERT(cond != kNoCondition);
286 ASSERT(rd != PC);
287 ASSERT(rm != PC);
288 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
289 B22 | B21 | (0xf << 16) | ArmEncode::Rd(rd) | (0xf << 8) |
290 B4 | static_cast<int32_t>(rm);
291 Emit(encoding);
292}
293
294void Assembler::rbit(Register rd, Register rm, Condition cond) {
295 ASSERT(rd != kNoRegister);
296 ASSERT(rm != kNoRegister);
297 ASSERT(cond != kNoCondition);
298 ASSERT(rd != PC);
299 ASSERT(rm != PC);
300 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B26 |
301 B25 | B23 | B22 | B21 | B20 | (0xf << 16) |
302 ArmEncode::Rd(rd) | (0xf << 8) | B5 | B4 |
303 static_cast<int32_t>(rm);
304 Emit(encoding);
305}
306
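// movw/movt each take a 16-bit immediate split into imm4 (bits 19:16) and
// imm12 (bits 11:0). movw sets the low half of rd and clears the high half;
// movt overwrites only the high half, so the pair materializes any 32-bit
// constant. Illustrative sketch (assuming the default AL condition):
//   movw(R1, 0xBEEF);  // R1 = 0x0000BEEF
//   movt(R1, 0xDEAD);  // R1 = 0xDEADBEEF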
307void Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
308 ASSERT(cond != kNoCondition);
309 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 |
310 ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) |
311 (imm16 & 0xfff);
312 Emit(encoding);
313}
314
315void Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
316 ASSERT(cond != kNoCondition);
317 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 |
318 B22 | ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) |
319 (imm16 & 0xfff);
320 Emit(encoding);
321}
322
323void Assembler::EmitMulOp(Condition cond,
324 int32_t opcode,
325 Register rd,
326 Register rn,
327 Register rm,
328 Register rs) {
329 ASSERT(rd != kNoRegister);
330 ASSERT(rn != kNoRegister);
331 ASSERT(rm != kNoRegister);
332 ASSERT(rs != kNoRegister);
333 ASSERT(cond != kNoCondition);
334 int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) |
335 ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | ArmEncode::Rs(rs) |
336 B7 | B4 | ArmEncode::Rm(rm);
337 Emit(encoding);
338}
339
340void Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
341 // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
342 EmitMulOp(cond, 0, R0, rd, rn, rm);
343}
344
345// Like mul, but sets condition flags.
346void Assembler::muls(Register rd, Register rn, Register rm, Condition cond) {
347 EmitMulOp(cond, B20, R0, rd, rn, rm);
348}
349
350void Assembler::mla(Register rd,
351 Register rn,
352 Register rm,
353 Register ra,
354 Condition cond) {
355 // rd <- ra + rn * rm.
356 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
357 EmitMulOp(cond, B21, ra, rd, rn, rm);
358}
359
360void Assembler::mls(Register rd,
361 Register rn,
362 Register rm,
363 Register ra,
364 Condition cond) {
365 // rd <- ra - rn * rm.
366 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
367 EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
368}
369
370void Assembler::smull(Register rd_lo,
371 Register rd_hi,
372 Register rn,
373 Register rm,
374 Condition cond) {
375 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
376 EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm);
377}
378
379void Assembler::umull(Register rd_lo,
380 Register rd_hi,
381 Register rn,
382 Register rm,
383 Condition cond) {
384 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
385 EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
386}
387
388void Assembler::umlal(Register rd_lo,
389 Register rd_hi,
390 Register rn,
391 Register rm,
392 Condition cond) {
393 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
394 EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm);
395}
396
397void Assembler::umaal(Register rd_lo,
398 Register rd_hi,
399 Register rn,
400 Register rm) {
401 ASSERT(rd_lo != IP);
402 ASSERT(rd_hi != IP);
403 ASSERT(rn != IP);
404 ASSERT(rm != IP);
405 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
406 EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm);
407}
408
409void Assembler::EmitDivOp(Condition cond,
410 int32_t opcode,
411 Register rd,
412 Register rn,
413 Register rm) {
414 ASSERT(TargetCPUFeatures::integer_division_supported());
415 ASSERT(rd != kNoRegister);
416 ASSERT(rn != kNoRegister);
417 ASSERT(rm != kNoRegister);
418 ASSERT(cond != kNoCondition);
419 int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) |
420 (static_cast<int32_t>(rn) << kDivRnShift) |
421 (static_cast<int32_t>(rd) << kDivRdShift) | B26 | B25 |
422 B24 | B20 | B15 | B14 | B13 | B12 | B4 |
423 (static_cast<int32_t>(rm) << kDivRmShift);
424 Emit(encoding);
425}
426
427void Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
428 EmitDivOp(cond, 0, rd, rn, rm);
429}
430
431void Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
432 EmitDivOp(cond, B21, rd, rn, rm);
433}
434
435void Assembler::ldr(Register rd, Address ad, Condition cond) {
436 EmitMemOp(cond, true, false, rd, ad);
437}
438
439void Assembler::str(Register rd, Address ad, Condition cond) {
440 EmitMemOp(cond, false, false, rd, ad);
441}
442
443void Assembler::ldrb(Register rd, Address ad, Condition cond) {
444 EmitMemOp(cond, true, true, rd, ad);
445}
446
447void Assembler::strb(Register rd, Address ad, Condition cond) {
448 EmitMemOp(cond, false, true, rd, ad);
449}
450
451void Assembler::ldrh(Register rd, Address ad, Condition cond) {
452 EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
453}
454
455void Assembler::strh(Register rd, Address ad, Condition cond) {
456 EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
457}
458
459void Assembler::ldrsb(Register rd, Address ad, Condition cond) {
460 EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
461}
462
463void Assembler::ldrsh(Register rd, Address ad, Condition cond) {
464 EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
465}
466
467void Assembler::ldrd(Register rd,
468 Register rd2,
469 Register rn,
470 int32_t offset,
471 Condition cond) {
472 ASSERT((rd % 2) == 0);
473 ASSERT(rd2 == rd + 1);
474 EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset));
475}
476
477void Assembler::strd(Register rd,
478 Register rd2,
479 Register rn,
480 int32_t offset,
481 Condition cond) {
482 ASSERT((rd % 2) == 0);
483 ASSERT(rd2 == rd + 1);
484 EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset));
485}
486
487void Assembler::ldm(BlockAddressMode am,
488 Register base,
489 RegList regs,
490 Condition cond) {
491 ASSERT(regs != 0);
492 EmitMultiMemOp(cond, am, true, base, regs);
493}
494
495void Assembler::stm(BlockAddressMode am,
496 Register base,
497 RegList regs,
498 Condition cond) {
499 ASSERT(regs != 0);
500 EmitMultiMemOp(cond, am, false, base, regs);
501}
502
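// ldrex/strex are ARM's exclusive (load-linked/store-conditional) accesses:
// ldrex marks the address for exclusive access, and the matching strex writes
// 0 to its destination register on success or 1 if the reservation was lost.
// The safepoint code below uses them to build compare-and-swap loops.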
503void Assembler::ldrex(Register rt, Register rn, Condition cond) {
504 ASSERT(rn != kNoRegister);
505 ASSERT(rt != kNoRegister);
506 ASSERT(cond != kNoCondition);
507 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
508 B23 | L | (static_cast<int32_t>(rn) << kLdrExRnShift) |
509 (static_cast<int32_t>(rt) << kLdrExRtShift) | B11 | B10 |
510 B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
511 Emit(encoding);
512}
513
514void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) {
515 ASSERT(rn != kNoRegister);
516 ASSERT(rd != kNoRegister);
517 ASSERT(rt != kNoRegister);
518 ASSERT(cond != kNoCondition);
519 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
520 B23 | (static_cast<int32_t>(rn) << kStrExRnShift) |
521 (static_cast<int32_t>(rd) << kStrExRdShift) | B11 | B10 |
522 B9 | B8 | B7 | B4 |
523 (static_cast<int32_t>(rt) << kStrExRtShift);
524 Emit(encoding);
525}
526
527void Assembler::dmb() {
528 // Emit a `dmb ish` instruction.
529 Emit(kDataMemoryBarrier);
530}
531
532static int32_t BitFieldExtractEncoding(bool sign_extend,
533 Register rd,
534 Register rn,
535 int32_t lsb,
536 int32_t width,
537 Condition cond) {
538 ASSERT(rn != kNoRegister && rn != PC);
539 ASSERT(rd != kNoRegister && rd != PC);
540 ASSERT(cond != kNoCondition);
541 ASSERT(Utils::IsUint(kBitFieldExtractLSBBits, lsb));
542 ASSERT(width >= 1);
543 ASSERT(lsb + width <= kBitsPerInt32);
544 const int32_t widthm1 = width - 1;
545 ASSERT(Utils::IsUint(kBitFieldExtractWidthBits, widthm1));
546 return (static_cast<int32_t>(cond) << kConditionShift) | B26 | B25 | B24 |
547 B23 | (sign_extend ? 0 : B22) | B21 |
548 (widthm1 << kBitFieldExtractWidthShift) |
549 (static_cast<int32_t>(rd) << kRdShift) |
550 (lsb << kBitFieldExtractLSBShift) | B6 | B4 |
551 (static_cast<int32_t>(rn) << kBitFieldExtractRnShift);
552}
553
554void Assembler::sbfx(Register rd,
555 Register rn,
556 int32_t lsb,
557 int32_t width,
558 Condition cond) {
559 const bool sign_extend = true;
560 Emit(BitFieldExtractEncoding(sign_extend, rd, rn, lsb, width, cond));
561}
562
563void Assembler::ubfx(Register rd,
564 Register rn,
565 int32_t lsb,
566 int32_t width,
567 Condition cond) {
568 const bool sign_extend = false;
569 Emit(BitFieldExtractEncoding(sign_extend, rd, rn, lsb, width, cond));
570}
571
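// EnterFullSafepoint atomically flips the thread's safepoint state from
// "unacquired" to "acquired" with an ldrex/strex retry loop; if the state is
// not the expected value (or FLAG_use_slow_path is set), it calls the
// enter-safepoint stub instead.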
572void Assembler::EnterFullSafepoint(Register addr, Register state) {
573 // We generate the same number of instructions whether or not the slow-path is
574 // forced. This simplifies GenerateJitCallbackTrampolines.
575 Label slow_path, done, retry;
576 if (FLAG_use_slow_path) {
577 b(&slow_path);
578 }
579
580 LoadImmediate(addr, target::Thread::safepoint_state_offset());
581 add(addr, THR, Operand(addr));
582 Bind(&retry);
583 ldrex(state, addr);
584 cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));
585 b(&slow_path, NE);
586
587 mov(state, Operand(target::Thread::full_safepoint_state_acquired()));
588 strex(TMP, state, addr);
589 cmp(TMP, Operand(0)); // 0 means strex was successful.
590 b(&done, EQ);
591
592 if (!FLAG_use_slow_path) {
593 b(&retry);
594 }
595
596 Bind(&slow_path);
597 ldr(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
598 ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
599 blx(TMP);
600
601 Bind(&done);
602}
603
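// TransitionGeneratedToNative records the exit frame so the stack can still be
// walked, tags the thread with the native destination and the "native"
// execution state, and optionally enters a full safepoint so GC can proceed
// while the thread runs native code.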
604void Assembler::TransitionGeneratedToNative(Register destination_address,
605 Register exit_frame_fp,
606 Register exit_through_ffi,
607 Register tmp1,
608 bool enter_safepoint) {
609 // Save exit frame information to enable stack walking.
610 StoreToOffset(exit_frame_fp, THR,
611 target::Thread::top_exit_frame_info_offset());
612
613 StoreToOffset(exit_through_ffi, THR,
614 target::Thread::exit_through_ffi_offset());
615 Register tmp2 = exit_through_ffi;
616
617 // Mark that the thread is executing native code.
618 StoreToOffset(destination_address, THR, target::Thread::vm_tag_offset());
619 LoadImmediate(tmp1, target::Thread::native_execution_state());
620 StoreToOffset(tmp1, THR, target::Thread::execution_state_offset());
621
622 if (enter_safepoint) {
623 EnterFullSafepoint(tmp1, tmp2);
624 }
625}
626
627void Assembler::ExitFullSafepoint(Register tmp1,
628 Register tmp2,
629 bool ignore_unwind_in_progress) {
630 Register addr = tmp1;
631 Register state = tmp2;
632
633 // We generate the same number of instructions whether or not the slow-path is
634 // forced, for consistency with EnterFullSafepoint.
635 Label slow_path, done, retry;
636 if (FLAG_use_slow_path) {
637 b(&slow_path);
638 }
639
640 LoadImmediate(addr, target::Thread::safepoint_state_offset());
641 add(addr, THR, Operand(addr));
642 Bind(&retry);
643 ldrex(state, addr);
644 cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));
645 b(&slow_path, NE);
646
647 mov(state, Operand(target::Thread::full_safepoint_state_unacquired()));
648 strex(TMP, state, addr);
649 cmp(TMP, Operand(0)); // 0 means strex was successful.
650 b(&done, EQ);
651
652 if (!FLAG_use_slow_path) {
653 b(&retry);
654 }
655
656 Bind(&slow_path);
657 if (ignore_unwind_in_progress) {
658 ldr(TMP,
659 Address(THR,
660 target::Thread::
661 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
662 } else {
663 ldr(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
664 }
665 ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
666 blx(TMP);
667
668 Bind(&done);
669}
670
671void Assembler::TransitionNativeToGenerated(Register addr,
672 Register state,
673 bool exit_safepoint,
674 bool ignore_unwind_in_progress) {
675 if (exit_safepoint) {
676 ExitFullSafepoint(addr, state, ignore_unwind_in_progress);
677 } else {
678 // The ignore_unwind_in_progress flag only makes sense when leaving a safepoint.
679 ASSERT(!ignore_unwind_in_progress);
680#if defined(DEBUG)
681 // Ensure we've already left the safepoint.
682 ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
683 LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
684 ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
685 ands(TMP, TMP, Operand(state));
686 Label ok;
687 b(&ok, ZERO);
688 Breakpoint();
689 Bind(&ok);
690#endif
691 }
692
693 // Mark that the thread is executing Dart code.
694 LoadImmediate(state, target::Thread::vm_tag_dart_id());
695 StoreToOffset(state, THR, target::Thread::vm_tag_offset());
696 LoadImmediate(state, target::Thread::generated_execution_state());
697 StoreToOffset(state, THR, target::Thread::execution_state_offset());
698
699 // Reset exit frame information in Isolate's mutator thread structure.
700 LoadImmediate(state, 0);
701 StoreToOffset(state, THR, target::Thread::top_exit_frame_info_offset());
702 StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
703}
704
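// clrex clears any outstanding exclusive-access reservation made by ldrex. The
// instruction is unconditional, so it is emitted with the special 0b1111
// condition field.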
705void Assembler::clrex() {
706 int32_t encoding = (kSpecialCondition << kConditionShift) | B26 | B24 | B22 |
707 B21 | B20 | (0xff << 12) | B4 | 0xf;
708 Emit(encoding);
709}
710
711void Assembler::nop(Condition cond) {
712 ASSERT(cond != kNoCondition);
713 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B25 |
714 B24 | B21 | (0xf << 12);
715 Emit(encoding);
716}
717
718void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
719 ASSERT(sn != kNoSRegister);
720 ASSERT(rt != kNoRegister);
721 ASSERT(rt != SP);
722 ASSERT(rt != PC);
723 ASSERT(cond != kNoCondition);
724 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
725 B26 | B25 | ((static_cast<int32_t>(sn) >> 1) * B16) |
726 (static_cast<int32_t>(rt) * B12) | B11 | B9 |
727 ((static_cast<int32_t>(sn) & 1) * B7) | B4;
728 Emit(encoding);
729}
730
731void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
732 ASSERT(sn != kNoSRegister);
733 ASSERT(rt != kNoRegister);
734 ASSERT(rt != SP);
735 ASSERT(rt != PC);
736 ASSERT(cond != kNoCondition);
737 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
738 B26 | B25 | B20 | ((static_cast<int32_t>(sn) >> 1) * B16) |
739 (static_cast<int32_t>(rt) * B12) | B11 | B9 |
740 ((static_cast<int32_t>(sn) & 1) * B7) | B4;
741 Emit(encoding);
742}
743
744void Assembler::vmovsrr(SRegister sm,
745 Register rt,
746 Register rt2,
747 Condition cond) {
748 ASSERT(sm != kNoSRegister);
749 ASSERT(sm != S31);
750 ASSERT(rt != kNoRegister);
751 ASSERT(rt != SP);
752 ASSERT(rt != PC);
753 ASSERT(rt2 != kNoRegister);
754 ASSERT(rt2 != SP);
755 ASSERT(rt2 != PC);
756 ASSERT(cond != kNoCondition);
757 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
758 B26 | B22 | (static_cast<int32_t>(rt2) * B16) |
759 (static_cast<int32_t>(rt) * B12) | B11 | B9 |
760 ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
761 (static_cast<int32_t>(sm) >> 1);
762 Emit(encoding);
763}
764
765void Assembler::vmovrrs(Register rt,
766 Register rt2,
767 SRegister sm,
768 Condition cond) {
769 ASSERT(sm != kNoSRegister);
770 ASSERT(sm != S31);
771 ASSERT(rt != kNoRegister);
772 ASSERT(rt != SP);
773 ASSERT(rt != PC);
774 ASSERT(rt2 != kNoRegister);
775 ASSERT(rt2 != SP);
776 ASSERT(rt2 != PC);
777 ASSERT(rt != rt2);
778 ASSERT(cond != kNoCondition);
779 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
780 B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) |
781 (static_cast<int32_t>(rt) * B12) | B11 | B9 |
782 ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
783 (static_cast<int32_t>(sm) >> 1);
784 Emit(encoding);
785}
786
787void Assembler::vmovdr(DRegister dn, int i, Register rt, Condition cond) {
788 ASSERT((i == 0) || (i == 1));
789 ASSERT(rt != kNoRegister);
790 ASSERT(rt != SP);
791 ASSERT(rt != PC);
792 ASSERT(dn != kNoDRegister);
793 ASSERT(cond != kNoCondition);
794 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
795 B26 | B25 | (i * B21) | (static_cast<int32_t>(rt) * B12) |
796 B11 | B9 | B8 | ((static_cast<int32_t>(dn) >> 4) * B7) |
797 ((static_cast<int32_t>(dn) & 0xf) * B16) | B4;
798 Emit(encoding);
799}
800
801void Assembler::vmovdrr(DRegister dm,
802 Register rt,
803 Register rt2,
804 Condition cond) {
805 ASSERT(dm != kNoDRegister);
806 ASSERT(rt != kNoRegister);
807 ASSERT(rt != SP);
808 ASSERT(rt != PC);
809 ASSERT(rt2 != kNoRegister);
810 ASSERT(rt2 != SP);
811 ASSERT(rt2 != PC);
812 ASSERT(cond != kNoCondition);
813 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
814 B26 | B22 | (static_cast<int32_t>(rt2) * B16) |
815 (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 |
816 ((static_cast<int32_t>(dm) >> 4) * B5) | B4 |
817 (static_cast<int32_t>(dm) & 0xf);
818 Emit(encoding);
819}
820
821void Assembler::vmovrrd(Register rt,
822 Register rt2,
823 DRegister dm,
824 Condition cond) {
825 ASSERT(dm != kNoDRegister);
826 ASSERT(rt != kNoRegister);
827 ASSERT(rt != SP);
828 ASSERT(rt != PC);
829 ASSERT(rt2 != kNoRegister);
830 ASSERT(rt2 != SP);
831 ASSERT(rt2 != PC);
832 ASSERT(rt != rt2);
833 ASSERT(cond != kNoCondition);
834 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
835 B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) |
836 (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 |
837 ((static_cast<int32_t>(dm) >> 4) * B5) | B4 |
838 (static_cast<int32_t>(dm) & 0xf);
839 Emit(encoding);
840}
841
842void Assembler::vldrs(SRegister sd, Address ad, Condition cond) {
843 ASSERT(sd != kNoSRegister);
844 ASSERT(cond != kNoCondition);
845 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
846 B26 | B24 | B20 | ((static_cast<int32_t>(sd) & 1) * B22) |
847 ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 |
848 ad.vencoding();
849 Emit(encoding);
850}
851
852void Assembler::vstrs(SRegister sd, Address ad, Condition cond) {
853 ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
854 ASSERT(sd != kNoSRegister);
855 ASSERT(cond != kNoCondition);
856 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
857 B26 | B24 | ((static_cast<int32_t>(sd) & 1) * B22) |
858 ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 |
859 ad.vencoding();
860 Emit(encoding);
861}
862
863void Assembler::vldrd(DRegister dd, Address ad, Condition cond) {
864 ASSERT(dd != kNoDRegister);
865 ASSERT(cond != kNoCondition);
866 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
867 B26 | B24 | B20 | ((static_cast<int32_t>(dd) >> 4) * B22) |
868 ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 |
869 ad.vencoding();
870 Emit(encoding);
871}
872
873void Assembler::vstrd(DRegister dd, Address ad, Condition cond) {
874 ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
875 ASSERT(dd != kNoDRegister);
876 ASSERT(cond != kNoCondition);
877 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
878 B26 | B24 | ((static_cast<int32_t>(dd) >> 4) * B22) |
879 ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 |
880 ad.vencoding();
881 Emit(encoding);
882}
883
884void Assembler::EmitMultiVSMemOp(Condition cond,
885 BlockAddressMode am,
886 bool load,
887 Register base,
888 SRegister start,
889 uint32_t count) {
890 ASSERT(base != kNoRegister);
891 ASSERT(cond != kNoCondition);
892 ASSERT(start != kNoSRegister);
893 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfSRegisters);
894
895 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
896 B26 | B11 | B9 | am | (load ? L : 0) |
897 ArmEncode::Rn(base) |
898 ((static_cast<int32_t>(start) & 0x1) != 0 ? D : 0) |
899 ((static_cast<int32_t>(start) >> 1) << 12) | count;
900 Emit(encoding);
901}
902
903void Assembler::EmitMultiVDMemOp(Condition cond,
904 BlockAddressMode am,
905 bool load,
906 Register base,
907 DRegister start,
908 int32_t count) {
909 ASSERT(base != kNoRegister);
910 ASSERT(cond != kNoCondition);
911 ASSERT(start != kNoDRegister);
912 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfDRegisters);
913 const int notArmv5te = 0;
914
915 int32_t encoding =
916 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B11 | B9 |
917 B8 | am | (load ? L : 0) | ArmEncode::Rn(base) |
918 ((static_cast<int32_t>(start) & 0x10) != 0 ? D : 0) |
919 ((static_cast<int32_t>(start) & 0xf) << 12) | (count << 1) | notArmv5te;
920 Emit(encoding);
921}
922
923void Assembler::vldms(BlockAddressMode am,
924 Register base,
925 SRegister first,
926 SRegister last,
927 Condition cond) {
928 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
929 ASSERT(last > first);
930 EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1);
931}
932
933void Assembler::vstms(BlockAddressMode am,
934 Register base,
935 SRegister first,
936 SRegister last,
937 Condition cond) {
938 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
939 ASSERT(last > first);
940 EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1);
941}
942
943void Assembler::vldmd(BlockAddressMode am,
944 Register base,
945 DRegister first,
946 intptr_t count,
947 Condition cond) {
948 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
949 ASSERT(count <= 16);
950 ASSERT(first + count <= kNumberOfDRegisters);
951 EmitMultiVDMemOp(cond, am, true, base, first, count);
952}
953
954void Assembler::vstmd(BlockAddressMode am,
955 Register base,
956 DRegister first,
957 intptr_t count,
958 Condition cond) {
959 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
960 ASSERT(count <= 16);
961 ASSERT(first + count <= kNumberOfDRegisters);
962 EmitMultiVDMemOp(cond, am, false, base, first, count);
963}
964
965void Assembler::EmitVFPsss(Condition cond,
966 int32_t opcode,
967 SRegister sd,
968 SRegister sn,
969 SRegister sm) {
970 ASSERT(sd != kNoSRegister);
971 ASSERT(sn != kNoSRegister);
972 ASSERT(sm != kNoSRegister);
973 ASSERT(cond != kNoCondition);
974 int32_t encoding =
975 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
976 B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) |
977 ((static_cast<int32_t>(sn) >> 1) * B16) |
978 ((static_cast<int32_t>(sd) >> 1) * B12) |
979 ((static_cast<int32_t>(sn) & 1) * B7) |
980 ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1);
981 Emit(encoding);
982}
983
984void Assembler::EmitVFPddd(Condition cond,
985 int32_t opcode,
986 DRegister dd,
987 DRegister dn,
988 DRegister dm) {
989 ASSERT(dd != kNoDRegister);
990 ASSERT(dn != kNoDRegister);
991 ASSERT(dm != kNoDRegister);
992 ASSERT(cond != kNoCondition);
993 int32_t encoding =
994 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
995 B9 | B8 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) |
996 ((static_cast<int32_t>(dn) & 0xf) * B16) |
997 ((static_cast<int32_t>(dd) & 0xf) * B12) |
998 ((static_cast<int32_t>(dn) >> 4) * B7) |
999 ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
1000 Emit(encoding);
1001}
1002
1003void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
1004 EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
1005}
1006
1007void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
1008 EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
1009}
1010
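// This overload only succeeds when the value fits VFP's 8-bit "modified
// immediate" form (a sign bit, a narrow exponent window, and four fraction
// bits); it returns false otherwise so callers can fall back to loading the
// constant from memory.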
1011bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
1012 uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
1013 if (((imm32 & ((1 << 19) - 1)) == 0) &&
1014 ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
1015 (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
1016 uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
1017 ((imm32 >> 19) & ((1 << 6) - 1));
1018 EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | (imm8 & 0xf), sd,
1019 S0, S0);
1020 return true;
1021 }
1022 return false;
1023}
1024
1025bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
1026 uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
1027 if (((imm64 & ((1LL << 48) - 1)) == 0) &&
1028 ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
1029 (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
1030 uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
1031 ((imm64 >> 48) & ((1 << 6) - 1));
1032 EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | B8 | (imm8 & 0xf),
1033 dd, D0, D0);
1034 return true;
1035 }
1036 return false;
1037}
1038
1039void Assembler::vadds(SRegister sd,
1040 SRegister sn,
1041 SRegister sm,
1042 Condition cond) {
1043 EmitVFPsss(cond, B21 | B20, sd, sn, sm);
1044}
1045
1046void Assembler::vaddd(DRegister dd,
1047 DRegister dn,
1048 DRegister dm,
1049 Condition cond) {
1050 EmitVFPddd(cond, B21 | B20, dd, dn, dm);
1051}
1052
1053void Assembler::vsubs(SRegister sd,
1054 SRegister sn,
1055 SRegister sm,
1056 Condition cond) {
1057 EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
1058}
1059
1060void Assembler::vsubd(DRegister dd,
1061 DRegister dn,
1062 DRegister dm,
1063 Condition cond) {
1064 EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
1065}
1066
1067void Assembler::vmuls(SRegister sd,
1068 SRegister sn,
1069 SRegister sm,
1070 Condition cond) {
1071 EmitVFPsss(cond, B21, sd, sn, sm);
1072}
1073
1074void Assembler::vmuld(DRegister dd,
1075 DRegister dn,
1076 DRegister dm,
1077 Condition cond) {
1078 EmitVFPddd(cond, B21, dd, dn, dm);
1079}
1080
1081void Assembler::vmlas(SRegister sd,
1082 SRegister sn,
1083 SRegister sm,
1084 Condition cond) {
1085 EmitVFPsss(cond, 0, sd, sn, sm);
1086}
1087
1088void Assembler::vmlad(DRegister dd,
1089 DRegister dn,
1090 DRegister dm,
1091 Condition cond) {
1092 EmitVFPddd(cond, 0, dd, dn, dm);
1093}
1094
1095void Assembler::vmlss(SRegister sd,
1096 SRegister sn,
1097 SRegister sm,
1098 Condition cond) {
1099 EmitVFPsss(cond, B6, sd, sn, sm);
1100}
1101
1102void Assembler::vmlsd(DRegister dd,
1103 DRegister dn,
1104 DRegister dm,
1105 Condition cond) {
1106 EmitVFPddd(cond, B6, dd, dn, dm);
1107}
1108
1109void Assembler::vdivs(SRegister sd,
1110 SRegister sn,
1111 SRegister sm,
1112 Condition cond) {
1113 EmitVFPsss(cond, B23, sd, sn, sm);
1114}
1115
1116void Assembler::vdivd(DRegister dd,
1117 DRegister dn,
1118 DRegister dm,
1119 Condition cond) {
1120 EmitVFPddd(cond, B23, dd, dn, dm);
1121}
1122
1123void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
1124 EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
1125}
1126
1127void Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
1128 EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
1129}
1130
1131void Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
1132 EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
1133}
1134
1135void Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
1136 EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
1137}
1138
1139void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
1140 EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
1141}
1142
1143void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
1144 EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
1145}
1146
1147void Assembler::EmitVFPsd(Condition cond,
1148 int32_t opcode,
1149 SRegister sd,
1150 DRegister dm) {
1151 ASSERT(sd != kNoSRegister);
1152 ASSERT(dm != kNoDRegister);
1153 ASSERT(cond != kNoCondition);
1154 int32_t encoding =
1155 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
1156 B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) |
1157 ((static_cast<int32_t>(sd) >> 1) * B12) |
1158 ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
1159 Emit(encoding);
1160}
1161
1162void Assembler::EmitVFPds(Condition cond,
1163 int32_t opcode,
1164 DRegister dd,
1165 SRegister sm) {
1166 ASSERT(dd != kNoDRegister);
1167 ASSERT(sm != kNoSRegister);
1168 ASSERT(cond != kNoCondition);
1169 int32_t encoding =
1170 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
1171 B9 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) |
1172 ((static_cast<int32_t>(dd) & 0xf) * B12) |
1173 ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1);
1174 Emit(encoding);
1175}
1176
1177void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
1178 EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
1179}
1180
1181void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
1182 EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
1183}
1184
1185void Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
1186 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
1187}
1188
1189void Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
1190 EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
1191}
1192
1193void Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
1194 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
1195}
1196
1197void Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
1198 EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
1199}
1200
1201void Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
1202 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
1203}
1204
1205void Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
1206 EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
1207}
1208
1209void Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
1210 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
1211}
1212
1213void Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
1214 EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
1215}
1216
1217void Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
1218 EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
1219}
1220
1221void Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
1222 EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
1223}
1224
1225void Assembler::vcmpsz(SRegister sd, Condition cond) {
1226 EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
1227}
1228
1229void Assembler::vcmpdz(DRegister dd, Condition cond) {
1230 EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
1231}
1232
1233void Assembler::vmrs(Register rd, Condition cond) {
1234 ASSERT(cond != kNoCondition);
1235 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
1236 B26 | B25 | B23 | B22 | B21 | B20 | B16 |
1237 (static_cast<int32_t>(rd) * B12) | B11 | B9 | B4;
1238 Emit(encoding);
1239}
1240
1241void Assembler::vmstat(Condition cond) {
1242 vmrs(APSR, cond);
1243}
1244
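// Maps an OperandSize to the 2-bit element-size field used by the NEON
// encodings below: 0 for bytes, 1 for halfwords, 2 for words, 3 for word
// pairs. S/D floating-point sizes map to 0 here; the float forms encode their
// size elsewhere in the opcode.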
1245static inline int ShiftOfOperandSize(OperandSize size) {
1246 switch (size) {
1247 case kByte:
1248 case kUnsignedByte:
1249 return 0;
1250 case kTwoBytes:
1251 case kUnsignedTwoBytes:
1252 return 1;
1253 case kFourBytes:
1254 case kUnsignedFourBytes:
1255 return 2;
1256 case kWordPair:
1257 return 3;
1258 case kSWord:
1259 case kDWord:
1260 return 0;
1261 default:
1262 UNREACHABLE();
1263 break;
1264 }
1265
1266 UNREACHABLE();
1267 return -1;
1268}
1269
1270void Assembler::EmitSIMDqqq(int32_t opcode,
1271 OperandSize size,
1272 QRegister qd,
1273 QRegister qn,
1274 QRegister qm) {
1275 ASSERT(TargetCPUFeatures::neon_supported());
1276 int sz = ShiftOfOperandSize(size);
1277 int32_t encoding =
1278 (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 | B6 |
1279 opcode | ((sz & 0x3) * B20) |
1280 ((static_cast<int32_t>(qd * 2) >> 4) * B22) |
1281 ((static_cast<int32_t>(qn * 2) & 0xf) * B16) |
1282 ((static_cast<int32_t>(qd * 2) & 0xf) * B12) |
1283 ((static_cast<int32_t>(qn * 2) >> 4) * B7) |
1284 ((static_cast<int32_t>(qm * 2) >> 4) * B5) |
1285 (static_cast<int32_t>(qm * 2) & 0xf);
1286 Emit(encoding);
1287}
1288
1289void Assembler::EmitSIMDddd(int32_t opcode,
1290 OperandSize size,
1291 DRegister dd,
1292 DRegister dn,
1293 DRegister dm) {
1294 ASSERT(TargetCPUFeatures::neon_supported());
1295 int sz = ShiftOfOperandSize(size);
1296 int32_t encoding =
1297 (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 |
1298 opcode | ((sz & 0x3) * B20) | ((static_cast<int32_t>(dd) >> 4) * B22) |
1299 ((static_cast<int32_t>(dn) & 0xf) * B16) |
1300 ((static_cast<int32_t>(dd) & 0xf) * B12) |
1301 ((static_cast<int32_t>(dn) >> 4) * B7) |
1302 ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
1303 Emit(encoding);
1304}
1305
1306void Assembler::vmovq(QRegister qd, QRegister qm) {
1307 EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qm, qm);
1308}
1309
1310void Assembler::vaddqi(OperandSize sz,
1311 QRegister qd,
1312 QRegister qn,
1313 QRegister qm) {
1314 EmitSIMDqqq(B11, sz, qd, qn, qm);
1315}
1316
1317void Assembler::vaddqs(QRegister qd, QRegister qn, QRegister qm) {
1318 EmitSIMDqqq(B11 | B10 | B8, kSWord, qd, qn, qm);
1319}
1320
1321void Assembler::vsubqi(OperandSize sz,
1322 QRegister qd,
1323 QRegister qn,
1324 QRegister qm) {
1325 EmitSIMDqqq(B24 | B11, sz, qd, qn, qm);
1326}
1327
1328void Assembler::vsubqs(QRegister qd, QRegister qn, QRegister qm) {
1329 EmitSIMDqqq(B21 | B11 | B10 | B8, kSWord, qd, qn, qm);
1330}
1331
1332void Assembler::vmulqi(OperandSize sz,
1333 QRegister qd,
1334 QRegister qn,
1335 QRegister qm) {
1336 EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm);
1337}
1338
1339void Assembler::vmulqs(QRegister qd, QRegister qn, QRegister qm) {
1340 EmitSIMDqqq(B24 | B11 | B10 | B8 | B4, kSWord, qd, qn, qm);
1341}
1342
1343void Assembler::vshlqi(OperandSize sz,
1344 QRegister qd,
1345 QRegister qm,
1346 QRegister qn) {
1347 EmitSIMDqqq(B25 | B10, sz, qd, qn, qm);
1348}
1349
1350void Assembler::vshlqu(OperandSize sz,
1351 QRegister qd,
1352 QRegister qm,
1353 QRegister qn) {
1354 EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm);
1355}
1356
1357void Assembler::veorq(QRegister qd, QRegister qn, QRegister qm) {
1358 EmitSIMDqqq(B24 | B8 | B4, kByte, qd, qn, qm);
1359}
1360
1361void Assembler::vorrq(QRegister qd, QRegister qn, QRegister qm) {
1362 EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qn, qm);
1363}
1364
1365void Assembler::vornq(QRegister qd, QRegister qn, QRegister qm) {
1366 EmitSIMDqqq(B21 | B20 | B8 | B4, kByte, qd, qn, qm);
1367}
1368
1369void Assembler::vandq(QRegister qd, QRegister qn, QRegister qm) {
1370 EmitSIMDqqq(B8 | B4, kByte, qd, qn, qm);
1371}
1372
1373void Assembler::vmvnq(QRegister qd, QRegister qm) {
1374 EmitSIMDqqq(B25 | B24 | B23 | B10 | B8 | B7, kWordPair, qd, Q0, qm);
1375}
1376
1377void Assembler::vminqs(QRegister qd, QRegister qn, QRegister qm) {
1378 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
1379}
1380
1381void Assembler::vmaxqs(QRegister qd, QRegister qn, QRegister qm) {
1382 EmitSIMDqqq(B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
1383}
1384
1385void Assembler::vabsqs(QRegister qd, QRegister qm) {
1386 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8, kSWord, qd, Q0,
1387 qm);
1388}
1389
1390void Assembler::vnegqs(QRegister qd, QRegister qm) {
1391 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8 | B7, kSWord,
1392 qd, Q0, qm);
1393}
1394
1395void Assembler::vrecpeqs(QRegister qd, QRegister qm) {
1396 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8, kSWord, qd,
1397 Q0, qm);
1398}
1399
1400void Assembler::vrecpsqs(QRegister qd, QRegister qn, QRegister qm) {
1401 EmitSIMDqqq(B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
1402}
1403
1404void Assembler::vrsqrteqs(QRegister qd, QRegister qm) {
1405 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8 | B7, kSWord,
1406 qd, Q0, qm);
1407}
1408
1409void Assembler::vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm) {
1410 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
1411}
1412
1413void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm, int idx) {
1414 ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair));
1415 int code = 0;
1416
1417 switch (sz) {
1418 case kByte:
1419 case kUnsignedByte: {
1420 ASSERT((idx >= 0) && (idx < 8));
1421 code = 1 | (idx << 1);
1422 break;
1423 }
1424 case kTwoBytes:
1425 case kUnsignedTwoBytes: {
1426 ASSERT((idx >= 0) && (idx < 4));
1427 code = 2 | (idx << 2);
1428 break;
1429 }
1430 case kFourBytes:
1431 case kUnsignedFourBytes: {
1432 ASSERT((idx >= 0) && (idx < 2));
1433 code = 4 | (idx << 3);
1434 break;
1435 }
1436 default: {
1437 break;
1438 }
1439 }
1440
1441 EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair,
1442 static_cast<DRegister>(qd * 2),
1443 static_cast<DRegister>(code & 0xf), dm);
1444}
1445
1446void Assembler::vtbl(DRegister dd, DRegister dn, int len, DRegister dm) {
1447 ASSERT((len >= 1) && (len <= 4));
1448 EmitSIMDddd(B24 | B23 | B11 | ((len - 1) * B8), kWordPair, dd, dn, dm);
1449}
1450
1451void Assembler::vzipqw(QRegister qd, QRegister qm) {
1452 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B8 | B7, kByte, qd, Q0, qm);
1453}
1454
1455void Assembler::vceqqi(OperandSize sz,
1456 QRegister qd,
1457 QRegister qn,
1458 QRegister qm) {
1459 EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm);
1460}
1461
1462void Assembler::vceqqs(QRegister qd, QRegister qn, QRegister qm) {
1463 EmitSIMDqqq(B11 | B10 | B9, kSWord, qd, qn, qm);
1464}
1465
1466void Assembler::vcgeqi(OperandSize sz,
1467 QRegister qd,
1468 QRegister qn,
1469 QRegister qm) {
1470 EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm);
1471}
1472
1473void Assembler::vcugeqi(OperandSize sz,
1474 QRegister qd,
1475 QRegister qn,
1476 QRegister qm) {
1477 EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm);
1478}
1479
1480void Assembler::vcgeqs(QRegister qd, QRegister qn, QRegister qm) {
1481 EmitSIMDqqq(B24 | B11 | B10 | B9, kSWord, qd, qn, qm);
1482}
1483
1484void Assembler::vcgtqi(OperandSize sz,
1485 QRegister qd,
1486 QRegister qn,
1487 QRegister qm) {
1488 EmitSIMDqqq(B9 | B8, sz, qd, qn, qm);
1489}
1490
1491void Assembler::vcugtqi(OperandSize sz,
1492 QRegister qd,
1493 QRegister qn,
1494 QRegister qm) {
1495 EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm);
1496}
1497
1498void Assembler::vcgtqs(QRegister qd, QRegister qn, QRegister qm) {
1499 EmitSIMDqqq(B24 | B21 | B11 | B10 | B9, kSWord, qd, qn, qm);
1500}
1501
1502void Assembler::bkpt(uint16_t imm16) {
1503 Emit(BkptEncoding(imm16));
1504}
1505
1506void Assembler::b(Label* label, Condition cond) {
1507 EmitBranch(cond, label, false);
1508}
1509
1510void Assembler::bl(Label* label, Condition cond) {
1511 EmitBranch(cond, label, true);
1512}
1513
1514void Assembler::bx(Register rm, Condition cond) {
1515 ASSERT(rm != kNoRegister);
1516 ASSERT(cond != kNoCondition);
1517 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
1518 B21 | (0xfff << 8) | B4 | ArmEncode::Rm(rm);
1519 Emit(encoding);
1520}
1521
1522void Assembler::blx(Register rm, Condition cond) {
1523 ASSERT(rm != kNoRegister);
1524 ASSERT(cond != kNoCondition);
1525 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
1526 B21 | (0xfff << 8) | B5 | B4 | ArmEncode::Rm(rm);
1527 Emit(encoding);
1528}
1529
1530void Assembler::MarkExceptionHandler(Label* label) {
1531 EmitType01(AL, 1, TST, 1, PC, R0, Operand(0));
1532 Label l;
1533 b(&l);
1534 EmitBranch(AL, label, false);
1535 Bind(&l);
1536}
1537
1538void Assembler::Drop(intptr_t stack_elements) {
1539 ASSERT(stack_elements >= 0);
1540 if (stack_elements > 0) {
1541 AddImmediate(SP, stack_elements * target::kWordSize);
1542 }
1543}
1544
1545// Uses a code sequence that can easily be decoded.
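// The object pool pointer (pp) is a tagged pointer, so element offsets are
// adjusted by -kHeapObjectTag. Offsets that do not fit a 12-bit load offset
// are split: the high part is added into the destination register first, then
// the low part is folded into the final ldr.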
1546void Assembler::LoadWordFromPoolIndex(Register rd,
1547 intptr_t index,
1548 Register pp,
1549 Condition cond) {
1550 ASSERT((pp != PP) || constant_pool_allowed());
1551 ASSERT(rd != pp);
1552 // PP is tagged on ARM.
1553 const int32_t offset =
1554 target::ObjectPool::element_offset(index) - kHeapObjectTag;
1555 int32_t offset_mask = 0;
1556 if (Address::CanHoldLoadOffset(kFourBytes, offset, &offset_mask)) {
1557 ldr(rd, Address(pp, offset), cond);
1558 } else {
1559 int32_t offset_hi = offset & ~offset_mask; // signed
1560 uint32_t offset_lo = offset & offset_mask; // unsigned
1561 // Inline a simplified version of AddImmediate(rd, pp, offset_hi).
1562 Operand o;
1563 if (Operand::CanHold(offset_hi, &o)) {
1564 add(rd, pp, o, cond);
1565 } else {
1566 LoadImmediate(rd, offset_hi, cond);
1567 add(rd, pp, Operand(rd), cond);
1568 }
1569 ldr(rd, Address(rd, offset_lo), cond);
1570 }
1571}
1572
1573void Assembler::StoreWordToPoolIndex(Register value,
1574 intptr_t index,
1575 Register pp,
1576 Condition cond) {
1577 ASSERT((pp != PP) || constant_pool_allowed());
1578 ASSERT(value != pp);
1579 // PP is tagged on ARM.
1580 const int32_t offset =
1581 target::ObjectPool::element_offset(index) - kHeapObjectTag;
1582 int32_t offset_mask = 0;
1583 if (Address::CanHoldLoadOffset(kFourBytes, offset, &offset_mask)) {
1584 str(value, Address(pp, offset), cond);
1585 } else {
1586 int32_t offset_hi = offset & ~offset_mask; // signed
1587 uint32_t offset_lo = offset & offset_mask; // unsigned
1588 // Inline a simplified version of AddImmediate(TMP, pp, offset_hi).
1589 Operand o;
1590 if (Operand::CanHold(offset_hi, &o)) {
1591 add(TMP, pp, o, cond);
1592 } else {
1593 LoadImmediate(TMP, offset_hi, cond);
1594 add(TMP, pp, Operand(TMP), cond);
1595 }
1596 str(value, Address(TMP, offset_lo), cond);
1597 }
1598}
1599
1600void Assembler::CheckCodePointer() {
1601#ifdef DEBUG
1602 if (!FLAG_check_code_pointer) {
1603 return;
1604 }
1605 Comment("CheckCodePointer");
1606 Label cid_ok, instructions_ok;
1607 Push(R0);
1608 Push(IP);
1609 CompareClassId(CODE_REG, kCodeCid, R0);
1610 b(&cid_ok, EQ);
1611 bkpt(0);
1612 Bind(&cid_ok);
1613
1614 const intptr_t offset = CodeSize() + Instr::kPCReadOffset +
1615 target::Instructions::HeaderSize() - kHeapObjectTag;
1616 mov(R0, Operand(PC));
1617 AddImmediate(R0, -offset);
1618 ldr(IP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
1619 cmp(R0, Operand(IP));
1620 b(&instructions_ok, EQ);
1621 bkpt(1);
1622 Bind(&instructions_ok);
1623 Pop(IP);
1624 Pop(R0);
1625#endif
1626}
1627
1628void Assembler::RestoreCodePointer() {
1629 ldr(CODE_REG,
1630 Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
1631 CheckCodePointer();
1632}
1633
1634void Assembler::LoadPoolPointer(Register reg) {
1635 // Load new pool pointer.
1636 CheckCodePointer();
1637 ldr(reg, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
1638 set_constant_pool_allowed(reg == PP);
1639}
1640
1641void Assembler::SetupGlobalPoolAndDispatchTable() {
1642 ASSERT(FLAG_precompiled_mode);
1643 ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
1644 ldr(DISPATCH_TABLE_REG,
1645 Address(THR, target::Thread::dispatch_table_array_offset()));
1646}
1647
1648void Assembler::LoadIsolate(Register rd) {
1649 ldr(rd, Address(THR, target::Thread::isolate_offset()));
1650}
1651
1652void Assembler::LoadIsolateGroup(Register rd) {
1653 ldr(rd, Address(THR, target::Thread::isolate_group_offset()));
1654}
1655
1656bool Assembler::CanLoadFromObjectPool(const Object& object) const {
1657 ASSERT(IsOriginalObject(object));
1658 if (!constant_pool_allowed()) {
1659 return false;
1660 }
1661
1662 DEBUG_ASSERT(IsNotTemporaryScopedHandle(object));
1663 ASSERT(IsInOldSpace(object));
1664 return true;
1665}
1666
1667void Assembler::LoadObjectHelper(
1668 Register rd,
1669 const Object& object,
1670 Condition cond,
1671 bool is_unique,
1672 Register pp,
1673 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1674 ASSERT(IsOriginalObject(object));
1675 // `is_unique == true` effectively means object has to be patchable.
1676 if (!is_unique) {
1677 intptr_t offset = 0;
1678 if (target::CanLoadFromThread(object, &offset)) {
1679 // Load common VM constants from the thread. This also works in places
1680 // where no constant pool is set up (e.g. intrinsic code).
1681 ldr(rd, Address(THR, offset), cond);
1682 return;
1683 }
1684 if (target::IsSmi(object)) {
1685 // Relocation doesn't apply to Smis.
1686 LoadImmediate(rd, target::ToRawSmi(object), cond);
1687 return;
1688 }
1689 }
1690 RELEASE_ASSERT(CanLoadFromObjectPool(object));
1691 // Make sure that class CallPattern is able to decode this load from the
1692 // object pool.
1693 const auto index =
1694 is_unique
1695 ? object_pool_builder().AddObject(
1696 object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
1697 : object_pool_builder().FindObject(
1698 object, ObjectPoolBuilderEntry::kNotPatchable,
1699 snapshot_behavior);
1700 LoadWordFromPoolIndex(rd, index, pp, cond);
1701}
1702
1703void Assembler::LoadObject(Register rd, const Object& object, Condition cond) {
1704 LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP);
1705}
1706
1707void Assembler::LoadUniqueObject(
1708 Register rd,
1709 const Object& object,
1710 Condition cond,
1711 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1712 LoadObjectHelper(rd, object, cond, /* is_unique = */ true, PP,
1713 snapshot_behavior);
1714}
1715
1716void Assembler::LoadNativeEntry(Register rd,
1717 const ExternalLabel* label,
1718 ObjectPoolBuilderEntry::Patchability patchable,
1719 Condition cond) {
1720 const intptr_t index =
1721 object_pool_builder().FindNativeFunction(label, patchable);
1722 LoadWordFromPoolIndex(rd, index, PP, cond);
1723}
1724
1725void Assembler::PushObject(const Object& object) {
1726 ASSERT(IsOriginalObject(object));
1727 LoadObject(IP, object);
1728 Push(IP);
1729}
1730
1731void Assembler::CompareObject(Register rn, const Object& object) {
1732 ASSERT(IsOriginalObject(object));
1733 ASSERT(rn != IP);
1734 if (target::IsSmi(object)) {
1735 CompareImmediate(rn, target::ToRawSmi(object));
1736 } else {
1737 LoadObject(IP, object);
1738 cmp(rn, Operand(IP));
1739 }
1740}
1741
1742Register UseRegister(Register reg, RegList* used) {
1743 ASSERT(reg != THR);
1744 ASSERT(reg != SP);
1745 ASSERT(reg != FP);
1746 ASSERT(reg != PC);
1747 ASSERT((*used & (1 << reg)) == 0);
1748 *used |= (1 << reg);
1749 return reg;
1750}
1751
1752Register AllocateRegister(RegList* used) {
1753 const RegList free = ~*used;
1754 return (free == 0)
1755 ? kNoRegister
1756 : UseRegister(
1757 static_cast<Register>(Utils::CountTrailingZerosWord(free)),
1758 used);
1759}
1760
1761void Assembler::StoreBarrier(Register object,
1762 Register value,
1763 CanBeSmi can_be_smi,
1764 Register scratch) {
1765 // x.slot = x. Barrier should have been removed at the IL level.
1766 ASSERT(object != value);
1767 ASSERT(object != LINK_REGISTER);
1768 ASSERT(value != LINK_REGISTER);
1769 ASSERT(object != scratch);
1770 ASSERT(value != scratch);
1771 ASSERT(scratch != kNoRegister);
1772
1773 // In parallel, test whether
1774 // - object is old and not remembered and value is new, or
1775 // - object is old and value is old and not marked and concurrent marking is
1776 // in progress
1777 // If so, call the WriteBarrier stub, which will either add object to the
1778 // store buffer (case 1) or add value to the marking stack (case 2).
1779 // Compare UntaggedObject::StorePointer.
1780 Label done;
1781 if (can_be_smi == kValueCanBeSmi) {
1782 BranchIfSmi(value, &done, kNearJump);
1783 } else {
1784#if defined(DEBUG)
1785 Label passed_check;
1786 BranchIfNotSmi(value, &passed_check, kNearJump);
1787 Breakpoint();
1788 Bind(&passed_check);
1789#endif
1790 }
1791 const bool preserve_lr = lr_state().LRContainsReturnAddress();
1792 if (preserve_lr) {
1793 SPILLS_LR_TO_FRAME(Push(LR));
1794 }
1795 CLOBBERS_LR({
1796 ldrb(scratch, FieldAddress(object, target::Object::tags_offset()));
1797 ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
1798 and_(scratch, LR,
1799 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1800 ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
1801 tst(scratch, Operand(LR));
1802 });
1803 if (value != kWriteBarrierValueReg) {
1804 // Unlikely. Only non-graph intrinsics.
1805 // TODO(rmacnak): Shuffle registers in intrinsics.
1806 Label restore_and_done;
1807 b(&restore_and_done, ZERO);
1808 Register objectForCall = object;
1809 if (object != kWriteBarrierValueReg) {
1810 Push(kWriteBarrierValueReg);
1811 } else {
1812 COMPILE_ASSERT(R2 != kWriteBarrierValueReg);
1813 COMPILE_ASSERT(R3 != kWriteBarrierValueReg);
1814 objectForCall = (value == R2) ? R3 : R2;
1815 PushList((1 << kWriteBarrierValueReg) | (1 << objectForCall));
1816 mov(objectForCall, Operand(object));
1817 }
1818 mov(kWriteBarrierValueReg, Operand(value));
1819 generate_invoke_write_barrier_wrapper_(AL, objectForCall);
1820
1821 if (object != kWriteBarrierValueReg) {
1822 Pop(kWriteBarrierValueReg);
1823 } else {
1824 PopList((1 << kWriteBarrierValueReg) | (1 << objectForCall));
1825 }
1826 Bind(&restore_and_done);
1827 } else {
1828 generate_invoke_write_barrier_wrapper_(NE, object);
1829 }
1830 if (preserve_lr) {
1831 RESTORES_LR_FROM_FRAME(Pop(LR));
1832 }
1833 Bind(&done);
1834}
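// How the combined test above works (a sketch, not normative): the object and
// value tag bytes are laid out so that shifting the object's byte right by
// kBarrierOverlapShift lines the bits describing the *object* (old, not yet
// remembered) up with the bits describing the *value* (new, or not yet
// marked). ANDing the two bytes and testing against the per-thread
// write_barrier_mask therefore checks the generational case and, while
// concurrent marking is active, the incremental case with a single TST:
//
//   needs_stub = (value_tags & (object_tags >> kBarrierOverlapShift))
//                & THR->write_barrier_mask;   // call the stub if non-zero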
1835
1836void Assembler::ArrayStoreBarrier(Register object,
1837 Register slot,
1838 Register value,
1839 CanBeSmi can_be_smi,
1840 Register scratch) {
1841 ASSERT(object != LINK_REGISTER);
1842 ASSERT(value != LINK_REGISTER);
1843 ASSERT(slot != LINK_REGISTER);
1844 ASSERT(object != scratch);
1845 ASSERT(value != scratch);
1846 ASSERT(slot != scratch);
1847 ASSERT(scratch != kNoRegister);
1848
1849 // In parallel, test whether
1850 // - object is old and not remembered and value is new, or
1851 // - object is old and value is old and not marked and concurrent marking is
1852 // in progress
1853 // If so, call the WriteBarrier stub, which will either add object to the
1854 // store buffer (case 1) or add value to the marking stack (case 2).
1855 // Compare UntaggedObject::StorePointer.
1856 Label done;
1857 if (can_be_smi == kValueCanBeSmi) {
1858 BranchIfSmi(value, &done, kNearJump);
1859 } else {
1860#if defined(DEBUG)
1861 Label passed_check;
1862 BranchIfNotSmi(value, &passed_check, kNearJump);
1863 Breakpoint();
1864 Bind(&passed_check);
1865#endif
1866 }
1867 const bool preserve_lr = lr_state().LRContainsReturnAddress();
1868 if (preserve_lr) {
1869 SPILLS_LR_TO_FRAME(Push(LR));
1870 }
1871
1872 CLOBBERS_LR({
1873 ldrb(scratch, FieldAddress(object, target::Object::tags_offset()));
1874 ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
1875 and_(scratch, LR,
1876 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1877 ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
1878 tst(scratch, Operand(LR));
1879 });
1880
1881 if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
1882 (slot != kWriteBarrierSlotReg)) {
1883 // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
1884 // from StoreIndexInstr, which gets these exact registers from the register
1885 // allocator.
1886 UNIMPLEMENTED();
1887 }
1888 generate_invoke_array_write_barrier_(NE);
1889 if (preserve_lr) {
1890 RESTORES_LR_FROM_FRAME(Pop(LR));
1891 }
1892 Bind(&done);
1893}
1894
1895void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
1896 const Address& dest,
1897 const Object& value,
1898 MemoryOrder memory_order,
1899 OperandSize size) {
1900 ASSERT_EQUAL(size, kFourBytes);
1901 ASSERT_EQUAL(dest.mode(), Address::Mode::Offset);
1902 ASSERT_EQUAL(dest.kind(), Address::OffsetKind::Immediate);
1903 int32_t ignored = 0;
1904 Register scratch = TMP;
1905 if (!Address::CanHoldStoreOffset(size, dest.offset(), &ignored)) {
1906 // As there is no TMP2 on ARM, Store uses TMP when the instruction cannot
1907 // contain the offset, so we need to use a different scratch register
1908 // for loading the object.
1909 scratch = dest.base() == R9 ? R8 : R9;
1910 Push(scratch);
1911 }
1912 ASSERT(IsOriginalObject(value));
1913 DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
1914 // No store buffer update.
1915 LoadObject(scratch, value);
1916 if (memory_order == kRelease) {
1917 StoreRelease(scratch, dest);
1918 } else {
1919 Store(scratch, dest);
1920 }
1921 if (scratch != TMP) {
1922 Pop(scratch);
1923 }
1924}
1925
1926void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
1927 Register value) {
1928 // We can't assert the incremental barrier is not needed here, only the
1929 // generational barrier. We sometimes omit the write barrier when 'value' is
1930 // a constant, but we don't eagerly mark 'value' and instead assume it is also
1931 // reachable via a constant pool, so it doesn't matter if it is not traced via
1932 // 'object'.
1933 Label done;
1934 BranchIfSmi(value, &done, kNearJump);
1935 ldrb(TMP, FieldAddress(value, target::Object::tags_offset()));
1936 tst(TMP, Operand(1 << target::UntaggedObject::kNewBit));
1937 b(&done, ZERO);
1938 ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
1939 tst(TMP, Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));
1940 b(&done, ZERO);
1941 Stop("Write barrier is required");
1942 Bind(&done);
1943}
1944
1945void Assembler::StoreInternalPointer(Register object,
1946 const Address& dest,
1947 Register value) {
1948 str(value, dest);
1949}
1950
1951void Assembler::InitializeFieldsNoBarrier(Register object,
1952 Register begin,
1953 Register end,
1954 Register value_even,
1955 Register value_odd) {
1956 ASSERT(value_odd == value_even + 1);
1957 Label init_loop;
1958 Bind(&init_loop);
1959 AddImmediate(begin, 2 * target::kWordSize);
1960 cmp(begin, Operand(end));
1961 strd(value_even, value_odd, begin, -2 * target::kWordSize, LS);
1962 b(&init_loop, CC);
1963 str(value_even, Address(begin, -2 * target::kWordSize), HI);
1964}
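// The conditional suffixes let the loop handle regions with an odd number of
// words. Illustrative trace (begin = B, end = B + 12 on 32-bit ARM): the
// first pass advances begin to B + 8, stores a pair at B (LS) and loops (CC);
// the second pass advances begin to B + 16, the pair store and the branch are
// skipped (neither LS nor CC holds), and the trailing single word at B + 8 is
// written by the HI store.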
1965
1966void Assembler::InitializeFieldsNoBarrierUnrolled(Register object,
1967 Register base,
1968 intptr_t begin_offset,
1969 intptr_t end_offset,
1970 Register value_even,
1971 Register value_odd) {
1972 ASSERT(value_odd == value_even + 1);
1973 intptr_t current_offset = begin_offset;
1974 while (current_offset + target::kWordSize < end_offset) {
1975 strd(value_even, value_odd, base, current_offset);
1976 current_offset += 2 * target::kWordSize;
1977 }
1978 while (current_offset < end_offset) {
1979 str(value_even, Address(base, current_offset));
1980 current_offset += target::kWordSize;
1981 }
1982}
1983
1984void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
1985#if defined(DEBUG)
1986 Label done;
1987 tst(value, Operand(kHeapObjectTag));
1988 b(&done, EQ);
1989 Stop("New value must be Smi.");
1990 Bind(&done);
1991#endif // defined(DEBUG)
1992 Store(value, dest);
1993}
1994
1995void Assembler::ExtractClassIdFromTags(Register result,
1996 Register tags,
1997 Condition cond) {
1998 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
1999 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2000 ubfx(result, tags, target::UntaggedObject::kClassIdTagPos,
2001 target::UntaggedObject::kClassIdTagSize, cond);
2002}
2003
2004void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
2005 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
2006 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
2007 Lsr(result, tags,
2008 Operand(target::UntaggedObject::kSizeTagPos -
2009 target::ObjectAlignment::kObjectAlignmentLog2),
2010 AL);
2011 AndImmediate(result, result,
2012 (Utils::NBitMask(target::UntaggedObject::kSizeTagSize)
2013 << target::ObjectAlignment::kObjectAlignmentLog2));
2014}
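// The size tag stores the instance size in allocation units, so shifting by
// (kSizeTagPos - kObjectAlignmentLog2) and masking yields the size in bytes
// without a separate multiply. Worked example (assuming 8-byte alignment,
// i.e. kObjectAlignmentLog2 == 3): a size-tag field of 5 denotes 40 bytes;
// tags >> (8 - 3) leaves 5 << 3 == 40 inside the mask 0xf << 3.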
2015
2016void Assembler::LoadClassId(Register result, Register object, Condition cond) {
2017 ldr(result, FieldAddress(object, target::Object::tags_offset()), cond);
2018 ExtractClassIdFromTags(result, result, cond);
2019}
2020
2021void Assembler::LoadClassById(Register result, Register class_id) {
2022 ASSERT(result != class_id);
2023
2024 const intptr_t table_offset =
2025 target::IsolateGroup::cached_class_table_table_offset();
2026
2027 LoadIsolateGroup(result);
2028 LoadFromOffset(result, result, table_offset);
2029 ldr(result, Address(result, class_id, LSL, target::kWordSizeLog2));
2030}
2031
2032void Assembler::CompareClassId(Register object,
2033 intptr_t class_id,
2034 Register scratch) {
2035 LoadClassId(scratch, object);
2036 CompareImmediate(scratch, class_id);
2037}
2038
2039void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
2040 tst(object, Operand(kSmiTagMask));
2041 LoadClassId(result, object, NE);
2042 LoadImmediate(result, kSmiCid, EQ);
2043}
2044
2045void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
2046 LoadClassIdMayBeSmi(result, object);
2047 SmiTag(result);
2048}
2049
2050void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
2051 Register src,
2052 Register scratch,
2053 bool can_be_null) {
2054#if defined(DEBUG)
2055 Comment("Check that object in register has cid %" Pd "", cid);
2056 Label matches;
2057 LoadClassIdMayBeSmi(scratch, src);
2058 CompareImmediate(scratch, cid);
2059 BranchIf(EQUAL, &matches, Assembler::kNearJump);
2060 if (can_be_null) {
2061 CompareImmediate(scratch, kNullCid);
2062 BranchIf(EQUAL, &matches, Assembler::kNearJump);
2063 }
2064 Breakpoint();
2065 Bind(&matches);
2066#endif
2067}
2068
2069void Assembler::BailoutIfInvalidBranchOffset(int32_t offset) {
2070 if (!CanEncodeBranchDistance(offset)) {
2071 ASSERT(!use_far_branches());
2072 BailoutWithBranchOffsetError();
2073 }
2074}
2075
2076int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) {
2077 // The offset is off by 8 due to the way the ARM CPUs read PC.
2078 offset -= Instr::kPCReadOffset;
2079
2080 // Properly preserve only the bits supported in the instruction.
2081 offset >>= 2;
2082 offset &= kBranchOffsetMask;
2083 return (inst & ~kBranchOffsetMask) | offset;
2084}
2085
2086int Assembler::DecodeBranchOffset(int32_t inst) {
2087 // Sign-extend, left-shift by 2, then add 8.
2088 return ((((inst & kBranchOffsetMask) << 8) >> 6) + Instr::kPCReadOffset);
2089}
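// The B/BL offset is a signed 24-bit word count relative to PC + 8, which is
// what EncodeBranchOffset/DecodeBranchOffset implement. Worked example: a
// branch whose target lies 100 bytes past the branch instruction is encoded
// as (100 - 8) >> 2 == 23 in the imm24 field, and decoding gives
// ((23 << 8) >> 6) + 8 == 100.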
2090
2091static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) {
2092 int32_t offset = 0;
2093 offset |= (movt & 0xf0000) << 12;
2094 offset |= (movt & 0xfff) << 16;
2095 offset |= (movw & 0xf0000) >> 4;
2096 offset |= movw & 0xfff;
2097 return offset;
2098}
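// movw/movt each carry a 16-bit immediate split into a 4-bit field at bits
// [19:16] and a 12-bit field at bits [11:0]; the helper above reassembles the
// full 32-bit value (movt supplies the high half, movw the low half). For
// example, the pair "movw ip, #0x5678; movt ip, #0x1234" (imm4/imm12 fields
// 0x5/0x678 and 0x1/0x234) decodes back to 0x12345678.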
2099
2100class PatchFarBranch : public AssemblerFixup {
2101 public:
2102 PatchFarBranch() {}
2103
2104 void Process(const MemoryRegion& region, intptr_t position) {
2105 ProcessARMv7(region, position);
2106 }
2107
2108 private:
2109 void ProcessARMv7(const MemoryRegion& region, intptr_t position) {
2110 const int32_t movw = region.Load<int32_t>(position);
2111 const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize);
2112 const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize);
2113
2114 if (((movt & 0xfff0f000) == 0xe340c000) && // movt IP, high
2115 ((movw & 0xfff0f000) == 0xe300c000)) { // movw IP, low
2116 const int32_t offset = DecodeARMv7LoadImmediate(movt, movw);
2117 const int32_t dest = region.start() + offset;
2118 const uint16_t dest_high = Utils::High16Bits(dest);
2119 const uint16_t dest_low = Utils::Low16Bits(dest);
2120 const int32_t patched_movt =
2121 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
2122 const int32_t patched_movw =
2123 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
2124
2125 region.Store<int32_t>(position, patched_movw);
2126 region.Store<int32_t>(position + Instr::kInstrSize, patched_movt);
2127 return;
2128 }
2129
2130 // If the offset loading instructions aren't there, we must have replaced
2131 // the far branch with a near one, and so these instructions
2132 // should be NOPs.
2133 ASSERT((movt == Instr::kNopInstruction) && (bx == Instr::kNopInstruction));
2134 }
2135
2136 virtual bool IsPointerOffset() const { return false; }
2137};
2138
2139void Assembler::EmitFarBranch(Condition cond, int32_t offset, bool link) {
2140 buffer_.EmitFixup(new PatchFarBranch());
2141 LoadPatchableImmediate(IP, offset);
2142 if (link) {
2143 blx(IP, cond);
2144 } else {
2145 bx(IP, cond);
2146 }
2147}
2148
2149void Assembler::EmitBranch(Condition cond, Label* label, bool link) {
2150 if (label->IsBound()) {
2151 const int32_t dest = label->Position() - buffer_.Size();
2152 if (use_far_branches() && !CanEncodeBranchDistance(dest)) {
2153 EmitFarBranch(cond, label->Position(), link);
2154 } else {
2155 EmitType5(cond, dest, link);
2156 }
2157 label->UpdateLRState(lr_state());
2158 } else {
2159 const intptr_t position = buffer_.Size();
2160 if (use_far_branches()) {
2161 const int32_t dest = label->position_;
2162 EmitFarBranch(cond, dest, link);
2163 } else {
2164 // Use the offset field of the branch instruction for linking the sites.
2165 EmitType5(cond, label->position_, link);
2166 }
2167 label->LinkTo(position, lr_state());
2168 }
2169}
2170
2171void Assembler::BindARMv7(Label* label) {
2172 ASSERT(!label->IsBound());
2173 intptr_t bound_pc = buffer_.Size();
2174 while (label->IsLinked()) {
2175 const int32_t position = label->Position();
2176 int32_t dest = bound_pc - position;
2177 if (use_far_branches() && !CanEncodeBranchDistance(dest)) {
2178 // Far branches are enabled and we can't encode the branch offset.
2179
2180 // Grab instructions that load the offset.
2181 const int32_t movw =
2182 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
2183 const int32_t movt =
2184 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2185
2186 // Change from relative to the branch to relative to the assembler
2187 // buffer.
2188 dest = buffer_.Size();
2189 const uint16_t dest_high = Utils::High16Bits(dest);
2190 const uint16_t dest_low = Utils::Low16Bits(dest);
2191 const int32_t patched_movt =
2192 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
2193 const int32_t patched_movw =
2194 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
2195
2196 // Rewrite the instructions.
2197 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_movw);
2198 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_movt);
2199 label->position_ = DecodeARMv7LoadImmediate(movt, movw);
2200 } else if (use_far_branches() && CanEncodeBranchDistance(dest)) {
2201 // Far branches are enabled, but we can encode the branch offset.
2202
2203 // Grab instructions that load the offset, and the branch.
2204 const int32_t movw =
2205 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
2206 const int32_t movt =
2207 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2208 const int32_t branch =
2209 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
2210
2211 // Grab the branch condition, and encode the link bit.
2212 const int32_t cond = branch & 0xf0000000;
2213 const int32_t link = (branch & 0x20) << 19;
2214
2215 // Encode the branch and the offset.
2216 const int32_t new_branch = cond | link | 0x0a000000;
2217 const int32_t encoded = EncodeBranchOffset(dest, new_branch);
2218
2219 // Write the encoded branch instruction followed by two nops.
2220 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, encoded);
2221 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
2222 Instr::kNopInstruction);
2223 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize,
2224 Instr::kNopInstruction);
2225
2226 label->position_ = DecodeARMv7LoadImmediate(movt, movw);
2227 } else {
2228 BailoutIfInvalidBranchOffset(dest);
2229 int32_t next = buffer_.Load<int32_t>(position);
2230 int32_t encoded = Assembler::EncodeBranchOffset(dest, next);
2231 buffer_.Store<int32_t>(position, encoded);
2232 label->position_ = Assembler::DecodeBranchOffset(next);
2233 }
2234 }
2235 label->BindTo(bound_pc, lr_state());
2236}
2237
2238void Assembler::Bind(Label* label) {
2239 BindARMv7(label);
2240}
2241
2242OperandSize Address::OperandSizeFor(intptr_t cid) {
2243 auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
2244 switch (rep) {
2245 case kUnboxedInt64:
2246 return kDWord;
2247 case kUnboxedFloat:
2248 return kSWord;
2249 case kUnboxedDouble:
2250 return kDWord;
2251 case kUnboxedInt32x4:
2252 case kUnboxedFloat32x4:
2253 case kUnboxedFloat64x2:
2254 return kRegList;
2255 default:
2256 return RepresentationUtils::OperandSize(rep);
2257 }
2258}
2259
2260bool Address::CanHoldLoadOffset(OperandSize size,
2261 int32_t offset,
2262 int32_t* offset_mask) {
2263 switch (size) {
2264 case kByte:
2265 case kTwoBytes:
2266 case kUnsignedTwoBytes:
2267 case kWordPair: {
2268 *offset_mask = 0xff;
2269 return Utils::MagnitudeIsUint(8, offset); // Addressing mode 3.
2270 }
2271 case kUnsignedByte:
2272 case kFourBytes:
2273 case kUnsignedFourBytes: {
2274 *offset_mask = 0xfff;
2275 return Utils::MagnitudeIsUint(12, offset); // Addressing mode 2.
2276 }
2277 case kSWord:
2278 case kDWord: {
2279 *offset_mask = 0x3fc; // Multiple of 4.
2280 // VFP addressing mode.
2281 return (Utils::MagnitudeIsUint(10, offset) &&
2282 Utils::IsAligned(offset, 4));
2283 }
2284 case kRegList: {
2285 *offset_mask = 0x0;
2286 return offset == 0;
2287 }
2288 default: {
2289 UNREACHABLE();
2290 return false;
2291 }
2292 }
2293}
2294
2295bool Address::CanHoldStoreOffset(OperandSize size,
2296 int32_t offset,
2297 int32_t* offset_mask) {
2298 switch (size) {
2299 case kTwoBytes:
2300 case kUnsignedTwoBytes:
2301 case kWordPair: {
2302 *offset_mask = 0xff;
2303 return Utils::MagnitudeIsUint(8, offset); // Addressing mode 3.
2304 }
2305 case kByte:
2306 case kUnsignedByte:
2307 case kFourBytes:
2308 case kUnsignedFourBytes: {
2309 *offset_mask = 0xfff;
2310 return Utils::MagnitudeIsUint(12, offset); // Addressing mode 2.
2311 }
2312 case kSWord:
2313 case kDWord: {
2314 *offset_mask = 0x3fc; // Multiple of 4.
2315 // VFP addressing mode.
2316 return (Utils::MagnitudeIsUint(10, offset) &&
2317 Utils::IsAligned(offset, 4));
2318 }
2319 case kRegList: {
2320 *offset_mask = 0x0;
2321 return offset == 0;
2322 }
2323 default: {
2324 UNREACHABLE();
2325 return false;
2326 }
2327 }
2328}
2329
2330bool Address::CanHoldImmediateOffset(bool is_load,
2331 intptr_t cid,
2332 int64_t offset) {
2333 int32_t offset_mask = 0;
2334 if (is_load) {
2335 return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask);
2336 } else {
2337 return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask);
2338 }
2339}
2340
2341void Assembler::Push(Register rd, Condition cond) {
2342 str(rd, Address(SP, -target::kWordSize, Address::PreIndex), cond);
2343}
2344
2345void Assembler::Pop(Register rd, Condition cond) {
2346 ldr(rd, Address(SP, target::kWordSize, Address::PostIndex), cond);
2347}
2348
2349void Assembler::PushList(RegList regs, Condition cond) {
2350 stm(DB_W, SP, regs, cond);
2351}
2352
2353void Assembler::PopList(RegList regs, Condition cond) {
2354 ldm(IA_W, SP, regs, cond);
2355}
2356
2357void Assembler::PushQuad(FpuRegister reg, Condition cond) {
2358 DRegister dreg = EvenDRegisterOf(reg);
2359 vstmd(DB_W, SP, dreg, 2, cond); // 2 D registers per Q register.
2360}
2361
2362void Assembler::PopQuad(FpuRegister reg, Condition cond) {
2363 DRegister dreg = EvenDRegisterOf(reg);
2364 vldmd(IA_W, SP, dreg, 2, cond); // 2 D registers per Q register.
2365}
2366
2367void Assembler::PushRegisters(const RegisterSet& regs) {
2368 const intptr_t fpu_regs_count = regs.FpuRegisterCount();
2369 if (fpu_regs_count > 0) {
2370 AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize));
2371 // Store fpu registers with the lowest register number at the lowest
2372 // address.
2373 intptr_t offset = 0;
2374 mov(TMP, Operand(SP));
2375 for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
2376 QRegister fpu_reg = static_cast<QRegister>(i);
2377 if (regs.ContainsFpuRegister(fpu_reg)) {
2378 DRegister d = EvenDRegisterOf(fpu_reg);
2379 ASSERT(d + 1 == OddDRegisterOf(fpu_reg));
2380 vstmd(IA_W, IP, d, 2);
2381 offset += kFpuRegisterSize;
2382 }
2383 }
2384 ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
2385 }
2386
2387 // The order in which the registers are pushed must match the order
2388 // in which the registers are encoded in the safe point's stack map.
2389 // NOTE: This matches the order of ARM's multi-register push.
2390 RegList reg_list = 0;
2391 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
2392 Register reg = static_cast<Register>(i);
2393 if (regs.ContainsRegister(reg)) {
2394 reg_list |= (1 << reg);
2395 }
2396 }
2397 if (reg_list != 0) {
2398 PushList(reg_list);
2399 }
2400}
2401
2402void Assembler::PopRegisters(const RegisterSet& regs) {
2403 RegList reg_list = 0;
2404 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
2405 Register reg = static_cast<Register>(i);
2406 if (regs.ContainsRegister(reg)) {
2407 reg_list |= (1 << reg);
2408 }
2409 }
2410 if (reg_list != 0) {
2411 PopList(reg_list);
2412 }
2413
2414 const intptr_t fpu_regs_count = regs.FpuRegisterCount();
2415 if (fpu_regs_count > 0) {
2416 // Fpu registers have the lowest register number at the lowest address.
2417 intptr_t offset = 0;
2418 for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
2419 QRegister fpu_reg = static_cast<QRegister>(i);
2420 if (regs.ContainsFpuRegister(fpu_reg)) {
2421 DRegister d = EvenDRegisterOf(fpu_reg);
2422 ASSERT(d + 1 == OddDRegisterOf(fpu_reg));
2423 vldmd(IA_W, SP, d, 2);
2424 offset += kFpuRegisterSize;
2425 }
2426 }
2427 ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
2428 }
2429}
2430
2431void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
2432 // Collect the longest descending sequences of registers and
2433 // push them with a single STMDB instruction.
2434 RegList pending_regs = 0;
2435 Register lowest_pending_reg = kNumberOfCpuRegisters;
2436 intptr_t num_pending_regs = 0;
2437 for (Register reg : regs) {
2438 if (reg >= lowest_pending_reg) {
2439 ASSERT(pending_regs != 0);
2440 if (num_pending_regs > 1) {
2441 PushList(pending_regs);
2442 } else {
2443 Push(lowest_pending_reg);
2444 }
2445 pending_regs = 0;
2446 num_pending_regs = 0;
2447 }
2448 pending_regs |= (1 << reg);
2449 lowest_pending_reg = reg;
2450 ++num_pending_regs;
2451 }
2452 if (pending_regs != 0) {
2453 if (num_pending_regs > 1) {
2454 PushList(pending_regs);
2455 } else {
2456 Push(lowest_pending_reg);
2457 }
2458 }
2459}
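// Example of the batching (a sketch of the emitted sequence):
// PushRegistersInOrder({R1, R3, R2}) emits "push {r1}" followed by
// "stmdb sp!, {r2, r3}", since R3, R2 form a descending run. STMDB places the
// lower-numbered register at the lower address, so the resulting stack has R2
// on top, then R3, then R1 -- the same layout as three single pushes in the
// requested order.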
2460
2461void Assembler::PushNativeCalleeSavedRegisters() {
2462 // Save new context and C++ ABI callee-saved registers.
2463 PushList(kAbiPreservedCpuRegs);
2464
2465 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg);
2466 ASSERT(2 * kAbiPreservedFpuRegCount < 16);
2467 // Save FPU registers. 2 D registers per Q register.
2468 vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
2469}
2470
2471void Assembler::PopNativeCalleeSavedRegisters() {
2472 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg);
2473 // Restore C++ ABI callee-saved registers.
2474 // Restore FPU registers. 2 D registers per Q register.
2475 vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
2476 // Restore CPU registers.
2477 PopList(kAbiPreservedCpuRegs);
2478}
2479
2480void Assembler::ExtendValue(Register rd,
2481 Register rm,
2482 OperandSize sz,
2483 Condition cond) {
2484 switch (sz) {
2485 case kUnsignedFourBytes:
2486 case kFourBytes:
2487 if (rd == rm) return;
2488 return mov(rd, Operand(rm), cond);
2489 case kUnsignedTwoBytes:
2490 return ubfx(rd, rm, 0, kBitsPerInt16, cond);
2491 case kTwoBytes:
2492 return sbfx(rd, rm, 0, kBitsPerInt16, cond);
2493 case kUnsignedByte:
2494 return ubfx(rd, rm, 0, kBitsPerInt8, cond);
2495 case kByte:
2496 return sbfx(rd, rm, 0, kBitsPerInt8, cond);
2497 default:
2498 UNIMPLEMENTED();
2499 break;
2500 }
2501}
2502
2503void Assembler::Lsl(Register rd,
2504 Register rm,
2505 const Operand& shift_imm,
2506 Condition cond) {
2507 ASSERT(shift_imm.type() == 1);
2508 ASSERT(shift_imm.encoding() != 0); // Do not use Lsl if no shift is wanted.
2509 mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond);
2510}
2511
2512void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond) {
2513 mov(rd, Operand(rm, LSL, rs), cond);
2514}
2515
2516void Assembler::Lsr(Register rd,
2517 Register rm,
2518 const Operand& shift_imm,
2519 Condition cond) {
2520 ASSERT(shift_imm.type() == 1);
2521 uint32_t shift = shift_imm.encoding();
2522 ASSERT(shift != 0); // Do not use Lsr if no shift is wanted.
2523 if (shift == 32) {
2524 shift = 0; // Comply with UAL syntax.
2525 }
2526 mov(rd, Operand(rm, LSR, shift), cond);
2527}
2528
2529void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) {
2530 mov(rd, Operand(rm, LSR, rs), cond);
2531}
2532
2533void Assembler::Asr(Register rd,
2534 Register rm,
2535 const Operand& shift_imm,
2536 Condition cond) {
2537 ASSERT(shift_imm.type() == 1);
2538 uint32_t shift = shift_imm.encoding();
2539 ASSERT(shift != 0); // Do not use Asr if no shift is wanted.
2540 if (shift == 32) {
2541 shift = 0; // Comply with UAL syntax.
2542 }
2543 mov(rd, Operand(rm, ASR, shift), cond);
2544}
2545
2546void Assembler::Asrs(Register rd,
2547 Register rm,
2548 const Operand& shift_imm,
2549 Condition cond) {
2550 ASSERT(shift_imm.type() == 1);
2551 uint32_t shift = shift_imm.encoding();
2552 ASSERT(shift != 0); // Do not use Asr if no shift is wanted.
2553 if (shift == 32) {
2554 shift = 0; // Comply with UAL syntax.
2555 }
2556 movs(rd, Operand(rm, ASR, shift), cond);
2557}
2558
2559void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) {
2560 mov(rd, Operand(rm, ASR, rs), cond);
2561}
2562
2563void Assembler::Ror(Register rd,
2564 Register rm,
2565 const Operand& shift_imm,
2566 Condition cond) {
2567 ASSERT(shift_imm.type() == 1);
2568 ASSERT(shift_imm.encoding() != 0); // Use Rrx instruction.
2569 mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond);
2570}
2571
2572void Assembler::Ror(Register rd, Register rm, Register rs, Condition cond) {
2573 mov(rd, Operand(rm, ROR, rs), cond);
2574}
2575
2576void Assembler::Rrx(Register rd, Register rm, Condition cond) {
2577 mov(rd, Operand(rm, ROR, 0), cond);
2578}
2579
2580void Assembler::SignFill(Register rd, Register rm, Condition cond) {
2581 Asr(rd, rm, Operand(31), cond);
2582}
2583
2584void Assembler::Vreciprocalqs(QRegister qd, QRegister qm) {
2585 ASSERT(qm != QTMP);
2586 ASSERT(qd != QTMP);
2587
2588 // Reciprocal estimate.
2589 vrecpeqs(qd, qm);
2590 // 2 Newton-Raphson steps.
2591 vrecpsqs(QTMP, qm, qd);
2592 vmulqs(qd, qd, QTMP);
2593 vrecpsqs(QTMP, qm, qd);
2594 vmulqs(qd, qd, QTMP);
2595}
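// vrecpeqs gives a rough estimate x0 ~ 1/q and vrecpsqs computes (2 - q*x),
// so each multiply above performs one Newton-Raphson step
// x_{n+1} = x_n * (2 - q * x_n); two steps bring the estimate close to full
// single precision.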
2596
2597void Assembler::VreciprocalSqrtqs(QRegister qd, QRegister qm) {
2598 ASSERT(qm != QTMP);
2599 ASSERT(qd != QTMP);
2600
2601 // Reciprocal square root estimate.
2602 vrsqrteqs(qd, qm);
2603 // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2.
2604 // First step.
2605 vmulqs(QTMP, qd, qd); // QTMP <- xn^2
2606 vrsqrtsqs(QTMP, qm, QTMP); // QTMP <- (3 - Q1*QTMP) / 2.
2607 vmulqs(qd, qd, QTMP); // xn+1 <- xn * QTMP
2608 // Second step.
2609 vmulqs(QTMP, qd, qd);
2610 vrsqrtsqs(QTMP, qm, QTMP);
2611 vmulqs(qd, qd, QTMP);
2612}
2613
2614void Assembler::Vsqrtqs(QRegister qd, QRegister qm, QRegister temp) {
2615 ASSERT(temp != QTMP);
2616 ASSERT(qm != QTMP);
2617 ASSERT(qd != QTMP);
2618
2619 if (temp != kNoQRegister) {
2620 vmovq(temp, qm);
2621 qm = temp;
2622 }
2623
2624 VreciprocalSqrtqs(qd, qm);
2625 vmovq(qm, qd);
2626 Vreciprocalqs(qd, qm);
2627}
2628
2629void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
2630 ASSERT(qd != QTMP);
2631 ASSERT(qn != QTMP);
2632 ASSERT(qm != QTMP);
2633
2634 Vreciprocalqs(qd, qm);
2635 vmulqs(qd, qn, qd);
2636}
2637
2638void Assembler::Branch(const Address& address, Condition cond) {
2639 ldr(PC, address, cond);
2640}
2641
2642void Assembler::BranchLink(intptr_t target_code_pool_index,
2643 CodeEntryKind entry_kind) {
2644 CLOBBERS_LR({
2645 // Avoid clobbering CODE_REG when invoking code in precompiled mode.
2646 // We don't actually use CODE_REG in the callee and caller might
2647 // be using CODE_REG for a live value (e.g. a value that is alive
2648 // across invocation of a shared stub like the one we use for
2649 // allocating Mint boxes).
2650 const Register code_reg = FLAG_precompiled_mode ? LR : CODE_REG;
2651 LoadWordFromPoolIndex(code_reg, target_code_pool_index, PP, AL);
2652 Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
2653 });
2654}
2655
2656void Assembler::BranchLink(
2657 const Code& target,
2658 ObjectPoolBuilderEntry::Patchability patchable,
2659 CodeEntryKind entry_kind,
2660 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
2661 // Make sure that class CallPattern is able to patch the label referred
2662 // to by this code sequence.
2663 // For added code robustness, use 'blx lr' in a patchable sequence and
2664 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
2665 const intptr_t index = object_pool_builder().FindObject(
2666 ToObject(target), patchable, snapshot_behavior);
2667 BranchLink(index, entry_kind);
2668}
2669
2670void Assembler::BranchLinkPatchable(
2671 const Code& target,
2672 CodeEntryKind entry_kind,
2673 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
2674 BranchLink(target, ObjectPoolBuilderEntry::kPatchable, entry_kind,
2675 snapshot_behavior);
2676}
2677
2678void Assembler::BranchLinkWithEquivalence(const Code& target,
2679 const Object& equivalence,
2680 CodeEntryKind entry_kind) {
2681 // Make sure that class CallPattern is able to patch the label referred
2682 // to by this code sequence.
2683 // For added code robustness, use 'blx lr' in a patchable sequence and
2684 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
2685 const intptr_t index =
2686 object_pool_builder().FindObject(ToObject(target), equivalence);
2687 BranchLink(index, entry_kind);
2688}
2689
2690void Assembler::BranchLink(const ExternalLabel* label) {
2691 CLOBBERS_LR({
2692 LoadImmediate(LR, label->address()); // Target address is never patched.
2693 blx(LR); // Use blx instruction so that the return branch prediction works.
2694 });
2695}
2696
2697void Assembler::BranchLinkOffset(Register base, int32_t offset) {
2698 ASSERT(base != PC);
2699 ASSERT(base != IP);
2700 LoadFromOffset(IP, base, offset);
2701 blx(IP); // Use blx instruction so that the return branch prediction works.
2702}
2703
2704void Assembler::LoadPatchableImmediate(Register rd,
2705 int32_t value,
2706 Condition cond) {
2707 const uint16_t value_low = Utils::Low16Bits(value);
2708 const uint16_t value_high = Utils::High16Bits(value);
2709 movw(rd, value_low, cond);
2710 movt(rd, value_high, cond);
2711}
2712
2713void Assembler::LoadDecodableImmediate(Register rd,
2714 int32_t value,
2715 Condition cond) {
2716 movw(rd, Utils::Low16Bits(value), cond);
2717 const uint16_t value_high = Utils::High16Bits(value);
2718 if (value_high != 0) {
2719 movt(rd, value_high, cond);
2720 }
2721}
2722
2723void Assembler::LoadImmediate(Register rd, Immediate value, Condition cond) {
2724 LoadImmediate(rd, value.value(), cond);
2725}
2726
2727void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
2728 Operand o;
2729 if (Operand::CanHold(value, &o)) {
2730 mov(rd, o, cond);
2731 } else if (Operand::CanHold(~value, &o)) {
2732 mvn_(rd, o, cond);
2733 } else {
2734 LoadDecodableImmediate(rd, value, cond);
2735 }
2736}
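// Selection sketch: ARM data-processing immediates are an 8-bit value rotated
// right by an even amount, so e.g. 0xff000000 or 0x00ab0000 load with a
// single mov, and values whose bitwise complement fits load with mvn.
// Anything else falls back to movw(+movt); e.g. LoadImmediate(R0, 0x12345678)
// emits "movw r0, #0x5678; movt r0, #0x1234".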
2737
2738void Assembler::LoadSImmediate(SRegister sd, float value, Condition cond) {
2739 if (!vmovs(sd, value, cond)) {
2740 const DRegister dd = static_cast<DRegister>(sd >> 1);
2741 const int index = sd & 1;
2742 LoadImmediate(IP, bit_cast<int32_t, float>(value), cond);
2743 vmovdr(dd, index, IP, cond);
2744 }
2745}
2746
2747void Assembler::LoadDImmediate(DRegister dd,
2748 double value,
2749 Register scratch,
2750 Condition cond) {
2751 ASSERT(scratch != PC);
2752 ASSERT(scratch != IP);
2753 if (vmovd(dd, value, cond)) return;
2754
2755 int64_t imm64 = bit_cast<int64_t, double>(value);
2756 if (constant_pool_allowed()) {
2757 intptr_t index = object_pool_builder().FindImmediate64(imm64);
2758 intptr_t offset =
2759 target::ObjectPool::element_offset(index) - kHeapObjectTag;
2760 LoadDFromOffset(dd, PP, offset, cond);
2761 } else {
2762 // A scratch register and IP are needed to load an arbitrary double.
2763 ASSERT(scratch != kNoRegister);
2764 int64_t imm64 = bit_cast<int64_t, double>(value);
2765 LoadImmediate(IP, Utils::Low32Bits(imm64), cond);
2766 LoadImmediate(scratch, Utils::High32Bits(imm64), cond);
2767 vmovdrr(dd, IP, scratch, cond);
2768 }
2769}
2770
2771void Assembler::LoadQImmediate(QRegister qd, simd128_value_t value) {
2772 ASSERT(constant_pool_allowed());
2773 intptr_t index = object_pool_builder().FindImmediate128(value);
2774 intptr_t offset = target::ObjectPool::element_offset(index) - kHeapObjectTag;
2775 LoadMultipleDFromOffset(EvenDRegisterOf(qd), 2, PP, offset);
2776}
2777
2778Address Assembler::PrepareLargeLoadOffset(const Address& address,
2779 OperandSize size,
2780 Condition cond) {
2781 ASSERT(size != kWordPair);
2782 if (address.kind() != Address::Immediate) {
2783 return address;
2784 }
2785 int32_t offset = address.offset();
2786 int32_t offset_mask = 0;
2787 if (Address::CanHoldLoadOffset(size, offset, &offset_mask)) {
2788 return address;
2789 }
2790 auto mode = address.mode();
2791 // If the retrieved offset is negative, then the U bit was flipped during
2792 // encoding, so re-flip it.
2793 if (offset < 0) {
2794 mode = static_cast<Address::Mode>(mode ^ U);
2795 }
2796 // If writing back post-indexing, we can't separate the instruction into
2797 // two parts and the offset must fit.
2798 ASSERT((mode | U) != Address::PostIndex);
2799 // If we're writing back pre-indexing, we must add directly to the base,
2800 // otherwise we use TMP.
2801 Register base = address.base();
2802 ASSERT(base != TMP || address.has_writeback());
2803 Register temp = address.has_writeback() ? base : TMP;
2804 AddImmediate(temp, base, offset & ~offset_mask, cond);
2805 base = temp;
2806 offset = offset & offset_mask;
2807 return Address(base, offset, mode);
2808}
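// Example of the split (assuming a word load, i.e. a 12-bit mode 2 offset
// with mask 0xfff): an offset of 0x1234 becomes AddImmediate(TMP, base,
// 0x1000) followed by a load from Address(TMP, 0x234), so only the part of
// the offset that fits the addressing mode stays in the memory instruction.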
2809
2810Address Assembler::PrepareLargeStoreOffset(const Address& address,
2811 OperandSize size,
2812 Condition cond) {
2813 ASSERT(size != kWordPair);
2814 if (address.kind() != Address::Immediate) {
2815 return address;
2816 }
2817 int32_t offset = address.offset();
2818 int32_t offset_mask = 0;
2819 if (Address::CanHoldStoreOffset(size, offset, &offset_mask)) {
2820 return address;
2821 }
2822 auto mode = address.mode();
2823 // If the retrieved offset is negative, then the U bit was flipped during
2824 // encoding, so re-flip it.
2825 if (offset < 0) {
2826 mode = static_cast<Address::Mode>(mode ^ U);
2827 }
2828 // If writing back post-indexing, we can't separate the instruction into
2829 // two parts and the offset must fit.
2830 ASSERT((mode | U) != Address::PostIndex);
2831 // If we're writing back pre-indexing, we must add directly to the base,
2832 // otherwise we use TMP.
2833 Register base = address.base();
2834 ASSERT(base != TMP || address.has_writeback());
2835 Register temp = address.has_writeback() ? base : TMP;
2836 AddImmediate(temp, base, offset & ~offset_mask, cond);
2837 base = temp;
2838 offset = offset & offset_mask;
2839 return Address(base, offset, mode);
2840}
2841
2842void Assembler::Load(Register reg,
2843 const Address& address,
2844 OperandSize size,
2845 Condition cond) {
2846 const Address& addr = PrepareLargeLoadOffset(address, size, cond);
2847 switch (size) {
2848 case kByte:
2849 ldrsb(reg, addr, cond);
2850 break;
2851 case kUnsignedByte:
2852 ldrb(reg, addr, cond);
2853 break;
2854 case kTwoBytes:
2855 ldrsh(reg, addr, cond);
2856 break;
2857 case kUnsignedTwoBytes:
2858 ldrh(reg, addr, cond);
2859 break;
2860 case kUnsignedFourBytes:
2861 case kFourBytes:
2862 ldr(reg, addr, cond);
2863 break;
2864 default:
2865 UNREACHABLE();
2866 }
2867}
2868
2869void Assembler::LoadFromStack(Register dst, intptr_t depth) {
2870 ASSERT(depth >= 0);
2871 LoadFromOffset(dst, SPREG, depth * target::kWordSize);
2872}
2873
2874void Assembler::StoreToStack(Register src, intptr_t depth) {
2875 ASSERT(depth >= 0);
2876 StoreToOffset(src, SPREG, depth * target::kWordSize);
2877}
2878
2879void Assembler::CompareToStack(Register src, intptr_t depth) {
2880 LoadFromStack(TMP, depth);
2881 CompareRegisters(src, TMP);
2882}
2883
2884void Assembler::Store(Register reg,
2885 const Address& address,
2886 OperandSize size,
2887 Condition cond) {
2888 const Address& addr = PrepareLargeStoreOffset(address, size, cond);
2889 switch (size) {
2890 case kUnsignedByte:
2891 case kByte:
2892 strb(reg, addr, cond);
2893 break;
2894 case kUnsignedTwoBytes:
2895 case kTwoBytes:
2896 strh(reg, addr, cond);
2897 break;
2898 case kUnsignedFourBytes:
2899 case kFourBytes:
2900 str(reg, addr, cond);
2901 break;
2902 default:
2903 UNREACHABLE();
2904 }
2905}
2906
2907void Assembler::LoadSFromOffset(SRegister reg,
2908 Register base,
2909 int32_t offset,
2910 Condition cond) {
2911 vldrs(reg, PrepareLargeLoadOffset(Address(base, offset), kSWord, cond), cond);
2912}
2913
2914void Assembler::StoreSToOffset(SRegister reg,
2915 Register base,
2916 int32_t offset,
2917 Condition cond) {
2918 vstrs(reg, PrepareLargeStoreOffset(Address(base, offset), kSWord, cond),
2919 cond);
2920}
2921
2922void Assembler::LoadDFromOffset(DRegister reg,
2923 Register base,
2924 int32_t offset,
2925 Condition cond) {
2926 vldrd(reg, PrepareLargeLoadOffset(Address(base, offset), kDWord, cond), cond);
2927}
2928
2929void Assembler::StoreDToOffset(DRegister reg,
2930 Register base,
2931 int32_t offset,
2932 Condition cond) {
2933 vstrd(reg, PrepareLargeStoreOffset(Address(base, offset), kDWord, cond),
2934 cond);
2935}
2936
2937void Assembler::LoadMultipleDFromOffset(DRegister first,
2938 intptr_t count,
2939 Register base,
2940 int32_t offset) {
2941 ASSERT(base != IP);
2942 AddImmediate(IP, base, offset);
2943 vldmd(IA, IP, first, count);
2944}
2945
2946void Assembler::StoreMultipleDToOffset(DRegister first,
2947 intptr_t count,
2948 Register base,
2949 int32_t offset) {
2950 ASSERT(base != IP);
2951 AddImmediate(IP, base, offset);
2952 vstmd(IA, IP, first, count);
2953}
2954
2955void Assembler::AddImmediate(Register rd,
2956 Register rn,
2957 int32_t value,
2958 Condition cond) {
2959 if (value == 0) {
2960 if (rd != rn) {
2961 mov(rd, Operand(rn), cond);
2962 }
2963 return;
2964 }
2965 // We prefer to select the shorter code sequence rather than selecting add for
2966 // positive values and sub for negative ones, which would slightly improve
2967 // the readability of generated code for some constants.
2968 Operand o;
2969 if (Operand::CanHold(value, &o)) {
2970 add(rd, rn, o, cond);
2971 } else if (Operand::CanHold(-value, &o)) {
2972 sub(rd, rn, o, cond);
2973 } else {
2974 ASSERT(rn != IP);
2975 if (Operand::CanHold(~value, &o)) {
2976 mvn_(IP, o, cond);
2977 add(rd, rn, Operand(IP), cond);
2978 } else if (Operand::CanHold(~(-value), &o)) {
2979 mvn_(IP, o, cond);
2980 sub(rd, rn, Operand(IP), cond);
2981 } else if (value > 0) {
2982 LoadDecodableImmediate(IP, value, cond);
2983 add(rd, rn, Operand(IP), cond);
2984 } else {
2985 LoadDecodableImmediate(IP, -value, cond);
2986 sub(rd, rn, Operand(IP), cond);
2987 }
2988 }
2989}
2990
2991void Assembler::AddImmediateSetFlags(Register rd,
2992 Register rn,
2993 int32_t value,
2994 Condition cond) {
2995 Operand o;
2996 if (Operand::CanHold(value, &o)) {
2997 // Handles value == kMinInt32.
2998 adds(rd, rn, o, cond);
2999 } else if (Operand::CanHold(-value, &o)) {
3000 ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
3001 subs(rd, rn, o, cond);
3002 } else {
3003 ASSERT(rn != IP);
3004 if (Operand::CanHold(~value, &o)) {
3005 mvn_(IP, o, cond);
3006 adds(rd, rn, Operand(IP), cond);
3007 } else if (Operand::CanHold(~(-value), &o)) {
3008 ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
3009 mvn_(IP, o, cond);
3010 subs(rd, rn, Operand(IP), cond);
3011 } else {
3012 LoadDecodableImmediate(IP, value, cond);
3013 adds(rd, rn, Operand(IP), cond);
3014 }
3015 }
3016}
3017
3018void Assembler::SubImmediate(Register rd,
3019 Register rn,
3020 int32_t value,
3021 Condition cond) {
3022 AddImmediate(rd, rn, -value, cond);
3023}
3024
3025void Assembler::SubImmediateSetFlags(Register rd,
3026 Register rn,
3027 int32_t value,
3028 Condition cond) {
3029 Operand o;
3030 if (Operand::CanHold(value, &o)) {
3031 // Handles value == kMinInt32.
3032 subs(rd, rn, o, cond);
3033 } else if (Operand::CanHold(-value, &o)) {
3034 ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
3035 adds(rd, rn, o, cond);
3036 } else {
3037 ASSERT(rn != IP);
3038 if (Operand::CanHold(~value, &o)) {
3039 mvn_(IP, o, cond);
3040 subs(rd, rn, Operand(IP), cond);
3041 } else if (Operand::CanHold(~(-value), &o)) {
3042 ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
3043 mvn_(IP, o, cond);
3044 adds(rd, rn, Operand(IP), cond);
3045 } else {
3046 LoadDecodableImmediate(IP, value, cond);
3047 subs(rd, rn, Operand(IP), cond);
3048 }
3049 }
3050}
3051
3052void Assembler::AndImmediate(Register rd,
3053 Register rs,
3054 int32_t imm,
3055 Condition cond) {
3056 Operand o;
3057 if (Operand::CanHold(imm, &o)) {
3058 and_(rd, rs, Operand(o), cond);
3059 } else {
3060 LoadImmediate(TMP, imm, cond);
3061 and_(rd, rs, Operand(TMP), cond);
3062 }
3063}
3064
3065void Assembler::AndImmediateSetFlags(Register rd,
3066 Register rs,
3067 int32_t imm,
3068 Condition cond) {
3069 Operand o;
3070 if (Operand::CanHold(imm, &o)) {
3071 ands(rd, rs, Operand(o), cond);
3072 } else {
3073 LoadImmediate(TMP, imm, cond);
3074 ands(rd, rs, Operand(TMP), cond);
3075 }
3076}
3077
3078void Assembler::OrImmediate(Register rd,
3079 Register rs,
3080 int32_t imm,
3081 Condition cond) {
3082 Operand o;
3083 if (Operand::CanHold(imm, &o)) {
3084 orr(rd, rs, Operand(o), cond);
3085 } else {
3086 LoadImmediate(TMP, imm, cond);
3087 orr(rd, rs, Operand(TMP), cond);
3088 }
3089}
3090
3091void Assembler::CompareImmediate(Register rn, int32_t value, Condition cond) {
3092 Operand o;
3093 if (Operand::CanHold(value, &o)) {
3094 cmp(rn, o, cond);
3095 } else {
3096 ASSERT(rn != IP);
3097 LoadImmediate(IP, value, cond);
3098 cmp(rn, Operand(IP), cond);
3099 }
3100}
3101
3102void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) {
3103 Operand o;
3104 if (Operand::CanHold(imm, &o)) {
3105 tst(rn, o, cond);
3106 } else {
3107 LoadImmediate(IP, imm);
3108 tst(rn, Operand(IP), cond);
3109 }
3110}
3111
3112void Assembler::IntegerDivide(Register result,
3113 Register left,
3114 Register right,
3115 DRegister tmpl,
3116 DRegister tmpr) {
3117 ASSERT(tmpl != tmpr);
3118 if (TargetCPUFeatures::integer_division_supported()) {
3119 sdiv(result, left, right);
3120 } else {
3121 SRegister stmpl = EvenSRegisterOf(tmpl);
3122 SRegister stmpr = EvenSRegisterOf(tmpr);
3123 vmovsr(stmpl, left);
3124 vcvtdi(tmpl, stmpl); // left is in tmpl.
3125 vmovsr(stmpr, right);
3126 vcvtdi(tmpr, stmpr); // right is in tmpr.
3127 vdivd(tmpr, tmpl, tmpr);
3128 vcvtid(stmpr, tmpr);
3129 vmovrs(result, stmpr);
3130 }
3131}
3132
3133static int NumRegsBelowFP(RegList regs) {
3134 int count = 0;
3135 for (int i = 0; i < FP; i++) {
3136 if ((regs & (1 << i)) != 0) {
3137 count++;
3138 }
3139 }
3140 return count;
3141}
3142
3143void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
3144 Asr(reg, reg, Operand(shift));
3145}
3146
3147void Assembler::CompareWords(Register reg1,
3148 Register reg2,
3149 intptr_t offset,
3150 Register count,
3151 Register temp,
3152 Label* equals) {
3153 Label loop;
3154
3155 AddImmediate(reg1, offset - kHeapObjectTag);
3156 AddImmediate(reg2, offset - kHeapObjectTag);
3157
3158 COMPILE_ASSERT(target::kWordSize == 4);
3159 Bind(&loop);
3160 BranchIfZero(count, equals, Assembler::kNearJump);
3161 AddImmediate(count, -1);
3162 ldr(temp, Address(reg1, 4, Address::PostIndex));
3163 ldr(TMP, Address(reg2, 4, Address::PostIndex));
3164 cmp(temp, Operand(TMP));
3165 BranchIf(EQUAL, &loop, Assembler::kNearJump);
3166}
3167
3168void Assembler::EnterFrame(RegList regs, intptr_t frame_size) {
3169 if (prologue_offset_ == -1) {
3170 prologue_offset_ = CodeSize();
3171 }
3172 PushList(regs);
3173 if ((regs & (1 << FP)) != 0) {
3174 // Set FP to the saved previous FP.
3175 add(FP, SP, Operand(4 * NumRegsBelowFP(regs)));
3176 }
3177 if (frame_size != 0) {
3178 AddImmediate(SP, -frame_size);
3179 }
3180}
3181
3182void Assembler::LeaveFrame(RegList regs, bool allow_pop_pc) {
3183 ASSERT(allow_pop_pc || (regs & (1 << PC)) == 0); // Must not pop PC.
3184 if ((regs & (1 << FP)) != 0) {
3185 // Use FP to set SP.
3186 sub(SP, FP, Operand(4 * NumRegsBelowFP(regs)));
3187 }
3188 PopList(regs);
3189}
3190
3191void Assembler::Ret(Condition cond /* = AL */) {
3192 READS_RETURN_ADDRESS_FROM_LR(bx(LR, cond));
3193}
3194
3195void Assembler::SetReturnAddress(Register value) {
3196 RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(LR, value));
3197}
3198
3199void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
3200 // Reserve space for arguments and align frame before entering
3201 // the C++ world.
3202 AddImmediate(SP, -frame_space);
3203 if (OS::ActivationFrameAlignment() > 1) {
3204 bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1));
3205 }
3206}
3207
3208void Assembler::EmitEntryFrameVerification(Register scratch) {
3209#if defined(DEBUG)
3210 Label done;
3211 ASSERT(!constant_pool_allowed());
3212 LoadImmediate(scratch, target::frame_layout.exit_link_slot_from_entry_fp *
3213 target::kWordSize);
3214 add(scratch, scratch, Operand(FPREG));
3215 cmp(scratch, Operand(SPREG));
3216 b(&done, EQ);
3217
3218 Breakpoint();
3219
3220 Bind(&done);
3221#endif
3222}
3223
3224void Assembler::CallRuntime(const RuntimeEntry& entry,
3225 intptr_t argument_count) {
3226 ASSERT(!entry.is_leaf());
3227 // Argument count is not checked here, but in the runtime entry for a more
3228 // informative error message.
3229 LoadFromOffset(R9, THR, entry.OffsetFromThread());
3230 LoadImmediate(R4, argument_count);
3231 ldr(IP, Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
3232 blx(IP);
3233}
3234
3235// For use by LR related macros (e.g. CLOBBERS_LR).
3236#undef __
3237#define __ assembler_->
3238
3239#if defined(VFPv3_D32)
3240static const RegisterSet kVolatileFpuRegisters(0, 0xFF0F); // Q0-Q3, Q8-Q15
3241#else
3242static const RegisterSet kVolatileFpuRegisters(0, 0x000F); // Q0-Q3
3243#endif
3244
3245LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
3246 intptr_t frame_size,
3247 bool preserve_registers)
3248 : assembler_(assembler), preserve_registers_(preserve_registers) {
3249 __ Comment("EnterCallRuntimeFrame");
3250 if (preserve_registers) {
3251 // Preserve volatile CPU registers and PP.
3252 SPILLS_LR_TO_FRAME(__ EnterFrame(
3253 kDartVolatileCpuRegs | (1 << PP) | (1 << FP) | (1 << LR), 0));
3254 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
3255
3256 __ PushRegisters(kVolatileFpuRegisters);
3257 } else {
3258 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
3259 // These registers must always be preserved.
3260 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
3261 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
3262 COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
3263 }
3264
3265 __ ReserveAlignedFrameSpace(frame_size);
3266}
3267
3268void LeafRuntimeScope::Call(const RuntimeEntry& entry,
3269 intptr_t argument_count) {
3270 ASSERT(argument_count == entry.argument_count());
3271 __ LoadFromOffset(TMP, THR, entry.OffsetFromThread());
3272 __ str(TMP,
3273 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
3274 __ blx(TMP);
3275 __ LoadImmediate(TMP, VMTag::kDartTagId);
3276 __ str(TMP,
3277 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
3278}
3279
3280LeafRuntimeScope::~LeafRuntimeScope() {
3281 if (preserve_registers_) {
3282 // SP might have been modified to reserve space for arguments
3283 // and ensure proper alignment of the stack frame.
3284 // We need to restore it before restoring registers.
3285 const intptr_t kPushedFpuRegisterSize =
3286 kVolatileFpuRegisters.FpuRegisterCount() * kFpuRegisterSize;
3287
3288 COMPILE_ASSERT(PP < FP);
3289 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
3290 // kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile,
3291 // it is pushed ahead of FP.
3292 const intptr_t kPushedRegistersSize =
3293 kDartVolatileCpuRegCount * target::kWordSize + kPushedFpuRegisterSize;
3294 __ AddImmediate(SP, FP, -kPushedRegistersSize);
3295
3296 __ PopRegisters(kVolatileFpuRegisters);
3297
3298 // Restore volatile CPU registers.
3299 RESTORES_LR_FROM_FRAME(__ LeaveFrame(kDartVolatileCpuRegs | (1 << PP) |
3300 (1 << FP) | (1 << LR)));
3301 } else {
3302 RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR)));
3303 }
3304}
3305
3306// For use by LR related macros (e.g. CLOBBERS_LR).
3307#undef __
3308#define __ this->
3309
3310void Assembler::EnterDartFrame(intptr_t frame_size, bool load_pool_pointer) {
3311 ASSERT(!constant_pool_allowed());
3312
3313 // Registers are pushed in descending order: R5 | R6 | R7/R11 | R14.
3314 COMPILE_ASSERT(PP < CODE_REG);
3315 COMPILE_ASSERT(CODE_REG < FP);
3316 COMPILE_ASSERT(FP < LINK_REGISTER.code);
3317
3318 if (!FLAG_precompiled_mode) {
3319 SPILLS_LR_TO_FRAME(
3320 EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0));
3321
3322 // Setup pool pointer for this dart function.
3323 if (load_pool_pointer) LoadPoolPointer();
3324 } else {
3325 SPILLS_LR_TO_FRAME(EnterFrame((1 << FP) | (1 << LR), 0));
3326 }
3327 set_constant_pool_allowed(true);
3328
3329 // Reserve space for locals.
3330 AddImmediate(SP, -frame_size);
3331}
3332
3333// On entry to a function compiled for OSR, the caller's frame pointer, the
3334// stack locals, and any copied parameters are already in place. The frame
3335// pointer is already set up. The PC marker is not correct for the
3336// optimized function and there may be extra space for spill slots to
3337// allocate. We must also set up the pool pointer for the function.
3338void Assembler::EnterOsrFrame(intptr_t extra_size) {
3339 ASSERT(!constant_pool_allowed());
3340 Comment("EnterOsrFrame");
3341 RestoreCodePointer();
3342 LoadPoolPointer();
3343
3344 AddImmediate(SP, -extra_size);
3345}
3346
3347void Assembler::LeaveDartFrame() {
3348 if (!FLAG_precompiled_mode) {
3349 ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
3350 target::kWordSize));
3351 }
3352 set_constant_pool_allowed(false);
3353
3354 // This will implicitly drop saved PP, PC marker due to restoring SP from FP
3355 // first.
3356 RESTORES_LR_FROM_FRAME(LeaveFrame((1 << FP) | (1 << LR)));
3357}
3358
3359void Assembler::LeaveDartFrameAndReturn() {
3360 if (!FLAG_precompiled_mode) {
3361 ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
3362 target::kWordSize));
3363 }
3364 set_constant_pool_allowed(false);
3365
3366 // This will implicitly drop saved PP, PC marker due to restoring SP from FP
3367 // first.
3368 LeaveFrame((1 << FP) | (1 << PC), /*allow_pop_pc=*/true);
3369}
3370
3371void Assembler::EnterStubFrame() {
3372 EnterDartFrame(0);
3373}
3374
3375void Assembler::LeaveStubFrame() {
3376 LeaveDartFrame();
3377}
3378
3379void Assembler::EnterCFrame(intptr_t frame_space) {
3380 // Already saved.
3381 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
3382 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
3383
3384 EnterFrame(1 << FP, 0);
3385 ReserveAlignedFrameSpace(frame_space);
3386}
3387
3388void Assembler::LeaveCFrame() {
3389 LeaveFrame(1 << FP);
3390}
3391
3392// R0 receiver, R9 ICData entries array
3393// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
3394void Assembler::MonomorphicCheckedEntryJIT() {
3395 has_monomorphic_entry_ = true;
3396#if defined(TESTING) || defined(DEBUG)
3397 bool saved_use_far_branches = use_far_branches();
3398 set_use_far_branches(false);
3399#endif
3400 intptr_t start = CodeSize();
3401
3402 Comment("MonomorphicCheckedEntry");
3403 ASSERT_EQUAL(CodeSize() - start,
3404 target::Instructions::kMonomorphicEntryOffsetJIT);
3405
3406 const intptr_t cid_offset = target::Array::element_offset(0);
3407 const intptr_t count_offset = target::Array::element_offset(1);
3408
3409 // Sadly this cannot use ldm because ldm takes no offset.
3410 ldr(R1, FieldAddress(R9, cid_offset));
3411 ldr(R2, FieldAddress(R9, count_offset));
3412 LoadClassIdMayBeSmi(IP, R0);
3413 add(R2, R2, Operand(target::ToRawSmi(1)));
3414 cmp(R1, Operand(IP, LSL, 1));
3415 Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);
3416 str(R2, FieldAddress(R9, count_offset));
3417 LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction.
3418
3419 // Fall through to unchecked entry.
3420 ASSERT_EQUAL(CodeSize() - start,
3421 target::Instructions::kPolymorphicEntryOffsetJIT);
3422
3423#if defined(TESTING) || defined(DEBUG)
3424 set_use_far_branches(saved_use_far_branches);
3425#endif
3426}
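// Note on the comparison above: R1 holds the cached class id as a Smi
// (value << 1 on 32-bit), while LoadClassIdMayBeSmi leaves a raw class id in
// IP, so "cmp(R1, Operand(IP, LSL, 1))" compares the two in Smi form without
// untagging. Likewise the call count is bumped by target::ToRawSmi(1), i.e.
// the raw value 2.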
3427
3428// R0 receiver, R9 guarded cid as Smi.
3429// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
3430void Assembler::MonomorphicCheckedEntryAOT() {
3431 has_monomorphic_entry_ = true;
3432#if defined(TESTING) || defined(DEBUG)
3433 bool saved_use_far_branches = use_far_branches();
3434 set_use_far_branches(false);
3435#endif
3436 intptr_t start = CodeSize();
3437
3438 Comment("MonomorphicCheckedEntry");
3439 ASSERT_EQUAL(CodeSize() - start,
3440 target::Instructions::kMonomorphicEntryOffsetAOT);
3441
3442 LoadClassId(IP, R0);
3443 cmp(R9, Operand(IP, LSL, 1));
3444 Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);
3445
3446 // Fall through to unchecked entry.
3447 ASSERT_EQUAL(CodeSize() - start,
3448 target::Instructions::kPolymorphicEntryOffsetAOT);
3449
3450#if defined(TESTING) || defined(DEBUG)
3451 set_use_far_branches(saved_use_far_branches);
3452#endif
3453}
3454
3455void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
3456 has_monomorphic_entry_ = true;
3457 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
3458 bkpt(0);
3459 }
3460 b(label);
3461 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
3462 bkpt(0);
3463 }
3464}
3465
3466void Assembler::CombineHashes(Register hash, Register other) {
3467 // hash += other_hash
3468 add(hash, hash, Operand(other));
3469 // hash += hash << 10
3470 add(hash, hash, Operand(hash, LSL, 10));
3471 // hash ^= hash >> 6
3472 eor(hash, hash, Operand(hash, LSR, 6));
3473}
3474
3475void Assembler::FinalizeHashForSize(intptr_t bit_size,
3476 Register hash,
3477 Register scratch) {
3478 ASSERT(bit_size > 0); // Can't avoid returning 0 if there are no hash bits!
3479 // While any 32-bit hash value fits in X bits, where X > 32, the caller may
3480 // reasonably expect that the returned values fill the entire bit space.
3481 ASSERT(bit_size <= kBitsPerInt32);
3482 // hash += hash << 3;
3483 add(hash, hash, Operand(hash, LSL, 3));
3484 // hash ^= hash >> 11; // Logical shift, unsigned hash.
3485 eor(hash, hash, Operand(hash, LSR, 11));
3486 // hash += hash << 15;
3487 adds(hash, hash, Operand(hash, LSL, 15));
3488 if (bit_size < kBitsPerInt32) {
3489 // Size to fit.
3490 AndImmediateSetFlags(hash, hash, Utils::NBitMask(bit_size), NOT_ZERO);
3491 }
3492 // return (hash == 0) ? 1 : hash;
3493 LoadImmediate(hash, 1, ZERO);
3494}
3495
3496#ifndef PRODUCT
3497void Assembler::MaybeTraceAllocation(Register stats_addr_reg, Label* trace) {
3498 ASSERT(stats_addr_reg != kNoRegister);
3499 ASSERT(stats_addr_reg != TMP);
3500 ldrb(TMP, Address(stats_addr_reg, 0));
3501 cmp(TMP, Operand(0));
3502 b(trace, NE);
3503}
3504
3505void Assembler::MaybeTraceAllocation(intptr_t cid,
3506 Label* trace,
3507 Register temp_reg,
3508 JumpDistance distance) {
3509 LoadAllocationTracingStateAddress(temp_reg, cid);
3510 MaybeTraceAllocation(temp_reg, trace);
3511}
3512
3513void Assembler::MaybeTraceAllocation(Register cid,
3514 Label* trace,
3515 Register temp_reg,
3516 JumpDistance distance) {
3517 LoadAllocationTracingStateAddress(temp_reg, cid);
3518 MaybeTraceAllocation(temp_reg, trace);
3519}
3520
3521void Assembler::LoadAllocationTracingStateAddress(Register dest, Register cid) {
3522 ASSERT(dest != kNoRegister);
3523 ASSERT(dest != TMP);
3524
3525 LoadIsolateGroup(dest);
3526 ldr(dest, Address(dest, target::IsolateGroup::class_table_offset()));
3527 ldr(dest,
3528 Address(dest,
3529 target::ClassTable::allocation_tracing_state_table_offset()));
3530 AddScaled(cid, cid, TIMES_1,
3531 target::ClassTable::AllocationTracingStateSlotOffsetFor(0));
3532 AddRegisters(dest, cid);
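// `dest` now holds the address of the allocation tracing state byte for this
// class id; note that the `cid` register is clobbered in the process.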
3533}
3534
3535void Assembler::LoadAllocationTracingStateAddress(Register dest, intptr_t cid) {
3536 ASSERT(dest != kNoRegister);
3537 ASSERT(dest != TMP);
3538 ASSERT(cid > 0);
3539
3540 LoadIsolateGroup(dest);
3541 ldr(dest, Address(dest, target::IsolateGroup::class_table_offset()));
3542 ldr(dest,
3543 Address(dest,
3544 target::ClassTable::allocation_tracing_state_table_offset()));
3545 AddImmediate(dest,
3546 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid));
3547}
3548#endif // !PRODUCT
3549
3550void Assembler::TryAllocateObject(intptr_t cid,
3551 intptr_t instance_size,
3552 Label* failure,
3553 JumpDistance distance,
3554 Register instance_reg,
3555 Register temp_reg) {
3556 ASSERT(failure != nullptr);
3557 ASSERT(instance_reg != kNoRegister);
3558 ASSERT(instance_reg != temp_reg);
3559 ASSERT(instance_reg != IP);
3560 ASSERT(temp_reg != kNoRegister);
3561 ASSERT(temp_reg != IP);
3562 ASSERT(instance_size != 0);
3563 ASSERT(Utils::IsAligned(instance_size,
3564 target::ObjectAlignment::kObjectAlignment));
3565 if (FLAG_inline_alloc &&
3566 target::Heap::IsAllocatableInNewSpace(instance_size)) {
3567 ldr(instance_reg, Address(THR, target::Thread::top_offset()));
3568 // TODO(koda): Protect against unsigned overflow here.
3569 AddImmediate(instance_reg, instance_size);
3570 // instance_reg: potential top (next object start).
3571 ldr(IP, Address(THR, target::Thread::end_offset()));
3572 cmp(IP, Operand(instance_reg));
3573 // Fail if the heap end is unsigned less-than-or-equal to the new heap top.
3574 b(failure, LS);
3575 CheckAllocationCanary(instance_reg, temp_reg);
3576
3577 // If this allocation is traced, the program will jump to the failure path
3578 // (i.e. the allocation stub), which will allocate the object and trace the
3579 // allocation call site.
3580 NOT_IN_PRODUCT(LoadAllocationTracingStateAddress(temp_reg, cid));
3581 NOT_IN_PRODUCT(MaybeTraceAllocation(temp_reg, failure));
3582
3583 // Successfully allocated the object; now update top to point to the next
3584 // object start and initialize the tag word (which encodes the class id).
3585 str(instance_reg, Address(THR, target::Thread::top_offset()));
3586 // Move instance_reg back to the start of the object and tag it.
3587 AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
3588
3589 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
3590 LoadImmediate(temp_reg, tags);
3591 str(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
3592 } else {
3593 b(failure);
3594 }
3595}
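// The fast path above is a bump allocation into new space; a minimal sketch of
// the equivalent logic (for reference only, ignoring the tracing check):
//
//   uword top = thread->top();
//   uword new_top = top + instance_size;
//   if (thread->end() <= new_top) goto failure;   // Not enough space; use stub.
//   thread->set_top(new_top);
//   uword instance = top + kHeapObjectTag;        // Tagged pointer to object.
//   // Write the tag word encoding cid and size at Object::tags_offset().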
3596
3597void Assembler::TryAllocateArray(intptr_t cid,
3598 intptr_t instance_size,
3599 Label* failure,
3600 Register instance,
3601 Register end_address,
3602 Register temp1,
3603 Register temp2) {
3604 if (FLAG_inline_alloc &&
3605 target::Heap::IsAllocatableInNewSpace(instance_size)) {
3606 NOT_IN_PRODUCT(LoadAllocationTracingStateAddress(temp1, cid));
3607 // Potential new object start.
3608 ldr(instance, Address(THR, target::Thread::top_offset()));
3609 AddImmediateSetFlags(end_address, instance, instance_size);
3610 b(failure, CS); // Branch if unsigned overflow.
3611
3612 // Check if the allocation fits into the remaining space.
3613 // instance: potential new object start.
3614 // end_address: potential next object start.
3615 ldr(temp2, Address(THR, target::Thread::end_offset()));
3616 cmp(end_address, Operand(temp2));
3617 b(failure, CS);
3618 CheckAllocationCanary(instance, temp2);
3619
3620 // If this allocation is traced, the program will jump to the failure path
3621 // (i.e. the allocation stub), which will allocate the object and trace the
3622 // allocation call site.
3623 NOT_IN_PRODUCT(MaybeTraceAllocation(temp1, failure));
3624
3625 // Successfully allocated the object(s); now update top to point to the
3626 // next object start and initialize the object.
3627 str(end_address, Address(THR, target::Thread::top_offset()));
3628 add(instance, instance, Operand(kHeapObjectTag));
3629
3630 // Initialize the tags.
3631 // instance: new object start as a tagged pointer.
3632 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
3633 LoadImmediate(temp2, tags);
3634 str(temp2,
3635 FieldAddress(instance, target::Object::tags_offset())); // Store tags.
3636 } else {
3637 b(failure);
3638 }
3639}
3640
3641void Assembler::CopyMemoryWords(Register src,
3642 Register dst,
3643 Register size,
3644 Register temp) {
3645 Label loop, done;
3646 __ cmp(size, Operand(0));
3647 __ b(&done, EQUAL);
3648 __ Bind(&loop);
3649 __ ldr(temp, Address(src, target::kWordSize, Address::PostIndex));
3650 __ str(temp, Address(dst, target::kWordSize, Address::PostIndex));
3651 __ subs(size, size, Operand(target::kWordSize));
3652 __ b(&loop, NOT_ZERO);
3653 __ Bind(&done);
3654}
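// CopyMemoryWords is a word-granularity memcpy: `size` is a byte count that
// must be a multiple of target::kWordSize, and `src`, `dst`, and `size` are
// all clobbered (post-increment addressing advances the pointers).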
3655
3656void Assembler::GenerateUnRelocatedPcRelativeCall(Condition cond,
3657 intptr_t offset_into_target) {
3658 // Emit "bl<cond> <offset>" with a placeholder offset; the real distance is patched in below.
3659 EmitType5(cond, 0x686868, /*link=*/true);
3660
3661 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
3662 PcRelativeCallPattern::kLengthInBytes);
3663 pattern.set_distance(offset_into_target);
3664}
3665
3666void Assembler::GenerateUnRelocatedPcRelativeTailCall(
3667 Condition cond,
3668 intptr_t offset_into_target) {
3669 // Emit "b<cond> <offset>" with a placeholder offset; the real distance is patched in below.
3670 EmitType5(cond, 0x686868, /*link=*/false);
3671
3672 PcRelativeTailCallPattern pattern(buffer_.contents() + buffer_.Size() -
3673 PcRelativeTailCallPattern::kLengthInBytes);
3674 pattern.set_distance(offset_into_target);
3675}
3676
3677bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
3678 bool is_load,
3679 bool is_external,
3680 intptr_t cid,
3681 intptr_t index_scale,
3682 bool* needs_base) {
3683 ASSERT(needs_base != nullptr);
3684 auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
3685 if ((rep == kUnboxedInt32x4) || (rep == kUnboxedFloat32x4) ||
3686 (rep == kUnboxedFloat64x2)) {
3687 // We are using vldmd/vstmd, which do not support an immediate offset.
3688 return false;
3689 }
3690
3691 if (!IsSafeSmi(constant)) return false;
3692 const int64_t index = target::SmiValue(constant);
3693 const intptr_t offset_base =
3694 (is_external ? 0
3695 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
3696 const int64_t offset = index * index_scale + offset_base;
3697 ASSERT(Utils::IsInt(32, offset));
3698 if (Address::CanHoldImmediateOffset(is_load, cid, offset)) {
3699 *needs_base = false;
3700 return true;
3701 }
3702 if (Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base)) {
3703 *needs_base = true;
3704 return true;
3705 }
3706
3707 return false;
3708}
3709
3710Address Assembler::ElementAddressForIntIndex(bool is_load,
3711 bool is_external,
3712 intptr_t cid,
3713 intptr_t index_scale,
3714 Register array,
3715 intptr_t index,
3716 Register temp) {
3717 const int64_t offset_base =
3718 (is_external ? 0
3719 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
3720 const int64_t offset =
3721 offset_base + static_cast<int64_t>(index) * index_scale;
3722 ASSERT(Utils::IsInt(32, offset));
3723
3724 if (Address::CanHoldImmediateOffset(is_load, cid, offset)) {
3725 return Address(array, static_cast<int32_t>(offset));
3726 } else {
3727 ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base));
3728 AddImmediate(temp, array, static_cast<int32_t>(offset_base));
3729 return Address(temp, static_cast<int32_t>(offset - offset_base));
3730 }
3731}
3732
3733void Assembler::LoadElementAddressForIntIndex(Register address,
3734 bool is_load,
3735 bool is_external,
3736 intptr_t cid,
3737 intptr_t index_scale,
3738 Register array,
3739 intptr_t index) {
3740 const int64_t offset_base =
3741 (is_external ? 0
3742 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
3743 const int64_t offset =
3744 offset_base + static_cast<int64_t>(index) * index_scale;
3745 ASSERT(Utils::IsInt(32, offset));
3746 AddImmediate(address, array, offset);
3747}
3748
3749Address Assembler::ElementAddressForRegIndex(bool is_load,
3750 bool is_external,
3751 intptr_t cid,
3752 intptr_t index_scale,
3753 bool index_unboxed,
3754 Register array,
3755 Register index) {
3756 // Unless unboxed, the index is expected to be smi-tagged (i.e. shifted left by 1) for all arrays.
3757 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
3758 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
3759 int32_t offset =
3760 is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
3761 const OperandSize size = Address::OperandSizeFor(cid);
3762 ASSERT(array != IP);
3763 ASSERT(index != IP);
3764 const Register base = is_load ? IP : index;
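// Register-offset addressing cannot carry an extra displacement and is not
// available for every element size, so in those cases fold `array` plus the
// scaled index into a base register and fall through to a base-plus-offset
// address below.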
3765 if ((offset != 0) || (is_load && (size == kByte || size == kUnsignedByte)) ||
3766 (size == kTwoBytes) || (size == kUnsignedTwoBytes) || (size == kSWord) ||
3767 (size == kDWord) || (size == kRegList)) {
3768 if (shift < 0) {
3769 ASSERT(shift == -1);
3770 add(base, array, Operand(index, ASR, 1));
3771 } else {
3772 add(base, array, Operand(index, LSL, shift));
3773 }
3774 } else {
3775 if (shift < 0) {
3776 ASSERT(shift == -1);
3777 return Address(array, index, ASR, 1);
3778 } else {
3779 return Address(array, index, LSL, shift);
3780 }
3781 }
3782 int32_t offset_mask = 0;
3783 if ((is_load && !Address::CanHoldLoadOffset(size, offset, &offset_mask)) ||
3784 (!is_load && !Address::CanHoldStoreOffset(size, offset, &offset_mask))) {
3785 AddImmediate(base, offset & ~offset_mask);
3786 offset = offset & offset_mask;
3787 }
3788 return Address(base, offset);
3789}
3790
3791void Assembler::LoadElementAddressForRegIndex(Register address,
3792 bool is_load,
3793 bool is_external,
3794 intptr_t cid,
3795 intptr_t index_scale,
3796 bool index_unboxed,
3797 Register array,
3798 Register index) {
3799 // Unless unboxed, the index is expected to be smi-tagged (i.e. shifted left by 1) for all arrays.
3800 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
3801 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
3802 int32_t offset =
3803 is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
3804 if (shift < 0) {
3805 ASSERT(shift == -1);
3806 add(address, array, Operand(index, ASR, 1));
3807 } else {
3808 add(address, array, Operand(index, LSL, shift));
3809 }
3810 if (offset != 0) {
3811 AddImmediate(address, offset);
3812 }
3813}
3814
3815void Assembler::LoadStaticFieldAddress(Register address,
3816 Register field,
3817 Register scratch) {
3818 LoadFieldFromOffset(scratch, field,
3819 target::Field::host_offset_or_field_id_offset());
3820 const intptr_t field_table_offset =
3821 compiler::target::Thread::field_table_values_offset();
3822 LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
3823 add(address, address,
3824 Operand(scratch, LSL, target::kWordSizeLog2 - kSmiTagShift));
3825}
3826
3827void Assembler::LoadFieldAddressForRegOffset(Register address,
3828 Register instance,
3829 Register offset_in_words_as_smi) {
3830 add(address, instance,
3831 Operand(offset_in_words_as_smi, LSL,
3832 target::kWordSizeLog2 - kSmiTagShift));
3833 AddImmediate(address, -kHeapObjectTag);
3834}
3835
3836void Assembler::LoadHalfWordUnaligned(Register dst,
3837 Register addr,
3838 Register tmp) {
3839 ASSERT(dst != addr);
3840 ldrb(dst, Address(addr, 0));
3841 ldrsb(tmp, Address(addr, 1));
3842 orr(dst, dst, Operand(tmp, LSL, 8));
3843}
3844
3845void Assembler::LoadHalfWordUnsignedUnaligned(Register dst,
3846 Register addr,
3847 Register tmp) {
3848 ASSERT(dst != addr);
3849 ldrb(dst, Address(addr, 0));
3850 ldrb(tmp, Address(addr, 1));
3851 orr(dst, dst, Operand(tmp, LSL, 8));
3852}
3853
3854void Assembler::StoreHalfWordUnaligned(Register src,
3855 Register addr,
3856 Register tmp) {
3857 strb(src, Address(addr, 0));
3858 Lsr(tmp, src, Operand(8));
3859 strb(tmp, Address(addr, 1));
3860}
3861
3862void Assembler::LoadWordUnaligned(Register dst, Register addr, Register tmp) {
3863 ASSERT(dst != addr);
3864 ldrb(dst, Address(addr, 0));
3865 ldrb(tmp, Address(addr, 1));
3866 orr(dst, dst, Operand(tmp, LSL, 8));
3867 ldrb(tmp, Address(addr, 2));
3868 orr(dst, dst, Operand(tmp, LSL, 16));
3869 ldrb(tmp, Address(addr, 3));
3870 orr(dst, dst, Operand(tmp, LSL, 24));
3871}
3872
3873void Assembler::StoreWordUnaligned(Register src, Register addr, Register tmp) {
3874 strb(src, Address(addr, 0));
3875 Lsr(tmp, src, Operand(8));
3876 strb(tmp, Address(addr, 1));
3877 Lsr(tmp, src, Operand(16));
3878 strb(tmp, Address(addr, 2));
3879 Lsr(tmp, src, Operand(24));
3880 strb(tmp, Address(addr, 3));
3881}
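// The unaligned accessors above assemble and disassemble values one byte at a
// time in little-endian order; for example, LoadWordUnaligned computes roughly
//   dst = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
// and LoadHalfWordUnaligned sign-extends by loading the high byte with ldrsb.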
3882
3883void Assembler::RangeCheck(Register value,
3884 Register temp,
3885 intptr_t low,
3886 intptr_t high,
3887 RangeCheckCondition condition,
3888 Label* target) {
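// Uses the unsigned-compare trick: after biasing the value by -low, a single
// unsigned comparison against (high - low) tests both bounds at once
// (LS = in range, HI = out of range).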
3889 auto cc = condition == kIfInRange ? LS : HI;
3890 Register to_check = temp != kNoRegister ? temp : value;
3891 AddImmediate(to_check, value, -low);
3892 CompareImmediate(to_check, high - low);
3893 b(target, cc);
3894}
3895
3896} // namespace compiler
3897} // namespace dart
3898
3899#endif // defined(TARGET_ARCH_ARM)