assembler_arm.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // NOLINT
6#if defined(TARGET_ARCH_ARM)
7
8#define SHOULD_NOT_INCLUDE_RUNTIME
9
10#include "vm/class_id.h"
13#include "vm/cpu.h"
14#include "vm/instructions.h"
15#include "vm/tags.h"
16
17// An extra check since we are assuming the existence of /proc/cpuinfo below.
18#if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) && \
19 !defined(DART_HOST_OS_IOS) && !defined(DART_HOST_OS_MACOS)
20#error ARM cross-compile only supported on Linux, Android, iOS, and Mac
21#endif
22
23// For use by LR related macros (e.g. CLOBBERS_LR).
24#define __ this->
25
26namespace dart {
27
28DECLARE_FLAG(bool, check_code_pointer);
29DECLARE_FLAG(bool, precompiled_mode);
30
31namespace compiler {
32
33Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
34 intptr_t far_branch_level)
35 : AssemblerBase(object_pool_builder),
36 use_far_branches_(far_branch_level != 0),
37 constant_pool_allowed_(false) {
38 generate_invoke_write_barrier_wrapper_ = [&](Condition cond, Register reg) {
39 Call(
40 Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)),
41 cond);
42 };
43 generate_invoke_array_write_barrier_ = [&](Condition cond) {
44 Call(Address(THR, target::Thread::array_write_barrier_entry_point_offset()),
45 cond);
46 };
47}
48
49uint32_t Address::encoding3() const {
50 if (kind_ == Immediate) {
51 uint32_t offset = encoding_ & kOffset12Mask;
52 ASSERT(offset < 256);
53 return (encoding_ & ~kOffset12Mask) | B22 | ((offset & 0xf0) << 4) |
54 (offset & 0xf);
55 }
56 ASSERT(kind_ == IndexRegister);
57 return encoding_;
58}
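// Illustrative note (not from the original source): addressing mode 3 stores
// its 8-bit immediate offset as two nibbles, imm4H at bits 11..8 and imm4L at
// bits 3..0. A minimal sketch of the same split, assuming the offset already
// passed the `offset < 256` check above:
//
//   uint32_t EncodeMode3Offset(uint32_t offset) {  // hypothetical helper
//     return ((offset & 0xf0) << 4) | (offset & 0x0f);
//   }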
59
60uint32_t Address::vencoding() const {
61 ASSERT(kind_ == Immediate);
62 uint32_t offset = encoding_ & kOffset12Mask;
63 ASSERT(offset < (1 << 10)); // In the range 0 to +1020.
64 ASSERT(Utils::IsAligned(offset, 4)); // Multiple of 4.
65 int mode = encoding_ & ((8 | 4 | 1) << 21);
66 ASSERT((mode == Offset) || (mode == NegOffset));
67 uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
68 if (mode == Offset) {
69 vencoding |= 1 << 23;
70 }
71 return vencoding;
72}
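// Illustrative note (not from the original source): VFP load/store addresses
// encode the byte offset as a word count plus an add/subtract bit, so only
// offsets whose magnitude is a multiple of 4 and at most 1020 are
// representable, matching the asserts above. A hedged sketch of that check:
//
//   bool FitsVFPImmediateOffset(int32_t offset) {  // hypothetical helper
//     return ((offset & 3) == 0) && (offset > -1024) && (offset < 1024);
//   }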
73
74void Assembler::Emit(int32_t value) {
75 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
76 buffer_.Emit<int32_t>(value);
77}
78
79void Assembler::EmitType01(Condition cond,
80 int type,
81 Opcode opcode,
82 int set_cc,
83 Register rn,
84 Register rd,
85 Operand o) {
86 ASSERT(rd != kNoRegister);
87 ASSERT(cond != kNoCondition);
88 int32_t encoding =
89 static_cast<int32_t>(cond) << kConditionShift | type << kTypeShift |
90 static_cast<int32_t>(opcode) << kOpcodeShift | set_cc << kSShift |
91 ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | o.encoding();
92 Emit(encoding);
93}
94
95void Assembler::EmitType5(Condition cond, int32_t offset, bool link) {
96 ASSERT(cond != kNoCondition);
97 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
98 5 << kTypeShift | (link ? 1 : 0) << kLinkShift;
99 BailoutIfInvalidBranchOffset(offset);
100 Emit(Assembler::EncodeBranchOffset(offset, encoding));
101}
102
103void Assembler::EmitMemOp(Condition cond,
104 bool load,
105 bool byte,
106 Register rd,
107 Address ad) {
108 ASSERT(rd != kNoRegister);
109 ASSERT(cond != kNoCondition);
110 // Unpredictable, illegal on some microarchitectures.
111 ASSERT(!ad.has_writeback() || (ad.rn() != rd));
112
113 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B26 |
114 (ad.kind() == Address::Immediate ? 0 : B25) |
115 (load ? L : 0) | (byte ? B : 0) | ArmEncode::Rd(rd) |
116 ad.encoding();
117 Emit(encoding);
118}
119
120void Assembler::EmitMemOpAddressMode3(Condition cond,
121 int32_t mode,
122 Register rd,
123 Address ad) {
124 ASSERT(rd != kNoRegister);
125 ASSERT(cond != kNoCondition);
126 // Unpredictable, illegal on some microarchitectures.
127 ASSERT(!ad.has_writeback() || (ad.rn() != rd));
128
129 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | mode |
130 ArmEncode::Rd(rd) | ad.encoding3();
131 Emit(encoding);
132}
133
134void Assembler::EmitMultiMemOp(Condition cond,
135 BlockAddressMode am,
136 bool load,
137 Register base,
138 RegList regs) {
140 ASSERT(cond != kNoCondition);
141 // Unpredictable, illegal on some microarchitectures.
142 ASSERT(!Address::has_writeback(am) || !(regs & (1 << base)));
143 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
144 am | (load ? L : 0) | ArmEncode::Rn(base) | regs;
145 Emit(encoding);
146}
147
148void Assembler::EmitShiftImmediate(Condition cond,
149 Shift opcode,
150 Register rd,
151 Register rm,
152 Operand o) {
153 ASSERT(cond != kNoCondition);
154 ASSERT(o.type() == 1);
155 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
156 static_cast<int32_t>(MOV) << kOpcodeShift |
157 ArmEncode::Rd(rd) | o.encoding() << kShiftImmShift |
158 static_cast<int32_t>(opcode) << kShiftShift |
159 static_cast<int32_t>(rm);
160 Emit(encoding);
161}
162
163void Assembler::EmitShiftRegister(Condition cond,
164 Shift opcode,
165 Register rd,
166 Register rm,
167 Operand o) {
168 ASSERT(cond != kNoCondition);
169 ASSERT(o.type() == 0);
170 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
171 static_cast<int32_t>(MOV) << kOpcodeShift |
172 ArmEncode::Rd(rd) | o.encoding() << kShiftRegisterShift |
173 static_cast<int32_t>(opcode) << kShiftShift | B4 |
174 static_cast<int32_t>(rm);
175 Emit(encoding);
176}
177
178void Assembler::and_(Register rd, Register rn, Operand o, Condition cond) {
179 EmitType01(cond, o.type(), AND, 0, rn, rd, o);
180}
181
182void Assembler::ands(Register rd, Register rn, Operand o, Condition cond) {
183 EmitType01(cond, o.type(), AND, 1, rn, rd, o);
184}
185
186void Assembler::eor(Register rd, Register rn, Operand o, Condition cond) {
187 EmitType01(cond, o.type(), EOR, 0, rn, rd, o);
188}
189
190void Assembler::sub(Register rd, Register rn, Operand o, Condition cond) {
191 EmitType01(cond, o.type(), SUB, 0, rn, rd, o);
192}
193
194void Assembler::rsb(Register rd, Register rn, Operand o, Condition cond) {
195 EmitType01(cond, o.type(), RSB, 0, rn, rd, o);
196}
197
198void Assembler::rsbs(Register rd, Register rn, Operand o, Condition cond) {
199 EmitType01(cond, o.type(), RSB, 1, rn, rd, o);
200}
201
202void Assembler::add(Register rd, Register rn, Operand o, Condition cond) {
203 EmitType01(cond, o.type(), ADD, 0, rn, rd, o);
204}
205
206void Assembler::adds(Register rd, Register rn, Operand o, Condition cond) {
207 EmitType01(cond, o.type(), ADD, 1, rn, rd, o);
208}
209
210void Assembler::subs(Register rd, Register rn, Operand o, Condition cond) {
211 EmitType01(cond, o.type(), SUB, 1, rn, rd, o);
212}
213
214void Assembler::adc(Register rd, Register rn, Operand o, Condition cond) {
215 EmitType01(cond, o.type(), ADC, 0, rn, rd, o);
216}
217
218void Assembler::adcs(Register rd, Register rn, Operand o, Condition cond) {
219 EmitType01(cond, o.type(), ADC, 1, rn, rd, o);
220}
221
222void Assembler::sbc(Register rd, Register rn, Operand o, Condition cond) {
223 EmitType01(cond, o.type(), SBC, 0, rn, rd, o);
224}
225
226void Assembler::sbcs(Register rd, Register rn, Operand o, Condition cond) {
227 EmitType01(cond, o.type(), SBC, 1, rn, rd, o);
228}
229
230void Assembler::rsc(Register rd, Register rn, Operand o, Condition cond) {
231 EmitType01(cond, o.type(), RSC, 0, rn, rd, o);
232}
233
234void Assembler::tst(Register rn, Operand o, Condition cond) {
235 EmitType01(cond, o.type(), TST, 1, rn, R0, o);
236}
237
238void Assembler::teq(Register rn, Operand o, Condition cond) {
239 EmitType01(cond, o.type(), TEQ, 1, rn, R0, o);
240}
241
242void Assembler::cmp(Register rn, Operand o, Condition cond) {
243 EmitType01(cond, o.type(), CMP, 1, rn, R0, o);
244}
245
246void Assembler::cmn(Register rn, Operand o, Condition cond) {
247 EmitType01(cond, o.type(), CMN, 1, rn, R0, o);
248}
249
250void Assembler::orr(Register rd, Register rn, Operand o, Condition cond) {
251 EmitType01(cond, o.type(), ORR, 0, rn, rd, o);
252}
253
254void Assembler::orrs(Register rd, Register rn, Operand o, Condition cond) {
255 EmitType01(cond, o.type(), ORR, 1, rn, rd, o);
256}
257
258void Assembler::mov(Register rd, Operand o, Condition cond) {
259 EmitType01(cond, o.type(), MOV, 0, R0, rd, o);
260}
261
262void Assembler::movs(Register rd, Operand o, Condition cond) {
263 EmitType01(cond, o.type(), MOV, 1, R0, rd, o);
264}
265
266void Assembler::bic(Register rd, Register rn, Operand o, Condition cond) {
267 EmitType01(cond, o.type(), BIC, 0, rn, rd, o);
268}
269
270void Assembler::bics(Register rd, Register rn, Operand o, Condition cond) {
271 EmitType01(cond, o.type(), BIC, 1, rn, rd, o);
272}
273
274void Assembler::mvn_(Register rd, Operand o, Condition cond) {
275 EmitType01(cond, o.type(), MVN, 0, R0, rd, o);
276}
277
278void Assembler::mvns(Register rd, Operand o, Condition cond) {
279 EmitType01(cond, o.type(), MVN, 1, R0, rd, o);
280}
281
282void Assembler::clz(Register rd, Register rm, Condition cond) {
283 ASSERT(rd != kNoRegister);
284 ASSERT(rm != kNoRegister);
285 ASSERT(cond != kNoCondition);
286 ASSERT(rd != PC);
287 ASSERT(rm != PC);
288 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
289 B22 | B21 | (0xf << 16) | ArmEncode::Rd(rd) | (0xf << 8) |
290 B4 | static_cast<int32_t>(rm);
291 Emit(encoding);
292}
293
294void Assembler::rbit(Register rd, Register rm, Condition cond) {
295 ASSERT(rd != kNoRegister);
296 ASSERT(rm != kNoRegister);
297 ASSERT(cond != kNoCondition);
298 ASSERT(rd != PC);
299 ASSERT(rm != PC);
300 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B26 |
301 B25 | B23 | B22 | B21 | B20 | (0xf << 16) |
302 ArmEncode::Rd(rd) | (0xf << 8) | B5 | B4 |
303 static_cast<int32_t>(rm);
304 Emit(encoding);
305}
306
307void Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
308 ASSERT(cond != kNoCondition);
309 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 |
310 ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) |
311 (imm16 & 0xfff);
312 Emit(encoding);
313}
314
315void Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
316 ASSERT(cond != kNoCondition);
317 int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 |
318 B22 | ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) |
319 (imm16 & 0xfff);
320 Emit(encoding);
321}
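// Illustrative note (not from the original source): movw/movt pairs are the
// usual ARMv7 way to materialize an arbitrary 32-bit constant without a
// literal pool. A minimal sketch, with a hypothetical helper and an Assembler
// pointer named `assembler`:
//
//   void MaterializeConst32(Assembler* assembler, Register rd, uint32_t imm) {
//     assembler->movw(rd, static_cast<uint16_t>(imm & 0xffff), AL);  // low 16
//     if ((imm >> 16) != 0) {
//       assembler->movt(rd, static_cast<uint16_t>(imm >> 16), AL);   // high 16
//     }
//   }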
322
323void Assembler::EmitMulOp(Condition cond,
324 int32_t opcode,
325 Register rd,
326 Register rn,
327 Register rm,
328 Register rs) {
329 ASSERT(rd != kNoRegister);
330 ASSERT(rn != kNoRegister);
331 ASSERT(rm != kNoRegister);
332 ASSERT(rs != kNoRegister);
333 ASSERT(cond != kNoCondition);
334 int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) |
335 ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | ArmEncode::Rs(rs) |
336 B7 | B4 | ArmEncode::Rm(rm);
337 Emit(encoding);
338}
339
340void Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
341 // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
342 EmitMulOp(cond, 0, R0, rd, rn, rm);
343}
344
345// Like mul, but sets condition flags.
346void Assembler::muls(Register rd, Register rn, Register rm, Condition cond) {
347 EmitMulOp(cond, B20, R0, rd, rn, rm);
348}
349
350void Assembler::mla(Register rd,
351 Register rn,
352 Register rm,
353 Register ra,
354 Condition cond) {
355 // rd <- ra + rn * rm.
356 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
357 EmitMulOp(cond, B21, ra, rd, rn, rm);
358}
359
360void Assembler::mls(Register rd,
361 Register rn,
362 Register rm,
363 Register ra,
364 Condition cond) {
365 // rd <- ra - rn * rm.
366 // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
367 EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
368}
369
370void Assembler::smull(Register rd_lo,
371 Register rd_hi,
372 Register rn,
373 Register rm,
374 Condition cond) {
375 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
376 EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm);
377}
378
379void Assembler::umull(Register rd_lo,
380 Register rd_hi,
381 Register rn,
382 Register rm,
383 Condition cond) {
384 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
385 EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
386}
387
388void Assembler::umlal(Register rd_lo,
389 Register rd_hi,
390 Register rn,
391 Register rm,
392 Condition cond) {
393 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
394 EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm);
395}
396
397void Assembler::umaal(Register rd_lo,
398 Register rd_hi,
399 Register rn,
400 Register rm) {
401 ASSERT(rd_lo != IP);
402 ASSERT(rd_hi != IP);
403 ASSERT(rn != IP);
404 ASSERT(rm != IP);
405 // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
406 EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm);
407}
408
409void Assembler::EmitDivOp(Condition cond,
410 int32_t opcode,
411 Register rd,
412 Register rn,
413 Register rm) {
414 ASSERT(TargetCPUFeatures::integer_division_supported());
415 ASSERT(rd != kNoRegister);
416 ASSERT(rn != kNoRegister);
417 ASSERT(rm != kNoRegister);
418 ASSERT(cond != kNoCondition);
419 int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) |
420 (static_cast<int32_t>(rn) << kDivRnShift) |
421 (static_cast<int32_t>(rd) << kDivRdShift) | B26 | B25 |
422 B24 | B20 | B15 | B14 | B13 | B12 | B4 |
423 (static_cast<int32_t>(rm) << kDivRmShift);
424 Emit(encoding);
425}
426
427void Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
428 EmitDivOp(cond, 0, rd, rn, rm);
429}
430
431void Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
432 EmitDivOp(cond, B21, rd, rn, rm);
433}
434
435void Assembler::ldr(Register rd, Address ad, Condition cond) {
436 EmitMemOp(cond, true, false, rd, ad);
437}
438
439void Assembler::str(Register rd, Address ad, Condition cond) {
440 EmitMemOp(cond, false, false, rd, ad);
441}
442
443void Assembler::ldrb(Register rd, Address ad, Condition cond) {
444 EmitMemOp(cond, true, true, rd, ad);
445}
446
447void Assembler::strb(Register rd, Address ad, Condition cond) {
448 EmitMemOp(cond, false, true, rd, ad);
449}
450
451void Assembler::ldrh(Register rd, Address ad, Condition cond) {
452 EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
453}
454
455void Assembler::strh(Register rd, Address ad, Condition cond) {
456 EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
457}
458
459void Assembler::ldrsb(Register rd, Address ad, Condition cond) {
460 EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
461}
462
463void Assembler::ldrsh(Register rd, Address ad, Condition cond) {
464 EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
465}
466
467void Assembler::ldrd(Register rd,
468 Register rd2,
469 Register rn,
470 int32_t offset,
471 Condition cond) {
472 ASSERT((rd % 2) == 0);
473 ASSERT(rd2 == rd + 1);
474 EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset));
475}
476
477void Assembler::strd(Register rd,
478 Register rd2,
479 Register rn,
480 int32_t offset,
481 Condition cond) {
482 ASSERT((rd % 2) == 0);
483 ASSERT(rd2 == rd + 1);
484 EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset));
485}
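// Illustrative note (not from the original source): ldrd/strd move a 64-bit
// value through an even/odd register pair, which is why the asserts above
// require an even rd and rd2 == rd + 1. Hypothetical usage:
//
//   assembler->ldrd(R0, R1, SP, 0, AL);   // ok: even/odd pair (R0, R1)
//   // assembler->ldrd(R1, R2, SP, 0, AL) would trip the asserts above.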
486
487void Assembler::ldm(BlockAddressMode am,
488 Register base,
489 RegList regs,
490 Condition cond) {
491 ASSERT(regs != 0);
492 EmitMultiMemOp(cond, am, true, base, regs);
493}
494
495void Assembler::stm(BlockAddressMode am,
496 Register base,
497 RegList regs,
498 Condition cond) {
499 ASSERT(regs != 0);
500 EmitMultiMemOp(cond, am, false, base, regs);
501}
502
503void Assembler::ldrex(Register rt, Register rn, Condition cond) {
504 ASSERT(rn != kNoRegister);
505 ASSERT(rt != kNoRegister);
506 ASSERT(rn != R15);
507 ASSERT(rt != R15);
508 ASSERT(cond != kNoCondition);
509 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
510 B23 | L | (static_cast<int32_t>(rn) << kLdrExRnShift) |
511 (static_cast<int32_t>(rt) << kLdrExRtShift) | B11 | B10 |
512 B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
513 Emit(encoding);
514}
515
516void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) {
517 ASSERT(rn != kNoRegister);
518 ASSERT(rd != kNoRegister);
519 ASSERT(rt != kNoRegister);
520 ASSERT(rn != R15);
521 ASSERT(rd != R15);
522 ASSERT(rt != R15);
523 ASSERT(rd != kNoRegister);
524 ASSERT(rt != kNoRegister);
525 ASSERT(cond != kNoCondition);
526 ASSERT(rd != rn);
527 ASSERT(rd != rt);
528 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
529 B23 | (static_cast<int32_t>(rn) << kStrExRnShift) |
530 (static_cast<int32_t>(rd) << kStrExRdShift) | B11 | B10 |
531 B9 | B8 | B7 | B4 |
532 (static_cast<int32_t>(rt) << kStrExRtShift);
533 Emit(encoding);
534}
535
536void Assembler::dmb() {
537 // Emit a `dmb ish` instruction.
538 Emit(kDataMemoryBarrier);
539}
540
541static int32_t BitFieldExtractEncoding(bool sign_extend,
542 Register rd,
543 Register rn,
544 int32_t lsb,
545 int32_t width,
546 Condition cond) {
547 ASSERT(rn != kNoRegister && rn != PC);
548 ASSERT(rd != kNoRegister && rd != PC);
549 ASSERT(cond != kNoCondition);
550 ASSERT(Utils::IsUint(kBitFieldExtractLSBBits, lsb));
551 ASSERT(width >= 1);
552 ASSERT(lsb + width <= kBitsPerInt32);
553 const int32_t widthm1 = width - 1;
554 ASSERT(Utils::IsUint(kBitFieldExtractWidthBits, widthm1));
555 return (static_cast<int32_t>(cond) << kConditionShift) | B26 | B25 | B24 |
556 B23 | (sign_extend ? 0 : B22) | B21 |
557 (widthm1 << kBitFieldExtractWidthShift) |
558 (static_cast<int32_t>(rd) << kRdShift) |
559 (lsb << kBitFieldExtractLSBShift) | B6 | B4 |
560 (static_cast<int32_t>(rn) << kBitFieldExtractRnShift);
561}
562
563void Assembler::sbfx(Register rd,
564 Register rn,
565 int32_t lsb,
566 int32_t width,
567 Condition cond) {
568 const bool sign_extend = true;
569 Emit(BitFieldExtractEncoding(sign_extend, rd, rn, lsb, width, cond));
570}
571
572void Assembler::ubfx(Register rd,
573 Register rn,
574 int32_t lsb,
575 int32_t width,
576 Condition cond) {
577 const bool sign_extend = false;
578 Emit(BitFieldExtractEncoding(sign_extend, rd, rn, lsb, width, cond));
579}
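// Illustrative note (not from the original source): ubfx/sbfx extract `width`
// bits starting at `lsb` from rn into rd, zero- or sign-extending the result.
// A hedged C sketch of the unsigned case, assuming width < 32 so the mask
// shift stays well defined:
//
//   uint32_t UnsignedBitFieldExtract(uint32_t rn, int lsb, int width) {
//     return (rn >> lsb) & ((1u << width) - 1u);  // what ubfx computes
//   }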
580
581void Assembler::EnterFullSafepoint(Register addr, Register state) {
582 // We generate the same number of instructions whether or not the slow-path is
583 // forced. This simplifies GenerateJitCallbackTrampolines.
584 Label slow_path, done, retry;
585 if (FLAG_use_slow_path) {
586 b(&slow_path);
587 }
588
589 LoadImmediate(addr, target::Thread::safepoint_state_offset());
590 add(addr, THR, Operand(addr));
591 Bind(&retry);
592 ldrex(state, addr);
593 cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));
594 b(&slow_path, NE);
595
596 mov(state, Operand(target::Thread::full_safepoint_state_acquired()));
597 strex(TMP, state, addr);
598 cmp(TMP, Operand(0)); // 0 means strex was successful.
599 b(&done, EQ);
600
601 if (!FLAG_use_slow_path) {
602 b(&retry);
603 }
604
605 Bind(&slow_path);
606 ldr(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
607 ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
608 blx(TMP);
609
610 Bind(&done);
611}
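// Illustrative note (not from the original source): the ldrex/strex sequence
// above is a compare-and-swap on the thread's safepoint word. A hedged
// pseudo-C sketch of the fast path, with hypothetical helper and constant
// names:
//
//   do {
//     state = load_exclusive(addr);                    // ldrex
//     if (state != kUnacquired) goto slow_path;        // cmp / b NE
//   } while (store_exclusive(addr, kAcquired) != 0);   // strex, 0 == success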
612
613void Assembler::TransitionGeneratedToNative(Register destination_address,
614 Register exit_frame_fp,
615 Register exit_through_ffi,
616 Register tmp1,
617 bool enter_safepoint) {
618 // Save exit frame information to enable stack walking.
619 StoreToOffset(exit_frame_fp, THR,
620 target::Thread::top_exit_frame_info_offset());
621
622 StoreToOffset(exit_through_ffi, THR,
623 target::Thread::exit_through_ffi_offset());
624 Register tmp2 = exit_through_ffi;
625
626 // Mark that the thread is executing native code.
627 StoreToOffset(destination_address, THR, target::Thread::vm_tag_offset());
628 LoadImmediate(tmp1, target::Thread::native_execution_state());
629 StoreToOffset(tmp1, THR, target::Thread::execution_state_offset());
630
631 if (enter_safepoint) {
632 EnterFullSafepoint(tmp1, tmp2);
633 }
634}
635
636void Assembler::ExitFullSafepoint(Register tmp1,
637 Register tmp2,
638 bool ignore_unwind_in_progress) {
639 Register addr = tmp1;
640 Register state = tmp2;
641
642 // We generate the same number of instructions whether or not the slow-path is
643 // forced, for consistency with EnterFullSafepoint.
644 Label slow_path, done, retry;
645 if (FLAG_use_slow_path) {
646 b(&slow_path);
647 }
648
649 LoadImmediate(addr, target::Thread::safepoint_state_offset());
650 add(addr, THR, Operand(addr));
651 Bind(&retry);
652 ldrex(state, addr);
653 cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));
654 b(&slow_path, NE);
655
656 mov(state, Operand(target::Thread::full_safepoint_state_unacquired()));
657 strex(TMP, state, addr);
658 cmp(TMP, Operand(0)); // 0 means strex was successful.
659 b(&done, EQ);
660
661 if (!FLAG_use_slow_path) {
662 b(&retry);
663 }
664
665 Bind(&slow_path);
666 if (ignore_unwind_in_progress) {
667 ldr(TMP,
668 Address(THR,
669 target::Thread::
670 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
671 } else {
672 ldr(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
673 }
674 ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
675 blx(TMP);
676
677 Bind(&done);
678}
679
680void Assembler::TransitionNativeToGenerated(Register addr,
681 Register state,
682 bool exit_safepoint,
683 bool ignore_unwind_in_progress,
684 bool set_tag) {
685 if (exit_safepoint) {
686 ExitFullSafepoint(addr, state, ignore_unwind_in_progress);
687 } else {
688 // flag only makes sense if we are leaving safepoint
689 ASSERT(!ignore_unwind_in_progress);
690#if defined(DEBUG)
691 // Ensure we've already left the safepoint.
692 ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
693 LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
694 ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
695 ands(TMP, TMP, Operand(state));
696 Label ok;
697 b(&ok, ZERO);
698 Breakpoint();
699 Bind(&ok);
700#endif
701 }
702
703 // Mark that the thread is executing Dart code.
704 if (set_tag) {
705 LoadImmediate(state, target::Thread::vm_tag_dart_id());
706 StoreToOffset(state, THR, target::Thread::vm_tag_offset());
707 }
708 LoadImmediate(state, target::Thread::generated_execution_state());
709 StoreToOffset(state, THR, target::Thread::execution_state_offset());
710
711 // Reset exit frame information in Isolate's mutator thread structure.
712 LoadImmediate(state, 0);
713 StoreToOffset(state, THR, target::Thread::top_exit_frame_info_offset());
714 StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
715}
716
717void Assembler::clrex() {
718 int32_t encoding = (kSpecialCondition << kConditionShift) | B26 | B24 | B22 |
719 B21 | B20 | (0xff << 12) | B4 | 0xf;
720 Emit(encoding);
721}
722
723void Assembler::nop(Condition cond) {
724 ASSERT(cond != kNoCondition);
725 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B25 |
726 B24 | B21 | (0xf << 12);
727 Emit(encoding);
728}
729
730void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
731 ASSERT(sn != kNoSRegister);
732 ASSERT(rt != kNoRegister);
733 ASSERT(rt != SP);
734 ASSERT(rt != PC);
735 ASSERT(cond != kNoCondition);
736 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
737 B26 | B25 | ((static_cast<int32_t>(sn) >> 1) * B16) |
738 (static_cast<int32_t>(rt) * B12) | B11 | B9 |
739 ((static_cast<int32_t>(sn) & 1) * B7) | B4;
740 Emit(encoding);
741}
742
743void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
744 ASSERT(sn != kNoSRegister);
745 ASSERT(rt != kNoRegister);
746 ASSERT(rt != SP);
747 ASSERT(rt != PC);
748 ASSERT(cond != kNoCondition);
749 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
750 B26 | B25 | B20 | ((static_cast<int32_t>(sn) >> 1) * B16) |
751 (static_cast<int32_t>(rt) * B12) | B11 | B9 |
752 ((static_cast<int32_t>(sn) & 1) * B7) | B4;
753 Emit(encoding);
754}
755
756void Assembler::vmovsrr(SRegister sm,
757 Register rt,
758 Register rt2,
759 Condition cond) {
760 ASSERT(sm != kNoSRegister);
761 ASSERT(sm != S31);
762 ASSERT(rt != kNoRegister);
763 ASSERT(rt != SP);
764 ASSERT(rt != PC);
765 ASSERT(rt2 != kNoRegister);
766 ASSERT(rt2 != SP);
767 ASSERT(rt2 != PC);
768 ASSERT(cond != kNoCondition);
769 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
770 B26 | B22 | (static_cast<int32_t>(rt2) * B16) |
771 (static_cast<int32_t>(rt) * B12) | B11 | B9 |
772 ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
773 (static_cast<int32_t>(sm) >> 1);
774 Emit(encoding);
775}
776
777void Assembler::vmovrrs(Register rt,
778 Register rt2,
779 SRegister sm,
780 Condition cond) {
781 ASSERT(sm != kNoSRegister);
782 ASSERT(sm != S31);
783 ASSERT(rt != kNoRegister);
784 ASSERT(rt != SP);
785 ASSERT(rt != PC);
786 ASSERT(rt2 != kNoRegister);
787 ASSERT(rt2 != SP);
788 ASSERT(rt2 != PC);
789 ASSERT(rt != rt2);
790 ASSERT(cond != kNoCondition);
791 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
792 B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) |
793 (static_cast<int32_t>(rt) * B12) | B11 | B9 |
794 ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
795 (static_cast<int32_t>(sm) >> 1);
796 Emit(encoding);
797}
798
799void Assembler::vmovdr(DRegister dn, int i, Register rt, Condition cond) {
800 ASSERT((i == 0) || (i == 1));
801 ASSERT(rt != kNoRegister);
802 ASSERT(rt != SP);
803 ASSERT(rt != PC);
804 ASSERT(dn != kNoDRegister);
805 ASSERT(cond != kNoCondition);
806 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
807 B26 | B25 | (i * B21) | (static_cast<int32_t>(rt) * B12) |
808 B11 | B9 | B8 | ((static_cast<int32_t>(dn) >> 4) * B7) |
809 ((static_cast<int32_t>(dn) & 0xf) * B16) | B4;
810 Emit(encoding);
811}
812
813void Assembler::vmovdrr(DRegister dm,
814 Register rt,
815 Register rt2,
816 Condition cond) {
817 ASSERT(dm != kNoDRegister);
818 ASSERT(rt != kNoRegister);
819 ASSERT(rt != SP);
820 ASSERT(rt != PC);
821 ASSERT(rt2 != kNoRegister);
822 ASSERT(rt2 != SP);
823 ASSERT(rt2 != PC);
824 ASSERT(cond != kNoCondition);
825 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
826 B26 | B22 | (static_cast<int32_t>(rt2) * B16) |
827 (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 |
828 ((static_cast<int32_t>(dm) >> 4) * B5) | B4 |
829 (static_cast<int32_t>(dm) & 0xf);
830 Emit(encoding);
831}
832
833void Assembler::vmovrrd(Register rt,
834 Register rt2,
835 DRegister dm,
836 Condition cond) {
837 ASSERT(dm != kNoDRegister);
838 ASSERT(rt != kNoRegister);
839 ASSERT(rt != SP);
840 ASSERT(rt != PC);
841 ASSERT(rt2 != kNoRegister);
842 ASSERT(rt2 != SP);
843 ASSERT(rt2 != PC);
844 ASSERT(rt != rt2);
845 ASSERT(cond != kNoCondition);
846 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
847 B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) |
848 (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 |
849 ((static_cast<int32_t>(dm) >> 4) * B5) | B4 |
850 (static_cast<int32_t>(dm) & 0xf);
851 Emit(encoding);
852}
853
854void Assembler::vldrs(SRegister sd, Address ad, Condition cond) {
855 ASSERT(sd != kNoSRegister);
856 ASSERT(cond != kNoCondition);
857 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
858 B26 | B24 | B20 | ((static_cast<int32_t>(sd) & 1) * B22) |
859 ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 |
860 ad.vencoding();
861 Emit(encoding);
862}
863
864void Assembler::vstrs(SRegister sd, Address ad, Condition cond) {
865 ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
866 ASSERT(sd != kNoSRegister);
867 ASSERT(cond != kNoCondition);
868 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
869 B26 | B24 | ((static_cast<int32_t>(sd) & 1) * B22) |
870 ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 |
871 ad.vencoding();
872 Emit(encoding);
873}
874
875void Assembler::vldrd(DRegister dd, Address ad, Condition cond) {
876 ASSERT(dd != kNoDRegister);
877 ASSERT(cond != kNoCondition);
878 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
879 B26 | B24 | B20 | ((static_cast<int32_t>(dd) >> 4) * B22) |
880 ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 |
881 ad.vencoding();
882 Emit(encoding);
883}
884
885void Assembler::vstrd(DRegister dd, Address ad, Condition cond) {
886 ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
887 ASSERT(dd != kNoDRegister);
888 ASSERT(cond != kNoCondition);
889 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
890 B26 | B24 | ((static_cast<int32_t>(dd) >> 4) * B22) |
891 ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 |
892 ad.vencoding();
893 Emit(encoding);
894}
895
896void Assembler::EmitMultiVSMemOp(Condition cond,
897 BlockAddressMode am,
898 bool load,
899 Register base,
900 SRegister start,
901 uint32_t count) {
902 ASSERT(base != kNoRegister);
903 ASSERT(cond != kNoCondition);
904 ASSERT(start != kNoSRegister);
905 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfSRegisters);
906
907 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
908 B26 | B11 | B9 | am | (load ? L : 0) |
909 ArmEncode::Rn(base) |
910 ((static_cast<int32_t>(start) & 0x1) != 0 ? D : 0) |
911 ((static_cast<int32_t>(start) >> 1) << 12) | count;
912 Emit(encoding);
913}
914
915void Assembler::EmitMultiVDMemOp(Condition cond,
916 BlockAddressMode am,
917 bool load,
918 Register base,
919 DRegister start,
920 int32_t count) {
921 ASSERT(base != kNoRegister);
922 ASSERT(cond != kNoCondition);
923 ASSERT(start != kNoDRegister);
924 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfDRegisters);
925 const int notArmv5te = 0;
926
927 int32_t encoding =
928 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B11 | B9 |
929 B8 | am | (load ? L : 0) | ArmEncode::Rn(base) |
930 ((static_cast<int32_t>(start) & 0x10) != 0 ? D : 0) |
931 ((static_cast<int32_t>(start) & 0xf) << 12) | (count << 1) | notArmv5te;
932 Emit(encoding);
933}
934
935void Assembler::vldms(BlockAddressMode am,
936 Register base,
937 SRegister first,
938 SRegister last,
939 Condition cond) {
940 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
941 ASSERT(last > first);
942 EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1);
943}
944
945void Assembler::vstms(BlockAddressMode am,
946 Register base,
947 SRegister first,
948 SRegister last,
949 Condition cond) {
950 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
951 ASSERT(last > first);
952 EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1);
953}
954
955void Assembler::vldmd(BlockAddressMode am,
956 Register base,
957 DRegister first,
958 intptr_t count,
959 Condition cond) {
960 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
961 ASSERT(count <= 16);
962 ASSERT(first + count <= kNumberOfDRegisters);
963 EmitMultiVDMemOp(cond, am, true, base, first, count);
964}
965
966void Assembler::vstmd(BlockAddressMode am,
967 Register base,
968 DRegister first,
969 intptr_t count,
970 Condition cond) {
971 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
972 ASSERT(count <= 16);
973 ASSERT(first + count <= kNumberOfDRegisters);
974 EmitMultiVDMemOp(cond, am, false, base, first, count);
975}
976
977void Assembler::EmitVFPsss(Condition cond,
978 int32_t opcode,
979 SRegister sd,
980 SRegister sn,
981 SRegister sm) {
982 ASSERT(sd != kNoSRegister);
983 ASSERT(sn != kNoSRegister);
984 ASSERT(sm != kNoSRegister);
985 ASSERT(cond != kNoCondition);
986 int32_t encoding =
987 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
988 B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) |
989 ((static_cast<int32_t>(sn) >> 1) * B16) |
990 ((static_cast<int32_t>(sd) >> 1) * B12) |
991 ((static_cast<int32_t>(sn) & 1) * B7) |
992 ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1);
993 Emit(encoding);
994}
995
996void Assembler::EmitVFPddd(Condition cond,
997 int32_t opcode,
998 DRegister dd,
999 DRegister dn,
1000 DRegister dm) {
1001 ASSERT(dd != kNoDRegister);
1002 ASSERT(dn != kNoDRegister);
1003 ASSERT(dm != kNoDRegister);
1004 ASSERT(cond != kNoCondition);
1005 int32_t encoding =
1006 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
1007 B9 | B8 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) |
1008 ((static_cast<int32_t>(dn) & 0xf) * B16) |
1009 ((static_cast<int32_t>(dd) & 0xf) * B12) |
1010 ((static_cast<int32_t>(dn) >> 4) * B7) |
1011 ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
1012 Emit(encoding);
1013}
1014
1015void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
1016 EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
1017}
1018
1019void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
1020 EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
1021}
1022
1023bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
1024 uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
1025 if (((imm32 & ((1 << 19) - 1)) == 0) &&
1026 ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
1027 (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
1028 uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
1029 ((imm32 >> 19) & ((1 << 6) - 1));
1030 EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | (imm8 & 0xf), sd,
1031 S0, S0);
1032 return true;
1033 }
1034 return false;
1035}
1036
1037bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
1038 uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
1039 if (((imm64 & ((1LL << 48) - 1)) == 0) &&
1040 ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
1041 (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
1042 uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
1043 ((imm64 >> 48) & ((1 << 6) - 1));
1044 EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | B8 | (imm8 & 0xf),
1045 dd, D0, D0);
1046 return true;
1047 }
1048 return false;
1049}
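// Illustrative note (not from the original source): the checks above accept
// only the VFP "modified immediate" set (a 4-bit mantissa and a small
// exponent), so constants such as 0.5, 1.0, 2.0, or 31.0 encode directly
// while 0.1 does not. Hypothetical usage:
//
//   if (!assembler->vmovd(D0, 2.0, AL)) {   // 2.0 encodes, so this succeeds
//     // 0.1 would land here; the caller must load it another way,
//     // e.g. from the object pool.
//   }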
1050
1051void Assembler::vadds(SRegister sd,
1052 SRegister sn,
1053 SRegister sm,
1054 Condition cond) {
1055 EmitVFPsss(cond, B21 | B20, sd, sn, sm);
1056}
1057
1058void Assembler::vaddd(DRegister dd,
1059 DRegister dn,
1060 DRegister dm,
1061 Condition cond) {
1062 EmitVFPddd(cond, B21 | B20, dd, dn, dm);
1063}
1064
1065void Assembler::vsubs(SRegister sd,
1066 SRegister sn,
1067 SRegister sm,
1068 Condition cond) {
1069 EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
1070}
1071
1072void Assembler::vsubd(DRegister dd,
1073 DRegister dn,
1074 DRegister dm,
1075 Condition cond) {
1076 EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
1077}
1078
1079void Assembler::vmuls(SRegister sd,
1080 SRegister sn,
1081 SRegister sm,
1082 Condition cond) {
1083 EmitVFPsss(cond, B21, sd, sn, sm);
1084}
1085
1086void Assembler::vmuld(DRegister dd,
1087 DRegister dn,
1088 DRegister dm,
1089 Condition cond) {
1090 EmitVFPddd(cond, B21, dd, dn, dm);
1091}
1092
1093void Assembler::vmlas(SRegister sd,
1094 SRegister sn,
1095 SRegister sm,
1096 Condition cond) {
1097 EmitVFPsss(cond, 0, sd, sn, sm);
1098}
1099
1100void Assembler::vmlad(DRegister dd,
1101 DRegister dn,
1102 DRegister dm,
1103 Condition cond) {
1104 EmitVFPddd(cond, 0, dd, dn, dm);
1105}
1106
1107void Assembler::vmlss(SRegister sd,
1108 SRegister sn,
1109 SRegister sm,
1110 Condition cond) {
1111 EmitVFPsss(cond, B6, sd, sn, sm);
1112}
1113
1114void Assembler::vmlsd(DRegister dd,
1115 DRegister dn,
1116 DRegister dm,
1117 Condition cond) {
1118 EmitVFPddd(cond, B6, dd, dn, dm);
1119}
1120
1121void Assembler::vdivs(SRegister sd,
1122 SRegister sn,
1123 SRegister sm,
1124 Condition cond) {
1125 EmitVFPsss(cond, B23, sd, sn, sm);
1126}
1127
1128void Assembler::vdivd(DRegister dd,
1129 DRegister dn,
1130 DRegister dm,
1131 Condition cond) {
1132 EmitVFPddd(cond, B23, dd, dn, dm);
1133}
1134
1135void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
1136 EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
1137}
1138
1139void Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
1140 EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
1141}
1142
1143void Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
1144 EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
1145}
1146
1147void Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
1148 EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
1149}
1150
1151void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
1152 EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
1153}
1154
1155void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
1156 EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
1157}
1158
1159void Assembler::EmitVFPsd(Condition cond,
1160 int32_t opcode,
1161 SRegister sd,
1162 DRegister dm) {
1163 ASSERT(sd != kNoSRegister);
1164 ASSERT(dm != kNoDRegister);
1165 ASSERT(cond != kNoCondition);
1166 int32_t encoding =
1167 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
1168 B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) |
1169 ((static_cast<int32_t>(sd) >> 1) * B12) |
1170 ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
1171 Emit(encoding);
1172}
1173
1174void Assembler::EmitVFPds(Condition cond,
1175 int32_t opcode,
1176 DRegister dd,
1177 SRegister sm) {
1178 ASSERT(dd != kNoDRegister);
1179 ASSERT(sm != kNoSRegister);
1180 ASSERT(cond != kNoCondition);
1181 int32_t encoding =
1182 (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
1183 B9 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) |
1184 ((static_cast<int32_t>(dd) & 0xf) * B12) |
1185 ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1);
1186 Emit(encoding);
1187}
1188
1189void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
1190 EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
1191}
1192
1193void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
1194 EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
1195}
1196
1197void Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
1198 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
1199}
1200
1201void Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
1202 EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
1203}
1204
1205void Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
1206 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
1207}
1208
1209void Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
1210 EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
1211}
1212
1213void Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
1214 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
1215}
1216
1217void Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
1218 EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
1219}
1220
1221void Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
1222 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
1223}
1224
1225void Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
1226 EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
1227}
1228
1229void Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
1230 EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
1231}
1232
1233void Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
1234 EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
1235}
1236
1237void Assembler::vcmpsz(SRegister sd, Condition cond) {
1238 EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
1239}
1240
1241void Assembler::vcmpdz(DRegister dd, Condition cond) {
1242 EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
1243}
1244
1245void Assembler::vmrs(Register rd, Condition cond) {
1246 ASSERT(cond != kNoCondition);
1247 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
1248 B26 | B25 | B23 | B22 | B21 | B20 | B16 |
1249 (static_cast<int32_t>(rd) * B12) | B11 | B9 | B4;
1250 Emit(encoding);
1251}
1252
1253void Assembler::vmstat(Condition cond) {
1254 vmrs(APSR, cond);
1255}
1256
1257static inline int ShiftOfOperandSize(OperandSize size) {
1258 switch (size) {
1259 case kByte:
1260 case kUnsignedByte:
1261 return 0;
1262 case kTwoBytes:
1263 case kUnsignedTwoBytes:
1264 return 1;
1265 case kFourBytes:
1266 case kUnsignedFourBytes:
1267 return 2;
1268 case kWordPair:
1269 return 3;
1270 case kSWord:
1271 case kDWord:
1272 return 0;
1273 default:
1274 UNREACHABLE();
1275 break;
1276 }
1277
1278 UNREACHABLE();
1279 return -1;
1280}
1281
1282void Assembler::EmitSIMDqqq(int32_t opcode,
1283 OperandSize size,
1284 QRegister qd,
1285 QRegister qn,
1286 QRegister qm) {
1287 ASSERT(TargetCPUFeatures::neon_supported());
1288 int sz = ShiftOfOperandSize(size);
1289 int32_t encoding =
1290 (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 | B6 |
1291 opcode | ((sz & 0x3) * B20) |
1292 ((static_cast<int32_t>(qd * 2) >> 4) * B22) |
1293 ((static_cast<int32_t>(qn * 2) & 0xf) * B16) |
1294 ((static_cast<int32_t>(qd * 2) & 0xf) * B12) |
1295 ((static_cast<int32_t>(qn * 2) >> 4) * B7) |
1296 ((static_cast<int32_t>(qm * 2) >> 4) * B5) |
1297 (static_cast<int32_t>(qm * 2) & 0xf);
1298 Emit(encoding);
1299}
1300
1301void Assembler::EmitSIMDddd(int32_t opcode,
1302 OperandSize size,
1303 DRegister dd,
1304 DRegister dn,
1305 DRegister dm) {
1306 ASSERT(TargetCPUFeatures::neon_supported());
1307 int sz = ShiftOfOperandSize(size);
1308 int32_t encoding =
1309 (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 |
1310 opcode | ((sz & 0x3) * B20) | ((static_cast<int32_t>(dd) >> 4) * B22) |
1311 ((static_cast<int32_t>(dn) & 0xf) * B16) |
1312 ((static_cast<int32_t>(dd) & 0xf) * B12) |
1313 ((static_cast<int32_t>(dn) >> 4) * B7) |
1314 ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
1315 Emit(encoding);
1316}
1317
1318void Assembler::vmovq(QRegister qd, QRegister qm) {
1319 EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qm, qm);
1320}
1321
1322void Assembler::vaddqi(OperandSize sz,
1323 QRegister qd,
1324 QRegister qn,
1325 QRegister qm) {
1326 EmitSIMDqqq(B11, sz, qd, qn, qm);
1327}
1328
1329void Assembler::vaddqs(QRegister qd, QRegister qn, QRegister qm) {
1330 EmitSIMDqqq(B11 | B10 | B8, kSWord, qd, qn, qm);
1331}
1332
1333void Assembler::vsubqi(OperandSize sz,
1334 QRegister qd,
1335 QRegister qn,
1336 QRegister qm) {
1337 EmitSIMDqqq(B24 | B11, sz, qd, qn, qm);
1338}
1339
1340void Assembler::vsubqs(QRegister qd, QRegister qn, QRegister qm) {
1341 EmitSIMDqqq(B21 | B11 | B10 | B8, kSWord, qd, qn, qm);
1342}
1343
1344void Assembler::vmulqi(OperandSize sz,
1345 QRegister qd,
1346 QRegister qn,
1347 QRegister qm) {
1348 EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm);
1349}
1350
1351void Assembler::vmulqs(QRegister qd, QRegister qn, QRegister qm) {
1352 EmitSIMDqqq(B24 | B11 | B10 | B8 | B4, kSWord, qd, qn, qm);
1353}
1354
1355void Assembler::vshlqi(OperandSize sz,
1356 QRegister qd,
1357 QRegister qm,
1358 QRegister qn) {
1359 EmitSIMDqqq(B25 | B10, sz, qd, qn, qm);
1360}
1361
1362void Assembler::vshlqu(OperandSize sz,
1363 QRegister qd,
1364 QRegister qm,
1365 QRegister qn) {
1366 EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm);
1367}
1368
1369void Assembler::veorq(QRegister qd, QRegister qn, QRegister qm) {
1370 EmitSIMDqqq(B24 | B8 | B4, kByte, qd, qn, qm);
1371}
1372
1373void Assembler::vorrq(QRegister qd, QRegister qn, QRegister qm) {
1374 EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qn, qm);
1375}
1376
1377void Assembler::vornq(QRegister qd, QRegister qn, QRegister qm) {
1378 EmitSIMDqqq(B21 | B20 | B8 | B4, kByte, qd, qn, qm);
1379}
1380
1381void Assembler::vandq(QRegister qd, QRegister qn, QRegister qm) {
1382 EmitSIMDqqq(B8 | B4, kByte, qd, qn, qm);
1383}
1384
1385void Assembler::vmvnq(QRegister qd, QRegister qm) {
1386 EmitSIMDqqq(B25 | B24 | B23 | B10 | B8 | B7, kWordPair, qd, Q0, qm);
1387}
1388
1389void Assembler::vminqs(QRegister qd, QRegister qn, QRegister qm) {
1390 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
1391}
1392
1393void Assembler::vmaxqs(QRegister qd, QRegister qn, QRegister qm) {
1394 EmitSIMDqqq(B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
1395}
1396
1397void Assembler::vabsqs(QRegister qd, QRegister qm) {
1398 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8, kSWord, qd, Q0,
1399 qm);
1400}
1401
1402void Assembler::vnegqs(QRegister qd, QRegister qm) {
1403 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8 | B7, kSWord,
1404 qd, Q0, qm);
1405}
1406
1407void Assembler::vrecpeqs(QRegister qd, QRegister qm) {
1408 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8, kSWord, qd,
1409 Q0, qm);
1410}
1411
1412void Assembler::vrecpsqs(QRegister qd, QRegister qn, QRegister qm) {
1413 EmitSIMDqqq(B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
1414}
1415
1416void Assembler::vrsqrteqs(QRegister qd, QRegister qm) {
1417 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8 | B7, kSWord,
1418 qd, Q0, qm);
1419}
1420
1421void Assembler::vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm) {
1422 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
1423}
1424
1425void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm, int idx) {
1426 ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair));
1427 int code = 0;
1428
1429 switch (sz) {
1430 case kByte:
1431 case kUnsignedByte: {
1432 ASSERT((idx >= 0) && (idx < 8));
1433 code = 1 | (idx << 1);
1434 break;
1435 }
1436 case kTwoBytes:
1437 case kUnsignedTwoBytes: {
1438 ASSERT((idx >= 0) && (idx < 4));
1439 code = 2 | (idx << 2);
1440 break;
1441 }
1442 case kFourBytes:
1443 case kUnsignedFourBytes: {
1444 ASSERT((idx >= 0) && (idx < 2));
1445 code = 4 | (idx << 3);
1446 break;
1447 }
1448 default: {
1449 break;
1450 }
1451 }
1452
1453 EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair,
1454 static_cast<DRegister>(qd * 2),
1455 static_cast<DRegister>(code & 0xf), dm);
1456}
1457
1458void Assembler::vtbl(DRegister dd, DRegister dn, int len, DRegister dm) {
1459 ASSERT((len >= 1) && (len <= 4));
1460 EmitSIMDddd(B24 | B23 | B11 | ((len - 1) * B8), kWordPair, dd, dn, dm);
1461}
1462
1463void Assembler::vzipqw(QRegister qd, QRegister qm) {
1464 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B8 | B7, kByte, qd, Q0, qm);
1465}
1466
1467void Assembler::vceqqi(OperandSize sz,
1468 QRegister qd,
1469 QRegister qn,
1470 QRegister qm) {
1471 EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm);
1472}
1473
1474void Assembler::vceqqs(QRegister qd, QRegister qn, QRegister qm) {
1475 EmitSIMDqqq(B11 | B10 | B9, kSWord, qd, qn, qm);
1476}
1477
1478void Assembler::vcgeqi(OperandSize sz,
1479 QRegister qd,
1480 QRegister qn,
1481 QRegister qm) {
1482 EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm);
1483}
1484
1485void Assembler::vcugeqi(OperandSize sz,
1486 QRegister qd,
1487 QRegister qn,
1488 QRegister qm) {
1489 EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm);
1490}
1491
1492void Assembler::vcgeqs(QRegister qd, QRegister qn, QRegister qm) {
1493 EmitSIMDqqq(B24 | B11 | B10 | B9, kSWord, qd, qn, qm);
1494}
1495
1496void Assembler::vcgtqi(OperandSize sz,
1497 QRegister qd,
1498 QRegister qn,
1499 QRegister qm) {
1500 EmitSIMDqqq(B9 | B8, sz, qd, qn, qm);
1501}
1502
1503void Assembler::vcugtqi(OperandSize sz,
1504 QRegister qd,
1505 QRegister qn,
1506 QRegister qm) {
1507 EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm);
1508}
1509
1510void Assembler::vcgtqs(QRegister qd, QRegister qn, QRegister qm) {
1511 EmitSIMDqqq(B24 | B21 | B11 | B10 | B9, kSWord, qd, qn, qm);
1512}
1513
1514void Assembler::bkpt(uint16_t imm16) {
1515 Emit(BkptEncoding(imm16));
1516}
1517
1518void Assembler::b(Label* label, Condition cond) {
1519 EmitBranch(cond, label, false);
1520}
1521
1522void Assembler::bl(Label* label, Condition cond) {
1523 EmitBranch(cond, label, true);
1524}
1525
1526void Assembler::bx(Register rm, Condition cond) {
1527 ASSERT(rm != kNoRegister);
1528 ASSERT(cond != kNoCondition);
1529 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
1530 B21 | (0xfff << 8) | B4 | ArmEncode::Rm(rm);
1531 Emit(encoding);
1532}
1533
1534void Assembler::blx(Register rm, Condition cond) {
1535 ASSERT(rm != kNoRegister);
1536 ASSERT(cond != kNoCondition);
1537 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
1538 B21 | (0xfff << 8) | B5 | B4 | ArmEncode::Rm(rm);
1539 Emit(encoding);
1540}
1541
1542void Assembler::MarkExceptionHandler(Label* label) {
1543 EmitType01(AL, 1, TST, 1, PC, R0, Operand(0));
1544 Label l;
1545 b(&l);
1546 EmitBranch(AL, label, false);
1547 Bind(&l);
1548}
1549
1550void Assembler::Drop(intptr_t stack_elements) {
1551 ASSERT(stack_elements >= 0);
1552 if (stack_elements > 0) {
1553 AddImmediate(SP, stack_elements * target::kWordSize);
1554 }
1555}
1556
1557// Uses a code sequence that can easily be decoded.
1558void Assembler::LoadWordFromPoolIndex(Register rd,
1559 intptr_t index,
1560 Register pp,
1561 Condition cond) {
1562 ASSERT((pp != PP) || constant_pool_allowed());
1563 ASSERT(rd != pp);
1564 // PP is tagged on ARM.
1565 const int32_t offset =
1566 target::ObjectPool::element_offset(index) - kHeapObjectTag;
1567 int32_t offset_mask = 0;
1568 if (Address::CanHoldLoadOffset(kFourBytes, offset, &offset_mask)) {
1569 ldr(rd, Address(pp, offset), cond);
1570 } else {
1571 int32_t offset_hi = offset & ~offset_mask; // signed
1572 uint32_t offset_lo = offset & offset_mask; // unsigned
1573 // Inline a simplified version of AddImmediate(rd, pp, offset_hi).
1574 Operand o;
1575 if (Operand::CanHold(offset_hi, &o)) {
1576 add(rd, pp, o, cond);
1577 } else {
1578 LoadImmediate(rd, offset_hi, cond);
1579 add(rd, pp, Operand(rd), cond);
1580 }
1581 ldr(rd, Address(rd, offset_lo), cond);
1582 }
1583}
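// Illustrative note (not from the original source): a worked example of the
// split above, assuming a 12-bit offset_mask of 0xfff:
//
//   offset    = 0x12345            // too large for a single ldr immediate
//   offset_hi = 0x12345 & ~0xfff   // 0x12000, added to pp first (add, or
//                                  // LoadImmediate + add)
//   offset_lo = 0x12345 &  0xfff   // 0x345, folded into the final ldr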
1584
1585void Assembler::StoreWordToPoolIndex(Register value,
1586 intptr_t index,
1587 Register pp,
1588 Condition cond) {
1589 ASSERT((pp != PP) || constant_pool_allowed());
1590 ASSERT(value != pp);
1591 // PP is tagged on ARM.
1592 const int32_t offset =
1593 target::ObjectPool::element_offset(index) - kHeapObjectTag;
1594 int32_t offset_mask = 0;
1595 if (Address::CanHoldLoadOffset(kFourBytes, offset, &offset_mask)) {
1596 str(value, Address(pp, offset), cond);
1597 } else {
1598 int32_t offset_hi = offset & ~offset_mask; // signed
1599 uint32_t offset_lo = offset & offset_mask; // unsigned
1600 // Inline a simplified version of AddImmediate(TMP, pp, offset_hi).
1601 Operand o;
1602 if (Operand::CanHold(offset_hi, &o)) {
1603 add(TMP, pp, o, cond);
1604 } else {
1605 LoadImmediate(TMP, offset_hi, cond);
1606 add(TMP, pp, Operand(TMP), cond);
1607 }
1608 str(value, Address(TMP, offset_lo), cond);
1609 }
1610}
1611
1612void Assembler::CheckCodePointer() {
1613#ifdef DEBUG
1614 if (!FLAG_check_code_pointer) {
1615 return;
1616 }
1617 Comment("CheckCodePointer");
1618 Label cid_ok, instructions_ok;
1619 Push(R0);
1620 Push(IP);
1621 CompareClassId(CODE_REG, kCodeCid, R0);
1622 b(&cid_ok, EQ);
1623 bkpt(0);
1624 Bind(&cid_ok);
1625
1626 const intptr_t offset = CodeSize() + Instr::kPCReadOffset +
1627 target::Instructions::HeaderSize() - kHeapObjectTag;
1628 mov(R0, Operand(PC));
1629 AddImmediate(R0, -offset);
1630 ldr(IP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
1631 cmp(R0, Operand(IP));
1632 b(&instructions_ok, EQ);
1633 bkpt(1);
1634 Bind(&instructions_ok);
1635 Pop(IP);
1636 Pop(R0);
1637#endif
1638}
1639
1640void Assembler::RestoreCodePointer() {
1641 ldr(CODE_REG,
1642 Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
1643 CheckCodePointer();
1644}
1645
1646void Assembler::LoadPoolPointer(Register reg) {
1647 // Load new pool pointer.
1648 CheckCodePointer();
1649 ldr(reg, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
1650 set_constant_pool_allowed(reg == PP);
1651}
1652
1653void Assembler::SetupGlobalPoolAndDispatchTable() {
1654 ASSERT(FLAG_precompiled_mode);
1655 ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
1656 ldr(DISPATCH_TABLE_REG,
1657 Address(THR, target::Thread::dispatch_table_array_offset()));
1658}
1659
1660void Assembler::LoadIsolate(Register rd) {
1661 ldr(rd, Address(THR, target::Thread::isolate_offset()));
1662}
1663
1664void Assembler::LoadIsolateGroup(Register rd) {
1665 ldr(rd, Address(THR, target::Thread::isolate_group_offset()));
1666}
1667
1668bool Assembler::CanLoadFromObjectPool(const Object& object) const {
1669 ASSERT(IsOriginalObject(object));
1670 if (!constant_pool_allowed()) {
1671 return false;
1672 }
1673
1674 DEBUG_ASSERT(IsNotTemporaryScopedHandle(object));
1675 ASSERT(IsInOldSpace(object));
1676 return true;
1677}
1678
1679void Assembler::LoadObjectHelper(
1680 Register rd,
1681 const Object& object,
1682 Condition cond,
1683 bool is_unique,
1684 Register pp,
1685 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1686 ASSERT(IsOriginalObject(object));
1687 // `is_unique == true` effectively means object has to be patchable.
1688 if (!is_unique) {
1689 intptr_t offset = 0;
1690 if (target::CanLoadFromThread(object, &offset)) {
1691 // Load common VM constants from the thread. This works also in places
1692 // where no constant pool is set up (e.g. intrinsic code).
1693 ldr(rd, Address(THR, offset), cond);
1694 return;
1695 }
1696 if (target::IsSmi(object)) {
1697 // Relocation doesn't apply to Smis.
1698 LoadImmediate(rd, target::ToRawSmi(object), cond);
1699 return;
1700 }
1701 }
1702 RELEASE_ASSERT(CanLoadFromObjectPool(object));
1703 // Make sure that class CallPattern is able to decode this load from the
1704 // object pool.
1705 const auto index =
1706 is_unique
1707 ? object_pool_builder().AddObject(
1708 object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
1709 : object_pool_builder().FindObject(
1710 object, ObjectPoolBuilderEntry::kNotPatchable,
1711 snapshot_behavior);
1712 LoadWordFromPoolIndex(rd, index, pp, cond);
1713}
1714
1715void Assembler::LoadObject(Register rd, const Object& object, Condition cond) {
1716 LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP);
1717}
1718
1719void Assembler::LoadUniqueObject(
1720 Register rd,
1721 const Object& object,
1722 Condition cond,
1723 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1724 LoadObjectHelper(rd, object, cond, /* is_unique = */ true, PP,
1725 snapshot_behavior);
1726}
1727
1728void Assembler::LoadNativeEntry(Register rd,
1729 const ExternalLabel* label,
1730 ObjectPoolBuilderEntry::Patchability patchable,
1731 Condition cond) {
1732 const intptr_t index =
1733 object_pool_builder().FindNativeFunction(label, patchable);
1734 LoadWordFromPoolIndex(rd, index, PP, cond);
1735}
1736
1737void Assembler::PushObject(const Object& object) {
1738 ASSERT(IsOriginalObject(object));
1739 LoadObject(IP, object);
1740 Push(IP);
1741}
1742
1743void Assembler::CompareObject(Register rn, const Object& object) {
1744 ASSERT(IsOriginalObject(object));
1745 ASSERT(rn != IP);
1746 if (target::IsSmi(object)) {
1747 CompareImmediate(rn, target::ToRawSmi(object));
1748 } else {
1749 LoadObject(IP, object);
1750 cmp(rn, Operand(IP));
1751 }
1752}
1753
1754Register UseRegister(Register reg, RegList* used) {
1755 ASSERT(reg != THR);
1756 ASSERT(reg != SP);
1757 ASSERT(reg != FP);
1758 ASSERT(reg != PC);
1759 ASSERT((*used & (1 << reg)) == 0);
1760 *used |= (1 << reg);
1761 return reg;
1762}
1763
1764Register AllocateRegister(RegList* used) {
1765 const RegList free = ~*used;
1766 return (free == 0)
1767 ? kNoRegister
1768 : UseRegister(
1769 static_cast<Register>(Utils::CountTrailingZerosWord(free)),
1770 used);
1771}
1772
1773void Assembler::StoreBarrier(Register object,
1774 Register value,
1775 CanBeSmi can_be_smi,
1776 Register scratch) {
1777 // x.slot = x. Barrier should have been removed at the IL level.
1778 ASSERT(object != value);
1779 ASSERT(object != LINK_REGISTER);
1780 ASSERT(value != LINK_REGISTER);
1781 ASSERT(object != scratch);
1782 ASSERT(value != scratch);
1783 ASSERT(scratch != kNoRegister);
1784
1785 // In parallel, test whether
1786 // - object is old and not remembered and value is new, or
1787 // - object is old and value is old and not marked and concurrent marking is
1788 // in progress
1789 // If so, call the WriteBarrier stub, which will either add object to the
1790 // store buffer (case 1) or add value to the marking stack (case 2).
1791 // Compare UntaggedObject::StorePointer.
1792 Label done;
1793 if (can_be_smi == kValueCanBeSmi) {
1794 BranchIfSmi(value, &done, kNearJump);
1795 } else {
1796#if defined(DEBUG)
1797 Label passed_check;
1798 BranchIfNotSmi(value, &passed_check, kNearJump);
1799 Breakpoint();
1800 Bind(&passed_check);
1801#endif
1802 }
1803 const bool preserve_lr = lr_state().LRContainsReturnAddress();
1804 if (preserve_lr) {
1805 SPILLS_LR_TO_FRAME(Push(LR));
1806 }
1807 CLOBBERS_LR({
1808 ldrb(scratch, FieldAddress(object, target::Object::tags_offset()));
1809 ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
1810 and_(scratch, LR,
1811 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1812 ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
1813 tst(scratch, Operand(LR));
1814 });
1815 if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg)) {
1816 // Unlikely. Only non-graph intrinsics.
1817 // TODO(rmacnak): Shuffle registers in intrinsics.
1818 Label restore_and_done;
1819 b(&restore_and_done, ZERO);
1820 Register objectForCall = object;
1821 if (object != kWriteBarrierValueReg) {
1822 Push(kWriteBarrierValueReg);
1823 } else {
1824 COMPILE_ASSERT(R2 != kWriteBarrierValueReg);
1825 COMPILE_ASSERT(R3 != kWriteBarrierValueReg);
1826 objectForCall = (value == R2) ? R3 : R2;
1827 PushList((1 << kWriteBarrierValueReg) | (1 << objectForCall));
1828 mov(objectForCall, Operand(object));
1829 }
1830 mov(kWriteBarrierValueReg, Operand(value));
1831 generate_invoke_write_barrier_wrapper_(AL, objectForCall);
1832
1833 if (object != kWriteBarrierValueReg) {
1834 Pop(kWriteBarrierValueReg);
1835 } else {
1836 PopList((1 << kWriteBarrierValueReg) | (1 << objectForCall));
1837 }
1838 Bind(&restore_and_done);
1839 } else {
1840 generate_invoke_write_barrier_wrapper_(NE, object);
1841 }
1842 if (preserve_lr) {
1843 RESTORES_LR_FROM_FRAME(Pop(LR));
1844 }
1845 Bind(&done);
1846}
1847
1848void Assembler::ArrayStoreBarrier(Register object,
1849 Register slot,
1850 Register value,
1851 CanBeSmi can_be_smi,
1852 Register scratch) {
1853 ASSERT(object != LINK_REGISTER);
1854 ASSERT(value != LINK_REGISTER);
1855 ASSERT(slot != LINK_REGISTER);
1856 ASSERT(object != scratch);
1857 ASSERT(value != scratch);
1858 ASSERT(slot != scratch);
1859 ASSERT(scratch != kNoRegister);
1860
1861 // In parallel, test whether
1862 // - object is old and not remembered and value is new, or
1863 // - object is old and value is old and not marked and concurrent marking is
1864 // in progress
1865 // If so, call the WriteBarrier stub, which will either add object to the
1866 // store buffer (case 1) or add value to the marking stack (case 2).
1867 // Compare UntaggedObject::StorePointer.
1868 Label done;
1869 if (can_be_smi == kValueCanBeSmi) {
1870 BranchIfSmi(value, &done, kNearJump);
1871 } else {
1872#if defined(DEBUG)
1873 Label passed_check;
1874 BranchIfNotSmi(value, &passed_check, kNearJump);
1875 Breakpoint();
1876 Bind(&passed_check);
1877#endif
1878 }
1879 const bool preserve_lr = lr_state().LRContainsReturnAddress();
1880 if (preserve_lr) {
1881 SPILLS_LR_TO_FRAME(Push(LR));
1882 }
1883
1884 CLOBBERS_LR({
1885 ldrb(scratch, FieldAddress(object, target::Object::tags_offset()));
1886 ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
1887 and_(scratch, LR,
1888 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1889 ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
1890 tst(scratch, Operand(LR));
1891 });
1892
1893 if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
1894 (slot != kWriteBarrierSlotReg)) {
1895 // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
1896 // from StoreIndexInstr, which gets these exact registers from the register
1897 // allocator.
1898 UNIMPLEMENTED();
1899 }
1900 generate_invoke_array_write_barrier_(NE);
1901 if (preserve_lr) {
1902 RESTORES_LR_FROM_FRAME(Pop(LR));
1903 }
1904 Bind(&done);
1905}
1906
1907void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
1908 const Address& dest,
1909 const Object& value,
1910 MemoryOrder memory_order,
1911 OperandSize size) {
1913 ASSERT_EQUAL(dest.mode(), Address::Mode::Offset);
1914 ASSERT_EQUAL(dest.kind(), Address::OffsetKind::Immediate);
1915 int32_t ignored = 0;
1916 Register scratch = TMP;
1917 if (!Address::CanHoldStoreOffset(size, dest.offset(), &ignored)) {
1918 // As there is no TMP2 on ARM7, Store uses TMP when the instruction cannot
1919 // contain the offset, so we need to use a different scratch register
1920 // for loading the object.
1921 scratch = dest.base() == R9 ? R8 : R9;
1922 Push(scratch);
1923 }
1924 ASSERT(IsOriginalObject(value));
1925 DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
1926 // No store buffer update.
1927 LoadObject(scratch, value);
1928 if (memory_order == kRelease) {
1929 StoreRelease(scratch, dest);
1930 } else {
1931 Store(scratch, dest);
1932 }
1933 if (scratch != TMP) {
1934 Pop(scratch);
1935 }
1936}
1937
1938void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
1939 Register value) {
1940 // We can't assert the incremental barrier is not needed here, only the
1941 // generational barrier. We sometimes omit the write barrier when 'value' is
1942 // a constant, but we don't eagerly mark 'value' and instead assume it is also
1943 // reachable via a constant pool, so it doesn't matter if it is not traced via
1944 // 'object'.
1945 Label done;
1946 BranchIfSmi(value, &done, kNearJump);
1947 ldrb(TMP, FieldAddress(value, target::Object::tags_offset()));
1948 tst(TMP, Operand(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
1949 b(&done, ZERO);
1950 ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
1951 tst(TMP, Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));
1952 b(&done, ZERO);
1953 Stop("Write barrier is required");
1954 Bind(&done);
1955}
1956
1957void Assembler::StoreInternalPointer(Register object,
1958 const Address& dest,
1959 Register value) {
1960 str(value, dest);
1961}
1962
1963void Assembler::InitializeFieldsNoBarrier(Register object,
1964 Register begin,
1965 Register end,
1966 Register value_even,
1967 Register value_odd) {
1968 ASSERT(value_odd == value_even + 1);
1969 Label init_loop;
1970 Bind(&init_loop);
1971 AddImmediate(begin, 2 * target::kWordSize);
1972 cmp(begin, Operand(end));
1973 strd(value_even, value_odd, begin, -2 * target::kWordSize, LS);
1974 b(&init_loop, CC);
1975 str(value_even, Address(begin, -2 * target::kWordSize), HI);
1976}
1977
1978void Assembler::InitializeFieldsNoBarrierUnrolled(Register object,
1979 Register base,
1980 intptr_t begin_offset,
1981 intptr_t end_offset,
1982 Register value_even,
1983 Register value_odd) {
1984 ASSERT(value_odd == value_even + 1);
1985 intptr_t current_offset = begin_offset;
1986 while (current_offset + target::kWordSize < end_offset) {
1987 strd(value_even, value_odd, base, current_offset);
1988 current_offset += 2 * target::kWordSize;
1989 }
1990 while (current_offset < end_offset) {
1991 str(value_even, Address(base, current_offset));
1992 current_offset += target::kWordSize;
1993 }
1994}
1995
1996void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
1997#if defined(DEBUG)
1998 Label done;
1999 tst(value, Operand(kHeapObjectTag));
2000 b(&done, EQ);
2001 Stop("New value must be Smi.");
2002 Bind(&done);
2003#endif // defined(DEBUG)
2004 Store(value, dest);
2005}
2006
2007void Assembler::ExtractClassIdFromTags(Register result,
2008 Register tags,
2009 Condition cond) {
2010 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2011 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2012 ubfx(result, tags, target::UntaggedObject::kClassIdTagPos,
2013 target::UntaggedObject::kClassIdTagSize, cond);
2014}
2015
2016void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
2017 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
2018 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
2019 Lsr(result, tags,
2020 Operand(target::UntaggedObject::kSizeTagPos -
2021 target::ObjectAlignment::kObjectAlignmentLog2),
2022 AL);
2023 AndImmediate(result, result,
2024 (Utils::NBitMask(target::UntaggedObject::kSizeTagSize)
2025 << target::ObjectAlignment::kObjectAlignmentLog2));
2026}
2027
2028void Assembler::LoadClassId(Register result, Register object, Condition cond) {
2029 ldr(result, FieldAddress(object, target::Object::tags_offset()), cond);
2030 ExtractClassIdFromTags(result, result, cond);
2031}
2032
2033void Assembler::LoadClassById(Register result, Register class_id) {
2034 ASSERT(result != class_id);
2035
2036 const intptr_t table_offset =
2037 target::IsolateGroup::cached_class_table_table_offset();
2038
2039 LoadIsolateGroup(result);
2040 LoadFromOffset(result, result, table_offset);
2041 ldr(result, Address(result, class_id, LSL, target::kWordSizeLog2));
2042}
2043
2044void Assembler::CompareClassId(Register object,
2045 intptr_t class_id,
2046 Register scratch) {
2047 LoadClassId(scratch, object);
2048 CompareImmediate(scratch, class_id);
2049}
2050
2051void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
2052 tst(object, Operand(kSmiTagMask));
2053 LoadClassId(result, object, NE);
2054 LoadImmediate(result, kSmiCid, EQ);
2055}
2056
2057void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
2058 LoadClassIdMayBeSmi(result, object);
2059 SmiTag(result);
2060}
2061
2062void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
2063 Register src,
2064 Register scratch,
2065 bool can_be_null) {
2066#if defined(DEBUG)
2067 Comment("Check that object in register has cid %" Pd "", cid);
2068 Label matches;
2069 LoadClassIdMayBeSmi(scratch, src);
2070 CompareImmediate(scratch, cid);
2071 BranchIf(EQUAL, &matches, Assembler::kNearJump);
2072 if (can_be_null) {
2073 CompareImmediate(scratch, kNullCid);
2074 BranchIf(EQUAL, &matches, Assembler::kNearJump);
2075 }
2076 Breakpoint();
2077 Bind(&matches);
2078#endif
2079}
2080
2081void Assembler::BailoutIfInvalidBranchOffset(int32_t offset) {
2082 if (!CanEncodeBranchDistance(offset)) {
2083 ASSERT(!use_far_branches());
2084 BailoutWithBranchOffsetError();
2085 }
2086}
2087
2088int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) {
2089 // The offset is off by 8 due to the way the ARM CPUs read PC.
2090 offset -= Instr::kPCReadOffset;
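  // For example, a branch to the immediately following instruction has
  // offset 4, which encodes as (4 - 8) >> 2 = -1 in the 24-bit offset field.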
2091
2092 // Properly preserve only the bits supported in the instruction.
2093 offset >>= 2;
2094 offset &= kBranchOffsetMask;
2095 return (inst & ~kBranchOffsetMask) | offset;
2096}
2097
2098int Assembler::DecodeBranchOffset(int32_t inst) {
2099 // Sign-extend, left-shift by 2, then add 8.
2100 return ((((inst & kBranchOffsetMask) << 8) >> 6) + Instr::kPCReadOffset);
2101}
2102
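// movw/movt each carry a 16-bit immediate split into a 4-bit and a 12-bit
// field (instruction bits 19:16 and 11:0); this helper reassembles the full
// 32-bit value loaded into IP by such a movw/movt pair.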
2103static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) {
2104 int32_t offset = 0;
2105 offset |= (movt & 0xf0000) << 12;
2106 offset |= (movt & 0xfff) << 16;
2107 offset |= (movw & 0xf0000) >> 4;
2108 offset |= movw & 0xfff;
2109 return offset;
2110}
2111
2112class PatchFarBranch : public AssemblerFixup {
2113 public:
2114 PatchFarBranch() {}
2115
2116 void Process(const MemoryRegion& region, intptr_t position) {
2117 ProcessARMv7(region, position);
2118 }
2119
2120 private:
2121 void ProcessARMv7(const MemoryRegion& region, intptr_t position) {
2122 const int32_t movw = region.Load<int32_t>(position);
2123 const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize);
2124 const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize);
2125
2126 if (((movt & 0xfff0f000) == 0xe340c000) && // movt IP, high
2127 ((movw & 0xfff0f000) == 0xe300c000)) { // movw IP, low
2128 const int32_t offset = DecodeARMv7LoadImmediate(movt, movw);
2129 const int32_t dest = region.start() + offset;
2130 const uint16_t dest_high = Utils::High16Bits(dest);
2131 const uint16_t dest_low = Utils::Low16Bits(dest);
2132 const int32_t patched_movt =
2133 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
2134 const int32_t patched_movw =
2135 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
2136
2137 region.Store<int32_t>(position, patched_movw);
2138 region.Store<int32_t>(position + Instr::kInstrSize, patched_movt);
2139 return;
2140 }
2141
2142 // If the offset loading instructions aren't there, we must have replaced
2143 // the far branch with a near one, and so these instructions
2144 // should be NOPs.
2145 ASSERT((movt == Instr::kNopInstruction) && (bx == Instr::kNopInstruction));
2146 }
2147
2148 virtual bool IsPointerOffset() const { return false; }
2149};
2150
2151void Assembler::EmitFarBranch(Condition cond, int32_t offset, bool link) {
2152 buffer_.EmitFixup(new PatchFarBranch());
2153 LoadPatchableImmediate(IP, offset);
2154 if (link) {
2155 blx(IP, cond);
2156 } else {
2157 bx(IP, cond);
2158 }
2159}
2160
2161void Assembler::EmitBranch(Condition cond, Label* label, bool link) {
2162 if (label->IsBound()) {
2163 const int32_t dest = label->Position() - buffer_.Size();
2164 if (use_far_branches() && !CanEncodeBranchDistance(dest)) {
2165 EmitFarBranch(cond, label->Position(), link);
2166 } else {
2167 EmitType5(cond, dest, link);
2168 }
2169 label->UpdateLRState(lr_state());
2170 } else {
2171 const intptr_t position = buffer_.Size();
2172 if (use_far_branches()) {
2173 const int32_t dest = label->position_;
2174 EmitFarBranch(cond, dest, link);
2175 } else {
2176 // Use the offset field of the branch instruction for linking the sites.
2177 EmitType5(cond, label->position_, link);
2178 }
2179 label->LinkTo(position, lr_state());
2180 }
2181}
2182
2183void Assembler::BindARMv7(Label* label) {
2184 ASSERT(!label->IsBound());
2185 intptr_t bound_pc = buffer_.Size();
2186 while (label->IsLinked()) {
2187 const int32_t position = label->Position();
2188 int32_t dest = bound_pc - position;
2189 if (use_far_branches() && !CanEncodeBranchDistance(dest)) {
2190 // Far branches are enabled and we can't encode the branch offset.
2191
2192 // Grab instructions that load the offset.
2193 const int32_t movw =
2194 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
2195 const int32_t movt =
2196 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2197
2198 // Change from relative to the branch to relative to the assembler
2199 // buffer.
2200 dest = buffer_.Size();
2201 const uint16_t dest_high = Utils::High16Bits(dest);
2202 const uint16_t dest_low = Utils::Low16Bits(dest);
2203 const int32_t patched_movt =
2204 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
2205 const int32_t patched_movw =
2206 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
2207
2208 // Rewrite the instructions.
2209 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_movw);
2210 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_movt);
2211 label->position_ = DecodeARMv7LoadImmediate(movt, movw);
2212 } else if (use_far_branches() && CanEncodeBranchDistance(dest)) {
2213 // Far branches are enabled, but we can encode the branch offset.
2214
2215 // Grab instructions that load the offset, and the branch.
2216 const int32_t movw =
2217 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
2218 const int32_t movt =
2219 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2220 const int32_t branch =
2221 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
2222
2223 // Grab the branch condition, and encode the link bit.
2224 const int32_t cond = branch & 0xf0000000;
2225 const int32_t link = (branch & 0x20) << 19;
2226
2227 // Encode the branch and the offset.
2228 const int32_t new_branch = cond | link | 0x0a000000;
2229 const int32_t encoded = EncodeBranchOffset(dest, new_branch);
2230
2231 // Write the encoded branch instruction followed by two nops.
2232 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, encoded);
2233 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
2234 Instr::kNopInstruction);
2235 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize,
2236 Instr::kNopInstruction);
2237
2238 label->position_ = DecodeARMv7LoadImmediate(movt, movw);
2239 } else {
2240 BailoutIfInvalidBranchOffset(dest);
2241 int32_t next = buffer_.Load<int32_t>(position);
2242 int32_t encoded = Assembler::EncodeBranchOffset(dest, next);
2243 buffer_.Store<int32_t>(position, encoded);
2244 label->position_ = Assembler::DecodeBranchOffset(next);
2245 }
2246 }
2247 label->BindTo(bound_pc, lr_state());
2248}
2249
2250void Assembler::Bind(Label* label) {
2251 BindARMv7(label);
2252}
2253
2254OperandSize Address::OperandSizeFor(intptr_t cid) {
2255 auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
2256 switch (rep) {
2257 case kUnboxedInt64:
2258 return kDWord;
2259 case kUnboxedFloat:
2260 return kSWord;
2261 case kUnboxedDouble:
2262 return kDWord;
2263 case kUnboxedInt32x4:
2264 case kUnboxedFloat32x4:
2265 case kUnboxedFloat64x2:
2266 return kRegList;
2267 default:
2268 return RepresentationUtils::OperandSize(rep);
2269 }
2270}
2271
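// ARM addressing mode 2 (word and unsigned byte transfers) encodes a 12-bit
// immediate offset, addressing mode 3 (halfword, signed byte, ldrd/strd) only
// an 8-bit one, and VFP transfers use an 8-bit word offset, i.e. a multiple
// of 4 up to +/-1020.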
2272bool Address::CanHoldLoadOffset(OperandSize size,
2273 int32_t offset,
2274 int32_t* offset_mask) {
2275 switch (size) {
2276 case kByte:
2277 case kTwoBytes:
2278 case kUnsignedTwoBytes:
2279 case kWordPair: {
2280 *offset_mask = 0xff;
2281 return Utils::MagnitudeIsUint(8, offset); // Addressing mode 3.
2282 }
2283 case kUnsignedByte:
2284 case kFourBytes:
2285 case kUnsignedFourBytes: {
2286 *offset_mask = 0xfff;
2287 return Utils::MagnitudeIsUint(12, offset); // Addressing mode 2.
2288 }
2289 case kSWord:
2290 case kDWord: {
2291 *offset_mask = 0x3fc; // Multiple of 4.
2292 // VFP addressing mode.
2293 return (Utils::MagnitudeIsUint(10, offset) &&
2294 Utils::IsAligned(offset, 4));
2295 }
2296 case kRegList: {
2297 *offset_mask = 0x0;
2298 return offset == 0;
2299 }
2300 default: {
2301 UNREACHABLE();
2302 return false;
2303 }
2304 }
2305}
2306
2307bool Address::CanHoldStoreOffset(OperandSize size,
2308 int32_t offset,
2309 int32_t* offset_mask) {
2310 switch (size) {
2311 case kTwoBytes:
2312 case kUnsignedTwoBytes:
2313 case kWordPair: {
2314 *offset_mask = 0xff;
2315 return Utils::MagnitudeIsUint(8, offset); // Addressing mode 3.
2316 }
2317 case kByte:
2318 case kUnsignedByte:
2319 case kFourBytes:
2320 case kUnsignedFourBytes: {
2321 *offset_mask = 0xfff;
2322 return Utils::MagnitudeIsUint(12, offset); // Addressing mode 2.
2323 }
2324 case kSWord:
2325 case kDWord: {
2326 *offset_mask = 0x3fc; // Multiple of 4.
2327 // VFP addressing mode.
2328 return (Utils::MagnitudeIsUint(10, offset) &&
2329 Utils::IsAligned(offset, 4));
2330 }
2331 case kRegList: {
2332 *offset_mask = 0x0;
2333 return offset == 0;
2334 }
2335 default: {
2336 UNREACHABLE();
2337 return false;
2338 }
2339 }
2340}
2341
2342bool Address::CanHoldImmediateOffset(bool is_load,
2343 intptr_t cid,
2344 int64_t offset) {
2345 int32_t offset_mask = 0;
2346 if (is_load) {
2347 return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask);
2348 } else {
2349 return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask);
2350 }
2351}
2352
2353void Assembler::Push(Register rd, Condition cond) {
2354 str(rd, Address(SP, -target::kWordSize, Address::PreIndex), cond);
2355}
2356
2357void Assembler::Pop(Register rd, Condition cond) {
2358 ldr(rd, Address(SP, target::kWordSize, Address::PostIndex), cond);
2359}
2360
2361void Assembler::PushList(RegList regs, Condition cond) {
2362 stm(DB_W, SP, regs, cond);
2363}
2364
2365void Assembler::PopList(RegList regs, Condition cond) {
2366 ldm(IA_W, SP, regs, cond);
2367}
2368
2369void Assembler::PushQuad(FpuRegister reg, Condition cond) {
2370 DRegister dreg = EvenDRegisterOf(reg);
2371 vstmd(DB_W, SP, dreg, 2, cond); // 2 D registers per Q register.
2372}
2373
2374void Assembler::PopQuad(FpuRegister reg, Condition cond) {
2375 DRegister dreg = EvenDRegisterOf(reg);
2376 vldmd(IA_W, SP, dreg, 2, cond); // 2 D registers per Q register.
2377}
2378
2379void Assembler::PushRegisters(const RegisterSet& regs) {
2380 const intptr_t fpu_regs_count = regs.FpuRegisterCount();
2381 if (fpu_regs_count > 0) {
2382 AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize));
2383 // Store fpu registers with the lowest register number at the lowest
2384 // address.
2385 intptr_t offset = 0;
2386 mov(TMP, Operand(SP));
2387 for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
2388 QRegister fpu_reg = static_cast<QRegister>(i);
2389 if (regs.ContainsFpuRegister(fpu_reg)) {
2390 DRegister d = EvenDRegisterOf(fpu_reg);
2391 ASSERT(d + 1 == OddDRegisterOf(fpu_reg));
2392 vstmd(IA_W, IP, d, 2);
2393 offset += kFpuRegisterSize;
2394 }
2395 }
2396 ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
2397 }
2398
2399 // The order in which the registers are pushed must match the order
2400 // in which the registers are encoded in the safe point's stack map.
2401 // NOTE: This matches the order of ARM's multi-register push.
2402 RegList reg_list = 0;
2403 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
2404 Register reg = static_cast<Register>(i);
2405 if (regs.ContainsRegister(reg)) {
2406 reg_list |= (1 << reg);
2407 }
2408 }
2409 if (reg_list != 0) {
2410 PushList(reg_list);
2411 }
2412}
2413
2414void Assembler::PopRegisters(const RegisterSet& regs) {
2415 RegList reg_list = 0;
2416 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
2417 Register reg = static_cast<Register>(i);
2418 if (regs.ContainsRegister(reg)) {
2419 reg_list |= (1 << reg);
2420 }
2421 }
2422 if (reg_list != 0) {
2423 PopList(reg_list);
2424 }
2425
2426 const intptr_t fpu_regs_count = regs.FpuRegisterCount();
2427 if (fpu_regs_count > 0) {
2428 // Fpu registers have the lowest register number at the lowest address.
2429 intptr_t offset = 0;
2430 for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
2431 QRegister fpu_reg = static_cast<QRegister>(i);
2432 if (regs.ContainsFpuRegister(fpu_reg)) {
2433 DRegister d = EvenDRegisterOf(fpu_reg);
2434 ASSERT(d + 1 == OddDRegisterOf(fpu_reg));
2435 vldmd(IA_W, SP, d, 2);
2436 offset += kFpuRegisterSize;
2437 }
2438 }
2439 ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
2440 }
2441}
2442
2443void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
2444 // Collect the longest descending sequences of registers and
2445 // push each such sequence with a single STMDB instruction.
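  // For example, PushRegistersInOrder({R1, R0, R3, R2}) emits
  // stmdb sp!, {r0, r1} followed by stmdb sp!, {r2, r3}.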
2446 RegList pending_regs = 0;
2447 Register lowest_pending_reg = kNumberOfCpuRegisters;
2448 intptr_t num_pending_regs = 0;
2449 for (Register reg : regs) {
2450 if (reg >= lowest_pending_reg) {
2451 ASSERT(pending_regs != 0);
2452 if (num_pending_regs > 1) {
2453 PushList(pending_regs);
2454 } else {
2455 Push(lowest_pending_reg);
2456 }
2457 pending_regs = 0;
2458 num_pending_regs = 0;
2459 }
2460 pending_regs |= (1 << reg);
2461 lowest_pending_reg = reg;
2462 ++num_pending_regs;
2463 }
2464 if (pending_regs != 0) {
2465 if (num_pending_regs > 1) {
2466 PushList(pending_regs);
2467 } else {
2468 Push(lowest_pending_reg);
2469 }
2470 }
2471}
2472
2473void Assembler::PushNativeCalleeSavedRegisters() {
2474 // Save new context and C++ ABI callee-saved registers.
2475 PushList(kAbiPreservedCpuRegs);
2476
2477 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg);
2479 // Save FPU registers. 2 D registers per Q register.
2480 vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
2481}
2482
2483void Assembler::PopNativeCalleeSavedRegisters() {
2484 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg);
2485 // Restore C++ ABI callee-saved registers.
2486 // Restore FPU registers. 2 D registers per Q register.
2487 vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
2488 // Restore CPU registers.
2489 PopList(kAbiPreservedCpuRegs);
2490}
2491
2492void Assembler::ExtendValue(Register rd,
2493 Register rm,
2494 OperandSize sz,
2495 Condition cond) {
2496 switch (sz) {
2497 case kUnsignedFourBytes:
2498 case kFourBytes:
2499 if (rd == rm) return;
2500 return mov(rd, Operand(rm), cond);
2501 case kUnsignedTwoBytes:
2502 return ubfx(rd, rm, 0, kBitsPerInt16, cond);
2503 case kTwoBytes:
2504 return sbfx(rd, rm, 0, kBitsPerInt16, cond);
2505 case kUnsignedByte:
2506 return ubfx(rd, rm, 0, kBitsPerInt8, cond);
2507 case kByte:
2508 return sbfx(rd, rm, 0, kBitsPerInt8, cond);
2509 default:
2510 UNIMPLEMENTED();
2511 break;
2512 }
2513}
2514
2515void Assembler::Lsl(Register rd,
2516 Register rm,
2517 const Operand& shift_imm,
2518 Condition cond) {
2519 ASSERT(shift_imm.type() == 1);
2520 ASSERT(shift_imm.encoding() != 0); // Do not use Lsl if no shift is wanted.
2521 mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond);
2522}
2523
2524void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond) {
2525 mov(rd, Operand(rm, LSL, rs), cond);
2526}
2527
2528void Assembler::Lsr(Register rd,
2529 Register rm,
2530 const Operand& shift_imm,
2531 Condition cond) {
2532 ASSERT(shift_imm.type() == 1);
2533 uint32_t shift = shift_imm.encoding();
2534 ASSERT(shift != 0); // Do not use Lsr if no shift is wanted.
2535 if (shift == 32) {
2536 shift = 0; // Comply with UAL syntax.
2537 }
2538 mov(rd, Operand(rm, LSR, shift), cond);
2539}
2540
2541void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) {
2542 mov(rd, Operand(rm, LSR, rs), cond);
2543}
2544
2545void Assembler::Asr(Register rd,
2546 Register rm,
2547 const Operand& shift_imm,
2548 Condition cond) {
2549 ASSERT(shift_imm.type() == 1);
2550 uint32_t shift = shift_imm.encoding();
2551 ASSERT(shift != 0); // Do not use Asr if no shift is wanted.
2552 if (shift == 32) {
2553 shift = 0; // Comply with UAL syntax.
2554 }
2555 mov(rd, Operand(rm, ASR, shift), cond);
2556}
2557
2558void Assembler::Asrs(Register rd,
2559 Register rm,
2560 const Operand& shift_imm,
2561 Condition cond) {
2562 ASSERT(shift_imm.type() == 1);
2563 uint32_t shift = shift_imm.encoding();
2564 ASSERT(shift != 0); // Do not use Asr if no shift is wanted.
2565 if (shift == 32) {
2566 shift = 0; // Comply with UAL syntax.
2567 }
2568 movs(rd, Operand(rm, ASR, shift), cond);
2569}
2570
2571void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) {
2572 mov(rd, Operand(rm, ASR, rs), cond);
2573}
2574
2575void Assembler::Ror(Register rd,
2576 Register rm,
2577 const Operand& shift_imm,
2578 Condition cond) {
2579 ASSERT(shift_imm.type() == 1);
2580 ASSERT(shift_imm.encoding() != 0); // Use Rrx instruction.
2581 mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond);
2582}
2583
2584void Assembler::Ror(Register rd, Register rm, Register rs, Condition cond) {
2585 mov(rd, Operand(rm, ROR, rs), cond);
2586}
2587
2588void Assembler::Rrx(Register rd, Register rm, Condition cond) {
2589 mov(rd, Operand(rm, ROR, 0), cond);
2590}
2591
2592void Assembler::SignFill(Register rd, Register rm, Condition cond) {
2593 Asr(rd, rm, Operand(31), cond);
2594}
2595
2596void Assembler::Vreciprocalqs(QRegister qd, QRegister qm) {
2597 ASSERT(qm != QTMP);
2598 ASSERT(qd != QTMP);
2599
2600 // Reciprocal estimate.
2601 vrecpeqs(qd, qm);
2602 // 2 Newton-Raphson steps.
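  // Each step computes xn+1 = xn * (2 - Q1 * xn); vrecps produces the
  // (2 - Q1 * xn) factor.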
2603 vrecpsqs(QTMP, qm, qd);
2604 vmulqs(qd, qd, QTMP);
2605 vrecpsqs(QTMP, qm, qd);
2606 vmulqs(qd, qd, QTMP);
2607}
2608
2609void Assembler::VreciprocalSqrtqs(QRegister qd, QRegister qm) {
2610 ASSERT(qm != QTMP);
2611 ASSERT(qd != QTMP);
2612
2613 // Reciprocal square root estimate.
2614 vrsqrteqs(qd, qm);
2615 // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2.
2616 // First step.
2617 vmulqs(QTMP, qd, qd); // QTMP <- xn^2
2618 vrsqrtsqs(QTMP, qm, QTMP); // QTMP <- (3 - Q1*QTMP) / 2.
2619 vmulqs(qd, qd, QTMP); // xn+1 <- xn * QTMP
2620 // Second step.
2621 vmulqs(QTMP, qd, qd);
2622 vrsqrtsqs(QTMP, qm, QTMP);
2623 vmulqs(qd, qd, QTMP);
2624}
2625
2626void Assembler::Vsqrtqs(QRegister qd, QRegister qm, QRegister temp) {
2627 ASSERT(temp != QTMP);
2628 ASSERT(qm != QTMP);
2629 ASSERT(qd != QTMP);
2630
2631 if (temp != kNoQRegister) {
2632 vmovq(temp, qm);
2633 qm = temp;
2634 }
2635
2636 VreciprocalSqrtqs(qd, qm);
2637 vmovq(qm, qd);
2638 Vreciprocalqs(qd, qm);
2639}
2640
2641void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
2642 ASSERT(qd != QTMP);
2643 ASSERT(qn != QTMP);
2644 ASSERT(qm != QTMP);
2645
2646 Vreciprocalqs(qd, qm);
2647 vmulqs(qd, qn, qd);
2648}
2649
2650void Assembler::Branch(const Address& address, Condition cond) {
2651 ldr(PC, address, cond);
2652}
2653
2654void Assembler::BranchLink(intptr_t target_code_pool_index,
2655 CodeEntryKind entry_kind) {
2656 CLOBBERS_LR({
2657 // Avoid clobbering CODE_REG when invoking code in precompiled mode.
2658 // We don't actually use CODE_REG in the callee and caller might
2659 // be using CODE_REG for a live value (e.g. a value that is alive
2660 // across invocation of a shared stub like the one we use for
2661 // allocating Mint boxes).
2662 const Register code_reg = FLAG_precompiled_mode ? LR : CODE_REG;
2663 LoadWordFromPoolIndex(code_reg, target_code_pool_index, PP, AL);
2664 Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
2665 });
2666}
2667
2668void Assembler::BranchLink(
2669 const Code& target,
2670 ObjectPoolBuilderEntry::Patchability patchable,
2671 CodeEntryKind entry_kind,
2672 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
2673 // Make sure that class CallPattern is able to patch the label referred
2674 // to by this code sequence.
2675 // For added code robustness, use 'blx lr' in a patchable sequence and
2676 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
2677 const intptr_t index = object_pool_builder().FindObject(
2678 ToObject(target), patchable, snapshot_behavior);
2679 BranchLink(index, entry_kind);
2680}
2681
2682void Assembler::BranchLinkPatchable(
2683 const Code& target,
2684 CodeEntryKind entry_kind,
2685 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
2686 BranchLink(target, ObjectPoolBuilderEntry::kPatchable, entry_kind,
2687 snapshot_behavior);
2688}
2689
2690void Assembler::BranchLinkWithEquivalence(const Code& target,
2691 const Object& equivalence,
2692 CodeEntryKind entry_kind) {
2693 // Make sure that class CallPattern is able to patch the label referred
2694 // to by this code sequence.
2695 // For added code robustness, use 'blx lr' in a patchable sequence and
2696 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
2697 const intptr_t index =
2698 object_pool_builder().FindObject(ToObject(target), equivalence);
2699 BranchLink(index, entry_kind);
2700}
2701
2702void Assembler::BranchLink(const ExternalLabel* label) {
2703 CLOBBERS_LR({
2704 LoadImmediate(LR, label->address()); // Target address is never patched.
2705 blx(LR); // Use blx instruction so that the return branch prediction works.
2706 });
2707}
2708
2709void Assembler::BranchLinkOffset(Register base, int32_t offset) {
2710 ASSERT(base != PC);
2711 ASSERT(base != IP);
2712 LoadFromOffset(IP, base, offset);
2713 blx(IP); // Use blx instruction so that the return branch prediction works.
2714}
2715
2716void Assembler::LoadPatchableImmediate(Register rd,
2717 int32_t value,
2718 Condition cond) {
2719 const uint16_t value_low = Utils::Low16Bits(value);
2720 const uint16_t value_high = Utils::High16Bits(value);
2721 movw(rd, value_low, cond);
2722 movt(rd, value_high, cond);
2723}
2724
2725void Assembler::LoadDecodableImmediate(Register rd,
2726 int32_t value,
2727 Condition cond) {
2728 movw(rd, Utils::Low16Bits(value), cond);
2729 const uint16_t value_high = Utils::High16Bits(value);
2730 if (value_high != 0) {
2731 movt(rd, value_high, cond);
2732 }
2733}
2734
2735void Assembler::LoadImmediate(Register rd, Immediate value, Condition cond) {
2736 LoadImmediate(rd, value.value(), cond);
2737}
2738
2739void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
2740 Operand o;
2741 if (Operand::CanHold(value, &o)) {
2742 mov(rd, o, cond);
2743 } else if (Operand::CanHold(~value, &o)) {
2744 mvn_(rd, o, cond);
2745 } else {
2746 LoadDecodableImmediate(rd, value, cond);
2747 }
2748}
2749
2750void Assembler::LoadSImmediate(SRegister sd, float value, Condition cond) {
2751 if (!vmovs(sd, value, cond)) {
2752 const DRegister dd = static_cast<DRegister>(sd >> 1);
2753 const int index = sd & 1;
2754 LoadImmediate(IP, bit_cast<int32_t, float>(value), cond);
2755 vmovdr(dd, index, IP, cond);
2756 }
2757}
2758
2759void Assembler::LoadDImmediate(DRegister dd,
2760 double value,
2761 Register scratch,
2762 Condition cond) {
2763 ASSERT(scratch != PC);
2764 ASSERT(scratch != IP);
2765 if (vmovd(dd, value, cond)) return;
2766
2767 int64_t imm64 = bit_cast<int64_t, double>(value);
2768 if (constant_pool_allowed()) {
2769 intptr_t index = object_pool_builder().FindImmediate64(imm64);
2770 intptr_t offset =
2771 target::ObjectPool::element_offset(index) - kHeapObjectTag;
2772 LoadDFromOffset(dd, PP, offset, cond);
2773 } else {
2774 // A scratch register and IP are needed to load an arbitrary double.
2775 ASSERT(scratch != kNoRegister);
2776 int64_t imm64 = bit_cast<int64_t, double>(value);
2777 LoadImmediate(IP, Utils::Low32Bits(imm64), cond);
2778 LoadImmediate(scratch, Utils::High32Bits(imm64), cond);
2779 vmovdrr(dd, IP, scratch, cond);
2780 }
2781}
2782
2783void Assembler::LoadQImmediate(QRegister qd, simd128_value_t value) {
2784 ASSERT(constant_pool_allowed());
2785 intptr_t index = object_pool_builder().FindImmediate128(value);
2786 intptr_t offset = target::ObjectPool::element_offset(index) - kHeapObjectTag;
2787 LoadMultipleDFromOffset(EvenDRegisterOf(qd), 2, PP, offset);
2788}
2789
2790Address Assembler::PrepareLargeLoadOffset(const Address& address,
2791 OperandSize size,
2792 Condition cond) {
2793 ASSERT(size != kWordPair);
2794 if (address.kind() != Address::Immediate) {
2795 return address;
2796 }
2797 int32_t offset = address.offset();
2798 int32_t offset_mask = 0;
2799 if (Address::CanHoldLoadOffset(size, offset, &offset_mask)) {
2800 return address;
2801 }
2802 auto mode = address.mode();
2803 // If the retrieved offset is negative, then the U bit was flipped during
2804 // encoding, so re-flip it.
2805 if (offset < 0) {
2806 mode = static_cast<Address::Mode>(mode ^ U);
2807 }
2808 // If writing back post-indexing, we can't separate the instruction into
2809 // two parts and the offset must fit.
2810 ASSERT((mode | U) != Address::PostIndex);
2811 // If we're writing back pre-indexing, we must add directly to the base,
2812 // otherwise we use TMP.
2813 Register base = address.base();
2814 ASSERT(base != TMP || address.has_writeback());
2815 Register temp = address.has_writeback() ? base : TMP;
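  // Split the offset: add the part that cannot be encoded to the base (into
  // temp) and keep only the encodable low bits in the address. For example, a
  // word load at offset 0x1004 becomes add temp, base, #0x1000 followed by
  // ldr reg, [temp, #4].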
2816 AddImmediate(temp, base, offset & ~offset_mask, cond);
2817 base = temp;
2818 offset = offset & offset_mask;
2819 return Address(base, offset, mode);
2820}
2821
2822Address Assembler::PrepareLargeStoreOffset(const Address& address,
2823 OperandSize size,
2824 Condition cond) {
2825 ASSERT(size != kWordPair);
2826 if (address.kind() != Address::Immediate) {
2827 return address;
2828 }
2829 int32_t offset = address.offset();
2830 int32_t offset_mask = 0;
2831 if (Address::CanHoldStoreOffset(size, offset, &offset_mask)) {
2832 return address;
2833 }
2834 auto mode = address.mode();
2835 // If the retrieved offset is negative, then the U bit was flipped during
2836 // encoding, so re-flip it.
2837 if (offset < 0) {
2838 mode = static_cast<Address::Mode>(mode ^ U);
2839 }
2840 // If writing back post-indexing, we can't separate the instruction into
2841 // two parts and the offset must fit.
2842 ASSERT((mode | U) != Address::PostIndex);
2843 // If we're writing back pre-indexing, we must add directly to the base,
2844 // otherwise we use TMP.
2845 Register base = address.base();
2846 ASSERT(base != TMP || address.has_writeback());
2847 Register temp = address.has_writeback() ? base : TMP;
2848 AddImmediate(temp, base, offset & ~offset_mask, cond);
2849 base = temp;
2850 offset = offset & offset_mask;
2851 return Address(base, offset, mode);
2852}
2853
2854void Assembler::Load(Register reg,
2855 const Address& address,
2856 OperandSize size,
2857 Condition cond) {
2858 const Address& addr = PrepareLargeLoadOffset(address, size, cond);
2859 switch (size) {
2860 case kByte:
2861 ldrsb(reg, addr, cond);
2862 break;
2863 case kUnsignedByte:
2864 ldrb(reg, addr, cond);
2865 break;
2866 case kTwoBytes:
2867 ldrsh(reg, addr, cond);
2868 break;
2869 case kUnsignedTwoBytes:
2870 ldrh(reg, addr, cond);
2871 break;
2872 case kUnsignedFourBytes:
2873 case kFourBytes:
2874 ldr(reg, addr, cond);
2875 break;
2876 default:
2877 UNREACHABLE();
2878 }
2879}
2880
2881void Assembler::LoadFromStack(Register dst, intptr_t depth) {
2882 ASSERT(depth >= 0);
2883 LoadFromOffset(dst, SPREG, depth * target::kWordSize);
2884}
2885
2886void Assembler::StoreToStack(Register src, intptr_t depth) {
2887 ASSERT(depth >= 0);
2888 StoreToOffset(src, SPREG, depth * target::kWordSize);
2889}
2890
2891void Assembler::CompareToStack(Register src, intptr_t depth) {
2892 LoadFromStack(TMP, depth);
2893 CompareRegisters(src, TMP);
2894}
2895
2896void Assembler::Store(Register reg,
2897 const Address& address,
2898 OperandSize size,
2899 Condition cond) {
2900 const Address& addr = PrepareLargeStoreOffset(address, size, cond);
2901 switch (size) {
2902 case kUnsignedByte:
2903 case kByte:
2904 strb(reg, addr, cond);
2905 break;
2906 case kUnsignedTwoBytes:
2907 case kTwoBytes:
2908 strh(reg, addr, cond);
2909 break;
2910 case kUnsignedFourBytes:
2911 case kFourBytes:
2912 str(reg, addr, cond);
2913 break;
2914 default:
2915 UNREACHABLE();
2916 }
2917}
2918
2919void Assembler::LoadSFromOffset(SRegister reg,
2920 Register base,
2921 int32_t offset,
2922 Condition cond) {
2923 vldrs(reg, PrepareLargeLoadOffset(Address(base, offset), kSWord, cond), cond);
2924}
2925
2926void Assembler::StoreSToOffset(SRegister reg,
2927 Register base,
2928 int32_t offset,
2929 Condition cond) {
2930 vstrs(reg, PrepareLargeStoreOffset(Address(base, offset), kSWord, cond),
2931 cond);
2932}
2933
2934void Assembler::LoadDFromOffset(DRegister reg,
2935 Register base,
2936 int32_t offset,
2937 Condition cond) {
2938 vldrd(reg, PrepareLargeLoadOffset(Address(base, offset), kDWord, cond), cond);
2939}
2940
2941void Assembler::StoreDToOffset(DRegister reg,
2942 Register base,
2943 int32_t offset,
2944 Condition cond) {
2945 vstrd(reg, PrepareLargeStoreOffset(Address(base, offset), kDWord, cond),
2946 cond);
2947}
2948
2949void Assembler::LoadMultipleDFromOffset(DRegister first,
2950 intptr_t count,
2951 Register base,
2952 int32_t offset) {
2953 ASSERT(base != IP);
2954 AddImmediate(IP, base, offset);
2955 vldmd(IA, IP, first, count);
2956}
2957
2958void Assembler::StoreMultipleDToOffset(DRegister first,
2959 intptr_t count,
2960 Register base,
2961 int32_t offset) {
2962 ASSERT(base != IP);
2963 AddImmediate(IP, base, offset);
2964 vstmd(IA, IP, first, count);
2965}
2966
2967void Assembler::AddImmediate(Register rd,
2968 Register rn,
2969 int32_t value,
2970 Condition cond) {
2971 if (value == 0) {
2972 if (rd != rn) {
2973 mov(rd, Operand(rn), cond);
2974 }
2975 return;
2976 }
2977 // We prefer to select the shorter code sequence rather than selecting add for
2978 // positive values and sub for negative ones, which would slightly improve
2979 // the readability of generated code for some constants.
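  // For example, AddImmediate(R0, R1, -4) emits "sub r0, r1, #4", since 4 is
  // an encodable operand immediate while -4 is not.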
2980 Operand o;
2981 if (Operand::CanHold(value, &o)) {
2982 add(rd, rn, o, cond);
2983 } else if (Operand::CanHold(-value, &o)) {
2984 sub(rd, rn, o, cond);
2985 } else {
2986 ASSERT(rn != IP);
2987 if (Operand::CanHold(~value, &o)) {
2988 mvn_(IP, o, cond);
2989 add(rd, rn, Operand(IP), cond);
2990 } else if (Operand::CanHold(~(-value), &o)) {
2991 mvn_(IP, o, cond);
2992 sub(rd, rn, Operand(IP), cond);
2993 } else if (value > 0) {
2994 LoadDecodableImmediate(IP, value, cond);
2995 add(rd, rn, Operand(IP), cond);
2996 } else {
2997 LoadDecodableImmediate(IP, -value, cond);
2998 sub(rd, rn, Operand(IP), cond);
2999 }
3000 }
3001}
3002
3003void Assembler::AddImmediateSetFlags(Register rd,
3004 Register rn,
3005 int32_t value,
3006 Condition cond) {
3007 Operand o;
3008 if (Operand::CanHold(value, &o)) {
3009 // Handles value == kMinInt32.
3010 adds(rd, rn, o, cond);
3011 } else if (Operand::CanHold(-value, &o)) {
3012 ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
3013 subs(rd, rn, o, cond);
3014 } else {
3015 ASSERT(rn != IP);
3016 if (Operand::CanHold(~value, &o)) {
3017 mvn_(IP, o, cond);
3018 adds(rd, rn, Operand(IP), cond);
3019 } else if (Operand::CanHold(~(-value), &o)) {
3020 ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
3021 mvn_(IP, o, cond);
3022 subs(rd, rn, Operand(IP), cond);
3023 } else {
3024 LoadDecodableImmediate(IP, value, cond);
3025 adds(rd, rn, Operand(IP), cond);
3026 }
3027 }
3028}
3029
3030void Assembler::SubImmediate(Register rd,
3031 Register rn,
3032 int32_t value,
3033 Condition cond) {
3034 AddImmediate(rd, rn, -value, cond);
3035}
3036
3037void Assembler::SubImmediateSetFlags(Register rd,
3038 Register rn,
3039 int32_t value,
3040 Condition cond) {
3041 Operand o;
3042 if (Operand::CanHold(value, &o)) {
3043 // Handles value == kMinInt32.
3044 subs(rd, rn, o, cond);
3045 } else if (Operand::CanHold(-value, &o)) {
3046 ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
3047 adds(rd, rn, o, cond);
3048 } else {
3049 ASSERT(rn != IP);
3050 if (Operand::CanHold(~value, &o)) {
3051 mvn_(IP, o, cond);
3052 subs(rd, rn, Operand(IP), cond);
3053 } else if (Operand::CanHold(~(-value), &o)) {
3054 ASSERT(value != kMinInt32); // Would cause erroneous overflow detection.
3055 mvn_(IP, o, cond);
3056 adds(rd, rn, Operand(IP), cond);
3057 } else {
3058 LoadDecodableImmediate(IP, value, cond);
3059 subs(rd, rn, Operand(IP), cond);
3060 }
3061 }
3062}
3063
3064void Assembler::AndImmediate(Register rd,
3065 Register rs,
3066 int32_t imm,
3067 Condition cond) {
3068 Operand o;
3069 if (Operand::CanHold(imm, &o)) {
3070 and_(rd, rs, Operand(o), cond);
3071 } else {
3072 LoadImmediate(TMP, imm, cond);
3073 and_(rd, rs, Operand(TMP), cond);
3074 }
3075}
3076
3077void Assembler::AndImmediateSetFlags(Register rd,
3078 Register rs,
3079 int32_t imm,
3080 Condition cond) {
3081 Operand o;
3082 if (Operand::CanHold(imm, &o)) {
3083 ands(rd, rs, Operand(o), cond);
3084 } else {
3085 LoadImmediate(TMP, imm, cond);
3086 ands(rd, rs, Operand(TMP), cond);
3087 }
3088}
3089
3090void Assembler::OrImmediate(Register rd,
3091 Register rs,
3092 int32_t imm,
3093 Condition cond) {
3094 Operand o;
3095 if (Operand::CanHold(imm, &o)) {
3096 orr(rd, rs, Operand(o), cond);
3097 } else {
3098 LoadImmediate(TMP, imm, cond);
3099 orr(rd, rs, Operand(TMP), cond);
3100 }
3101}
3102
3103void Assembler::CompareImmediate(Register rn, int32_t value, Condition cond) {
3104 Operand o;
3105 if (Operand::CanHold(value, &o)) {
3106 cmp(rn, o, cond);
3107 } else {
3108 ASSERT(rn != IP);
3109 LoadImmediate(IP, value, cond);
3110 cmp(rn, Operand(IP), cond);
3111 }
3112}
3113
3114void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) {
3115 Operand o;
3116 if (Operand::CanHold(imm, &o)) {
3117 tst(rn, o, cond);
3118 } else {
3119 LoadImmediate(IP, imm);
3120 tst(rn, Operand(IP), cond);
3121 }
3122}
3123
3124void Assembler::IntegerDivide(Register result,
3125 Register left,
3126 Register right,
3127 DRegister tmpl,
3128 DRegister tmpr) {
3129 ASSERT(tmpl != tmpr);
3130 if (TargetCPUFeatures::integer_division_supported()) {
3131 sdiv(result, left, right);
3132 } else {
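    // No hardware sdiv: convert both operands to doubles, divide in VFP, and
    // truncate the quotient back to an integer.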
3133 SRegister stmpl = EvenSRegisterOf(tmpl);
3134 SRegister stmpr = EvenSRegisterOf(tmpr);
3135 vmovsr(stmpl, left);
3136 vcvtdi(tmpl, stmpl); // left is in tmpl.
3137 vmovsr(stmpr, right);
3138 vcvtdi(tmpr, stmpr); // right is in tmpr.
3139 vdivd(tmpr, tmpl, tmpr);
3140 vcvtid(stmpr, tmpr);
3141 vmovrs(result, stmpr);
3142 }
3143}
3144
3145static int NumRegsBelowFP(RegList regs) {
3146 int count = 0;
3147 for (int i = 0; i < FP; i++) {
3148 if ((regs & (1 << i)) != 0) {
3149 count++;
3150 }
3151 }
3152 return count;
3153}
3154
3155void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
3156 Asr(reg, reg, Operand(shift));
3157}
3158
3159void Assembler::CompareWords(Register reg1,
3160 Register reg2,
3161 intptr_t offset,
3162 Register count,
3163 Register temp,
3164 Label* equals) {
3165 Label loop;
3166
3167 AddImmediate(reg1, offset - kHeapObjectTag);
3168 AddImmediate(reg2, offset - kHeapObjectTag);
3169
3171 Bind(&loop);
3172 BranchIfZero(count, equals, Assembler::kNearJump);
3173 AddImmediate(count, -1);
3174 ldr(temp, Address(reg1, 4, Address::PostIndex));
3175 ldr(TMP, Address(reg2, 4, Address::PostIndex));
3176 cmp(temp, Operand(TMP));
3177 BranchIf(EQUAL, &loop, Assembler::kNearJump);
3178}
3179
3180void Assembler::EnterFrame(RegList regs, intptr_t frame_size) {
3181 if (prologue_offset_ == -1) {
3182 prologue_offset_ = CodeSize();
3183 }
3184 PushList(regs);
3185 if ((regs & (1 << FP)) != 0) {
3186 // Set FP to the saved previous FP.
3187 add(FP, SP, Operand(4 * NumRegsBelowFP(regs)));
3188 }
3189 if (frame_size != 0) {
3190 AddImmediate(SP, -frame_size);
3191 }
3192}
3193
3194void Assembler::LeaveFrame(RegList regs, bool allow_pop_pc) {
3195 ASSERT(allow_pop_pc || (regs & (1 << PC)) == 0); // Must not pop PC.
3196 if ((regs & (1 << FP)) != 0) {
3197 // Use FP to set SP.
3198 sub(SP, FP, Operand(4 * NumRegsBelowFP(regs)));
3199 }
3200 PopList(regs);
3201}
3202
3203void Assembler::Ret(Condition cond /* = AL */) {
3204 READS_RETURN_ADDRESS_FROM_LR(bx(LR, cond));
3205}
3206
3207void Assembler::SetReturnAddress(Register value) {
3208 RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(LR, value));
3209}
3210
3211void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
3212 // Reserve space for arguments and align frame before entering
3213 // the C++ world.
3214 AddImmediate(SP, -frame_space);
3215 if (OS::ActivationFrameAlignment() > 1) {
3216 bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1));
3217 }
3218}
3219
3220void Assembler::EmitEntryFrameVerification(Register scratch) {
3221#if defined(DEBUG)
3222 Label done;
3223 ASSERT(!constant_pool_allowed());
3224 LoadImmediate(scratch, target::frame_layout.exit_link_slot_from_entry_fp *
3225 target::kWordSize);
3226 add(scratch, scratch, Operand(FPREG));
3227 cmp(scratch, Operand(SPREG));
3228 b(&done, EQ);
3229
3230 Breakpoint();
3231
3232 Bind(&done);
3233#endif
3234}
3235
3236void Assembler::CallRuntime(const RuntimeEntry& entry,
3237 intptr_t argument_count) {
3238 ASSERT(!entry.is_leaf());
3239 // Argument count is not checked here, but in the runtime entry for a more
3240 // informative error message.
3241 LoadFromOffset(R9, THR, entry.OffsetFromThread());
3242 LoadImmediate(R4, argument_count);
3243 ldr(IP, Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
3244 blx(IP);
3245}
3246
3247// For use by LR related macros (e.g. CLOBBERS_LR).
3248#undef __
3249#define __ assembler_->
3250
3251#if defined(VFPv3_D32)
3252static const RegisterSet kVolatileFpuRegisters(0, 0xFF0F); // Q0-Q3, Q8-Q15
3253#else
3254static const RegisterSet kVolatileFpuRegisters(0, 0x000F); // Q0-Q3
3255#endif
3256
3257LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
3258 intptr_t frame_size,
3259 bool preserve_registers)
3260 : assembler_(assembler), preserve_registers_(preserve_registers) {
3261 __ Comment("EnterCallRuntimeFrame");
3262 if (preserve_registers) {
3263 // Preserve volatile CPU registers and PP.
3264 SPILLS_LR_TO_FRAME(__ EnterFrame(
3265 kDartVolatileCpuRegs | (1 << PP) | (1 << FP) | (1 << LR), 0));
3266 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
3267
3268 __ PushRegisters(kVolatileFpuRegisters);
3269 } else {
3270 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
3271 // These registers must always be preserved.
3272 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
3273 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
3274 COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
3275 }
3276
3277 __ ReserveAlignedFrameSpace(frame_size);
3278}
3279
3280void LeafRuntimeScope::Call(const RuntimeEntry& entry,
3281 intptr_t argument_count) {
3282 ASSERT(argument_count == entry.argument_count());
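  // Tag the thread with the entry's address for the duration of the native
  // call, then restore the Dart VM tag once it returns.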
3283 __ LoadFromOffset(TMP, THR, entry.OffsetFromThread());
3284 __ str(TMP,
3285 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
3286 __ blx(TMP);
3287 __ LoadImmediate(TMP, VMTag::kDartTagId);
3288 __ str(TMP,
3289 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
3290}
3291
3292LeafRuntimeScope::~LeafRuntimeScope() {
3293 if (preserve_registers_) {
3294 // SP might have been modified to reserve space for arguments
3295 // and ensure proper alignment of the stack frame.
3296 // We need to restore it before restoring registers.
3297 const intptr_t kPushedFpuRegisterSize =
3298 kVolatileFpuRegisters.FpuRegisterCount() * kFpuRegisterSize;
3299
3300 COMPILE_ASSERT(PP < FP);
3301 COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
3302 // kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile,
3303 // it is pushed ahead of FP.
3304 const intptr_t kPushedRegistersSize =
3305 kDartVolatileCpuRegCount * target::kWordSize + kPushedFpuRegisterSize;
3306 __ AddImmediate(SP, FP, -kPushedRegistersSize);
3307
3308 __ PopRegisters(kVolatileFpuRegisters);
3309
3310 // Restore volatile CPU registers.
3311 RESTORES_LR_FROM_FRAME(__ LeaveFrame(kDartVolatileCpuRegs | (1 << PP) |
3312 (1 << FP) | (1 << LR)));
3313 } else {
3314 RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR)));
3315 }
3316}
3317
3318// For use by LR related macros (e.g. CLOBBERS_LR).
3319#undef __
3320#define __ this->
3321
3322void Assembler::EnterDartFrame(intptr_t frame_size, bool load_pool_pointer) {
3323 ASSERT(!constant_pool_allowed());
3324
3325 // Registers are pushed in descending order: R5 | R6 | R7/R11 | R14.
3329
3330 if (!FLAG_precompiled_mode) {
3331 SPILLS_LR_TO_FRAME(
3332 EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0));
3333
3334 // Setup pool pointer for this dart function.
3335 if (load_pool_pointer) LoadPoolPointer();
3336 } else {
3337 SPILLS_LR_TO_FRAME(EnterFrame((1 << FP) | (1 << LR), 0));
3338 }
3339 set_constant_pool_allowed(true);
3340
3341 // Reserve space for locals.
3342 AddImmediate(SP, -frame_size);
3343}
3344
3345// On entry to a function compiled for OSR, the caller's frame pointer, the
3346// stack locals, and any copied parameters are already in place. The frame
3347// pointer is already set up. The PC marker is not correct for the
3348// optimized function and there may be extra space for spill slots to
3349// allocate. We must also set up the pool pointer for the function.
3350void Assembler::EnterOsrFrame(intptr_t extra_size) {
3351 ASSERT(!constant_pool_allowed());
3352 Comment("EnterOsrFrame");
3353 RestoreCodePointer();
3354 LoadPoolPointer();
3355
3356 AddImmediate(SP, -extra_size);
3357}
3358
3359void Assembler::LeaveDartFrame() {
3360 if (!FLAG_precompiled_mode) {
3361 ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
3362 target::kWordSize));
3363 }
3364 set_constant_pool_allowed(false);
3365
3366 // This will implicitly drop saved PP, PC marker due to restoring SP from FP
3367 // first.
3368 RESTORES_LR_FROM_FRAME(LeaveFrame((1 << FP) | (1 << LR)));
3369}
3370
3371void Assembler::LeaveDartFrameAndReturn() {
3372 if (!FLAG_precompiled_mode) {
3373 ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
3374 target::kWordSize));
3375 }
3376 set_constant_pool_allowed(false);
3377
3378 // This will implicitly drop saved PP, PC marker due to restoring SP from FP
3379 // first.
3380 LeaveFrame((1 << FP) | (1 << PC), /*allow_pop_pc=*/true);
3381}
3382
3383void Assembler::EnterStubFrame() {
3384 EnterDartFrame(0);
3385}
3386
3387void Assembler::LeaveStubFrame() {
3388 LeaveDartFrame();
3389}
3390
3391void Assembler::EnterCFrame(intptr_t frame_space) {
3392 // Already saved.
3393 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
3394 COMPILE_ASSERT(IsCalleeSavedRegister(PP));
3395 COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
3396 EnterFrame(1 << FP, 0);
3396 EnterFrame(1 << FP, 0);
3397 ReserveAlignedFrameSpace(frame_space);
3398}
3399
3400void Assembler::LeaveCFrame() {
3401 LeaveFrame(1 << FP);
3402}
3403
3404// R0 receiver, R9 ICData entries array
3405// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
3406void Assembler::MonomorphicCheckedEntryJIT() {
3407 has_monomorphic_entry_ = true;
3408#if defined(TESTING) || defined(DEBUG)
3409 bool saved_use_far_branches = use_far_branches();
3410 set_use_far_branches(false);
3411#endif
3412 intptr_t start = CodeSize();
3413
3414 Comment("MonomorphicCheckedEntry");
3415 ASSERT_EQUAL(CodeSize() - start,
3416 target::Instructions::kMonomorphicEntryOffsetJIT);
3417
3418 const intptr_t cid_offset = target::Array::element_offset(0);
3419 const intptr_t count_offset = target::Array::element_offset(1);
3420
3421 // Sadly this cannot use ldm because ldm takes no offset.
3422 ldr(R1, FieldAddress(R9, cid_offset));
3423 ldr(R2, FieldAddress(R9, count_offset));
3424 LoadClassIdMayBeSmi(IP, R0);
3425 add(R2, R2, Operand(target::ToRawSmi(1)));
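  // IP holds the receiver's raw class id; LSL #1 Smi-tags it so it can be
  // compared against the cached cid, which is stored as a Smi.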
3426 cmp(R1, Operand(IP, LSL, 1));
3427 Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);
3428 str(R2, FieldAddress(R9, count_offset));
3429 LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction.
3430
3431 // Fall through to unchecked entry.
3432 ASSERT_EQUAL(CodeSize() - start,
3433 target::Instructions::kPolymorphicEntryOffsetJIT);
3434
3435#if defined(TESTING) || defined(DEBUG)
3436 set_use_far_branches(saved_use_far_branches);
3437#endif
3438}
3439
3440// R0 receiver, R9 guarded cid as Smi.
3441// Preserve R4 (ARGS_DESC_REG), not required today, but maybe later.
3442void Assembler::MonomorphicCheckedEntryAOT() {
3443 has_monomorphic_entry_ = true;
3444#if defined(TESTING) || defined(DEBUG)
3445 bool saved_use_far_branches = use_far_branches();
3446 set_use_far_branches(false);
3447#endif
3448 intptr_t start = CodeSize();
3449
3450 Comment("MonomorphicCheckedEntry");
3451 ASSERT_EQUAL(CodeSize() - start,
3452 target::Instructions::kMonomorphicEntryOffsetAOT);
3453
3454 LoadClassId(IP, R0);
3455 cmp(R9, Operand(IP, LSL, 1));
3456 Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);
3457
3458 // Fall through to unchecked entry.
3459 ASSERT_EQUAL(CodeSize() - start,
3460 target::Instructions::kPolymorphicEntryOffsetAOT);
3461
3462#if defined(TESTING) || defined(DEBUG)
3463 set_use_far_branches(saved_use_far_branches);
3464#endif
3465}
3466
3467void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
3468 has_monomorphic_entry_ = true;
3469 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
3470 bkpt(0);
3471 }
3472 b(label);
3473 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
3474 bkpt(0);
3475 }
3476}
3477
3478void Assembler::CombineHashes(Register hash, Register other) {
3479 // hash += other_hash
3480 add(hash, hash, Operand(other));
3481 // hash += hash << 10
3482 add(hash, hash, Operand(hash, LSL, 10));
3483 // hash ^= hash >> 6
3484 eor(hash, hash, Operand(hash, LSR, 6));
3485}
3486
3487void Assembler::FinalizeHashForSize(intptr_t bit_size,
3488 Register hash,
3489 Register scratch) {
3490 ASSERT(bit_size > 0); // Can't avoid returning 0 if there are no hash bits!
3491 // While any 32-bit hash value fits in X bits, where X > 32, the caller may
3492 // reasonably expect that the returned values fill the entire bit space.
3493 ASSERT(bit_size <= kBitsPerInt32);
3494 // hash += hash << 3;
3495 add(hash, hash, Operand(hash, LSL, 3));
3496 // hash ^= hash >> 11; // Logical shift, unsigned hash.
3497 eor(hash, hash, Operand(hash, LSR, 11));
3498 // hash += hash << 15;
3499 adds(hash, hash, Operand(hash, LSL, 15));
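  // The final "adds" sets the flags so the masking and zero check below can
  // use conditional execution instead of an explicit compare.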
3500 if (bit_size < kBitsPerInt32) {
3501 // Size to fit.
3502 AndImmediateSetFlags(hash, hash, Utils::NBitMask(bit_size), NOT_ZERO);
3503 }
3504 // return (hash == 0) ? 1 : hash;
3505 LoadImmediate(hash, 1, ZERO);
3506}
3507
3508#ifndef PRODUCT
3509void Assembler::MaybeTraceAllocation(Register stats_addr_reg, Label* trace) {
3510 ASSERT(stats_addr_reg != kNoRegister);
3511 ASSERT(stats_addr_reg != TMP);
3512 ldrb(TMP, Address(stats_addr_reg, 0));
3513 cmp(TMP, Operand(0));
3514 b(trace, NE);
3515}
3516
3517void Assembler::MaybeTraceAllocation(intptr_t cid,
3518 Label* trace,
3519 Register temp_reg,
3520 JumpDistance distance) {
3521 LoadAllocationTracingStateAddress(temp_reg, cid);
3522 MaybeTraceAllocation(temp_reg, trace);
3523}
3524
3525void Assembler::MaybeTraceAllocation(Register cid,
3526 Label* trace,
3527 Register temp_reg,
3528 JumpDistance distance) {
3529 LoadAllocationTracingStateAddress(temp_reg, cid);
3530 MaybeTraceAllocation(temp_reg, trace);
3531}
3532
3533void Assembler::LoadAllocationTracingStateAddress(Register dest, Register cid) {
3534 ASSERT(dest != kNoRegister);
3535 ASSERT(dest != TMP);
3536
3537 LoadIsolateGroup(dest);
3538 ldr(dest, Address(dest, target::IsolateGroup::class_table_offset()));
3539 ldr(dest,
3540 Address(dest,
3541 target::ClassTable::allocation_tracing_state_table_offset()));
3542 AddScaled(dest, dest, cid, TIMES_1,
3543 target::ClassTable::AllocationTracingStateSlotOffsetFor(0));
3544}
3545
3546void Assembler::LoadAllocationTracingStateAddress(Register dest, intptr_t cid) {
3547 ASSERT(dest != kNoRegister);
3548 ASSERT(dest != TMP);
3549 ASSERT(cid > 0);
3550
3551 LoadIsolateGroup(dest);
3552 ldr(dest, Address(dest, target::IsolateGroup::class_table_offset()));
3553 ldr(dest,
3554 Address(dest,
3555 target::ClassTable::allocation_tracing_state_table_offset()));
3556 AddImmediate(dest,
3557 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid));
3558}
3559#endif // !PRODUCT
3560
3561void Assembler::TryAllocateObject(intptr_t cid,
3562 intptr_t instance_size,
3563 Label* failure,
3564 JumpDistance distance,
3565 Register instance_reg,
3566 Register temp_reg) {
3567 ASSERT(failure != nullptr);
3568 ASSERT(instance_reg != kNoRegister);
3569 ASSERT(instance_reg != temp_reg);
3570 ASSERT(instance_reg != IP);
3571 ASSERT(temp_reg != kNoRegister);
3572 ASSERT(temp_reg != IP);
3573 ASSERT(instance_size != 0);
3574 ASSERT(Utils::IsAligned(instance_size,
3575 target::ObjectAlignment::kObjectAlignment));
3576 if (FLAG_inline_alloc &&
3577 target::Heap::IsAllocatableInNewSpace(instance_size)) {
3578 ldr(instance_reg, Address(THR, target::Thread::top_offset()));
3579 // TODO(koda): Protect against unsigned overflow here.
3580 AddImmediate(instance_reg, instance_size);
3581 // instance_reg: potential top (next object start).
3582 ldr(IP, Address(THR, target::Thread::end_offset()));
3583 cmp(IP, Operand(instance_reg));
3584 // fail if heap end unsigned less than or equal to new heap top.
3585 b(failure, LS);
3586 CheckAllocationCanary(instance_reg, temp_reg);
3587
3588 // If this allocation is traced, program will jump to failure path
3589 // (i.e. the allocation stub) which will allocate the object and trace the
3590 // allocation call site.
3591 NOT_IN_PRODUCT(LoadAllocationTracingStateAddress(temp_reg, cid));
3592 NOT_IN_PRODUCT(MaybeTraceAllocation(temp_reg, failure));
3593
3594 // Successfully allocated the object, now update top to point to the
3595 // next object start and store the tag word (class id and size) in the header.
3596 str(instance_reg, Address(THR, target::Thread::top_offset()));
3597 // Move instance_reg back to the start of the object and tag it.
3598 AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
3599
3600 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
3601 LoadImmediate(temp_reg, tags);
3602 str(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
3603 } else {
3604 b(failure);
3605 }
3606}
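TryAllocateObject implements the new-space bump-pointer fast path. The following C++ sketch models what the emitted code does under assumed field names (a thread-local top/end pair); it is illustrative only, not the VM's allocator.

#include <cstddef>
#include <cstdint>

// Sketch: bump-pointer fast path modeled in C++ (assumed layout, not the VM's).
struct ThreadModel {
  uintptr_t top;  // next free address in new space
  uintptr_t end;  // first address past the usable new space
};

bool TryBumpAllocate(ThreadModel* thr, size_t instance_size,
                     uintptr_t* tagged_object_out) {
  const uintptr_t start = thr->top;
  const uintptr_t new_top = start + instance_size;
  if (thr->end <= new_top) {
    return false;  // take the failure path (the allocation stub) instead
  }
  thr->top = new_top;              // publish the new top
  *tagged_object_out = start + 1;  // kHeapObjectTag == 1 on Dart heaps
  return true;  // caller still writes the tag word into the object header
}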
3607
3608void Assembler::TryAllocateArray(intptr_t cid,
3609 intptr_t instance_size,
3610 Label* failure,
3611 Register instance,
3612 Register end_address,
3613 Register temp1,
3614 Register temp2) {
3615 if (FLAG_inline_alloc &&
3616 target::Heap::IsAllocatableInNewSpace(instance_size)) {
3617 NOT_IN_PRODUCT(LoadAllocationTracingStateAddress(temp1, cid));
3618 // Potential new object start.
3619 ldr(instance, Address(THR, target::Thread::top_offset()));
3620 AddImmediateSetFlags(end_address, instance, instance_size);
3621 b(failure, CS); // Branch if unsigned overflow.
3622
3623 // Check if the allocation fits into the remaining space.
3624 // instance: potential new object start.
3625 // end_address: potential next object start.
3626 ldr(temp2, Address(THR, target::Thread::end_offset()));
3627 cmp(end_address, Operand(temp2));
3628 b(failure, CS);
3629 CheckAllocationCanary(instance, temp2);
3630
3631 // If this allocation is traced, the program will jump to the failure path
3632 // (i.e. the allocation stub), which will allocate the object and trace the
3633 // allocation call site.
3634 NOT_IN_PRODUCT(MaybeTraceAllocation(temp1, failure));
3635
3636 // Successfully allocated the object(s), now update top to point to
3637 // next object start and initialize the object.
3638 str(end_address, Address(THR, target::Thread::top_offset()));
3639 add(instance, instance, Operand(kHeapObjectTag));
3640
3641 // Initialize the tags.
3642 // instance: new object start as a tagged pointer.
3643 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
3644 LoadImmediate(temp2, tags);
3645 str(temp2,
3646 FieldAddress(instance, target::Object::tags_offset())); // Store tags.
3647 } else {
3648 b(failure);
3649 }
3650}
3651
3652void Assembler::CopyMemoryWords(Register src,
3653 Register dst,
3654 Register size,
3655 Register temp) {
3656 Label loop, done;
3657 __ cmp(size, Operand(0));
3658 __ b(&done, EQUAL);
3659 __ Bind(&loop);
3660 __ ldr(temp, Address(src, target::kWordSize, Address::PostIndex));
3661 __ str(temp, Address(dst, target::kWordSize, Address::PostIndex));
3662 __ subs(size, size, Operand(target::kWordSize));
3663 __ b(&loop, NOT_ZERO);
3664 __ Bind(&done);
3665}
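CopyMemoryWords emits a word-granular copy loop using post-indexed loads and stores. In plain C++ it is equivalent to the following sketch (size is in bytes and must be a multiple of the word size):

#include <cstddef>
#include <cstdint>

// Sketch: C++ equivalent of the CopyMemoryWords loop above.
void CopyMemoryWordsModel(const uintptr_t* src, uintptr_t* dst, size_t size) {
  while (size != 0) {            // cmp #0 / b EQUAL skips an empty copy
    *dst++ = *src++;             // ldr/str with post-increment
    size -= sizeof(uintptr_t);   // subs; loop while NOT_ZERO
  }
}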
3666
3667void Assembler::GenerateUnRelocatedPcRelativeCall(Condition cond,
3668 intptr_t offset_into_target) {
3669 // Emit "bl.cond <offset>".
3670 EmitType5(cond, 0x686868, /*link=*/true);
3671
3672 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
3673 PcRelativeCallPattern::kLengthInBytes);
3674 pattern.set_distance(offset_into_target);
3675}
3676
3677void Assembler::GenerateUnRelocatedPcRelativeTailCall(
3678 Condition cond,
3679 intptr_t offset_into_target) {
3680 // Emit "b <offset>".
3681 EmitType5(cond, 0x686868, /*link=*/false);
3682
3683 PcRelativeTailCallPattern pattern(buffer_.contents() + buffer_.Size() -
3684 PcRelativeTailCallPattern::kLengthInBytes);
3685 pattern.set_distance(offset_into_target);
3686}
3687
3688bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
3689 bool is_load,
3690 bool is_external,
3691 intptr_t cid,
3692 intptr_t index_scale,
3693 bool* needs_base) {
3694 ASSERT(needs_base != nullptr);
3695 auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
3696 if ((rep == kUnboxedInt32x4) || (rep == kUnboxedFloat32x4) ||
3697 (rep == kUnboxedFloat64x2)) {
3698 // We are using vldmd/vstmd, which do not support an immediate offset.
3699 return false;
3700 }
3701
3702 if (!IsSafeSmi(constant)) return false;
3703 const int64_t index = target::SmiValue(constant);
3704 const intptr_t offset_base =
3705 (is_external ? 0
3706 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
3707 const int64_t offset = index * index_scale + offset_base;
3708 if (!Utils::IsInt(32, offset)) return false;
3709 if (Address::CanHoldImmediateOffset(is_load, cid, offset)) {
3710 *needs_base = false;
3711 return true;
3712 }
3713 if (Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base)) {
3714 *needs_base = true;
3715 return true;
3716 }
3717
3718 return false;
3719}
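AddressCanHoldConstantIndex decides whether a constant element index can be folded into an ARM addressing-mode immediate; when only the data-relative part fits, it reports that the caller must first bias the array pointer. A compact C++ sketch of that decision, skipping the SIMD and Smi preconditions handled earlier in the function and abstracting the encodability test (Address::CanHoldImmediateOffset in the real code) behind a callback:

#include <cstdint>
#include <functional>

// Sketch: the fits-into-an-immediate decision. The addressing-mode test is
// passed in because it depends on the class id and on load vs. store.
bool ConstantIndexFitsModel(int64_t index, intptr_t index_scale,
                            int64_t offset_base,
                            const std::function<bool(int64_t)>& can_encode,
                            bool* needs_base) {
  const int64_t offset = index * index_scale + offset_base;
  if (offset != static_cast<int32_t>(offset)) return false;  // 32-bit range
  if (can_encode(offset)) {
    *needs_base = false;  // address the element directly off the array register
    return true;
  }
  if (can_encode(offset - offset_base)) {
    *needs_base = true;   // caller materializes array + offset_base first
    return true;
  }
  return false;
}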
3720
3721Address Assembler::ElementAddressForIntIndex(bool is_load,
3722 bool is_external,
3723 intptr_t cid,
3724 intptr_t index_scale,
3725 Register array,
3726 intptr_t index,
3727 Register temp) {
3728 const int64_t offset_base =
3729 (is_external ? 0
3730 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
3731 const int64_t offset =
3732 offset_base + static_cast<int64_t>(index) * index_scale;
3733 ASSERT(Utils::IsInt(32, offset));
3734
3735 if (Address::CanHoldImmediateOffset(is_load, cid, offset)) {
3736 return Address(array, static_cast<int32_t>(offset));
3737 } else {
3738 ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base));
3739 AddImmediate(temp, array, static_cast<int32_t>(offset_base));
3740 return Address(temp, static_cast<int32_t>(offset - offset_base));
3741 }
3742}
3743
3744void Assembler::LoadElementAddressForIntIndex(Register address,
3745 bool is_load,
3746 bool is_external,
3747 intptr_t cid,
3748 intptr_t index_scale,
3749 Register array,
3750 intptr_t index) {
3751 const int64_t offset_base =
3752 (is_external ? 0
3753 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
3754 const int64_t offset =
3755 offset_base + static_cast<int64_t>(index) * index_scale;
3756 ASSERT(Utils::IsInt(32, offset));
3757 AddImmediate(address, array, offset);
3758}
3759
3760Address Assembler::ElementAddressForRegIndex(bool is_load,
3761 bool is_external,
3762 intptr_t cid,
3763 intptr_t index_scale,
3764 bool index_unboxed,
3765 Register array,
3766 Register index) {
3767 // Unless unboxed, the index is expected to be smi-tagged (i.e. LSL 1) for all arrays.
3768 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
3769 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
3770 int32_t offset =
3771 is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
3772 const OperandSize size = Address::OperandSizeFor(cid);
3773 ASSERT(array != IP);
3774 ASSERT(index != IP);
3775 const Register base = is_load ? IP : index;
3776 if ((offset != 0) || (is_load && (size == kByte || size == kUnsignedByte)) ||
3777 (size == kTwoBytes) || (size == kUnsignedTwoBytes) || (size == kSWord) ||
3778 (size == kDWord) || (size == kRegList)) {
3779 if (shift < 0) {
3780 ASSERT(shift == -1);
3781 add(base, array, Operand(index, ASR, 1));
3782 } else {
3783 add(base, array, Operand(index, LSL, shift));
3784 }
3785 } else {
3786 if (shift < 0) {
3787 ASSERT(shift == -1);
3788 return Address(array, index, ASR, 1);
3789 } else {
3790 return Address(array, index, LSL, shift);
3791 }
3792 }
3793 int32_t offset_mask = 0;
3794 if ((is_load && !Address::CanHoldLoadOffset(size, offset, &offset_mask)) ||
3795 (!is_load && !Address::CanHoldStoreOffset(size, offset, &offset_mask))) {
3796 AddImmediate(base, offset & ~offset_mask);
3797 offset = offset & offset_mask;
3798 }
3799 return Address(base, offset);
3800}
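The register-index variants compensate for Smi tagging: a boxed index register holds its value shifted left by one, so the net shift applied is log2(index_scale) - 1, which can be negative (ASR #1) when the scale is 1. The arithmetic, as an editor's sketch in C++ with the data offset passed in:

#include <cstdint>

// Sketch: effective element address computed by ElementAddressForRegIndex.
// `index` is the raw register contents; when boxed it holds (value << 1).
uintptr_t ElementAddressModel(uintptr_t array, intptr_t index,
                              intptr_t index_scale_log2, bool index_unboxed,
                              intptr_t data_offset_minus_tag) {
  const intptr_t shift = index_scale_log2 - (index_unboxed ? 0 : 1);
  const intptr_t scaled =
      (shift < 0) ? (index >> 1)       // ASR #1: undo the Smi tag
                  : (index << shift);  // LSL #shift
  return array + scaled + data_offset_minus_tag;
}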
3801
3802void Assembler::LoadElementAddressForRegIndex(Register address,
3803 bool is_load,
3804 bool is_external,
3805 intptr_t cid,
3806 intptr_t index_scale,
3807 bool index_unboxed,
3808 Register array,
3809 Register index) {
3810 // Unless unboxed, the index is expected to be smi-tagged (i.e. LSL 1) for all arrays.
3811 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
3812 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
3813 int32_t offset =
3814 is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
3815 if (shift < 0) {
3816 ASSERT(shift == -1);
3817 add(address, array, Operand(index, ASR, 1));
3818 } else {
3819 add(address, array, Operand(index, LSL, shift));
3820 }
3821 if (offset != 0) {
3822 AddImmediate(address, offset);
3823 }
3824}
3825
3826void Assembler::LoadStaticFieldAddress(Register address,
3827 Register field,
3828 Register scratch,
3829 bool is_shared) {
3830 LoadFieldFromOffset(scratch, field,
3831 target::Field::host_offset_or_field_id_offset());
3832 const intptr_t field_table_offset =
3833 is_shared ? compiler::target::Thread::shared_field_table_values_offset()
3834 : compiler::target::Thread::field_table_values_offset();
3835 LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
3836 add(address, address,
3837 Operand(scratch, LSL, target::kWordSizeLog2 - kSmiTagShift));
3838}
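LoadStaticFieldAddress resolves a static field's slot: it loads the field's id (a Smi) from the Field object, picks the per-isolate-group or shared field table off the Thread, and indexes it by the untagged id. Modeled in C++ under assumed parameter names, for illustration only:

#include <cstdint>

// Sketch: static field slot lookup modeled in C++.
uintptr_t* StaticFieldAddressModel(uintptr_t* field_table_values,
                                   intptr_t field_id_as_smi) {
  // The Smi id is (field_id << 1); shifting by kWordSizeLog2 - kSmiTagShift
  // in the assembly turns it directly into a byte offset of field_id words.
  const intptr_t field_id = field_id_as_smi >> 1;
  return field_table_values + field_id;  // one word-sized slot per field id
}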
3839
3840void Assembler::LoadFieldAddressForRegOffset(Register address,
3841 Register instance,
3842 Register offset_in_words_as_smi) {
3843 add(address, instance,
3844 Operand(offset_in_words_as_smi, LSL,
3845 target::kWordSizeLog2 - kSmiTagShift));
3846 AddImmediate(address, -kHeapObjectTag);
3847}
3848
3849void Assembler::LoadHalfWordUnaligned(Register dst,
3850 Register addr,
3851 Register tmp) {
3852 ASSERT(dst != addr);
3853 ldrb(dst, Address(addr, 0));
3854 ldrsb(tmp, Address(addr, 1));
3855 orr(dst, dst, Operand(tmp, LSL, 8));
3856}
3857
3858void Assembler::LoadHalfWordUnsignedUnaligned(Register dst,
3859 Register addr,
3860 Register tmp) {
3861 ASSERT(dst != addr);
3862 ldrb(dst, Address(addr, 0));
3863 ldrb(tmp, Address(addr, 1));
3864 orr(dst, dst, Operand(tmp, LSL, 8));
3865}
3866
3867void Assembler::StoreHalfWordUnaligned(Register src,
3868 Register addr,
3869 Register tmp) {
3870 strb(src, Address(addr, 0));
3871 Lsr(tmp, src, Operand(8));
3872 strb(tmp, Address(addr, 1));
3873}
3874
3875void Assembler::LoadWordUnaligned(Register dst, Register addr, Register tmp) {
3876 ASSERT(dst != addr);
3877 ldrb(dst, Address(addr, 0));
3878 ldrb(tmp, Address(addr, 1));
3879 orr(dst, dst, Operand(tmp, LSL, 8));
3880 ldrb(tmp, Address(addr, 2));
3881 orr(dst, dst, Operand(tmp, LSL, 16));
3882 ldrb(tmp, Address(addr, 3));
3883 orr(dst, dst, Operand(tmp, LSL, 24));
3884}
3885
3886void Assembler::StoreWordUnaligned(Register src, Register addr, Register tmp) {
3887 strb(src, Address(addr, 0));
3888 Lsr(tmp, src, Operand(8));
3889 strb(tmp, Address(addr, 1));
3890 Lsr(tmp, src, Operand(16));
3891 strb(tmp, Address(addr, 2));
3892 Lsr(tmp, src, Operand(24));
3893 strb(tmp, Address(addr, 3));
3894}
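The unaligned helpers assemble or disassemble a value one byte at a time so they work at any address, matching the little-endian layout the ARM code assumes. For example, LoadWordUnaligned and StoreWordUnaligned above are byte-wise equivalents of the following sketch:

#include <cstdint>

// Sketch: byte-wise unaligned 32-bit load/store, mirroring the helpers above.
uint32_t LoadWordUnalignedModel(const uint8_t* addr) {
  return static_cast<uint32_t>(addr[0]) |
         (static_cast<uint32_t>(addr[1]) << 8) |
         (static_cast<uint32_t>(addr[2]) << 16) |
         (static_cast<uint32_t>(addr[3]) << 24);
}

void StoreWordUnalignedModel(uint32_t value, uint8_t* addr) {
  addr[0] = static_cast<uint8_t>(value);
  addr[1] = static_cast<uint8_t>(value >> 8);
  addr[2] = static_cast<uint8_t>(value >> 16);
  addr[3] = static_cast<uint8_t>(value >> 24);
}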
3895
3896void Assembler::RangeCheck(Register value,
3897 Register temp,
3898 intptr_t low,
3899 intptr_t high,
3900 RangeCheckCondition condition,
3901 Label* target) {
3902 auto cc = condition == kIfInRange ? LS : HI;
3903 Register to_check = temp != kNoRegister ? temp : value;
3904 AddImmediate(to_check, value, -low);
3905 CompareImmediate(to_check, high - low);
3906 b(target, cc);
3907}
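RangeCheck uses the classic unsigned-compare trick: after subtracting `low`, a single unsigned comparison against `high - low` tells whether the original value lay inside [low, high], since anything below `low` wraps around to a large unsigned number. The same idea as a C++ sketch:

#include <cstdint>

// Sketch: the single-compare range check emitted by RangeCheck above.
// Returns true when value is in [low, high] inclusive (the LS branch).
bool InRangeModel(intptr_t value, intptr_t low, intptr_t high) {
  return (static_cast<uintptr_t>(value) - static_cast<uintptr_t>(low)) <=
         static_cast<uintptr_t>(high - low);
}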
3908
3909} // namespace compiler
3910} // namespace dart
3911
3912#endif // defined(TARGET_ARCH_ARM)