instructions_riscv.cc
// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV*.
#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)

#include "vm/instructions.h"
#include "vm/instructions_riscv.h"

#include "vm/constants.h"
#include "vm/cpu.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/reverse_pc_lookup_cache.h"

namespace dart {

static bool IsJumpAndLinkScratch(Register reg) {
  return reg == (FLAG_precompiled_mode ? TMP : CODE_REG);
}

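// The call patterns below are matched backwards from the return address.
// Every pattern ends with the 2-byte compressed call `c.jalr ra`, which
// encodes as 0x9082 (funct4=0b1001, rs1=ra/x1, rs2=x0, op=0b10); the pool
// loads that precede it are then decoded in reverse order.
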
CallPattern::CallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      target_code_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // R is either CODE_REG (JIT) or TMP (AOT).
  //   [lui,add,]lx R, ##(pp)
  //   xxxxxxxx     lx ra, ##(R)
  //       xxxx     jalr ra

  // Last instruction: jalr ra.
  ASSERT(*reinterpret_cast<uint16_t*>(pc - 2) == 0x9082);
  Register reg;
  InstructionPattern::DecodeLoadWordFromPool(pc - 6, &reg,
                                             &target_code_pool_index_);
  ASSERT(IsJumpAndLinkScratch(reg));
}

ICCallPattern::ICCallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      target_pool_index_(-1),
      data_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // R is either CODE_REG (JIT) or TMP (AOT).
  //   [lui,add,]lx IC_DATA_REG, ##(pp)
  //   [lui,add,]lx R, ##(pp)
  //   xxxxxxxx     lx ra, ##(R)
  //       xxxx     jalr ra

  // Last instruction: jalr ra.
  ASSERT(*reinterpret_cast<uint16_t*>(pc - 2) == 0x9082);

  Register reg;
  uword data_load_end = InstructionPattern::DecodeLoadWordFromPool(
      pc - 6, &reg, &target_pool_index_);
  ASSERT(IsJumpAndLinkScratch(reg));

  InstructionPattern::DecodeLoadWordFromPool(data_load_end, &reg,
                                             &data_pool_index_);
  ASSERT(reg == IC_DATA_REG);
}

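// The IC data load precedes the target load in the instruction stream, so the
// decoder works backwards: it first decodes the target load ending at pc - 6,
// then decodes the IC data load that ends where the target load begins.
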
NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      end_(pc),
      native_function_pool_index_(-1),
      target_code_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // R is either CODE_REG (JIT) or TMP (AOT).
  //   [lui,add,]lx t5, ##(pp)
  //   [lui,add,]lx R, ##(pp)
  //   xxxxxxxx     lx ra, ##(R)
  //       xxxx     jalr ra

  // Last instruction: jalr ra.
  ASSERT(*reinterpret_cast<uint16_t*>(pc - 2) == 0x9082);

  Register reg;
  uword native_function_load_end = InstructionPattern::DecodeLoadWordFromPool(
      pc - 6, &reg, &target_code_pool_index_);
  ASSERT(IsJumpAndLinkScratch(reg));
  InstructionPattern::DecodeLoadWordFromPool(native_function_load_end, &reg,
                                             &native_function_pool_index_);
  ASSERT(reg == T5);
}

CodePtr NativeCallPattern::target() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
}

void NativeCallPattern::set_target(const Code& target) const {
  object_pool_.SetObjectAt(target_code_pool_index_, target);
  // No need to flush the instruction cache, since the code is not modified.
}

NativeFunction NativeCallPattern::native_function() const {
  return reinterpret_cast<NativeFunction>(
      object_pool_.RawValueAt(native_function_pool_index_));
}

void NativeCallPattern::set_native_function(NativeFunction func) const {
  object_pool_.SetRawValueAt(native_function_pool_index_,
                             reinterpret_cast<uword>(func));
}

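// Patching a native call rewrites object pool slots rather than the
// instruction stream, so it is a plain data store: no instruction cache
// flush is ever required.
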
// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the loaded immediate value in the output parameters 'reg' and 'value'
// respectively.
uword InstructionPattern::DecodeLoadWordImmediate(uword end,
                                                  Register* reg,
                                                  intptr_t* value) {
  UNIMPLEMENTED();
  return 0;
}

static bool DecodeLoadX(uword end,
                        Register* dst,
                        Register* base,
                        intptr_t* offset,
                        intptr_t* length) {
  Instr instr(LoadUnaligned(reinterpret_cast<uint32_t*>(end - 4)));
#if XLEN == 32
  if (instr.opcode() == LOAD && instr.funct3() == LW) {
#elif XLEN == 64
  if (instr.opcode() == LOAD && instr.funct3() == LD) {
#endif
    *dst = instr.rd();
    *base = instr.rs1();
    *offset = instr.itype_imm();
    *length = 4;
    return true;
  }

  CInstr cinstr(*reinterpret_cast<uint16_t*>(end - 2));
#if XLEN == 32
  if (cinstr.opcode() == C_LW) {
#elif XLEN == 64
  if (cinstr.opcode() == C_LD) {
#endif
    *dst = cinstr.rdp();
    *base = cinstr.rs1p();
#if XLEN == 32
    *offset = cinstr.mem4_imm();
#elif XLEN == 64
    *offset = cinstr.mem8_imm();
#endif
    *length = 2;
    return true;
  }

  return false;
}

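// 'lx' in the patterns above denotes the word-sized load: lw on RV32 or ld on
// RV64. Each pattern slot may hold either the 4-byte base encoding or its
// 2-byte compressed form, so the decoder first tries a full-width load ending
// at 'end' and then falls back to a compressed load in the final two bytes.
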
static bool DecodeLUI(uword end,
                      Register* dst,
                      intptr_t* imm,
                      intptr_t* length) {
  Instr instr(LoadUnaligned(reinterpret_cast<uint32_t*>(end - 4)));
  if (instr.opcode() == LUI) {
    *dst = instr.rd();
    *imm = instr.utype_imm();
    *length = 4;
    return true;
  }

  CInstr cinstr(*reinterpret_cast<uint16_t*>(end - 2));
  if (cinstr.opcode() == C_LUI) {
    *dst = cinstr.rd();
    *imm = cinstr.u_imm();
    *length = 2;
    return true;
  }

  return false;
}

// See comment in instructions_arm64.h
uword InstructionPattern::DecodeLoadWordFromPool(uword end,
                                                 Register* reg,
                                                 intptr_t* index) {
  //   [c.]lx dst, offset(pp)
  // or
  //   [c.]lui dst, hi
  //   c.add dst, dst, pp
  //   [c.]lx dst, lo(dst)

  Register base;
  intptr_t lo, length;
  if (!DecodeLoadX(end, reg, &base, &lo, &length)) {
    UNREACHABLE();
  }

  if (base == PP) {
    // PP is untagged on RISC-V.
    *index = ObjectPool::IndexFromOffset(lo - kHeapObjectTag);
    return end - length;
  }
  ASSERT(base == *reg);
  end -= length;

  CInstr add_instr(*reinterpret_cast<uint16_t*>(end - 2));
  ASSERT(add_instr.opcode() ==
         C_MV);  // Not C_ADD, which extends past the opcode proper.
  ASSERT(add_instr.rd() == base);
  ASSERT(add_instr.rs1() == base);
  ASSERT(add_instr.rs2() == PP);
  end -= 2;

  Register dst;
  intptr_t hi;
  if (!DecodeLUI(end, &dst, &hi, &length)) {
    UNREACHABLE();
  }
  ASSERT(dst == base);
  // PP is untagged on RISC-V.
  *index = ObjectPool::IndexFromOffset(hi + lo - kHeapObjectTag);
  return end - length;
}

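// ObjectPool::IndexFromOffset expects an offset relative to a tagged pool
// pointer, but PP holds the untagged pool address on RISC-V (plausibly so
// that compressed loads, whose offsets are scaled and word-aligned, can reach
// pool entries). Subtracting kHeapObjectTag converts the untagged offset into
// the form IndexFromOffset expects.
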
bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
  ASSERT(code.ContainsInstructionAt(pc));
  uint16_t parcel = *reinterpret_cast<uint16_t*>(pc);
  if (IsCInstruction(parcel)) {
    CInstr instr(parcel);
#if XLEN == 32
    if (instr.opcode() == C_LW) {
      intptr_t offset = instr.mem4_imm();
#elif XLEN == 64
    if (instr.opcode() == C_LD) {
      intptr_t offset = instr.mem8_imm();
#endif
      if (instr.rs1p() == PP) {
        // PP is untagged on RISC-V.
        if (!Utils::IsAligned(offset, kWordSize)) {
          return false;  // Being used as argument register A5.
        }
        intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
        return ObjectAtPoolIndex(code, index, obj);
      } else if (instr.rs1p() == THR) {
        return Thread::ObjectAtOffset(offset, obj);
      }
    }
  } else {
    Instr instr(LoadUnaligned(reinterpret_cast<uint32_t*>(pc)));
#if XLEN == 32
    if (instr.opcode() == LOAD && instr.funct3() == LW) {
#elif XLEN == 64
    if (instr.opcode() == LOAD && instr.funct3() == LD) {
#endif
      intptr_t offset = instr.itype_imm();
      if (instr.rs1() == PP) {
        // PP is untagged on RISC-V.
        if (!Utils::IsAligned(offset, kWordSize)) {
          return false;  // Being used as argument register A5.
        }
        intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
        return ObjectAtPoolIndex(code, index, obj);
      } else if (instr.rs1() == THR) {
        return Thread::ObjectAtOffset(offset, obj);
      }
    }
    if ((instr.opcode() == OPIMM) && (instr.funct3() == ADDI) &&
        (instr.rs1() == NULL_REG)) {
      if (instr.itype_imm() == 0) {
        *obj = Object::null();
        return true;
      }
      if (instr.itype_imm() == kTrueOffsetFromNull) {
        *obj = Object::bool_true().ptr();
        return true;
      }
      if (instr.itype_imm() == kFalseOffsetFromNull) {
        *obj = Object::bool_false().ptr();
        return true;
      }
    }
  }

  // TODO(riscv): Loads with offsets beyond 12 bits.
  return false;
}

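// IsCInstruction distinguishes encodings by the low two bits of the first
// 16-bit parcel: anything other than 0b11 is a compressed instruction. The
// trailing OPIMM/ADDI case recognizes null, true, and false being
// materialized as small constant offsets from NULL_REG instead of being
// loaded from the pool or the thread.
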
// Encodes a load sequence ending at 'end'. Encodes a fixed length two
// instruction load from the pool pointer in PP using the destination
// register reg as a temporary for the base address.
// Assumes that the location has already been validated for patching.
void InstructionPattern::EncodeLoadWordFromPoolFixed(uword end,
                                                     int32_t offset) {
  UNIMPLEMENTED();
}

CodePtr CallPattern::TargetCode() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
}

void CallPattern::SetTargetCode(const Code& target) const {
  object_pool_.SetObjectAt(target_code_pool_index_, target);
  // No need to flush the instruction cache, since the code is not modified.
}

ObjectPtr ICCallPattern::Data() const {
  return object_pool_.ObjectAt(data_pool_index_);
}

void ICCallPattern::SetData(const Object& data) const {
  ASSERT(data.IsArray() || data.IsICData() || data.IsMegamorphicCache());
  object_pool_.SetObjectAt(data_pool_index_, data);
}

CodePtr ICCallPattern::TargetCode() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_pool_index_));
}

void ICCallPattern::SetTargetCode(const Code& target) const {
  object_pool_.SetObjectAt(target_pool_index_, target);
  // No need to flush the instruction cache, since the code is not modified.
}

SwitchableCallPatternBase::SwitchableCallPatternBase(
    const ObjectPool& object_pool)
    : object_pool_(object_pool), data_pool_index_(-1), target_pool_index_(-1) {}

ObjectPtr SwitchableCallPatternBase::data() const {
  return object_pool_.ObjectAt(data_pool_index_);
}

void SwitchableCallPatternBase::SetData(const Object& data) const {
  ASSERT(!Object::Handle(object_pool_.ObjectAt(data_pool_index_)).IsCode());
  object_pool_.SetObjectAt(data_pool_index_, data);
}

SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
    : SwitchableCallPatternBase(ObjectPool::Handle(code.GetObjectPool())) {
  ASSERT(code.ContainsInstructionAt(pc));
  UNIMPLEMENTED();
}

uword SwitchableCallPattern::target_entry() const {
  return Code::Handle(Code::RawCast(object_pool_.ObjectAt(target_pool_index_)))
      .MonomorphicEntryPoint();
}

void SwitchableCallPattern::SetTarget(const Code& target) const {
  ASSERT(Object::Handle(object_pool_.ObjectAt(target_pool_index_)).IsCode());
  object_pool_.SetObjectAt(target_pool_index_, target);
}

BareSwitchableCallPattern::BareSwitchableCallPattern(uword pc)
    : SwitchableCallPatternBase(ObjectPool::Handle(
          IsolateGroup::Current()->object_store()->global_object_pool())) {
  //   [lui,add,]lx RA, ##(pp)
  //   [lui,add,]lx IC_DATA_REG, ##(pp)
  //       xxxx     jalr RA

  // Last instruction: jalr ra.
  ASSERT(*reinterpret_cast<uint16_t*>(pc - 2) == 0x9082);

  Register reg;
  uword target_load_end = InstructionPattern::DecodeLoadWordFromPool(
      pc - 2, &reg, &data_pool_index_);
  ASSERT_EQUAL(reg, IC_DATA_REG);

  InstructionPattern::DecodeLoadWordFromPool(target_load_end, &reg,
                                             &target_pool_index_);
  ASSERT_EQUAL(reg, RA);
}

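// In bare-instructions (AOT) mode code objects share the isolate group's
// global object pool instead of carrying per-Code pools, which is why this
// pattern resolves its indices against global_object_pool() and stores the
// target as a raw entry-point immediate rather than a Code object.
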
uword BareSwitchableCallPattern::target_entry() const {
  return object_pool_.RawValueAt(target_pool_index_);
}

void BareSwitchableCallPattern::SetTarget(const Code& target) const {
  ASSERT(object_pool_.TypeAt(target_pool_index_) ==
         ObjectPool::EntryType::kImmediate);
  object_pool_.SetRawValueAt(target_pool_index_,
                             target.MonomorphicEntryPoint());
}

ReturnPattern::ReturnPattern(uword pc) : pc_(pc) {}

bool ReturnPattern::IsValid() const {
  return *reinterpret_cast<uint16_t*>(pc_) == 0x8082;
}

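// 0x8082 is `c.jr ra` (funct4=0b1000, rs1=ra/x1, rs2=x0, op=0b10), the
// standard 2-byte encoding of the `ret` pseudo-instruction.
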
bool PcRelativeCallPattern::IsValid() const {
  Instr auipc(LoadUnaligned(reinterpret_cast<uint32_t*>(pc_)));
  if (auipc.opcode() != AUIPC) return false;
  Instr jalr(LoadUnaligned(reinterpret_cast<uint32_t*>(pc_ + 4)));
  if (jalr.opcode() != JALR) return false;
  if (auipc.rd() != jalr.rs1()) return false;
  if (jalr.rd() != RA) return false;
  return true;
}

bool PcRelativeTailCallPattern::IsValid() const {
  Instr auipc(LoadUnaligned(reinterpret_cast<uint32_t*>(pc_)));
  if (auipc.opcode() != AUIPC) return false;
  Instr jr(LoadUnaligned(reinterpret_cast<uint32_t*>(pc_ + 4)));
  if (jr.opcode() != JALR) return false;
  if (auipc.rd() != jr.rs1()) return false;
  if (jr.rd() != ZR) return false;
  return true;
}

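// Both checks match the standard auipc+jalr pair, which gives a pc-relative
// range of +-2GB. The only difference is jalr's destination register: writing
// RA produces a call, while writing ZR discards the return address and makes
// the jump a tail call.
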
void PcRelativeTrampolineJumpPattern::Initialize() {
  StoreUnaligned(reinterpret_cast<uint32_t*>(pc_),
                 EncodeOpcode(AUIPC) | EncodeRd(TMP) | EncodeUTypeImm(0));
  StoreUnaligned(reinterpret_cast<uint32_t*>(pc_ + 4),
                 EncodeOpcode(JALR) | EncodeFunct3(F3_0) | EncodeRd(ZR) |
                     EncodeRs1(TMP) | EncodeITypeImm(0));
}

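// Both immediates are written as zero placeholders here: the actual
// displacement is patched into the auipc u-type field and the jalr i-type
// field once the trampoline's destination is known.
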
intptr_t TypeTestingStubCallPattern::GetSubtypeTestCachePoolIndex() {
  // Calls to the type testing stubs look like:
  //   lx s3, ...
  //   lx Rn, idx(pp)
  //   jalr s3
  // where Rn = TypeTestABI::kSubtypeTestCacheReg.

  // Ensure the caller of the type testing stub (whose return address is [pc_])
  // branched via `jalr s3` or a pc-relative call.
  if (*reinterpret_cast<uint16_t*>(pc_ - 2) == 0x9982) {  // c.jalr s3
    // Indirect call:
    //     xxxx  c.jalr s3
    Register reg;
    intptr_t pool_index = -1;
    InstructionPattern::DecodeLoadWordFromPool(pc_ - 2, &reg, &pool_index);
    ASSERT_EQUAL(reg, TypeTestABI::kSubtypeTestCacheReg);
    return pool_index;
  } else {
    ASSERT(FLAG_precompiled_mode);
    // PC-relative call:
    // xxxxxxxx  auipc ra, hi
    // xxxxxxxx  jalr ra, lo
    Instr jalr(LoadUnaligned(reinterpret_cast<uint32_t*>(pc_ - 4)));
    ASSERT(jalr.opcode() == JALR);
    Instr auipc(LoadUnaligned(reinterpret_cast<uint32_t*>(pc_ - 8)));
    ASSERT(auipc.opcode() == AUIPC);

    Register reg;
    intptr_t pool_index = -1;
    InstructionPattern::DecodeLoadWordFromPool(pc_ - 8, &reg, &pool_index);
    ASSERT_EQUAL(reg, TypeTestABI::kSubtypeTestCacheReg);
    return pool_index;
  }
}

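// In the indirect case the subtype-test-cache load is the last instruction
// before the 2-byte c.jalr, so pool decoding starts at pc_ - 2; in the
// pc-relative case it precedes the 8-byte auipc+jalr pair, so decoding
// starts at pc_ - 8.
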
}  // namespace dart

#endif  // defined TARGET_ARCH_RISCV