// instructions_arm64.cc — Dart VM call-pattern decoding for ARM64
// (Flutter Engine / Dart runtime).
// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM64.
#if defined(TARGET_ARCH_ARM64)

#include "vm/instructions.h"

#include "vm/constants.h"
#include "vm/cpu.h"
#include "vm/object.h"
#include "vm/object_store.h"
namespace dart {
19static bool IsBranchLinkScratch(Register reg) {
20 // See Assembler::BranchLink
21 return FLAG_precompiled_mode ? reg == LINK_REGISTER : reg == CODE_REG;
22}
23
24CallPattern::CallPattern(uword pc, const Code& code)
25 : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
26 target_code_pool_index_(-1) {
27 ASSERT(code.ContainsInstructionAt(pc));
28 // Last instruction: blr lr.
29 ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);
30
31 Register reg;
32 InstructionPattern::DecodeLoadWordFromPool(pc - 2 * Instr::kInstrSize, &reg,
33 &target_code_pool_index_);
34 ASSERT(IsBranchLinkScratch(reg));
35}
36
37ICCallPattern::ICCallPattern(uword pc, const Code& code)
38 : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
39 target_pool_index_(-1),
40 data_pool_index_(-1) {
41 ASSERT(code.ContainsInstructionAt(pc));
42 // Last instruction: blr lr.
43 ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);
44
45 Register data_reg, code_reg;
46 intptr_t pool_index;
47 InstructionPattern::DecodeLoadDoubleWordFromPool(
48 pc - 2 * Instr::kInstrSize, &data_reg, &code_reg, &pool_index);
49 ASSERT(data_reg == R5);
50 ASSERT(IsBranchLinkScratch(code_reg));
51
52 data_pool_index_ = pool_index;
53 target_pool_index_ = pool_index + 1;
54}
55
56NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
57 : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
58 end_(pc),
59 native_function_pool_index_(-1),
60 target_code_pool_index_(-1) {
61 ASSERT(code.ContainsInstructionAt(pc));
62 // Last instruction: blr lr.
63 ASSERT(*(reinterpret_cast<uint32_t*>(end_) - 1) == 0xd63f03c0);
64
65 Register reg;
66 uword native_function_load_end = InstructionPattern::DecodeLoadWordFromPool(
67 end_ - 2 * Instr::kInstrSize, &reg, &target_code_pool_index_);
68 ASSERT(IsBranchLinkScratch(reg));
69 InstructionPattern::DecodeLoadWordFromPool(native_function_load_end, &reg,
70 &native_function_pool_index_);
71 ASSERT(reg == R5);
72}
73
74CodePtr NativeCallPattern::target() const {
75 return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
76}
77
78void NativeCallPattern::set_target(const Code& target) const {
79 object_pool_.SetObjectAt(target_code_pool_index_, target);
80 // No need to flush the instruction cache, since the code is not modified.
81}
82
83NativeFunction NativeCallPattern::native_function() const {
84 return reinterpret_cast<NativeFunction>(
85 object_pool_.RawValueAt(native_function_pool_index_));
86}
87
88void NativeCallPattern::set_native_function(NativeFunction func) const {
89 object_pool_.SetRawValueAt(native_function_pool_index_,
90 reinterpret_cast<uword>(func));
91}
92
// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the loaded immediate value in the output parameters 'reg' and 'value'
// respectively.
uword InstructionPattern::DecodeLoadWordImmediate(uword end,
                                                  Register* reg,
                                                  intptr_t* value) {
  // Three possible shapes:
  // 1. LoadWordFromPool
  //    or
  // 2. LoadWordFromPool
  //    orri            (sets the low bit of the loaded value)
  //    or
  // 3. LoadPatchableImmediate (movz + 3x movk)
  uword start = end - Instr::kInstrSize;
  Instr* instr = Instr::At(start);
  bool odd = false;

  // Case 2: a trailing logical-immediate instruction means the low bit is
  // OR'd in after the pool load.
  if (instr->IsLogicalImmOp()) {
    ASSERT(instr->Bit(29) == 1);
    odd = true;
    // end points at orri so that we can pass it to DecodeLoadWordFromPool.
    end = start;
    start -= Instr::kInstrSize;
    instr = Instr::At(start);
    // Case 2 falls through to case 1.
  }

  // Case 1: a plain pool load (possibly preceded by the orri handled above).
  if (instr->IsLoadStoreRegOp()) {
    start = DecodeLoadWordFromPool(end, reg, value);
    if (odd) {
      *value |= 1;
    }
    return start;
  }

  // Case 3: a 64-bit immediate materialized 16 bits at a time, decoded
  // backwards from the highest half-word to the lowest.
  // movk dst, imm3, 3; movk dst, imm2, 2; movk dst, imm1, 1; movz dst, imm0, 0
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 3);
  ASSERT(instr->HWField() == 3);  // movk dst, imm3, 3
  *reg = instr->RdField();
  *value = static_cast<int64_t>(instr->Imm16Field()) << 48;

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 3);
  ASSERT(instr->HWField() == 2);  // movk dst, imm2, 2
  ASSERT(instr->RdField() == *reg);
  *value |= static_cast<int64_t>(instr->Imm16Field()) << 32;

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 3);
  ASSERT(instr->HWField() == 1);  // movk dst, imm1, 1
  ASSERT(instr->RdField() == *reg);
  *value |= static_cast<int64_t>(instr->Imm16Field()) << 16;

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 2);
  ASSERT(instr->HWField() == 0);  // movz dst, imm0, 0
  ASSERT(instr->RdField() == *reg);
  *value |= static_cast<int64_t>(instr->Imm16Field());

  return start;
}
165
// Decodes a single-word object-pool load ending at 'end'; returns the start
// address of the sequence and stores the destination register and the pool
// index in 'reg' and 'index'. See comment in instructions_arm64.h.
uword InstructionPattern::DecodeLoadWordFromPool(uword end,
                                                 Register* reg,
                                                 intptr_t* index) {
  // Three possible shapes, depending on how large the pool offset is:
  // 1. ldr dst, [pp, offset]
  //    or
  // 2. add dst, pp, #offset_hi12
  //    ldr dst [dst, #offset_lo12]
  //    or
  // 3. movz dst, low_offset, 0
  //    movk dst, hi_offset, 1 (optional)
  //    ldr dst, [pp, dst]
  uword start = end - Instr::kInstrSize;
  Instr* instr = Instr::At(start);
  intptr_t offset = 0;

  // Last instruction is always an ldr into a 64-bit X register.
  ASSERT(instr->IsLoadStoreRegOp() && (instr->Bit(22) == 1) &&
         (instr->Bits(30, 2) == 3));

  // Grab the destination register from the ldr instruction.
  *reg = instr->RtField();

  if (instr->Bit(24) == 1) {
    // base + scaled unsigned 12-bit immediate offset.
    // Case 1.  (Imm12 is scaled by the 8-byte access size.)
    offset |= (instr->Imm12Field() << 3);
    if (instr->RnField() == *reg) {
      // Case 2: the ldr's base is dst itself, so an earlier add put
      // pp + offset_hi12 into dst.
      start -= Instr::kInstrSize;
      instr = Instr::At(start);
      ASSERT(instr->IsAddSubImmOp());
      ASSERT(instr->RnField() == PP);
      ASSERT(instr->RdField() == *reg);
      offset |= (instr->Imm12Field() << 12);
    }
  } else {
    // Case 3: register-offset ldr; the offset was materialized by a movz
    // (and optionally a movk) just before.
    ASSERT(instr->Bits(10, 2) == 2);
    // We have to look at the preceding one or two instructions to find the
    // offset.

    start -= Instr::kInstrSize;
    instr = Instr::At(start);
    ASSERT(instr->IsMoveWideOp());
    ASSERT(instr->RdField() == *reg);
    if (instr->Bits(29, 2) == 2) {  // movz dst, low_offset, 0
      ASSERT(instr->HWField() == 0);
      offset = instr->Imm16Field();
      // no high offset.
    } else {
      ASSERT(instr->Bits(29, 2) == 3);  // movk dst, high_offset, 1
      ASSERT(instr->HWField() == 1);
      offset = instr->Imm16Field() << 16;

      start -= Instr::kInstrSize;
      instr = Instr::At(start);
      ASSERT(instr->IsMoveWideOp());
      ASSERT(instr->RdField() == *reg);
      ASSERT(instr->Bits(29, 2) == 2);  // movz dst, low_offset, 0
      ASSERT(instr->HWField() == 0);
      offset |= instr->Imm16Field();
    }
  }
  // PP is untagged on ARM64, so undo the tag bias before converting the
  // byte offset into a pool index.
  ASSERT(Utils::IsAligned(offset, 8));
  *index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
  return start;
}
233
// Decodes a two-word (ldp) object-pool load ending at 'end'; returns the
// start of the sequence and stores both destination registers and the pool
// index of the first word. See comment in instructions_arm64.h.
uword InstructionPattern::DecodeLoadDoubleWordFromPool(uword end,
                                                        Register* reg1,
                                                        Register* reg2,
                                                        intptr_t* index) {
  // Cases:
  //
  // 1. ldp reg1, reg2, [pp, offset]
  //
  // 2. add tmp, pp, #upper12
  //    ldp reg1, reg2, [tmp, #lower12]
  //
  // 3. add tmp, pp, #upper12
  //    add tmp, tmp, #lower12
  //    ldp reg1, reg2, [tmp, 0]
  //
  // Note that the pp register is untagged!
  //
  uword start = end - Instr::kInstrSize;
  Instr* ldr_instr = Instr::At(start);

  // Last instruction is always an ldp into two 64-bit X registers.
  ASSERT(ldr_instr->IsLoadStoreRegPairOp() && (ldr_instr->Bit(22) == 1));

  // Grab the destination register from the ldp instruction.
  *reg1 = ldr_instr->RtField();
  *reg2 = ldr_instr->Rt2Field();

  Register base_reg = ldr_instr->RnField();
  // Imm7 is scaled by the 8-byte register size.
  const int base_offset = 8 * ldr_instr->Imm7Field();

  intptr_t pool_offset = 0;
  if (base_reg == PP) {
    // Case 1.
    pool_offset = base_offset;
  } else {
    // Case 2 & 3: the base was built in TMP by one or two preceding adds.
    ASSERT(base_reg == TMP);

    pool_offset = base_offset;

    start -= Instr::kInstrSize;
    Instr* add_instr = Instr::At(start);
    ASSERT(add_instr->IsAddSubImmOp());
    ASSERT(add_instr->RdField() == TMP);

    // Imm12ShiftField() == 1 means the 12-bit immediate is shifted left 12.
    const auto shift = add_instr->Imm12ShiftField();
    ASSERT(shift == 0 || shift == 1);
    pool_offset += (add_instr->Imm12Field() << (shift == 1 ? 12 : 0));

    if (add_instr->RnField() == TMP) {
      // Case 3: a second add (from PP) precedes the one above.
      start -= Instr::kInstrSize;
      Instr* prev_add_instr = Instr::At(start);
      ASSERT(prev_add_instr->IsAddSubImmOp());
      ASSERT(prev_add_instr->RnField() == PP);

      // Intentionally shadows the outer `shift` for the second add.
      const auto shift = prev_add_instr->Imm12ShiftField();
      ASSERT(shift == 0 || shift == 1);
      pool_offset += (prev_add_instr->Imm12Field() << (shift == 1 ? 12 : 0));
    } else {
      ASSERT(add_instr->RnField() == PP);
    }
  }
  // PP is untagged; remove the tag bias before converting to a pool index.
  *index = ObjectPool::IndexFromOffset(pool_offset - kHeapObjectTag);
  return start;
}
300
// Attempts to decode, at return address 'pc', a load of an object from the
// object pool (via PP), from the thread (via THR), or a true/false value
// materialized as an add from NULL_REG. On success stores the object in
// 'obj' and returns true; returns false if the instruction does not match
// any recognized pattern.
bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
  ASSERT(code.ContainsInstructionAt(pc));

  Instr* instr = Instr::At(pc);
  // 64-bit ldr with an unsigned scaled 12-bit immediate offset.
  if (instr->IsLoadStoreRegOp() && (instr->Bit(22) == 1) &&
      (instr->Bits(30, 2) == 3) && instr->Bit(24) == 1) {
    intptr_t offset = (instr->Imm12Field() << 3);
    if (instr->RnField() == PP) {
      // PP is untagged on ARM64.
      ASSERT(Utils::IsAligned(offset, 8));
      intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
      return ObjectAtPoolIndex(code, index, obj);
    } else if (instr->RnField() == THR) {
      return Thread::ObjectAtOffset(offset, obj);
    }
    if (instr->RnField() == instr->RtField()) {
      // The ldr's base is its own destination: a preceding add supplied the
      // upper 12 bits of the offset (add dst, pp/thr, #hi12).
      Instr* add = Instr::At(pc - Instr::kInstrSize);
      // NOTE(review): this reads Bit(22) from the ldr (`instr`), not the add,
      // and compares the add's RdField against RtField (a load/store field)
      // on the same add instruction — verify against the assembler's emitted
      // pattern; it may have been intended as add->Bit(22) / instr->RtField().
      if (add->IsAddSubImmOp() && (add->SFField() != 0) &&
          (instr->Bit(22) == 1) && (add->RdField() == add->RtField())) {
        offset = (add->Imm12Field() << 12) + offset;
        if (add->RnField() == PP) {
          // PP is untagged on ARM64.
          ASSERT(Utils::IsAligned(offset, 8));
          intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
          return ObjectAtPoolIndex(code, index, obj);
        } else if (add->RnField() == THR) {
          return Thread::ObjectAtOffset(offset, obj);
        }
      }
    }
    // TODO(rmacnak): Loads with offsets beyond 24 bits.
  }

  // true/false are materialized as NULL_REG plus a known immediate.
  if (instr->IsAddSubImmOp() && (instr->SFField() != 0) &&
      (instr->RnField() == NULL_REG)) {
    uint32_t imm = (instr->Bit(22) == 1) ? (instr->Imm12Field() << 12)
                                         : (instr->Imm12Field());
    if (imm == kTrueOffsetFromNull) {
      *obj = Object::bool_true().ptr();
      return true;
    } else if (imm == kFalseOffsetFromNull) {
      *obj = Object::bool_false().ptr();
      return true;
    }
  }

  return false;
}
349
// Encodes a load sequence ending at 'end'. Encodes a fixed length two
// instruction load from the pool pointer in PP using the destination
// register reg as a temporary for the base address.
// Assumes that the location has already been validated for patching.
void InstructionPattern::EncodeLoadWordFromPoolFixed(uword end,
                                                      int32_t offset) {
  uword start = end - Instr::kInstrSize;
  Instr* instr = Instr::At(start);
  const int32_t upper12 = offset & 0x00fff000;
  const int32_t lower12 = offset & 0x00000fff;
  ASSERT((offset & 0xff000000) == 0);        // Can't encode > 24 bits.
  ASSERT(((lower12 >> 3) << 3) == lower12);  // 8-byte aligned.
  // Last instruction: the ldr gets the lower 12 bits (scaled by 8).
  instr->SetImm12Bits(instr->InstructionBits(), lower12 >> 3);

  // Preceding instruction: the add gets the upper 12 bits; setting B22
  // marks its immediate as shifted left by 12.
  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  instr->SetImm12Bits(instr->InstructionBits(), upper12 >> 12);
  instr->SetInstructionBits(instr->InstructionBits() | B22);
}
369
370CodePtr CallPattern::TargetCode() const {
371 return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
372}
373
374void CallPattern::SetTargetCode(const Code& target) const {
375 object_pool_.SetObjectAt(target_code_pool_index_, target);
376 // No need to flush the instruction cache, since the code is not modified.
377}
378
379ObjectPtr ICCallPattern::Data() const {
380 return object_pool_.ObjectAt(data_pool_index_);
381}
382
383void ICCallPattern::SetData(const Object& data) const {
384 ASSERT(data.IsArray() || data.IsICData() || data.IsMegamorphicCache());
385 object_pool_.SetObjectAt(data_pool_index_, data);
386}
387
388CodePtr ICCallPattern::TargetCode() const {
389 return static_cast<CodePtr>(object_pool_.ObjectAt(target_pool_index_));
390}
391
392void ICCallPattern::SetTargetCode(const Code& target) const {
393 object_pool_.SetObjectAt(target_pool_index_, target);
394 // No need to flush the instruction cache, since the code is not modified.
395}
396
// Shared base for switchable-call patterns: remembers the pool and leaves
// both indices unresolved (-1) until a subclass constructor decodes them.
SwitchableCallPatternBase::SwitchableCallPatternBase(
    const ObjectPool& object_pool)
    : object_pool_(object_pool), data_pool_index_(-1), target_pool_index_(-1) {}
400
401ObjectPtr SwitchableCallPatternBase::data() const {
402 return object_pool_.ObjectAt(data_pool_index_);
403}
404
405void SwitchableCallPatternBase::SetData(const Object& data) const {
406 ASSERT(!Object::Handle(object_pool_.ObjectAt(data_pool_index_)).IsCode());
407 object_pool_.SetObjectAt(data_pool_index_, data);
408}
409
410SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
411 : SwitchableCallPatternBase(ObjectPool::Handle(code.GetObjectPool())) {
412 ASSERT(code.ContainsInstructionAt(pc));
413 // Last instruction: blr lr.
414 ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);
415
416 Register ic_data_reg, code_reg;
417 intptr_t pool_index;
418 InstructionPattern::DecodeLoadDoubleWordFromPool(
419 pc - 2 * Instr::kInstrSize, &ic_data_reg, &code_reg, &pool_index);
420 ASSERT(ic_data_reg == R5);
421 ASSERT(IsBranchLinkScratch(code_reg));
422
423 data_pool_index_ = pool_index;
424 target_pool_index_ = pool_index + 1;
425}
426
427uword SwitchableCallPattern::target_entry() const {
428 return Code::Handle(Code::RawCast(object_pool_.ObjectAt(target_pool_index_)))
429 .MonomorphicEntryPoint();
430}
431
432void SwitchableCallPattern::SetTarget(const Code& target) const {
433 ASSERT(Object::Handle(object_pool_.ObjectAt(target_pool_index_)).IsCode());
434 object_pool_.SetObjectAt(target_pool_index_, target);
435}
436
437BareSwitchableCallPattern::BareSwitchableCallPattern(uword pc)
438 : SwitchableCallPatternBase(ObjectPool::Handle(
439 IsolateGroup::Current()->object_store()->global_object_pool())) {
440 // Last instruction: blr lr.
441 ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);
442
443 Register ic_data_reg, code_reg;
444 intptr_t pool_index;
445 InstructionPattern::DecodeLoadDoubleWordFromPool(
446 pc - Instr::kInstrSize, &ic_data_reg, &code_reg, &pool_index);
447 ASSERT(ic_data_reg == R5);
448 ASSERT(code_reg == LINK_REGISTER);
449
450 data_pool_index_ = pool_index;
451 target_pool_index_ = pool_index + 1;
452}
453
454uword BareSwitchableCallPattern::target_entry() const {
455 return object_pool_.RawValueAt(target_pool_index_);
456}
457
458void BareSwitchableCallPattern::SetTarget(const Code& target) const {
459 ASSERT(object_pool_.TypeAt(target_pool_index_) ==
460 ObjectPool::EntryType::kImmediate);
461 object_pool_.SetRawValueAt(target_pool_index_,
462 target.MonomorphicEntryPoint());
463}
464
// Remembers the address of the instruction expected to be a return.
ReturnPattern::ReturnPattern(uword pc) : pc_(pc) {}
466
467bool ReturnPattern::IsValid() const {
468 Instr* bx_lr = Instr::At(pc_);
470 const int32_t instruction = RET | (static_cast<int32_t>(crn) << kRnShift);
471 return bx_lr->InstructionBits() == instruction;
472}
473
474bool PcRelativeCallPattern::IsValid() const {
475 // bl <offset>
476 const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
477 const uint32_t branch_link = 0x25;
478 return (word >> 26) == branch_link;
479}
480
481bool PcRelativeTailCallPattern::IsValid() const {
482 // b <offset>
483 const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
484 const uint32_t branch_link = 0x5;
485 return (word >> 26) == branch_link;
486}
487
489#if !defined(DART_PRECOMPILED_RUNTIME)
490 uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
491 pattern[0] = kAdrEncoding;
492 pattern[1] = kMovzEncoding;
493 pattern[2] = kAddTmpTmp2;
494 pattern[3] = kJumpEncoding;
495 set_distance(0);
496#else
497 UNREACHABLE();
498#endif
499}
500
502#if !defined(DART_PRECOMPILED_RUNTIME)
503 uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
504 const uint32_t adr = pattern[0];
505 const uint32_t movz = pattern[1];
506 const uint32_t lower16 =
507 (((adr >> 5) & ((1 << 19) - 1)) << 2) | ((adr >> 29) & 0x3);
508 const uint32_t higher16 = (movz >> kImm16Shift) & 0xffff;
509 return (higher16 << 16) | lower16;
510#else
511 UNREACHABLE();
512 return 0;
513#endif
514}
515
516void PcRelativeTrampolineJumpPattern::set_distance(int32_t distance) {
517#if !defined(DART_PRECOMPILED_RUNTIME)
518 uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
519 uint32_t low16 = distance & 0xffff;
520 uint32_t high16 = (distance >> 16) & 0xffff;
521 pattern[0] = kAdrEncoding | ((low16 & 0x3) << 29) | ((low16 >> 2) << 5);
522 pattern[1] = kMovzEncoding | (high16 << kImm16Shift);
523 ASSERT(IsValid());
524#else
525 UNREACHABLE();
526#endif
527}
528
529bool PcRelativeTrampolineJumpPattern::IsValid() const {
530#if !defined(DART_PRECOMPILED_RUNTIME)
531 const uint32_t adr_mask = (3 << 29) | (((1 << 19) - 1) << 5);
532 const uint32_t movz_mask = 0xffff << 5;
533 uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
534 return ((pattern[0] & ~adr_mask) == kAdrEncoding) &&
535 ((pattern[1] & ~movz_mask) == kMovzEncoding) &&
536 (pattern[2] == kAddTmpTmp2) && (pattern[3] == kJumpEncoding);
537#else
538 UNREACHABLE();
539 return false;
540#endif
541}
542
543intptr_t TypeTestingStubCallPattern::GetSubtypeTestCachePoolIndex() {
544 // Calls to the type testing stubs look like:
545 // ldr R9, ...
546 // ldr Rn, [PP+idx]
547 // blr R9
548 // or
549 // ldr Rn, [PP+idx]
550 // blr pc+<offset>
551 // where Rn = TypeTestABI::kSubtypeTestCacheReg.
552
553 // Ensure the caller of the type testing stub (whose return address is [pc_])
554 // branched via `blr R9` or a pc-relative call.
555 uword pc = pc_ - Instr::kInstrSize;
556 const uword blr_r9 = 0xd63f0120;
557 if (*reinterpret_cast<uint32_t*>(pc) != blr_r9) {
558 PcRelativeCallPattern pattern(pc);
559 RELEASE_ASSERT(pattern.IsValid());
560 }
561
562 const uword load_instr_end = pc;
563
564 Register reg;
565 intptr_t pool_index = -1;
566 InstructionPattern::DecodeLoadWordFromPool(load_instr_end, &reg, &pool_index);
567 ASSERT_EQUAL(reg, TypeTestABI::kSubtypeTestCacheReg);
568 return pool_index;
569}
570
}  // namespace dart

#endif  // defined TARGET_ARCH_ARM64
/* Doxygen cross-reference residue from the scraped documentation page — not
   part of the original source file. Preserved for reference:
#define UNREACHABLE()                      (assert.h:248)
#define ASSERT_EQUAL(expected, actual)     (assert.h:309)
#define RELEASE_ASSERT(cond)               (assert.h:327)
CallPattern(uword pc, const Code &code)
#define LINK_REGISTER
#define ASSERT(E)
bool ObjectAtPoolIndex(const Code &code, intptr_t index, Object *obj)
                                           (instructions.cc:14)
const Register THR
static constexpr intptr_t kFalseOffsetFromNull
const Register NULL_REG
static constexpr intptr_t kTrueOffsetFromNull
Register ConcreteRegister(LinkRegister)
uintptr_t uword                            (globals.h:501)
intptr_t word                              (globals.h:500)
const Register CODE_REG
bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code &code, Object *obj)
const Register TMP
kRnShift
const Register PP
void(* NativeFunction)(NativeArguments *arguments)
kHeapObjectTag
*/