// FixedArray<Subclass, Element>: fixed-length storage helper.
template <typename Subclass, typename Element>
class FixedArray {
  // ...
  intptr_t length() const { return length_; }
  // ...
};
// In MoveSchedule::From(ops): count the non-nop ops, then copy them over.
  intptr_t count = 0;
  for (const auto& op : ops) {
    if (op.kind != ParallelMoveResolver::OpKind::kNop) count++;
  }
  // ...
  for (const auto& op : ops) {
    if (op.kind != ParallelMoveResolver::OpKind::kNop) {
      // ...
    }
  }
  // ...
  DISALLOW_COPY_AND_ASSIGN(MoveSchedule);
// In ParallelMoveResolver::Resolve(parallel_move):
  BuildInitialMoveList(parallel_move);
  // ...
  const InstructionSource move_source(TokenPosition::kParallelMove,
                                      parallel_move->inlining_id());
  for (intptr_t i = 0; i < moves_.length(); ++i) {
    // ...
    PerformMove(move_source, i);
    // ...
  }
  // Moves with constant sources never block anything, so they are scheduled
  // last.
  for (const auto& move : moves_) {
    if (!move.IsEliminated()) {
      ASSERT(move.src().IsConstant());
      scheduled_ops_.Add({OpKind::kMove, move});
    }
  }
  // ...
  scheduled_ops_.Clear();
void ParallelMoveResolver::BuildInitialMoveList(
    ParallelMoveInstr* parallel_move) {
  // Collect the moves to perform, skipping redundant ones.
  for (int i = 0; i < parallel_move->NumMoves(); i++) {
    // ...
  }
}
void ParallelMoveResolver::PerformMove(const InstructionSource& source,
                                       int index) {
  // Each call performs one move and removes it from the move graph.  A move
  // is marked pending on entry so that cycles in the graph can be detected;
  // cycles are broken with swaps.
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());
  // ...
  ASSERT(!moves_[index].src().IsInvalid());
  Location destination = moves_[index].MarkPending();

  // Recursively perform every unperformed, non-pending move that reads this
  // move's destination, since it would otherwise be clobbered.
  for (int i = 0; i < moves_.length(); ++i) {
    const MoveOperands& other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // ...
      PerformMove(source, i);
      // ...
    }
  }
  // ...
  moves_[index].ClearPending(destination);

  // Swaps performed while resolving blockers may have made this move
  // redundant.
  if (moves_[index].src().Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // If the move is still blocked, the blocker must be pending: we are in a
  // cycle, which is broken with a swap.
  for (auto& other_move : moves_) {
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      AddSwapToSchedule(index);
      return;
    }
  }
  // Not blocked: schedule a plain move.
  AddMoveToSchedule(index);
}
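
// Illustration (not the VM code): the same resolution algorithm in toy form,
// with integer locations and a hypothetical Move struct. Constant sources
// and the separate schedule are ignored; the shape mirrors PerformMove: mark
// the move pending, recursively perform blockers, drop moves that swaps made
// redundant, and break a remaining cycle with a swap plus the
// AddSwapToSchedule-style source fix-up.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Move {
  int src, dst;
  bool pending = false, done = false;
};

static void Perform(std::vector<Move>& moves, std::size_t index) {
  Move& m = moves[index];
  m.pending = true;
  // Perform every not-yet-pending move that still reads our destination,
  // so it is not clobbered.
  for (std::size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done && !moves[i].pending && moves[i].src == m.dst) {
      Perform(moves, i);
    }
  }
  m.pending = false;
  // A swap performed while resolving blockers may have made us redundant.
  if (m.src == m.dst) {
    m.done = true;
    return;
  }
  // Any remaining blocker must itself be pending: a cycle. Break it with a
  // swap and redirect the sources of the remaining moves.
  for (Move& other : moves) {
    if (!other.done && other.pending && other.src == m.dst) {
      std::printf("swap %d <-> %d\n", m.src, m.dst);
      for (Move& fix : moves) {
        if (fix.done || &fix == &m) continue;
        if (fix.src == m.src) {
          fix.src = m.dst;
        } else if (fix.src == m.dst) {
          fix.src = m.src;
        }
      }
      m.done = true;
      return;
    }
  }
  std::printf("move %d -> %d\n", m.src, m.dst);
  m.done = true;
}

int main() {
  // The cycle 1->2, 2->1 plus an independent move 3->4: resolves to one
  // swap and one move, matching the schedule the resolver would build.
  std::vector<Move> moves = {{1, 2}, {2, 1}, {3, 4}};
  for (std::size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) Perform(moves, i);
  }
}
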
void ParallelMoveResolver::AddMoveToSchedule(int index) {
  auto& move = moves_[index];
  scheduled_ops_.Add({OpKind::kMove, move});
  // ...
}
void ParallelMoveResolver::AddSwapToSchedule(int index) {
  auto& move = moves_[index];
  const auto source = move.src();
  const auto destination = move.dest();
  // ...
  scheduled_ops_.Add({OpKind::kSwap, move});
  // ...

  // The swap exchanges the contents of `source` and `destination`, so any
  // remaining move reading either location must have its source updated.
  for (auto& other_move : moves_) {
    if (other_move.Blocks(source)) {
      other_move.set_src(destination);
    } else if (other_move.Blocks(destination)) {
      other_move.set_src(source);
    }
  }
}
// Emitting a previously computed move schedule:
  for (intptr_t i = 0; i < move_schedule.length(); i++) {
    // ...
    const auto& op = move_schedule[i];
    switch (op.kind) {
      case ParallelMoveResolver::OpKind::kNop:
        break;  // Nothing to emit.
      case ParallelMoveResolver::OpKind::kMove:
        EmitMove(op.operands);
        break;
      case ParallelMoveResolver::OpKind::kSwap:
        EmitSwap(op.operands);
        break;
    }
  }
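
// Illustration (not the VM code): resolving the parallel move
// {a->b, b->a, c->d} yields one kSwap plus one kMove, and the switch above
// simply dispatches on those kinds. Toy dispatcher over a simplified Op;
// the real operands are MoveOperands, elided here, and all names below are
// hypothetical.
#include <cstdio>
#include <vector>

enum class OpKind { kNop, kMove, kSwap };
struct Op {
  OpKind kind;
  int src, dst;  // Stand-ins for MoveOperands.
};

void EmitSchedule(const std::vector<Op>& schedule) {
  for (const Op& op : schedule) {
    switch (op.kind) {
      case OpKind::kNop:
        break;  // Nothing to emit.
      case OpKind::kMove:
        std::printf("mov %d -> %d\n", op.src, op.dst);
        break;
      case OpKind::kSwap:
        std::printf("swap %d <-> %d\n", op.src, op.dst);
        break;
    }
  }
}

int main() {
  // Schedule for {a->b, b->a, c->d} with a=0, b=1, c=2, d=3.
  EmitSchedule({{OpKind::kSwap, 1, 0}, {OpKind::kMove, 2, 3}});
}
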
void ParallelMoveEmitter::EmitMove(const MoveOperands& move) {
  Location dst = move.dest();
  Location src = move.src();
#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
  // On RISC-V, stack locations may be rebased onto a different base register
  // when that improves addressing.
  dst = compiler_->RebaseIfImprovesAddressing(dst);
  src = compiler_->RebaseIfImprovesAddressing(src);
#endif
  ParallelMoveEmitter::TemporaryAllocator temp(this, /*blocked=*/kNoRegister);
  compiler_->EmitMove(dst, src, &temp);
  // Allocating a temporary may spill to the stack, which must not invalidate
  // an SP-relative source or destination.
  for (const Location& loc : {dst, src}) {
    ASSERT(!temp.DidAllocateTemporary() || !loc.HasStackIndex() ||
           loc.base_reg() != SPREG);
  }
}
bool ParallelMoveEmitter::IsScratchLocation(Location loc) {
  // A location can serve as a scratch if the rest of the schedule never
  // reads it and a later move overwrites it anyway.
  for (intptr_t i = current_move_; i < move_schedule.length(); i++) {
    const auto& op = move_schedule[i];
    if (op.operands.src().Equals(loc) ||
        (op.kind == ParallelMoveResolver::OpKind::kSwap &&
         op.operands.dest().Equals(loc))) {
      return false;
    }
  }

  for (intptr_t i = current_move_ + 1; i < move_schedule.length(); i++) {
    const auto& op = move_schedule[i];
    if (op.kind == ParallelMoveResolver::OpKind::kMove &&
        op.operands.dest().Equals(loc)) {
      return true;
    }
  }
  // ...
}
intptr_t ParallelMoveEmitter::AllocateScratchRegister(
    /* ... */
    intptr_t first_free_register,
    intptr_t last_free_register,
    /* ... */) {
  intptr_t scratch = -1;
  // Prefer an unblocked register that the rest of the schedule never uses;
  // otherwise fall back to any unblocked register.
  for (intptr_t reg = first_free_register; reg <= last_free_register; reg++) {
    if ((((1 << reg) & blocked_mask) == 0) &&
        IsScratchLocation(Location::MachineRegisterLocation(kind, reg))) {
      // ...
    }
  }
  // ...
  for (intptr_t reg = first_free_register; reg <= last_free_register; reg++) {
    if (((1 << reg) & blocked_mask) == 0) {
      // ...
    }
  }
  // ...
}
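
// Illustration (not the VM code): the two passes above are the whole policy.
// Prefer an unblocked register that the rest of the schedule never touches;
// otherwise take any unblocked register and let the caller spill/restore it.
// 'is_scratch' stands in for IsScratchLocation(); names here are hypothetical.
#include <cstdint>

template <typename IsScratchFn>
int AllocateScratch(uint32_t blocked_mask,
                    int first_free,
                    int last_free,
                    IsScratchFn is_scratch,
                    bool* spilled) {
  // Pass 1: an unblocked register that is also a true scratch location.
  for (int reg = first_free; reg <= last_free; reg++) {
    if (((1u << reg) & blocked_mask) == 0 && is_scratch(reg)) {
      *spilled = false;
      return reg;
    }
  }
  // Pass 2: any unblocked register; its value must be spilled around the use.
  for (int reg = first_free; reg <= last_free; reg++) {
    if (((1u << reg) & blocked_mask) == 0) {
      *spilled = true;
      return reg;
    }
  }
  return -1;  // Nothing available.
}
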
ParallelMoveEmitter::ScratchFpuRegisterScope::ScratchFpuRegisterScope(
    ParallelMoveEmitter* emitter, /* ... */) {
  // ...
  emitter->SpillFpuScratch(reg_);
}

ParallelMoveEmitter::ScratchFpuRegisterScope::~ScratchFpuRegisterScope() {
  // ...
  emitter_->RestoreFpuScratch(reg_);
}

ParallelMoveEmitter::TemporaryAllocator::TemporaryAllocator(
    ParallelMoveEmitter* emitter, /* ... */) { /* ... */ }

Register ParallelMoveEmitter::TemporaryAllocator::AllocateTemporary() {
  // ...
  if (emitter_->compiler_->intrinsic_mode()) {
    // ...
#if !defined(TARGET_ARCH_IA32)
    // ...
#endif
  }
  // ...
  emitter_->SpillScratch(reg_);
  // ...
}

void ParallelMoveEmitter::TemporaryAllocator::ReleaseTemporary() {
  // ...
  emitter_->RestoreScratch(reg_);
}

ParallelMoveEmitter::ScratchRegisterScope::ScratchRegisterScope(
    ParallelMoveEmitter* emitter, Register blocked)
    : allocator_(emitter, blocked) {
  reg_ = allocator_.AllocateTemporary();
}

ParallelMoveEmitter::ScratchRegisterScope::~ScratchRegisterScope() {
  allocator_.ReleaseTemporary();
}
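
// Illustration (not the VM classes): the scope types above are plain RAII
// around the allocator. The constructor grabs a scratch register, spilling
// its current value when needed, and the destructor restores and releases
// it. The spill/restore callbacks below are hypothetical stand-ins for
// SpillScratch()/RestoreScratch().
#include <functional>
#include <utility>

class ScratchScope {
 public:
  ScratchScope(int reg,
               bool needs_spill,
               const std::function<void(int)>& spill,
               std::function<void(int)> restore)
      : reg_(reg), spilled_(needs_spill), restore_(std::move(restore)) {
    if (spilled_) spill(reg_);  // Preserve the register's current value.
  }
  ~ScratchScope() {
    if (spilled_) restore_(reg_);  // Undo the spill when the scope ends.
  }
  int reg() const { return reg_; }

 private:
  int reg_;
  bool spilled_;
  std::function<void(int)> restore_;
};
// The VM scopes differ mainly in that they pick the register themselves (via
// AllocateScratchRegister) and spill only when no register was actually free.
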
// Writing a MoveSchedule through the FlowGraphSerializer: a length prefix,
// then one kind byte plus the operands for every op.
  ASSERT(schedule != nullptr);
  const intptr_t len = schedule->length();
  s->Write<intptr_t>(len);
  for (intptr_t i = 0; i < len; ++i) {
    const auto& op = (*schedule)[i];
    s->Write<uint8_t>(static_cast<uint8_t>(op.kind));
    op.operands.Write(s);
  }

// Reading it back through the FlowGraphDeserializer:
  const intptr_t len = d->Read<intptr_t>();
  // ...
  for (intptr_t i = 0; i < len; ++i) {
    const auto kind =
        static_cast<ParallelMoveResolver::OpKind>(d->Read<uint8_t>());
    // ...
  }
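
// Illustration (not the VM code): the Write/Read pair above frames a schedule
// as a length prefix followed by one kind byte per op, with each op's
// operands written right after its kind. A hypothetical round-trip over a
// byte vector shows the framing; operands are omitted and the length prefix
// is shrunk to a single byte for brevity.
#include <cstddef>
#include <cstdint>
#include <vector>

enum class OpKind : uint8_t { kNop, kMove, kSwap };

std::vector<uint8_t> WriteKinds(const std::vector<OpKind>& ops) {
  std::vector<uint8_t> out;
  out.push_back(static_cast<uint8_t>(ops.size()));  // Length prefix.
  for (OpKind kind : ops) {
    out.push_back(static_cast<uint8_t>(kind));  // One kind byte per op.
  }
  return out;
}

std::vector<OpKind> ReadKinds(const std::vector<uint8_t>& in) {
  const std::size_t len = in.at(0);
  std::vector<OpKind> ops;
  for (std::size_t i = 0; i < len; ++i) {
    ops.push_back(static_cast<OpKind>(in.at(1 + i)));
  }
  return ops;
}
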