Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
Classes | Namespaces | Macros
assembler_ia32.h File Reference
#include <functional>
#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/compiler/assembler/assembler_base.h"
#include "vm/constants.h"
#include "vm/constants_x86.h"
#include "vm/pointer_tagging.h"

Go to the source code of this file.

Classes

class  dart::compiler::Immediate
 
class  dart::compiler::Operand
 
class  dart::compiler::Address
 
class  dart::compiler::FieldAddress
 
class  dart::compiler::Assembler
 

Namespaces

namespace  dart
 
namespace  dart::compiler
 

Macros

#define ALU_OPS(F)
 
#define DECLARE_ALU(op, opcode, opcode2, modrm_opcode)
 

Macro Definition Documentation

◆ ALU_OPS

#define ALU_OPS (   F)
Value:
F(and, 0x23, 0x21, 4) \
F(or, 0x0b, 0x09, 1) \
F(xor, 0x33, 0x31, 6) \
F(add, 0x03, 0x01, 0) \
F(adc, 0x13, 0x11, 2) \
F(sub, 0x2b, 0x29, 5) \
F(sbb, 0x1b, 0x19, 3) \
F(cmp, 0x3b, 0x39, 7)
#define F(x)

Definition at line 475 of file assembler_ia32.h.

487 { Alu(4, opcode, dst, src); } \
488 void op##w(Register dst, Register src) { Alu(2, opcode, dst, src); } \
489 void op##l(Register dst, const Address& src) { Alu(4, opcode, dst, src); } \
490 void op##w(Register dst, const Address& src) { Alu(2, opcode, dst, src); } \
491 void op##l(const Address& dst, Register src) { Alu(4, opcode2, dst, src); } \
492 void op##w(const Address& dst, Register src) { Alu(2, opcode2, dst, src); } \
493 void op##l(Register dst, const Immediate& imm) { \
494 Alu(modrm_opcode, dst, imm); \
495 } \
496 void op##l(const Address& dst, const Immediate& imm) { \
497 Alu(modrm_opcode, dst, imm); \
498 }
499
501
502#undef DECLARE_ALU
503#undef ALU_OPS
504
505 void cdq();
506
507 void idivl(Register reg);
508
509 void divl(Register reg);
510
511 void imull(Register dst, Register src);
512 void imull(Register reg, const Immediate& imm);
513 void imull(Register reg, const Address& address);
514
515 void imull(Register reg);
516 void imull(const Address& address);
517
518 void mull(Register reg);
519 void mull(const Address& address);
520
521 void incl(Register reg);
522 void incl(const Address& address);
523
524 void decl(Register reg);
525 void decl(const Address& address);
526
527 void shll(Register reg, const Immediate& imm);
528 void shll(Register operand, Register shifter);
529 void shll(const Address& operand, Register shifter);
530 void shrl(Register reg, const Immediate& imm);
531 void shrl(Register operand, Register shifter);
532 void sarl(Register reg, const Immediate& imm);
533 void sarl(Register operand, Register shifter);
534 void sarl(const Address& address, Register shifter);
535 void shldl(Register dst, Register src, Register shifter);
536 void shldl(Register dst, Register src, const Immediate& imm);
537 void shldl(const Address& operand, Register src, Register shifter);
538 void shrdl(Register dst, Register src, Register shifter);
539 void shrdl(Register dst, Register src, const Immediate& imm);
540 void shrdl(const Address& dst, Register src, Register shifter);
541
542 void negl(Register reg);
543 void notl(Register reg);
544
545 void bsfl(Register dst, Register src);
546 void bsrl(Register dst, Register src);
547 void popcntl(Register dst, Register src);
548 void lzcntl(Register dst, Register src);
549
550 void bt(Register base, Register offset);
551 void bt(Register base, int bit);
552
553 void enter(const Immediate& imm);
554 void leave();
555
556 void ret();
557 void ret(const Immediate& imm);
558
559 // 'size' indicates size in bytes and must be in the range 1..8.
560 void nop(int size = 1);
561 void int3();
562 void hlt();
563
564 void j(Condition condition, Label* label, JumpDistance distance = kFarJump);
565 void j(Condition condition, const ExternalLabel* label);
566
567 void jmp(Register reg);
568 void jmp(const Address& address);
569 void jmp(Label* label, JumpDistance distance = kFarJump);
570 void jmp(const ExternalLabel* label);
571
572 void lock();
573 void cmpxchgl(const Address& address, Register reg);
574
575 void cld();
576 void std();
577
578 void cpuid();
579
580 /*
 581 * Macros for high-level operations implemented on all architectures.
582 */
583
584 void Ret() { ret(); }
585
586 // Sets the return address to [value] as if there was a call.
587 // On IA32 pushes [value].
588 void SetReturnAddress(Register value) { PushRegister(value); }
589
590 void PushValueAtOffset(Register base, int32_t offset) {
591 pushl(Address(base, offset));
592 }
593
594 void CompareRegisters(Register a, Register b);
595 void CompareObjectRegisters(Register a, Register b) {
596 CompareRegisters(a, b);
597 }
598 void BranchIf(Condition condition,
599 Label* label,
600 JumpDistance distance = kFarJump) {
601 j(condition, label, distance);
602 }
603 void BranchIfZero(Register src,
604 Label* label,
605 JumpDistance distance = kFarJump) {
606 cmpl(src, Immediate(0));
607 j(ZERO, label, distance);
608 }
609 void BranchIfBit(Register rn,
610 intptr_t bit_number,
611 Condition condition,
612 Label* label,
613 JumpDistance distance = kFarJump) {
614 testl(rn, Immediate(1 << bit_number));
615 j(condition, label, distance);
616 }
617
618 // Arch-specific Load to choose the right operation for [sz].
619 void Load(Register dst,
620 const Address& address,
621 OperandSize sz = kFourBytes) override;
622 void LoadIndexedPayload(Register dst,
623 Register base,
624 int32_t payload_offset,
625 Register index,
626 ScaleFactor scale,
627 OperandSize sz = kFourBytes) override {
628 Load(dst, FieldAddress(base, index, scale, payload_offset), sz);
629 }
630 void Store(Register src,
631 const Address& address,
632 OperandSize sz = kFourBytes) override;
633 void Store(const Object& value, const Address& address);
634 void StoreZero(const Address& address, Register temp = kNoRegister) {
635 movl(address, Immediate(0));
636 }
637 void LoadFromStack(Register dst, intptr_t depth);
638 void StoreToStack(Register src, intptr_t depth);
639 void CompareToStack(Register src, intptr_t depth);
640 void LoadMemoryValue(Register dst, Register base, int32_t offset) {
641 movl(dst, Address(base, offset));
642 }
643 void StoreMemoryValue(Register src, Register base, int32_t offset) {
644 movl(Address(base, offset), src);
645 }
646
647 void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset) {
648 movsd(dst, Address(base, offset));
649 }
650 void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset) {
651 movsd(Address(base, offset), src);
652 }
653 void MoveUnboxedDouble(FpuRegister dst, FpuRegister src) {
654 if (src != dst) {
655 movaps(dst, src);
656 }
657 }
658
659 void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
660 movups(dst, Address(base, offset));
661 }
662 void StoreUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
663 movups(Address(base, offset), dst);
664 }
665 void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src) {
666 if (src != dst) {
667 movaps(dst, src);
668 }
669 }
670
671 void LoadAcquire(Register dst,
672 const Address& address,
673 OperandSize size = kFourBytes) override {
 674 // On Intel, loads have load-acquire behavior (i.e. loads are not re-ordered
 675 // with other loads).
676 Load(dst, address, size);
677#if defined(TARGET_USES_THREAD_SANITIZER)
678#error No support for TSAN on IA32.
679#endif
680 }
681 void StoreRelease(Register src,
682 const Address& address,
683 OperandSize size = kFourBytes) override {
 684 // On Intel, stores have store-release behavior (i.e. stores are not
 685 // re-ordered with other stores).
686 Store(src, address, size);
687#if defined(TARGET_USES_THREAD_SANITIZER)
688#error No support for TSAN on IA32.
689#endif
690 }
691
692 void CompareWithMemoryValue(Register value,
693 Address address,
694 OperandSize size = kFourBytes) override {
695 ASSERT_EQUAL(size, kFourBytes);
696 cmpl(value, address);
697 }
698
699 void ExtendValue(Register to, Register from, OperandSize sz) override;
700 void PushRegister(Register r);
701 void PopRegister(Register r);
702
703 void PushRegisterPair(Register r0, Register r1) {
704 PushRegister(r1);
705 PushRegister(r0);
706 }
707 void PopRegisterPair(Register r0, Register r1) {
708 PopRegister(r0);
709 PopRegister(r1);
710 }
711
712 void PushRegistersInOrder(std::initializer_list<Register> regs);
713
714 void AddImmediate(Register reg, const Immediate& imm);
715 void AddImmediate(Register reg, int32_t value) {
716 AddImmediate(reg, Immediate(value));
717 }
718 void AddImmediate(Register dest, Register src, int32_t value);
719 void AddRegisters(Register dest, Register src) { addl(dest, src); }
720 // [dest] = [src] << [scale] + [value].
721 void AddScaled(Register dest,
722 Register src,
723 ScaleFactor scale,
724 int32_t value) {
725 leal(dest, Address(src, scale, value));
726 }
727
728 void SubImmediate(Register reg, const Immediate& imm);
729 void SubRegisters(Register dest, Register src) { subl(dest, src); }
730 void MulImmediate(Register reg,
731 int32_t imm,
732 OperandSize width = kFourBytes) override {
733 ASSERT(width == kFourBytes);
734 if (Utils::IsPowerOfTwo(imm)) {
735 const intptr_t shift = Utils::ShiftForPowerOfTwo(imm);
736 shll(reg, Immediate(shift));
737 } else {
738 imull(reg, Immediate(imm));
739 }
740 }
741 void AndImmediate(Register dst, int32_t value) override {
742 andl(dst, Immediate(value));
743 }
744 void AndImmediate(Register dst, Register src, int32_t value) {
745 MoveRegister(dst, src);
746 andl(dst, Immediate(value));
747 }
748 void AndRegisters(Register dst,
749 Register src1,
750 Register src2 = kNoRegister) override;
751 void OrImmediate(Register dst, int32_t value) { orl(dst, Immediate(value)); }
752 void LslImmediate(Register dst, int32_t shift) {
753 shll(dst, Immediate(shift));
754 }
755 void LslRegister(Register dst, Register shift) override {
756 ASSERT_EQUAL(shift, ECX); // IA32 does not have a TMP.
757 shll(dst, shift);
758 }
759 void LsrImmediate(Register dst, int32_t shift) override {
760 shrl(dst, Immediate(shift));
761 }
762
763 void CompareImmediate(Register reg,
764 int32_t immediate,
765 OperandSize width = kFourBytes) override {
766 ASSERT_EQUAL(width, kFourBytes);
767 cmpl(reg, Immediate(immediate));
768 }
769
770 void LoadImmediate(Register reg, int32_t immediate) override {
771 if (immediate == 0) {
772 xorl(reg, reg);
773 } else {
774 movl(reg, Immediate(immediate));
775 }
776 }
777
778 void LoadImmediate(Register reg, Immediate immediate) {
779 LoadImmediate(reg, immediate.value());
780 }
781
782 void LoadSImmediate(XmmRegister dst, float value);
783 void LoadDImmediate(XmmRegister dst, double value);
784 void LoadQImmediate(XmmRegister dst, simd128_value_t value);
785
786 void Drop(intptr_t stack_elements);
787
788 void LoadIsolate(Register dst);
789 void LoadIsolateGroup(Register dst);
790
791 void LoadUniqueObject(Register dst, const Object& object) {
792 LoadObject(dst, object, /*movable_referent=*/true);
793 }
794
795 void LoadObject(Register dst,
796 const Object& object,
797 bool movable_referent = false);
798
799 // If 'object' is a large Smi, xor it with a per-assembler cookie value to
800 // prevent user-controlled immediates from appearing in the code stream.
801 void LoadObjectSafely(Register dst, const Object& object);
802
803 void PushObject(const Object& object);
804 void CompareObject(Register reg, const Object& object);
805
806 void StoreObjectIntoObjectNoBarrier(
807 Register object,
808 const Address& dest,
809 const Object& value,
810 MemoryOrder memory_order = kRelaxedNonAtomic,
811 OperandSize size = kFourBytes) override;
812
813 void StoreBarrier(Register object,
814 Register value,
815 CanBeSmi can_be_smi,
816 Register scratch) override;
817 void ArrayStoreBarrier(Register object,
818 Register slot,
819 Register value,
820 CanBeSmi can_be_smi,
821 Register scratch) override;
822 void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override;
823
824 // Stores a non-tagged value into a heap object.
825 void StoreInternalPointer(Register object,
826 const Address& dest,
827 Register value);
828
829 // Stores a Smi value into a heap object field that always contains a Smi.
830 void StoreIntoSmiField(const Address& dest, Register value);
831 void ZeroInitSmiField(const Address& dest);
832 // Increments a Smi field. Leaves flags in same state as an 'addl'.
833 void IncrementSmiField(const Address& dest, int32_t increment);
834
835 void DoubleNegate(XmmRegister d);
836 void FloatNegate(XmmRegister f);
837
838 void DoubleAbs(XmmRegister reg);
839
840 void LockCmpxchgl(const Address& address, Register reg) {
841 lock();
842 cmpxchgl(address, reg);
843 }
844
845 void EnterFrame(intptr_t frame_space);
846 void LeaveFrame();
847 void ReserveAlignedFrameSpace(intptr_t frame_space);
848
849 void MonomorphicCheckedEntryJIT();
850 void MonomorphicCheckedEntryAOT();
851 void BranchOnMonomorphicCheckedEntryJIT(Label* label);
852
853 void CombineHashes(Register dst, Register other) override;
854 void FinalizeHashForSize(intptr_t bit_size,
855 Register dst,
856 Register scratch = kNoRegister) override;
857
858 // In debug mode, this generates code to check that:
859 // FP + kExitLinkSlotFromEntryFp == SP
860 // or triggers breakpoint otherwise.
861 //
862 // Clobbers EAX.
863 void EmitEntryFrameVerification();
864
865 // Transitions safepoint and Thread state between generated and native code.
866 // Updates top-exit-frame info, VM tag and execution-state. Leaves/enters a
867 // safepoint.
868 //
 869 // Requires a temporary register 'tmp'.
 870 // Clobbers all non-CPU registers (e.g. XMM registers and the "FPU stack").
871 // However XMM0 is saved for convenience.
872 void TransitionGeneratedToNative(Register destination_address,
873 Register new_exit_frame,
874 Register new_exit_through_ffi,
875 bool enter_safepoint);
876 void TransitionNativeToGenerated(Register scratch,
877 bool exit_safepoint,
878 bool ignore_unwind_in_progress = false);
879 void EnterFullSafepoint(Register scratch);
880 void ExitFullSafepoint(Register scratch, bool ignore_unwind_in_progress);
881
 882 // For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope.
883 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
884
885 void Call(const Code& code,
886 bool movable_target = false,
887 CodeEntryKind entry_kind = CodeEntryKind::kNormal);
888 // Will not clobber any registers and can therefore be called with 5 live
889 // registers.
890 void CallVmStub(const Code& code);
891
892 void Call(Address target) { call(target); }
893
894 void CallCFunction(Address target) { Call(target); }
895
896 void CallCFunction(Register target) { call(target); }
897
898 void Jmp(const Code& code);
899 void J(Condition condition, const Code& code);
900
901 void RangeCheck(Register value,
902 Register temp,
903 intptr_t low,
904 intptr_t high,
905 RangeCheckCondition condition,
906 Label* target) override;
907
908 /*
909 * Loading and comparing classes of objects.
910 */
911 void LoadClassId(Register result, Register object);
912
913 void LoadClassById(Register result, Register class_id);
914
915 void CompareClassId(Register object, intptr_t class_id, Register scratch);
916
917 void LoadClassIdMayBeSmi(Register result, Register object);
918 void LoadTaggedClassIdMayBeSmi(Register result, Register object);
919 void EnsureHasClassIdInDEBUG(intptr_t cid,
920 Register src,
921 Register scratch,
922 bool can_be_null = false) override;
923
924 void SmiUntagOrCheckClass(Register object,
925 intptr_t class_id,
926 Register scratch,
927 Label* is_smi);
928
929 static bool AddressCanHoldConstantIndex(const Object& constant,
930 bool is_external,
931 intptr_t cid,
932 intptr_t index_scale);
933
934 static Address ElementAddressForIntIndex(bool is_external,
935 intptr_t cid,
936 intptr_t index_scale,
937 Register array,
938 intptr_t index,
939 intptr_t extra_disp = 0);
940
941 static Address ElementAddressForRegIndex(bool is_external,
942 intptr_t cid,
943 intptr_t index_scale,
944 bool index_unboxed,
945 Register array,
946 Register index,
947 intptr_t extra_disp = 0);
948
949 void LoadStaticFieldAddress(Register address,
950 Register field,
951 Register scratch) {
952 LoadFieldFromOffset(scratch, field,
953 target::Field::host_offset_or_field_id_offset());
954 const intptr_t field_table_offset =
955 compiler::target::Thread::field_table_values_offset();
956 LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
957 static_assert(kSmiTagShift == 1, "adjust scale factor");
958 leal(address, Address(address, scratch, TIMES_HALF_WORD_SIZE, 0));
959 }
960
961 void LoadFieldAddressForRegOffset(Register address,
962 Register instance,
963 Register offset_in_words_as_smi) override {
964 static_assert(kSmiTagShift == 1, "adjust scale factor");
965 leal(address, FieldAddress(instance, offset_in_words_as_smi, TIMES_2, 0));
966 }
967
968 void LoadFieldAddressForOffset(Register address,
969 Register instance,
970 int32_t offset) override {
971 leal(address, FieldAddress(instance, offset));
972 }
973
974 static Address VMTagAddress() {
975 return Address(THR, target::Thread::vm_tag_offset());
976 }
977
978 /*
979 * Misc. functionality
980 */
981 void SmiTag(Register reg) override { addl(reg, reg); }
982
983 void SmiUntag(Register reg) { sarl(reg, Immediate(kSmiTagSize)); }
984
985 // Truncates upper bits.
986 void LoadInt32FromBoxOrSmi(Register result, Register value) override {
987 if (result != value) {
988 MoveRegister(result, value);
989 value = result;
990 }
991 ASSERT(value == result);
992 compiler::Label done;
993 SmiUntag(result); // Leaves CF after SmiUntag.
994 j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
995 // Undo untagging by multiplying value by 2.
996 // [reg + reg + disp8] has a shorter encoding than [reg*2 + disp32]
997 COMPILE_ASSERT(kSmiTagShift == 1);
998 movl(result, compiler::Address(result, result, TIMES_1,
999 target::Mint::value_offset()));
1000 Bind(&done);
1001 }
1002
1003 void BranchIfNotSmi(Register reg,
1004 Label* label,
1005 JumpDistance distance = kFarJump) {
1006 testl(reg, Immediate(kSmiTagMask));
1007 j(NOT_ZERO, label, distance);
1008 }
1009
1010 void BranchIfSmi(Register reg,
1011 Label* label,
1012 JumpDistance distance = kFarJump) override {
1013 testl(reg, Immediate(kSmiTagMask));
1014 j(ZERO, label, distance);
1015 }
1016
1017 void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override;
1018 void CompareWords(Register reg1,
1019 Register reg2,
1020 intptr_t offset,
1021 Register count,
1022 Register temp,
1023 Label* equals) override;
1024
1025 void Align(intptr_t alignment, intptr_t offset);
1026 void Bind(Label* label) override;
1027 void Jump(Label* label, JumpDistance distance = kFarJump) {
1028 jmp(label, distance);
1029 }
1030 // Unconditional jump to a given address in register.
1031 void Jump(Register target) { jmp(target); }
1032
1033 // Moves one word from the memory at [from] to the memory at [to].
1034 // Needs a temporary register.
1035 void MoveMemoryToMemory(Address to, Address from, Register tmp);
1036
1037 // Set up a Dart frame on entry with a frame pointer and PC information to
1038 // enable easy access to the RawInstruction object of code corresponding
1039 // to this frame.
1040 // The dart frame layout is as follows:
1041 // ....
1042 // ret PC
1043 // saved EBP <=== EBP
1044 // pc (used to derive the RawInstruction Object of the dart code)
1045 // locals space <=== ESP
1046 // .....
1047 // This code sets this up with the sequence:
1048 // pushl ebp
1049 // movl ebp, esp
1050 // call L
1051 // L: <code to adjust saved pc if there is any intrinsification code>
1052 // .....
1053 void EnterDartFrame(intptr_t frame_size);
1054 void LeaveDartFrame();
1055
1056 // Set up a Dart frame for a function compiled for on-stack replacement.
1057 // The frame layout is a normal Dart frame, but the frame is partially set
1058 // up on entry (it is the frame of the unoptimized code).
1059 void EnterOsrFrame(intptr_t extra_size);
1060
1061 // Set up a stub frame so that the stack traversal code can easily identify
1062 // a stub frame.
1063 // The stub frame layout is as follows:
1064 // ....
1065 // ret PC
1066 // saved EBP
1067 // 0 (used to indicate frame is a stub frame)
1068 // .....
1069 // This code sets this up with the sequence:
1070 // pushl ebp
1071 // movl ebp, esp
1072 // pushl immediate(0)
1073 // .....
1074 void EnterStubFrame();
1075 void LeaveStubFrame();
1076 static constexpr intptr_t kEnterStubFramePushedWords = 2;
1077
1078 // Set up a frame for calling a C function.
1079 // Automatically save the pinned registers in Dart which are not callee-
1080 // saved in the native calling convention.
1081 // Use together with CallCFunction.
1082 void EnterCFrame(intptr_t frame_space);
1083 void LeaveCFrame();
1084
1085 // Instruction pattern from entrypoint is used in dart frame prologs
1086 // to set up the frame and save a PC which can be used to figure out the
1087 // RawInstruction object corresponding to the code running in the frame.
1088 // entrypoint:
1089 // pushl ebp (size is 1 byte)
1090 // movl ebp, esp (size is 2 bytes)
1091 // call L (size is 5 bytes)
1092 // L:
1093 static constexpr intptr_t kEntryPointToPcMarkerOffset = 8;
1094 static intptr_t EntryPointToPcMarkerOffset() {
1095 return kEntryPointToPcMarkerOffset;
1096 }
1097
1098 // If allocation tracing for |cid| is enabled, will jump to |trace| label,
1099 // which will allocate in the runtime where tracing occurs.
1100 void MaybeTraceAllocation(intptr_t cid,
1101 Label* trace,
1102 Register temp_reg,
1103 JumpDistance distance = JumpDistance::kFarJump);
1104
1105 void TryAllocateObject(intptr_t cid,
1106 intptr_t instance_size,
1107 Label* failure,
1108 JumpDistance distance,
1109 Register instance_reg,
1110 Register temp_reg) override;
1111
1112 void TryAllocateArray(intptr_t cid,
1113 intptr_t instance_size,
1114 Label* failure,
1115 JumpDistance distance,
1116 Register instance,
1117 Register end_address,
1118 Register temp);
1119
1120 void CheckAllocationCanary(Register top) {
1121#if defined(DEBUG)
1122 Label okay;
1123 cmpl(Address(top, 0), Immediate(kAllocationCanary));
1124 j(EQUAL, &okay, Assembler::kNearJump);
1125 Stop("Allocation canary");
1126 Bind(&okay);
1127#endif
1128 }
1129 void WriteAllocationCanary(Register top) {
1130#if defined(DEBUG)
1131 movl(Address(top, 0), Immediate(kAllocationCanary));
1132#endif
1133 }
1134
1135 // Copy [size] bytes from [src] address to [dst] address.
1136 // [size] should be a multiple of word size.
1137 // Clobbers [src], [dst], [size] and [temp] registers.
1138 // IA32 requires fixed registers for memory copying:
1139 // [src] = ESI, [dst] = EDI, [size] = ECX.
1140 void CopyMemoryWords(Register src,
1141 Register dst,
1142 Register size,
1143 Register temp = kNoRegister);
1144
1145 // Debugging and bringup support.
1146 void Breakpoint() override { int3(); }
1147
1148 // Check if the given value is an integer value that can be directly
1149 // embedded into the code without additional XORing with jit_cookie.
1150 // We consider 16-bit integers, powers of two and corresponding masks
1151 // as safe values that can be embedded into the code object.
1152 static bool IsSafeSmi(const Object& object) {
1153 if (!target::IsSmi(object)) {
1154 return false;
1155 }
1156 int64_t value;
1157 if (HasIntegerValue(object, &value)) {
1158 return Utils::IsInt(16, value) || Utils::IsPowerOfTwo(value) ||
1159 Utils::IsPowerOfTwo(value + 1);
1160 }
1161 return false;
1162 }
1163 static bool IsSafe(const Object& object) {
1164 return !target::IsSmi(object) || IsSafeSmi(object);
1165 }
1166
1167 Object& GetSelfHandle() const { return code_; }
1168
1169 void PushCodeObject();
1170
1171 private:
1172 void Alu(int bytes, uint8_t opcode, Register dst, Register src);
1173 void Alu(uint8_t modrm_opcode, Register dst, const Immediate& imm);
1174 void Alu(int bytes, uint8_t opcode, Register dst, const Address& src);
1175 void Alu(int bytes, uint8_t opcode, const Address& dst, Register src);
1176 void Alu(uint8_t modrm_opcode, const Address& dst, const Immediate& imm);
1177
1178 inline void EmitUint8(uint8_t value);
1179 inline void EmitInt32(int32_t value);
1180 inline void EmitRegisterOperand(int rm, int reg);
1181 inline void EmitXmmRegisterOperand(int rm, XmmRegister reg);
1182 inline void EmitFixup(AssemblerFixup* fixup);
1183 inline void EmitOperandSizeOverride();
1184
1185 void EmitOperand(int rm, const Operand& operand);
1186 void EmitImmediate(const Immediate& imm);
1187 void EmitComplex(int rm, const Operand& operand, const Immediate& immediate);
1188 void EmitLabel(Label* label, intptr_t instruction_size);
1189 void EmitLabelLink(Label* label);
1190 void EmitNearLabelLink(Label* label);
1191
1192 void EmitGenericShift(int rm, Register reg, const Immediate& imm);
1193 void EmitGenericShift(int rm, const Operand& operand, Register shifter);
1194
1195 int32_t jit_cookie();
1196
1197 int32_t jit_cookie_;
1198 Object& code_;
1199
1201 DISALLOW_COPY_AND_ASSIGN(Assembler);
1202};
1203
1204inline void Assembler::EmitUint8(uint8_t value) {
1205 buffer_.Emit<uint8_t>(value);
1206}
1207
1208inline void Assembler::EmitInt32(int32_t value) {
1209 buffer_.Emit<int32_t>(value);
1210}
1211
1212inline void Assembler::EmitRegisterOperand(int rm, int reg) {
1213 ASSERT(rm >= 0 && rm < 8);
1214 buffer_.Emit<uint8_t>(0xC0 + (rm << 3) + reg);
1215}
1216
1217inline void Assembler::EmitXmmRegisterOperand(int rm, XmmRegister reg) {
1218 EmitRegisterOperand(rm, static_cast<Register>(reg));
1219}
1220
1221inline void Assembler::EmitFixup(AssemblerFixup* fixup) {
1222 buffer_.EmitFixup(fixup);
1223}
1224
1225inline void Assembler::EmitOperandSizeOverride() {
1226 EmitUint8(0x66);
1227}
1228
1229} // namespace compiler
1230} // namespace dart
1231
1232#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_IA32_H_
Align
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
Definition DM.cpp:263
int count
static const double J
#define EQUAL(field)
static bool equals(T *a, T *b)
#define ALU_OPS(F)
#define DECLARE_ALU(op, opcode, opcode2, modrm_opcode)
#define ASSERT_EQUAL(expected, actual)
Definition assert.h:309
#define COMPILE_ASSERT(expr)
Definition assert.h:339
#define ASSERT(E)
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE auto & d
Definition main.cc:19
VkInstance instance
Definition main.cc:48
static bool b
struct MyStruct a[10]
uint8_t value
GAsyncResult * result
uint32_t * target
int argument_count
Definition fuchsia.cc:52
bool HasIntegerValue(const dart::Object &object, int64_t *value)
uint32_t CombineHashes(uint32_t hash, uint32_t other_hash)
Definition hash.h:12
call(args)
Definition dom.py:159
SIN Vec< N, uint16_t > mull(const Vec< N, uint8_t > &x, const Vec< N, uint8_t > &y)
Definition SkVx.h:906
Definition ref_ptr.h:256
SkScalar w
#define DISALLOW_ALLOCATION()
Definition globals.h:604
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition globals.h:581
int32_t width
const Scalar scale
Point offset

◆ DECLARE_ALU

#define DECLARE_ALU (   op,
  opcode,
  opcode2,
  modrm_opcode 
)
Value:
void op##l(Register dst, Register src) { Alu(4, opcode, dst, src); } \
void op##w(Register dst, Register src) { Alu(2, opcode, dst, src); } \
void op##l(Register dst, const Address& src) { Alu(4, opcode, dst, src); } \
void op##w(Register dst, const Address& src) { Alu(2, opcode, dst, src); } \
void op##l(const Address& dst, Register src) { Alu(4, opcode2, dst, src); } \
void op##w(const Address& dst, Register src) { Alu(2, opcode2, dst, src); } \
void op##l(Register dst, const Immediate& imm) { \
Alu(modrm_opcode, dst, imm); \
} \
void op##l(const Address& dst, const Immediate& imm) { \
Alu(modrm_opcode, dst, imm); \
}

Definition at line 486 of file assembler_ia32.h.

487 { Alu(4, opcode, dst, src); } \
488 void op##w(Register dst, Register src) { Alu(2, opcode, dst, src); } \
489 void op##l(Register dst, const Address& src) { Alu(4, opcode, dst, src); } \
490 void op##w(Register dst, const Address& src) { Alu(2, opcode, dst, src); } \
491 void op##l(const Address& dst, Register src) { Alu(4, opcode2, dst, src); } \
492 void op##w(const Address& dst, Register src) { Alu(2, opcode2, dst, src); } \
493 void op##l(Register dst, const Immediate& imm) { \
494 Alu(modrm_opcode, dst, imm); \
495 } \
496 void op##l(const Address& dst, const Immediate& imm) { \
497 Alu(modrm_opcode, dst, imm); \
498 }