Rewrite use/def masks to support 128 bits.
Reduce LIR memory usage by holding masks as pointers in the
LIR rather than storing them directly, and by using pre-defined
const masks for the common cases, allocating very few on the arena.
Change-Id: I0f6d27ef6867acd157184c8c74f9612cebfe6c16
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 6a6b0f6..01afc99 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -102,17 +102,14 @@
#define A64_REG_IS_SP(reg_num) ((reg_num) == rwsp || (reg_num) == rsp)
#define A64_REG_IS_ZR(reg_num) ((reg_num) == rwzr || (reg_num) == rxzr)
-enum ArmResourceEncodingPos {
- kArmGPReg0 = 0,
- kArmRegLR = 30,
- kArmRegSP = 31,
- kArmFPReg0 = 32,
- kArmRegEnd = 64,
+enum Arm64ResourceEncodingPos {
+ kArm64GPReg0 = 0,
+ kArm64RegLR = 30,
+ kArm64RegSP = 31,
+ kArm64FPReg0 = 32,
+ kArm64RegEnd = 64,
};
-#define ENCODE_ARM_REG_SP (1ULL << kArmRegSP)
-#define ENCODE_ARM_REG_LR (1ULL << kArmRegLR)
-
#define IS_SIGNED_IMM(size, value) \
((value) >= -(1 << ((size) - 1)) && (value) < (1 << ((size) - 1)))
#define IS_SIGNED_IMM7(value) IS_SIGNED_IMM(7, value)