Rewrite use/def masks to support 128 bits.

Reduce LIR memory usage by holding the use/def masks in the LIR
by pointer rather than by value, using pre-defined const masks
for the common cases so that very few masks need to be allocated
on the arena.

Change-Id: I0f6d27ef6867acd157184c8c74f9612cebfe6c16
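
For orientation, a minimal sketch of the kind of two-word mask type and
pointer-caching scheme this change introduces follows. The real types are
ResourceMask and its cache in dex/quick/resource_mask.h/.cc (added by this
change); the names and exact semantics below are illustrative assumptions
only, not the actual implementation.

  #include <cstdint>
  #include <cstddef>

  // Sketch: a 128-bit resource mask stored as two 64-bit words.
  class ResourceMask {
   public:
    constexpr ResourceMask() : masks_{0u, 0u} {}
    constexpr ResourceMask(uint64_t lo, uint64_t hi) : masks_{lo, hi} {}

    // Mask with a single bit set, e.g. a register or an abstract resource.
    static constexpr ResourceMask Bit(size_t bit) {
      return ResourceMask(bit < 64u ? UINT64_C(1) << bit : 0u,
                          bit >= 64u ? UINT64_C(1) << (bit - 64u) : 0u);
    }

    void SetBit(size_t bit) { masks_[bit / 64u] |= UINT64_C(1) << (bit % 64u); }
    bool HasBit(size_t bit) const {
      return (masks_[bit / 64u] & (UINT64_C(1) << (bit % 64u))) != 0u;
    }
    bool Intersects(const ResourceMask& other) const {
      return (masks_[0] & other.masks_[0]) != 0u || (masks_[1] & other.masks_[1]) != 0u;
    }
    bool Equals(const ResourceMask& other) const {
      return masks_[0] == other.masks_[0] && masks_[1] == other.masks_[1];
    }
    constexpr ResourceMask Union(const ResourceMask& other) const {
      return ResourceMask(masks_[0] | other.masks_[0], masks_[1] | other.masks_[1]);
    }

   private:
    uint64_t masks_[2];
  };

  // Sketch of the caching idea: the LIR holds `const ResourceMask*`, common
  // values resolve to shared pre-defined constants, and only the remaining
  // masks get allocated (on the compiler arena in the real code).
  const ResourceMask kNoneSketch;                             // no bits set
  const ResourceMask kAllSketch(~UINT64_C(0), ~UINT64_C(0));  // all bits set

  const ResourceMask* GetCachedMaskSketch(const ResourceMask& mask) {
    if (mask.Equals(kNoneSketch)) {
      return &kNoneSketch;
    }
    if (mask.Equals(kAllSketch)) {
      return &kAllSketch;
    }
    return new ResourceMask(mask);  // Stand-in for the arena allocation.
  }

With masks shared this way, a typical LIR pays for two pointers instead of
two inline 64-bit (now 128-bit) values, and identical masks are stored once.
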
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 9a868fc..4f9f312 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -48,6 +48,7 @@
 	dex/quick/mips/utility_mips.cc \
 	dex/quick/mir_to_lir.cc \
 	dex/quick/ralloc_util.cc \
+	dex/quick/resource_mask.cc \
 	dex/quick/x86/assemble_x86.cc \
 	dex/quick/x86/call_x86.cc \
 	dex/quick/x86/fp_x86.cc \
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index f0b4787..55a4c78 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -82,22 +82,6 @@
   kDead,
 };
 
-/*
- * Def/Use encoding in 64-bit use_mask/def_mask.  Low positions used for target-specific
- * registers (and typically use the register number as the position).  High positions
- * reserved for common and abstract resources.
- */
-
-enum ResourceEncodingPos {
-  kMustNotAlias = 63,
-  kHeapRef = 62,          // Default memory reference type.
-  kLiteral = 61,          // Literal pool memory reference.
-  kDalvikReg = 60,        // Dalvik v_reg memory reference.
-  kFPStatus = 59,
-  kCCode = 58,
-  kLowestCommonResource = kCCode
-};
-
 // Shared pseudo opcodes - must be < 0.
 enum LIRPseudoOpcode {
   kPseudoExportedPC = -16,
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index e32e7cb..6272555 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -109,12 +109,6 @@
   kArmRegEnd   = 48,
 };
 
-#define ENCODE_ARM_REG_LIST(N)      (static_cast<uint64_t>(N))
-#define ENCODE_ARM_REG_SP           (1ULL << kArmRegSP)
-#define ENCODE_ARM_REG_LR           (1ULL << kArmRegLR)
-#define ENCODE_ARM_REG_PC           (1ULL << kArmRegPC)
-#define ENCODE_ARM_REG_FPCS_LIST(N) (static_cast<uint64_t>(N) << kArmFPReg16)
-
 enum ArmNativeRegisterPool {
   r0           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
   r1           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index db0731f..5466abd 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -87,7 +87,7 @@
   tab_rec->anchor = switch_branch;
   // Needs to use setflags encoding here
   OpRegRegImm(kOpSub, r_idx, r_idx, 1);  // For value == 1, this should set flags.
-  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
   OpCondBranch(kCondNe, target);
 }
 
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 9c801a5..c977a23 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -63,7 +63,7 @@
     RegLocation LocCReturnDouble();
     RegLocation LocCReturnFloat();
     RegLocation LocCReturnWide();
-    uint64_t GetRegMaskCommon(RegStorage reg);
+    ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
     void AdjustSpillMask();
     void ClobberCallerSave();
     void FreeCallTemps();
@@ -79,12 +79,13 @@
     int AssignInsnOffsets();
     void AssignOffsets();
     static uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
-    void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-    void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
+    void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+    void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                  ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
     const char* GetTargetInstFmt(int opcode);
     const char* GetTargetInstName(int opcode);
     std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-    uint64_t GetPCUseDefEncoding();
+    ResourceMask GetPCUseDefEncoding() const OVERRIDE;
     uint64_t GetTargetInstFlags(int opcode);
     int GetInsnSize(LIR* lir);
     bool IsUnconditionalBranch(LIR* lir);
@@ -217,6 +218,10 @@
     bool GetEasyMultiplyOp(int lit, EasyMultiplyOp* op);
     bool GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops);
     void GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops);
+
+    static constexpr ResourceMask GetRegMaskArm(RegStorage reg);
+    static constexpr ResourceMask EncodeArmRegList(int reg_list);
+    static constexpr ResourceMask EncodeArmRegFpcsList(int reg_list);
 };
 
 }  // namespace art
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 4732e52..916c528 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -224,13 +224,13 @@
     bool cheap_false_val = InexpensiveConstantInt(false_val);
     if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
       OpRegRegImm(kOpSub, rl_result.reg, rl_src.reg, -true_val);
-      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+      DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
       LIR* it = OpIT(true_val == 0 ? kCondNe : kCondUge, "");
       LoadConstant(rl_result.reg, false_val);
       OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
     } else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
       OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, 1);
-      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+      DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
       LIR* it = OpIT(kCondLs, "");
       LoadConstant(rl_result.reg, false_val);
       OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
@@ -882,14 +882,14 @@
     }
     FreeTemp(r_tmp_high);  // Now unneeded
 
-    DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+    DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
     it = OpIT(kCondEq, "T");
     NewLIR4(kThumb2Strexd /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetLowReg(), rl_new_value.reg.GetHighReg(), r_ptr.GetReg());
 
   } else {
     NewLIR3(kThumb2Ldrex, r_tmp.GetReg(), r_ptr.GetReg(), 0);
     OpRegReg(kOpSub, r_tmp, rl_expected.reg);
-    DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+    DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
     it = OpIT(kCondEq, "T");
     NewLIR4(kThumb2Strex /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
   }
@@ -907,7 +907,7 @@
   // result := (tmp1 != 0) ? 0 : 1;
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
-  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
   it = OpIT(kCondUlt, "");
   LoadConstant(rl_result.reg, 0); /* cc */
   FreeTemp(r_tmp);  // Now unneeded.
@@ -971,7 +971,7 @@
 LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
   // Combine sub & test using sub setflags encoding here
   OpRegRegImm(kOpSub, reg, reg, 1);  // For value == 1, this should set flags.
-  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
   return OpCondBranch(c_code, target);
 }
 
@@ -1004,7 +1004,7 @@
 
   // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
   DCHECK(!barrier->flags.use_def_invalid);
-  barrier->u.m.def_mask = ENCODE_ALL;
+  barrier->u.m.def_mask = &kEncodeAll;
   return ret;
 #else
   return false;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 3b30cde..e1e2d5b 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -135,30 +135,32 @@
 /*
  * Decode the register id.
  */
-uint64_t ArmMir2Lir::GetRegMaskCommon(RegStorage reg) {
-  uint64_t seed;
-  int shift;
-  int reg_id = reg.GetRegNum();
-  /* Each double register is equal to a pair of single-precision FP registers */
-  if (reg.IsDouble()) {
-    seed = 0x3;
-    reg_id = reg_id << 1;
-  } else {
-    seed = 1;
-  }
-  /* FP register starts at bit position 16 */
-  shift = reg.IsFloat() ? kArmFPReg0 : 0;
-  /* Expand the double register id into single offset */
-  shift += reg_id;
-  return (seed << shift);
+ResourceMask ArmMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
+  return GetRegMaskArm(reg);
 }
 
-uint64_t ArmMir2Lir::GetPCUseDefEncoding() {
-  return ENCODE_ARM_REG_PC;
+constexpr ResourceMask ArmMir2Lir::GetRegMaskArm(RegStorage reg) {
+  return reg.IsDouble()
+      /* Each double register is equal to a pair of single-precision FP registers */
+      ? ResourceMask::TwoBits(reg.GetRegNum() * 2 + kArmFPReg0)
+      : ResourceMask::Bit(reg.IsSingle() ? reg.GetRegNum() + kArmFPReg0 : reg.GetRegNum());
+}
+
+constexpr ResourceMask ArmMir2Lir::EncodeArmRegList(int reg_list) {
+  return ResourceMask::RawMask(static_cast<uint64_t>(reg_list), 0u);
+}
+
+constexpr ResourceMask ArmMir2Lir::EncodeArmRegFpcsList(int reg_list) {
+  return ResourceMask::RawMask(static_cast<uint64_t>(reg_list) << kArmFPReg16, 0u);
+}
+
+ResourceMask ArmMir2Lir::GetPCUseDefEncoding() const {
+  return ResourceMask::Bit(kArmRegPC);
 }
 
 // Thumb2 specific setup.  TODO: inline?:
-void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
+void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                          ResourceMask* use_mask, ResourceMask* def_mask) {
   DCHECK_EQ(cu_->instruction_set, kThumb2);
   DCHECK(!lir->flags.use_def_invalid);
 
@@ -169,70 +171,70 @@
                 REG_DEF_FPCS_LIST0 | REG_DEF_FPCS_LIST2 | REG_USE_PC | IS_IT | REG_USE_LIST0 |
                 REG_USE_LIST1 | REG_USE_FPCS_LIST0 | REG_USE_FPCS_LIST2 | REG_DEF_LR)) != 0) {
     if (flags & REG_DEF_SP) {
-      lir->u.m.def_mask |= ENCODE_ARM_REG_SP;
+      def_mask->SetBit(kArmRegSP);
     }
 
     if (flags & REG_USE_SP) {
-      lir->u.m.use_mask |= ENCODE_ARM_REG_SP;
+      use_mask->SetBit(kArmRegSP);
     }
 
     if (flags & REG_DEF_LIST0) {
-      lir->u.m.def_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+      def_mask->SetBits(EncodeArmRegList(lir->operands[0]));
     }
 
     if (flags & REG_DEF_LIST1) {
-      lir->u.m.def_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+      def_mask->SetBits(EncodeArmRegList(lir->operands[1]));
     }
 
     if (flags & REG_DEF_FPCS_LIST0) {
-      lir->u.m.def_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+      def_mask->SetBits(EncodeArmRegFpcsList(lir->operands[0]));
     }
 
     if (flags & REG_DEF_FPCS_LIST2) {
       for (int i = 0; i < lir->operands[2]; i++) {
-        SetupRegMask(&lir->u.m.def_mask, lir->operands[1] + i);
+        SetupRegMask(def_mask, lir->operands[1] + i);
       }
     }
 
     if (flags & REG_USE_PC) {
-      lir->u.m.use_mask |= ENCODE_ARM_REG_PC;
+      use_mask->SetBit(kArmRegPC);
     }
 
     /* Conservatively treat the IT block */
     if (flags & IS_IT) {
-      lir->u.m.def_mask = ENCODE_ALL;
+      *def_mask = kEncodeAll;
     }
 
     if (flags & REG_USE_LIST0) {
-      lir->u.m.use_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+      use_mask->SetBits(EncodeArmRegList(lir->operands[0]));
     }
 
     if (flags & REG_USE_LIST1) {
-      lir->u.m.use_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+      use_mask->SetBits(EncodeArmRegList(lir->operands[1]));
     }
 
     if (flags & REG_USE_FPCS_LIST0) {
-      lir->u.m.use_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+      use_mask->SetBits(EncodeArmRegFpcsList(lir->operands[0]));
     }
 
     if (flags & REG_USE_FPCS_LIST2) {
       for (int i = 0; i < lir->operands[2]; i++) {
-        SetupRegMask(&lir->u.m.use_mask, lir->operands[1] + i);
+        SetupRegMask(use_mask, lir->operands[1] + i);
       }
     }
     /* Fixup for kThumbPush/lr and kThumbPop/pc */
     if (opcode == kThumbPush || opcode == kThumbPop) {
-      uint64_t r8Mask = GetRegMaskCommon(rs_r8);
-      if ((opcode == kThumbPush) && (lir->u.m.use_mask & r8Mask)) {
-        lir->u.m.use_mask &= ~r8Mask;
-        lir->u.m.use_mask |= ENCODE_ARM_REG_LR;
-      } else if ((opcode == kThumbPop) && (lir->u.m.def_mask & r8Mask)) {
-        lir->u.m.def_mask &= ~r8Mask;
-        lir->u.m.def_mask |= ENCODE_ARM_REG_PC;
+      constexpr ResourceMask r8Mask = GetRegMaskArm(rs_r8);
+      if ((opcode == kThumbPush) && (use_mask->Intersects(r8Mask))) {
+        use_mask->ClearBits(r8Mask);
+        use_mask->SetBit(kArmRegLR);
+      } else if ((opcode == kThumbPop) && (def_mask->Intersects(r8Mask))) {
+        def_mask->ClearBits(r8Mask);
+        def_mask->SetBit(kArmRegPC);
       }
     }
     if (flags & REG_DEF_LR) {
-      lir->u.m.def_mask |= ENCODE_ARM_REG_LR;
+      def_mask->SetBit(kArmRegLR);
     }
   }
 }
@@ -486,44 +488,44 @@
   return buf;
 }
 
-void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix) {
+void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, const ResourceMask& mask, const char* prefix) {
   char buf[256];
   buf[0] = 0;
 
-  if (mask == ENCODE_ALL) {
+  if (mask.Equals(kEncodeAll)) {
     strcpy(buf, "all");
   } else {
     char num[8];
     int i;
 
     for (i = 0; i < kArmRegEnd; i++) {
-      if (mask & (1ULL << i)) {
+      if (mask.HasBit(i)) {
         snprintf(num, arraysize(num), "%d ", i);
         strcat(buf, num);
       }
     }
 
-    if (mask & ENCODE_CCODE) {
+    if (mask.HasBit(ResourceMask::kCCode)) {
       strcat(buf, "cc ");
     }
-    if (mask & ENCODE_FP_STATUS) {
+    if (mask.HasBit(ResourceMask::kFPStatus)) {
       strcat(buf, "fpcc ");
     }
 
     /* Memory bits */
-    if (arm_lir && (mask & ENCODE_DALVIK_REG)) {
+    if (arm_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
       snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
                DECODE_ALIAS_INFO_REG(arm_lir->flags.alias_info),
                DECODE_ALIAS_INFO_WIDE(arm_lir->flags.alias_info) ? "(+1)" : "");
     }
-    if (mask & ENCODE_LITERAL) {
+    if (mask.HasBit(ResourceMask::kLiteral)) {
       strcat(buf, "lit ");
     }
 
-    if (mask & ENCODE_HEAP_REF) {
+    if (mask.HasBit(ResourceMask::kHeapRef)) {
       strcat(buf, "heap ");
     }
-    if (mask & ENCODE_MUST_NOT_ALIAS) {
+    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
       strcat(buf, "noalias ");
     }
   }
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 86d32f4..92781b5 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -87,9 +87,9 @@
   if (data_target == NULL) {
     data_target = AddWordData(&literal_list_, value);
   }
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
                           r_dest, rs_r15pc.GetReg(), 0, 0, 0, data_target);
-  SetMemRefType(load_pc_rel, true, kLiteral);
   AppendLIR(load_pc_rel);
   return load_pc_rel;
 }
@@ -670,6 +670,7 @@
     if (data_target == NULL) {
       data_target = AddWideData(&literal_list_, val_lo, val_hi);
     }
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
     if (r_dest.IsFloat()) {
       res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
                    r_dest.GetReg(), rs_r15pc.GetReg(), 0, 0, 0, data_target);
@@ -678,7 +679,6 @@
       res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
                    r_dest.GetLowReg(), r_dest.GetHighReg(), rs_r15pc.GetReg(), 0, 0, data_target);
     }
-    SetMemRefType(res, true, kLiteral);
     AppendLIR(res);
   }
   return res;
@@ -946,7 +946,8 @@
   }
 
   // TODO: in future may need to differentiate Dalvik accesses w/ spills
-  if (r_base == rs_rARM_SP) {
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK(r_base == rs_rARM_SP);
     AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
   }
   return load;
@@ -1085,7 +1086,8 @@
   }
 
   // TODO: In future, may need to differentiate Dalvik & spill accesses
-  if (r_base == rs_rARM_SP) {
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK(r_base == rs_rARM_SP);
     AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
   }
   return store;
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 6a6b0f6..01afc99 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -102,17 +102,14 @@
 #define A64_REG_IS_SP(reg_num) ((reg_num) == rwsp || (reg_num) == rsp)
 #define A64_REG_IS_ZR(reg_num) ((reg_num) == rwzr || (reg_num) == rxzr)
 
-enum ArmResourceEncodingPos {
-  kArmGPReg0   = 0,
-  kArmRegLR    = 30,
-  kArmRegSP    = 31,
-  kArmFPReg0   = 32,
-  kArmRegEnd   = 64,
+enum Arm64ResourceEncodingPos {
+  kArm64GPReg0   = 0,
+  kArm64RegLR    = 30,
+  kArm64RegSP    = 31,
+  kArm64FPReg0   = 32,
+  kArm64RegEnd   = 64,
 };
 
-#define ENCODE_ARM_REG_SP           (1ULL << kArmRegSP)
-#define ENCODE_ARM_REG_LR           (1ULL << kArmRegLR)
-
 #define IS_SIGNED_IMM(size, value) \
   ((value) >= -(1 << ((size) - 1)) && (value) < (1 << ((size) - 1)))
 #define IS_SIGNED_IMM7(value) IS_SIGNED_IMM(7, value)
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 21db771..75e24fe 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -63,7 +63,7 @@
     RegLocation LocCReturnDouble();
     RegLocation LocCReturnFloat();
     RegLocation LocCReturnWide();
-    uint64_t GetRegMaskCommon(RegStorage reg);
+    ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
     void AdjustSpillMask();
     void ClobberCallerSave();
     void FreeCallTemps();
@@ -78,12 +78,13 @@
     int AssignInsnOffsets();
     void AssignOffsets();
     uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
-    void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-    void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
+    void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+    void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                  ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
     const char* GetTargetInstFmt(int opcode);
     const char* GetTargetInstName(int opcode);
     std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-    uint64_t GetPCUseDefEncoding();
+    ResourceMask GetPCUseDefEncoding() const OVERRIDE;
     uint64_t GetTargetInstFlags(int opcode);
     int GetInsnSize(LIR* lir);
     bool IsUnconditionalBranch(LIR* lir);
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 0a76b9b..1ad0435 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -572,7 +572,7 @@
   } else {
     NewLIR3(kA64Ldxr2rX, r_tmp.GetReg(), r_ptr.GetReg(), 0);
     OpRegReg(kOpSub, r_tmp, rl_expected.reg);
-    DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+    DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
     // OpIT(kCondEq, "T");
     NewLIR4(kA64Stxr3wrX /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
   }
@@ -588,7 +588,7 @@
   // result := (tmp1 != 0) ? 0 : 1;
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
-  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
   // OpIT(kCondUlt, "");
   LoadConstant(rl_result.reg, 0); /* cc */
   FreeTemp(r_tmp);  // Now unneeded.
@@ -640,7 +640,7 @@
 LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
   // Combine sub & test using sub setflags encoding here
   OpRegRegImm(kOpSub, reg, reg, 1);  // For value == 1, this should set flags.
-  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
   return OpCondBranch(c_code, target);
 }
 
@@ -673,7 +673,7 @@
 
   // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
   DCHECK(!barrier->flags.use_def_invalid);
-  barrier->u.m.def_mask = ENCODE_ALL;
+  barrier->u.m.def_mask = &kEncodeAll;
   return ret;
 #else
   return false;
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 439dc8c..e2846ae 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -139,41 +139,43 @@
 /*
  * Decode the register id. This routine makes assumptions on the encoding made by RegStorage.
  */
-uint64_t Arm64Mir2Lir::GetRegMaskCommon(RegStorage reg) {
+ResourceMask Arm64Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
   // TODO(Arm64): this function depends too much on the internal RegStorage encoding. Refactor.
 
-  int reg_raw = reg.GetRawBits();
   // Check if the shape mask is zero (i.e. invalid).
   if (UNLIKELY(reg == rs_wzr || reg == rs_xzr)) {
     // The zero register is not a true register. It is just an immediate zero.
-    return 0;
+    return kEncodeNone;
   }
 
-  return UINT64_C(1) << (reg_raw & RegStorage::kRegTypeMask);
+  return ResourceMask::Bit(
+      // FP register starts at bit position 32.
+      (reg.IsFloat() ? kArm64FPReg0 : 0) + reg.GetRegNum());
 }
 
-uint64_t Arm64Mir2Lir::GetPCUseDefEncoding() {
+ResourceMask Arm64Mir2Lir::GetPCUseDefEncoding() const {
   LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for Arm64";
-  return 0ULL;
+  return kEncodeNone;
 }
 
 // Arm64 specific setup.  TODO: inline?:
-void Arm64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
+void Arm64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                            ResourceMask* use_mask, ResourceMask* def_mask) {
   DCHECK_EQ(cu_->instruction_set, kArm64);
   DCHECK(!lir->flags.use_def_invalid);
 
   // These flags are somewhat uncommon - bypass if we can.
   if ((flags & (REG_DEF_SP | REG_USE_SP | REG_DEF_LR)) != 0) {
     if (flags & REG_DEF_SP) {
-      lir->u.m.def_mask |= ENCODE_ARM_REG_SP;
+      def_mask->SetBit(kArm64RegSP);
     }
 
     if (flags & REG_USE_SP) {
-      lir->u.m.use_mask |= ENCODE_ARM_REG_SP;
+      use_mask->SetBit(kArm64RegSP);
     }
 
     if (flags & REG_DEF_LR) {
-      lir->u.m.def_mask |= ENCODE_ARM_REG_LR;
+      def_mask->SetBit(kArm64RegLR);
     }
   }
 }
@@ -510,44 +512,44 @@
   return buf;
 }
 
-void Arm64Mir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix) {
+void Arm64Mir2Lir::DumpResourceMask(LIR* arm_lir, const ResourceMask& mask, const char* prefix) {
   char buf[256];
   buf[0] = 0;
 
-  if (mask == ENCODE_ALL) {
+  if (mask.Equals(kEncodeAll)) {
     strcpy(buf, "all");
   } else {
     char num[8];
     int i;
 
-    for (i = 0; i < kArmRegEnd; i++) {
-      if (mask & (1ULL << i)) {
+    for (i = 0; i < kArm64RegEnd; i++) {
+      if (mask.HasBit(i)) {
         snprintf(num, arraysize(num), "%d ", i);
         strcat(buf, num);
       }
     }
 
-    if (mask & ENCODE_CCODE) {
+    if (mask.HasBit(ResourceMask::kCCode)) {
       strcat(buf, "cc ");
     }
-    if (mask & ENCODE_FP_STATUS) {
+    if (mask.HasBit(ResourceMask::kFPStatus)) {
       strcat(buf, "fpcc ");
     }
 
     /* Memory bits */
-    if (arm_lir && (mask & ENCODE_DALVIK_REG)) {
+    if (arm_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
       snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
                DECODE_ALIAS_INFO_REG(arm_lir->flags.alias_info),
                DECODE_ALIAS_INFO_WIDE(arm_lir->flags.alias_info) ? "(+1)" : "");
     }
-    if (mask & ENCODE_LITERAL) {
+    if (mask.HasBit(ResourceMask::kLiteral)) {
       strcat(buf, "lit ");
     }
 
-    if (mask & ENCODE_HEAP_REF) {
+    if (mask.HasBit(ResourceMask::kHeapRef)) {
       strcat(buf, "heap ");
     }
-    if (mask & ENCODE_MUST_NOT_ALIAS) {
+    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
       strcat(buf, "noalias ");
     }
   }
@@ -850,6 +852,8 @@
     return;
   }
 
+  // Handle dalvik registers.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
   for (int i = 0; i < cu_->num_ins; i++) {
     PromotionMap* v_map = &promotion_map_[start_vreg + i];
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 4f0d7bc..ab5014f 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -102,9 +102,9 @@
     data_target = AddWordData(&literal_list_, value);
   }
 
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kA64Ldr2fp,
                             r_dest, 0, 0, 0, 0, data_target);
-  SetMemRefType(load_pc_rel, true, kLiteral);
   AppendLIR(load_pc_rel);
   return load_pc_rel;
 }
@@ -129,9 +129,9 @@
   }
 
   DCHECK(RegStorage::IsFloat(r_dest));
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   LIR* load_pc_rel = RawLIR(current_dalvik_offset_, FWIDE(kA64Ldr2fp),
                             r_dest, 0, 0, 0, 0, data_target);
-  SetMemRefType(load_pc_rel, true, kLiteral);
   AppendLIR(load_pc_rel);
   return load_pc_rel;
 }
@@ -683,9 +683,9 @@
       data_target = AddWideData(&literal_list_, val_lo, val_hi);
     }
 
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
     LIR* res = RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp),
                       r_dest.GetReg(), 0, 0, 0, 0, data_target);
-    SetMemRefType(res, true, kLiteral);
     AppendLIR(res);
     return res;
   }
@@ -905,7 +905,8 @@
   }
 
   // TODO: in future may need to differentiate Dalvik accesses w/ spills
-  if (r_base == rs_rA64_SP) {
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK(r_base == rs_rA64_SP);
     AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
   }
   return load;
@@ -986,7 +987,8 @@
   }
 
   // TODO: In future, may need to differentiate Dalvik & spill accesses.
-  if (r_base == rs_rA64_SP) {
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK(r_base == rs_rA64_SP);
     AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
   }
   return store;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 3fbbc4e..ec0fb43 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -74,9 +74,9 @@
 
 void Mir2Lir::MarkSafepointPC(LIR* inst) {
   DCHECK(!inst->flags.use_def_invalid);
-  inst->u.m.def_mask = ENCODE_ALL;
+  inst->u.m.def_mask = &kEncodeAll;
   LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
-  DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL);
+  DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
 }
 
 /* Remove a LIR from the list. */
@@ -108,37 +108,40 @@
 }
 
 void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
-  uint64_t *mask_ptr;
-  uint64_t mask = ENCODE_MEM;
   DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
   DCHECK(!lir->flags.use_def_invalid);
+  // TODO: Avoid the extra Arena allocation!
+  const ResourceMask** mask_ptr;
+  ResourceMask mask;
   if (is_load) {
     mask_ptr = &lir->u.m.use_mask;
   } else {
     mask_ptr = &lir->u.m.def_mask;
   }
+  mask = **mask_ptr;
   /* Clear out the memref flags */
-  *mask_ptr &= ~mask;
+  mask.ClearBits(kEncodeMem);
   /* ..and then add back the one we need */
   switch (mem_type) {
-    case kLiteral:
+    case ResourceMask::kLiteral:
       DCHECK(is_load);
-      *mask_ptr |= ENCODE_LITERAL;
+      mask.SetBit(ResourceMask::kLiteral);
       break;
-    case kDalvikReg:
-      *mask_ptr |= ENCODE_DALVIK_REG;
+    case ResourceMask::kDalvikReg:
+      mask.SetBit(ResourceMask::kDalvikReg);
       break;
-    case kHeapRef:
-      *mask_ptr |= ENCODE_HEAP_REF;
+    case ResourceMask::kHeapRef:
+      mask.SetBit(ResourceMask::kHeapRef);
       break;
-    case kMustNotAlias:
+    case ResourceMask::kMustNotAlias:
       /* Currently only loads can be marked as kMustNotAlias */
       DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
-      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
+      mask.SetBit(ResourceMask::kMustNotAlias);
       break;
     default:
       LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
   }
+  *mask_ptr = mask_cache_.GetMask(mask);
 }
 
 /*
@@ -146,7 +149,8 @@
  */
 void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
                                       bool is64bit) {
-  SetMemRefType(lir, is_load, kDalvikReg);
+  DCHECK((is_load ? lir->u.m.use_mask : lir->u.m.def_mask)->Intersection(kEncodeMem).Equals(
+      kEncodeDalvikReg));
 
   /*
    * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
@@ -241,10 +245,10 @@
   }
 
   if (lir->u.m.use_mask && (!lir->flags.is_nop || dump_nop)) {
-    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.use_mask, "use"));
+    DUMP_RESOURCE_MASK(DumpResourceMask(lir, *lir->u.m.use_mask, "use"));
   }
   if (lir->u.m.def_mask && (!lir->flags.is_nop || dump_nop)) {
-    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.def_mask, "def"));
+    DUMP_RESOURCE_MASK(DumpResourceMask(lir, *lir->u.m.def_mask, "def"));
   }
 }
 
@@ -794,7 +798,7 @@
     new_label->operands[0] = keyVal;
     new_label->flags.fixup = kFixupLabel;
     DCHECK(!new_label->flags.use_def_invalid);
-    new_label->u.m.def_mask = ENCODE_ALL;
+    new_label->u.m.def_mask = &kEncodeAll;
     InsertLIRAfter(boundary_lir, new_label);
     res = new_label;
   }
@@ -972,7 +976,9 @@
       fp_spill_mask_(0),
       first_lir_insn_(NULL),
       last_lir_insn_(NULL),
-      slow_paths_(arena, 32, kGrowableArraySlowPaths) {
+      slow_paths_(arena, 32, kGrowableArraySlowPaths),
+      mem_ref_type_(ResourceMask::kHeapRef),
+      mask_cache_(arena) {
   // Reserve pointer id 0 for NULL.
   size_t null_idx = WrapPointer(NULL);
   DCHECK_EQ(null_idx, 0U);
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 69ca715..8f6d716 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -44,7 +44,7 @@
   LIR* barrier = NewLIR0(kPseudoBarrier);
   /* Mark all resources as being clobbered */
   DCHECK(!barrier->flags.use_def_invalid);
-  barrier->u.m.def_mask = ENCODE_ALL;
+  barrier->u.m.def_mask = &kEncodeAll;
 }
 
 void Mir2Lir::GenDivZeroException() {
@@ -447,6 +447,7 @@
     for (int i = 0; i < elems; i++) {
       RegLocation loc = UpdateLoc(info->args[i]);
       if (loc.location == kLocPhysReg) {
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
         Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
       }
     }
@@ -484,7 +485,12 @@
     // Generate the copy loop.  Going backwards for convenience
     LIR* target = NewLIR0(kPseudoTargetLabel);
     // Copy next element
-    LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
+    {
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+      LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
+      // NOTE: No dalvik register annotation, local optimizations will be stopped
+      // by the loop boundaries.
+    }
     StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
     FreeTemp(r_val);
     OpDecAndBranch(kCondGe, r_idx, target);
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index b7ea362..35a98e6 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -493,6 +493,7 @@
    * end up half-promoted.  In those cases, we must flush the promoted
    * half to memory as well.
    */
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   for (int i = 0; i < cu_->num_ins; i++) {
     PromotionMap* v_map = &promotion_map_[start_vreg + i];
     RegStorage reg = GetArgMappingToPhysicalReg(i);
@@ -901,11 +902,17 @@
       } else {
         // kArg2 & rArg3 can safely be used here
         reg = TargetReg(kArg3);
-        Load32Disp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
+        {
+          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+          Load32Disp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
+        }
         call_state = next_call_insn(cu_, info, call_state, target_method,
                                     vtable_idx, direct_code, direct_method, type);
       }
-      Store32Disp(TargetReg(kSp), (next_use + 1) * 4, reg);
+      {
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+        Store32Disp(TargetReg(kSp), (next_use + 1) * 4, reg);
+      }
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                   direct_code, direct_method, type);
       next_use++;
@@ -929,12 +936,15 @@
                                     vtable_idx, direct_code, direct_method, type);
       }
       int outs_offset = (next_use + 1) * 4;
-      if (rl_arg.wide) {
-        StoreBaseDisp(TargetReg(kSp), outs_offset, arg_reg, k64);
-        next_use += 2;
-      } else {
-        Store32Disp(TargetReg(kSp), outs_offset, arg_reg);
-        next_use++;
+      {
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+        if (rl_arg.wide) {
+          StoreBaseDisp(TargetReg(kSp), outs_offset, arg_reg, k64);
+          next_use += 2;
+        } else {
+          Store32Disp(TargetReg(kSp), outs_offset, arg_reg);
+          next_use++;
+        }
       }
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
@@ -998,12 +1008,14 @@
     if (loc.wide) {
       loc = UpdateLocWide(loc);
       if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
         StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
       }
       next_arg += 2;
     } else {
       loc = UpdateLoc(loc);
       if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
         Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
       }
       next_arg++;
@@ -1026,24 +1038,32 @@
     call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
     OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
-    LIR* ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack);
+    LIR* ld = nullptr;
+    {
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+      ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack);
+    }
     // TUNING: loosen barrier
-    ld->u.m.def_mask = ENCODE_ALL;
-    SetMemRefType(ld, true /* is_load */, kDalvikReg);
+    ld->u.m.def_mask = &kEncodeAll;
     call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
     OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
     call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
-    LIR* st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack);
-    SetMemRefType(st, false /* is_load */, kDalvikReg);
-    st->u.m.def_mask = ENCODE_ALL;
+    LIR* st = nullptr;
+    {
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+      st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack);
+    }
+    st->u.m.def_mask = &kEncodeAll;
     call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
   } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     int current_src_offset = start_offset;
     int current_dest_offset = outs_offset;
 
+    // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     while (regs_left_to_pass_via_stack > 0) {
       // This is based on the knowledge that the stack itself is 16-byte aligned.
       bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
@@ -1110,8 +1130,7 @@
             AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
           } else {
             // Set barrier for 128-bit load.
-            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
-            ld1->u.m.def_mask = ENCODE_ALL;
+            ld1->u.m.def_mask = &kEncodeAll;
           }
         }
         if (st1 != nullptr) {
@@ -1121,8 +1140,7 @@
             AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
           } else {
             // Set barrier for 128-bit store.
-            SetMemRefType(st1, false /* is_load */, kDalvikReg);
-            st1->u.m.def_mask = ENCODE_ALL;
+            st1->u.m.def_mask = &kEncodeAll;
           }
         }
 
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 6ef7934..6469d9c 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -65,6 +65,7 @@
         OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg);
       } else {
         // Lives in the frame, need to store.
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
         StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32);
       }
       if (!zero_reg.Valid()) {
@@ -90,6 +91,7 @@
   } else {
     DCHECK((rl_src.location == kLocDalvikFrame) ||
            (rl_src.location == kLocCompilerTemp));
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     if (rl_src.ref) {
       LoadRefDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
     } else {
@@ -123,6 +125,7 @@
   } else {
     DCHECK((rl_src.location == kLocDalvikFrame) ||
            (rl_src.location == kLocCompilerTemp));
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64);
   }
 }
@@ -210,6 +213,7 @@
   ResetDefLoc(rl_dest);
   if (IsDirty(rl_dest.reg) && LiveOut(rl_dest.s_reg_low)) {
     def_start = last_lir_insn_;
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
     MarkClean(rl_dest);
     def_end = last_lir_insn_;
@@ -296,6 +300,7 @@
     def_start = last_lir_insn_;
     DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
               mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
     MarkClean(rl_dest);
     def_end = last_lir_insn_;
@@ -323,6 +328,7 @@
   ResetDefLoc(rl_dest);
   if (IsDirty(rl_dest.reg) && LiveOut(rl_dest.s_reg_low)) {
     LIR *def_start = last_lir_insn_;
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
     MarkClean(rl_dest);
     LIR *def_end = last_lir_insn_;
@@ -358,6 +364,7 @@
     LIR *def_start = last_lir_insn_;
     DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
               mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
     MarkClean(rl_dest);
     LIR *def_end = last_lir_insn_;
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 4a918a1..b97ff2a 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -21,8 +21,8 @@
 #define DEBUG_OPT(X)
 
 /* Check RAW, WAR, and WAW dependencies on the register operands */
-#define CHECK_REG_DEP(use, def, check) ((def & check->u.m.use_mask) || \
-                                        ((use | def) & check->u.m.def_mask))
+#define CHECK_REG_DEP(use, def, check) ((def.Intersects(*check->u.m.use_mask)) || \
+                                        (use.Union(def).Intersects(*check->u.m.def_mask)))
 
 /* Scheduler heuristics */
 #define MAX_HOIST_DISTANCE 20
@@ -109,20 +109,23 @@
     bool is_this_lir_load = target_flags & IS_LOAD;
     LIR* check_lir;
     /* Use the mem mask to determine the rough memory location */
-    uint64_t this_mem_mask = (this_lir->u.m.use_mask | this_lir->u.m.def_mask) & ENCODE_MEM;
+    ResourceMask this_mem_mask = kEncodeMem.Intersection(
+        this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask));
 
     /*
      * Currently only eliminate redundant ld/st for constant and Dalvik
      * register accesses.
      */
-    if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) {
+    if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg))) {
       continue;
     }
 
-    uint64_t stop_def_reg_mask = this_lir->u.m.def_mask & ~ENCODE_MEM;
-    uint64_t stop_use_reg_mask;
+    ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
+    ResourceMask stop_use_reg_mask;
     if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-      stop_use_reg_mask = (IS_BRANCH | this_lir->u.m.use_mask) & ~ENCODE_MEM;
+      // TODO: Stop the abuse of kIsBranch as a bit specification for ResourceMask.
+      stop_use_reg_mask = ResourceMask::Bit(kIsBranch).Union(*this_lir->u.m.use_mask).Without(
+          kEncodeMem);
     } else {
       /*
        * Add pc to the resource mask to prevent this instruction
@@ -130,7 +133,7 @@
        * region bits since stop_mask is used to check data/control
        * dependencies.
        */
-        stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->u.m.use_mask) & ~ENCODE_MEM;
+      stop_use_reg_mask = GetPCUseDefEncoding().Union(*this_lir->u.m.use_mask).Without(kEncodeMem);
     }
 
     for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
@@ -142,8 +145,9 @@
         continue;
       }
 
-      uint64_t check_mem_mask = (check_lir->u.m.use_mask | check_lir->u.m.def_mask) & ENCODE_MEM;
-      uint64_t alias_condition = this_mem_mask & check_mem_mask;
+      ResourceMask check_mem_mask = kEncodeMem.Intersection(
+          check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask));
+      ResourceMask alias_condition = this_mem_mask.Intersection(check_mem_mask);
       bool stop_here = false;
 
       /*
@@ -153,9 +157,9 @@
       // TUNING: Support instructions with multiple register targets.
       if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
         stop_here = true;
-      } else if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
+      } else if (!check_mem_mask.Equals(kEncodeMem) && !alias_condition.Equals(kEncodeNone)) {
         bool is_check_lir_load = check_flags & IS_LOAD;
-        if  (alias_condition == ENCODE_LITERAL) {
+        if  (alias_condition.Equals(kEncodeLiteral)) {
           /*
            * Should only see literal loads in the instruction
            * stream.
@@ -175,7 +179,7 @@
             }
             NopLIR(check_lir);
           }
-        } else if (alias_condition == ENCODE_DALVIK_REG) {
+        } else if (alias_condition.Equals(kEncodeDalvikReg)) {
           /* Must alias */
           if (check_lir->flags.alias_info == this_lir->flags.alias_info) {
             /* Only optimize compatible registers */
@@ -304,7 +308,7 @@
       continue;
     }
 
-    uint64_t stop_use_all_mask = this_lir->u.m.use_mask;
+    ResourceMask stop_use_all_mask = *this_lir->u.m.use_mask;
 
     if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
       /*
@@ -313,14 +317,14 @@
        * locations are safe to be hoisted. So only mark the heap references
        * conservatively here.
        */
-      if (stop_use_all_mask & ENCODE_HEAP_REF) {
-        stop_use_all_mask |= GetPCUseDefEncoding();
+      if (stop_use_all_mask.HasBit(ResourceMask::kHeapRef)) {
+        stop_use_all_mask.SetBits(GetPCUseDefEncoding());
       }
     }
 
     /* Similar as above, but just check for pure register dependency */
-    uint64_t stop_use_reg_mask = stop_use_all_mask & ~ENCODE_MEM;
-    uint64_t stop_def_reg_mask = this_lir->u.m.def_mask & ~ENCODE_MEM;
+    ResourceMask stop_use_reg_mask = stop_use_all_mask.Without(kEncodeMem);
+    ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
 
     int next_slot = 0;
     bool stop_here = false;
@@ -335,22 +339,22 @@
         continue;
       }
 
-      uint64_t check_mem_mask = check_lir->u.m.def_mask & ENCODE_MEM;
-      uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
+      ResourceMask check_mem_mask = check_lir->u.m.def_mask->Intersection(kEncodeMem);
+      ResourceMask alias_condition = stop_use_all_mask.Intersection(check_mem_mask);
       stop_here = false;
 
       /* Potential WAR alias seen - check the exact relation */
-      if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
+      if (!check_mem_mask.Equals(kEncodeMem) && !alias_condition.Equals(kEncodeNone)) {
         /* We can fully disambiguate Dalvik references */
-        if (alias_condition == ENCODE_DALVIK_REG) {
-          /* Must alias or partually overlap */
+        if (alias_condition.Equals(kEncodeDalvikReg)) {
+          /* Must alias or partially overlap */
           if ((check_lir->flags.alias_info == this_lir->flags.alias_info) ||
             IsDalvikRegisterClobbered(this_lir, check_lir)) {
             stop_here = true;
           }
         /* Conservatively treat all heap refs as may-alias */
         } else {
-          DCHECK_EQ(alias_condition, ENCODE_HEAP_REF);
+          DCHECK(alias_condition.Equals(kEncodeHeapRef));
           stop_here = true;
         }
         /* Memory content may be updated. Stop looking now. */
@@ -413,7 +417,7 @@
         LIR* prev_lir = prev_inst_list[slot+1];
 
         /* Check the highest instruction */
-        if (prev_lir->u.m.def_mask == ENCODE_ALL) {
+        if (prev_lir->u.m.def_mask->Equals(kEncodeAll)) {
           /*
            * If the first instruction is a load, don't hoist anything
            * above it since it is unlikely to be beneficial.
@@ -443,7 +447,8 @@
          */
         bool prev_is_load = IsPseudoLirOp(prev_lir->opcode) ? false :
             (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
-        if (((cur_lir->u.m.use_mask & prev_lir->u.m.def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
+        if ((prev_is_load && (cur_lir->u.m.use_mask->Intersects(*prev_lir->u.m.def_mask))) ||
+            (slot < LD_LATENCY)) {
           break;
         }
       }
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index ea3c901..62a7f24 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -63,7 +63,7 @@
     RegLocation LocCReturnDouble();
     RegLocation LocCReturnFloat();
     RegLocation LocCReturnWide();
-    uint64_t GetRegMaskCommon(RegStorage reg);
+    ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
     void AdjustSpillMask();
     void ClobberCallerSave();
     void FreeCallTemps();
@@ -77,12 +77,13 @@
     int AssignInsnOffsets();
     void AssignOffsets();
     AssemblerStatus AssembleInstructions(CodeOffset start_addr);
-    void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-    void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
+    void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+    void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                  ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
     const char* GetTargetInstFmt(int opcode);
     const char* GetTargetInstName(int opcode);
     std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-    uint64_t GetPCUseDefEncoding();
+    ResourceMask GetPCUseDefEncoding() const OVERRIDE;
     uint64_t GetTargetInstFlags(int opcode);
     int GetInsnSize(LIR* lir);
     bool IsUnconditionalBranch(LIR* lir);
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 381c7ce..76b5243 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -120,60 +120,50 @@
 /*
  * Decode the register id.
  */
-uint64_t MipsMir2Lir::GetRegMaskCommon(RegStorage reg) {
-  uint64_t seed;
-  int shift;
-  int reg_id = reg.GetRegNum();
-  /* Each double register is equal to a pair of single-precision FP registers */
-  if (reg.IsDouble()) {
-    seed = 0x3;
-    reg_id = reg_id << 1;
-  } else {
-    seed = 1;
-  }
-  /* FP register starts at bit position 32 */
-  shift = reg.IsFloat() ? kMipsFPReg0 : 0;
-  /* Expand the double register id into single offset */
-  shift += reg_id;
-  return (seed << shift);
+ResourceMask MipsMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
+  return reg.IsDouble()
+      /* Each double register is equal to a pair of single-precision FP registers */
+      ? ResourceMask::TwoBits(reg.GetRegNum() * 2 + kMipsFPReg0)
+      : ResourceMask::Bit(reg.IsSingle() ? reg.GetRegNum() + kMipsFPReg0 : reg.GetRegNum());
 }
 
-uint64_t MipsMir2Lir::GetPCUseDefEncoding() {
-  return ENCODE_MIPS_REG_PC;
+ResourceMask MipsMir2Lir::GetPCUseDefEncoding() const {
+  return ResourceMask::Bit(kMipsRegPC);
 }
 
 
-void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
+void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                           ResourceMask* use_mask, ResourceMask* def_mask) {
   DCHECK_EQ(cu_->instruction_set, kMips);
   DCHECK(!lir->flags.use_def_invalid);
 
   // Mips-specific resource map setup here.
   if (flags & REG_DEF_SP) {
-    lir->u.m.def_mask |= ENCODE_MIPS_REG_SP;
+    def_mask->SetBit(kMipsRegSP);
   }
 
   if (flags & REG_USE_SP) {
-    lir->u.m.use_mask |= ENCODE_MIPS_REG_SP;
+    use_mask->SetBit(kMipsRegSP);
   }
 
   if (flags & REG_DEF_LR) {
-    lir->u.m.def_mask |= ENCODE_MIPS_REG_LR;
+    def_mask->SetBit(kMipsRegLR);
   }
 
   if (flags & REG_DEF_HI) {
-    lir->u.m.def_mask |= ENCODE_MIPS_REG_HI;
+    def_mask->SetBit(kMipsRegHI);
   }
 
   if (flags & REG_DEF_LO) {
-    lir->u.m.def_mask |= ENCODE_MIPS_REG_LO;
+    def_mask->SetBit(kMipsRegLO);
   }
 
   if (flags & REG_USE_HI) {
-    lir->u.m.use_mask |= ENCODE_MIPS_REG_HI;
+    use_mask->SetBit(kMipsRegHI);
   }
 
   if (flags & REG_USE_LO) {
-    lir->u.m.use_mask |= ENCODE_MIPS_REG_LO;
+    use_mask->SetBit(kMipsRegLO);
   }
 }
 
@@ -283,43 +273,43 @@
 }
 
 // FIXME: need to redo resource maps for MIPS - fix this at that time
-void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix) {
+void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, const ResourceMask& mask, const char *prefix) {
   char buf[256];
   buf[0] = 0;
 
-  if (mask == ENCODE_ALL) {
+  if (mask.Equals(kEncodeAll)) {
     strcpy(buf, "all");
   } else {
     char num[8];
     int i;
 
     for (i = 0; i < kMipsRegEnd; i++) {
-      if (mask & (1ULL << i)) {
+      if (mask.HasBit(i)) {
         snprintf(num, arraysize(num), "%d ", i);
         strcat(buf, num);
       }
     }
 
-    if (mask & ENCODE_CCODE) {
+    if (mask.HasBit(ResourceMask::kCCode)) {
       strcat(buf, "cc ");
     }
-    if (mask & ENCODE_FP_STATUS) {
+    if (mask.HasBit(ResourceMask::kFPStatus)) {
       strcat(buf, "fpcc ");
     }
     /* Memory bits */
-    if (mips_lir && (mask & ENCODE_DALVIK_REG)) {
+    if (mips_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
       snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
                DECODE_ALIAS_INFO_REG(mips_lir->flags.alias_info),
                DECODE_ALIAS_INFO_WIDE(mips_lir->flags.alias_info) ? "(+1)" : "");
     }
-    if (mask & ENCODE_LITERAL) {
+    if (mask.HasBit(ResourceMask::kLiteral)) {
       strcat(buf, "lit ");
     }
 
-    if (mask & ENCODE_HEAP_REF) {
+    if (mask.HasBit(ResourceMask::kHeapRef)) {
       strcat(buf, "heap ");
     }
-    if (mask & ENCODE_MUST_NOT_ALIAS) {
+    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
       strcat(buf, "noalias ");
     }
   }
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 2757b7b..01b25f92 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -534,7 +534,8 @@
     }
   }
 
-  if (r_base == rs_rMIPS_SP) {
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK(r_base == rs_rMIPS_SP);
     AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                             true /* is_load */, pair /* is64bit */);
     if (pair) {
@@ -634,7 +635,8 @@
     FreeTemp(r_scratch);
   }
 
-  if (r_base == rs_rMIPS_SP) {
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK(r_base == rs_rMIPS_SP);
     AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                             false /* is_load */, pair /* is64bit */);
     if (pair) {
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 2f37520..9912101 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -57,7 +57,7 @@
       (opcode == kPseudoExportedPC)) {
     // Always make labels scheduling barriers
     DCHECK(!insn->flags.use_def_invalid);
-    insn->u.m.use_mask = insn->u.m.def_mask = ENCODE_ALL;
+    insn->u.m.use_mask = insn->u.m.def_mask = &kEncodeAll;
   }
   return insn;
 }
@@ -140,19 +140,20 @@
 /*
  * Mark the corresponding bit(s).
  */
-inline void Mir2Lir::SetupRegMask(uint64_t* mask, int reg) {
+inline void Mir2Lir::SetupRegMask(ResourceMask* mask, int reg) {
   DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
   DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for 0x" << reg;
-  *mask |= reginfo_map_.Get(reg)->DefUseMask();
+  *mask = mask->Union(reginfo_map_.Get(reg)->DefUseMask());
 }
 
 /*
  * Set up the proper fields in the resource mask
  */
-inline void Mir2Lir::SetupResourceMasks(LIR* lir, bool leave_mem_ref) {
+inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
   int opcode = lir->opcode;
 
   if (IsPseudoLirOp(opcode)) {
+    lir->u.m.use_mask = lir->u.m.def_mask = &kEncodeNone;
     if (opcode != kPseudoBarrier) {
       lir->flags.fixup = kFixupLabel;
     }
@@ -166,13 +167,27 @@
     lir->flags.fixup = kFixupLabel;
   }
 
-  /* Get the starting size of the instruction's template */
+  /* Get the starting size of the instruction's template. */
   lir->flags.size = GetInsnSize(lir);
   estimated_native_code_size_ += lir->flags.size;
-  /* Set up the mask for resources that are updated */
-  if (!leave_mem_ref && (flags & (IS_LOAD | IS_STORE))) {
-    /* Default to heap - will catch specialized classes later */
-    SetMemRefType(lir, flags & IS_LOAD, kHeapRef);
+
+  /* Set up the mask for resources. */
+  ResourceMask use_mask;
+  ResourceMask def_mask;
+
+  if (flags & (IS_LOAD | IS_STORE)) {
+    /* Set memory reference type (defaults to heap, overridden by ScopedMemRefType). */
+    if (flags & IS_LOAD) {
+      use_mask.SetBit(mem_ref_type_);
+    } else {
+      /* Currently only loads can be marked as kMustNotAlias. */
+      DCHECK(mem_ref_type_ != ResourceMask::kMustNotAlias);
+    }
+    if (flags & IS_STORE) {
+      /* Literals cannot be written to. */
+      DCHECK(mem_ref_type_ != ResourceMask::kLiteral);
+      def_mask.SetBit(mem_ref_type_);
+    }
   }
 
   /*
@@ -180,52 +195,55 @@
    * turn will trash everything.
    */
   if (flags & IS_BRANCH) {
-    lir->u.m.def_mask = lir->u.m.use_mask = ENCODE_ALL;
+    lir->u.m.def_mask = lir->u.m.use_mask = &kEncodeAll;
     return;
   }
 
   if (flags & REG_DEF0) {
-    SetupRegMask(&lir->u.m.def_mask, lir->operands[0]);
+    SetupRegMask(&def_mask, lir->operands[0]);
   }
 
   if (flags & REG_DEF1) {
-    SetupRegMask(&lir->u.m.def_mask, lir->operands[1]);
+    SetupRegMask(&def_mask, lir->operands[1]);
   }
 
   if (flags & REG_DEF2) {
-    SetupRegMask(&lir->u.m.def_mask, lir->operands[2]);
+    SetupRegMask(&def_mask, lir->operands[2]);
   }
 
   if (flags & REG_USE0) {
-    SetupRegMask(&lir->u.m.use_mask, lir->operands[0]);
+    SetupRegMask(&use_mask, lir->operands[0]);
   }
 
   if (flags & REG_USE1) {
-    SetupRegMask(&lir->u.m.use_mask, lir->operands[1]);
+    SetupRegMask(&use_mask, lir->operands[1]);
   }
 
   if (flags & REG_USE2) {
-    SetupRegMask(&lir->u.m.use_mask, lir->operands[2]);
+    SetupRegMask(&use_mask, lir->operands[2]);
   }
 
   if (flags & REG_USE3) {
-    SetupRegMask(&lir->u.m.use_mask, lir->operands[3]);
+    SetupRegMask(&use_mask, lir->operands[3]);
   }
 
   if (flags & REG_USE4) {
-    SetupRegMask(&lir->u.m.use_mask, lir->operands[4]);
+    SetupRegMask(&use_mask, lir->operands[4]);
   }
 
   if (flags & SETS_CCODES) {
-    lir->u.m.def_mask |= ENCODE_CCODE;
+    def_mask.SetBit(ResourceMask::kCCode);
   }
 
   if (flags & USES_CCODES) {
-    lir->u.m.use_mask |= ENCODE_CCODE;
+    use_mask.SetBit(ResourceMask::kCCode);
   }
 
   // Handle target-specific actions
-  SetupTargetResourceMasks(lir, flags);
+  SetupTargetResourceMasks(lir, flags, &def_mask, &use_mask);
+
+  lir->u.m.use_mask = mask_cache_.GetMask(use_mask);
+  lir->u.m.def_mask = mask_cache_.GetMask(def_mask);
 }
 
 inline art::Mir2Lir::RegisterInfo* Mir2Lir::GetRegInfo(RegStorage reg) {
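
With the masks held by pointer, consumers simply dereference them. A minimal sketch of the kind of queries local optimizations perform on the new representation (the helper names are illustrative, not part of the change):

static bool SetsCCodes(const LIR* lir) {
  // Condition codes are a common resource bit in the def mask.
  return lir->u.m.def_mask->HasBit(ResourceMask::kCCode);
}

static bool IsSchedulingBarrier(const LIR* lir) {
  // Labels and barriers carry the all-bits mask and block reordering across them.
  return lir->u.m.def_mask->Equals(kEncodeAll);
}
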
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index a85be5e..40205ea 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -68,6 +68,7 @@
 
 // TODO: needs revisit for 64-bit.
 RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) {
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
 
   if (cu_->instruction_set == kX86) {
@@ -159,6 +160,7 @@
 }
 
 void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
   if (cu_->instruction_set == kX86) {
     /*
@@ -1171,7 +1173,7 @@
       head_lir = &block_label_list_[bb->id];
       // Set the first label as a scheduling barrier.
       DCHECK(!head_lir->flags.use_def_invalid);
-      head_lir->u.m.def_mask = ENCODE_ALL;
+      head_lir->u.m.def_mask = &kEncodeAll;
     }
 
     if (opcode == kMirOpCheck) {
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 9718acd..b051d6c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -23,6 +23,7 @@
 #include "dex/compiler_ir.h"
 #include "dex/reg_storage.h"
 #include "dex/backend.h"
+#include "dex/quick/resource_mask.h"
 #include "driver/compiler_driver.h"
 #include "leb128.h"
 #include "safe_map.h"
@@ -136,8 +137,8 @@
 typedef std::vector<uint8_t> CodeBuffer;
 
 struct UseDefMasks {
-  uint64_t use_mask;        // Resource mask for use.
-  uint64_t def_mask;        // Resource mask for def.
+  const ResourceMask* use_mask;        // Resource mask for use.
+  const ResourceMask* def_mask;        // Resource mask for def.
 };
 
 struct AssemblyInfo {
@@ -188,20 +189,6 @@
 #define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
 #define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
 
-// Common resource macros.
-#define ENCODE_CCODE            (1ULL << kCCode)
-#define ENCODE_FP_STATUS        (1ULL << kFPStatus)
-
-// Abstract memory locations.
-#define ENCODE_DALVIK_REG       (1ULL << kDalvikReg)
-#define ENCODE_LITERAL          (1ULL << kLiteral)
-#define ENCODE_HEAP_REF         (1ULL << kHeapRef)
-#define ENCODE_MUST_NOT_ALIAS   (1ULL << kMustNotAlias)
-
-#define ENCODE_ALL              (~0ULL)
-#define ENCODE_MEM              (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
-                                 ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
-
 #define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8))
 #define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \
   do { \
@@ -327,7 +314,7 @@
      */
     class RegisterInfo {
      public:
-      RegisterInfo(RegStorage r, uint64_t mask = ENCODE_ALL);
+      RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
       ~RegisterInfo() {}
       static void* operator new(size_t size, ArenaAllocator* arena) {
         return arena->Alloc(size, kArenaAllocRegAlloc);
@@ -378,8 +365,8 @@
       RegStorage Partner() { return partner_; }
       void SetPartner(RegStorage partner) { partner_ = partner; }
       int SReg() { return (!IsTemp() || IsLive()) ? s_reg_ : INVALID_SREG; }
-      uint64_t DefUseMask() { return def_use_mask_; }
-      void SetDefUseMask(uint64_t def_use_mask) { def_use_mask_ = def_use_mask; }
+      const ResourceMask& DefUseMask() { return def_use_mask_; }
+      void SetDefUseMask(const ResourceMask& def_use_mask) { def_use_mask_ = def_use_mask; }
       RegisterInfo* Master() { return master_; }
       void SetMaster(RegisterInfo* master) {
         master_ = master;
@@ -417,7 +404,7 @@
       bool aliased_;               // Is this the master for other aliased RegisterInfo's?
       RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
       int s_reg_;                  // Name of live value.
-      uint64_t def_use_mask_;      // Resources for this element.
+      ResourceMask def_use_mask_;  // Resources for this element.
       uint32_t used_storage_;      // 1 bit per 4 bytes of storage. Unused by aliases.
       uint32_t liveness_;          // 1 bit per 4 bytes of storage. Unused by aliases.
       RegisterInfo* master_;       // Pointer to controlling storage mask.
@@ -539,6 +526,26 @@
       LIR* const cont_;
     };
 
+    // Helper class that changes mem_ref_type_ for the current scope. See mem_ref_type_.
+    class ScopedMemRefType {
+     public:
+      ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type)
+          : m2l_(m2l),
+            old_mem_ref_type_(m2l->mem_ref_type_) {
+        m2l_->mem_ref_type_ = new_mem_ref_type;
+      }
+
+      ~ScopedMemRefType() {
+        m2l_->mem_ref_type_ = old_mem_ref_type_;
+      }
+
+     private:
+      Mir2Lir* const m2l_;
+      ResourceMask::ResourceBit old_mem_ref_type_;
+
+      DISALLOW_COPY_AND_ASSIGN(ScopedMemRefType);
+    };
+
     virtual ~Mir2Lir() {}
 
     int32_t s4FromSwitchData(const void* switch_data) {
@@ -625,10 +632,10 @@
     virtual void Materialize();
     virtual CompiledMethod* GetCompiledMethod();
     void MarkSafepointPC(LIR* inst);
-    void SetupResourceMasks(LIR* lir, bool leave_mem_ref = false);
+    void SetupResourceMasks(LIR* lir);
     void SetMemRefType(LIR* lir, bool is_load, int mem_type);
     void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
-    void SetupRegMask(uint64_t* mask, int reg);
+    void SetupRegMask(ResourceMask* mask, int reg);
     void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
     void DumpPromotionMap();
     void CodegenDump();
@@ -1136,7 +1143,7 @@
     virtual RegLocation LocCReturnDouble() = 0;
     virtual RegLocation LocCReturnFloat() = 0;
     virtual RegLocation LocCReturnWide() = 0;
-    virtual uint64_t GetRegMaskCommon(RegStorage reg) = 0;
+    virtual ResourceMask GetRegMaskCommon(const RegStorage& reg) const = 0;
     virtual void AdjustSpillMask() = 0;
     virtual void ClobberCallerSave() = 0;
     virtual void FreeCallTemps() = 0;
@@ -1147,12 +1154,13 @@
 
     // Required for target - miscellaneous.
     virtual void AssembleLIR() = 0;
-    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
-    virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags) = 0;
+    virtual void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) = 0;
+    virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                          ResourceMask* use_mask, ResourceMask* def_mask) = 0;
     virtual const char* GetTargetInstFmt(int opcode) = 0;
     virtual const char* GetTargetInstName(int opcode) = 0;
     virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
-    virtual uint64_t GetPCUseDefEncoding() = 0;
+    virtual ResourceMask GetPCUseDefEncoding() const = 0;
     virtual uint64_t GetTargetInstFlags(int opcode) = 0;
     virtual int GetInsnSize(LIR* lir) = 0;
     virtual bool IsUnconditionalBranch(LIR* lir) = 0;
@@ -1576,6 +1584,17 @@
     LIR* last_lir_insn_;
 
     GrowableArray<LIRSlowPath*> slow_paths_;
+
+    // The memory reference type for new LIRs.
+    // NOTE: Passing this as an explicit parameter to all functions that directly or indirectly
+    // invoke RawLIR() would clutter the code and reduce readability.
+    ResourceMask::ResourceBit mem_ref_type_;
+
+    // Each resource mask now takes 16 bytes, so having both use/def masks directly in a LIR
+    // would consume 32 bytes per LIR. Instead, the LIR now holds only pointers to the masks
+    // (i.e. 8 bytes on 32-bit arch, 16 bytes on 64-bit arch) and we use ResourceMaskCache
+    // to deduplicate the masks.
+    ResourceMaskCache mask_cache_;
 };  // Class Mir2Lir
 
 }  // namespace art
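
A minimal usage sketch of the ScopedMemRefType helper declared above; the function name FlushVRegSketch is hypothetical, the calls mirror how the spill code below annotates Dalvik vreg accesses:

// Hypothetical Mir2Lir member (would need a declaration in the class above).
void Mir2Lir::FlushVRegSketch(RegStorage reg, int v_reg) {
  // While this object is alive, loads/stores are tagged as Dalvik vreg accesses
  // instead of the default kHeapRef.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k32);
}  // ~ScopedMemRefType() restores the previous mem_ref_type_ here.
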
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index bbeef50..cae59c8 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -38,7 +38,7 @@
   }
 }
 
-Mir2Lir::RegisterInfo::RegisterInfo(RegStorage r, uint64_t mask)
+Mir2Lir::RegisterInfo::RegisterInfo(RegStorage r, const ResourceMask& mask)
   : reg_(r), is_temp_(false), wide_value_(false), dirty_(false), aliased_(false), partner_(r),
     s_reg_(INVALID_SREG), def_use_mask_(mask), master_(this), def_start_(nullptr),
     def_end_(nullptr), alias_chain_(nullptr) {
@@ -82,22 +82,22 @@
   }
 
   // Construct the register pool.
-  for (RegStorage reg : core_regs) {
+  for (const RegStorage& reg : core_regs) {
     RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
     m2l_->reginfo_map_.Put(reg.GetReg(), info);
     core_regs_.Insert(info);
   }
-  for (RegStorage reg : core64_regs) {
+  for (const RegStorage& reg : core64_regs) {
     RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
     m2l_->reginfo_map_.Put(reg.GetReg(), info);
     core64_regs_.Insert(info);
   }
-  for (RegStorage reg : sp_regs) {
+  for (const RegStorage& reg : sp_regs) {
     RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
     m2l_->reginfo_map_.Put(reg.GetReg(), info);
     sp_regs_.Insert(info);
   }
-  for (RegStorage reg : dp_regs) {
+  for (const RegStorage& reg : dp_regs) {
     RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
     m2l_->reginfo_map_.Put(reg.GetReg(), info);
     dp_regs_.Insert(info);
@@ -126,7 +126,7 @@
   }
 
   // Add an entry for InvalidReg with zero'd mask.
-  RegisterInfo* invalid_reg = new (arena) RegisterInfo(RegStorage::InvalidReg(), 0);
+  RegisterInfo* invalid_reg = new (arena) RegisterInfo(RegStorage::InvalidReg(), kEncodeNone);
   m2l_->reginfo_map_.Put(RegStorage::InvalidReg().GetReg(), invalid_reg);
 
   // Existence of core64 registers implies wide references.
@@ -734,6 +734,7 @@
         info1 = info2;
       }
       int v_reg = mir_graph_->SRegToVReg(info1->SReg());
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
       StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
     }
   } else {
@@ -741,6 +742,7 @@
     if (info->IsLive() && info->IsDirty()) {
       info->SetIsDirty(false);
       int v_reg = mir_graph_->SRegToVReg(info->SReg());
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
       StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
     }
   }
@@ -752,6 +754,7 @@
   if (info->IsLive() && info->IsDirty()) {
     info->SetIsDirty(false);
     int v_reg = mir_graph_->SRegToVReg(info->SReg());
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, kWord);
   }
 }
diff --git a/compiler/dex/quick/resource_mask.cc b/compiler/dex/quick/resource_mask.cc
new file mode 100644
index 0000000..17995fb
--- /dev/null
+++ b/compiler/dex/quick/resource_mask.cc
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iomanip>
+
+#include "resource_mask.h"
+
+#include "utils/arena_allocator.h"
+
+namespace art {
+
+namespace {  // anonymous namespace
+
+constexpr ResourceMask kNoRegMasks[] = {
+    kEncodeNone,
+    kEncodeHeapRef,
+    kEncodeLiteral,
+    kEncodeDalvikReg,
+    ResourceMask::Bit(ResourceMask::kFPStatus),
+    ResourceMask::Bit(ResourceMask::kCCode),
+};
+// The index 127 - bit is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
+COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kHeapRef].Equals(
+    kEncodeHeapRef), check_kNoRegMasks_heap_ref_index);
+COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kLiteral].Equals(
+    kEncodeLiteral), check_kNoRegMasks_literal_index);
+COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kDalvikReg].Equals(
+    kEncodeDalvikReg), check_kNoRegMasks_dalvik_reg_index);
+COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kFPStatus].Equals(
+    ResourceMask::Bit(ResourceMask::kFPStatus)), check_kNoRegMasks_fp_status_index);
+COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kCCode].Equals(
+    ResourceMask::Bit(ResourceMask::kCCode)), check_kNoRegMasks_ccode_index);
+
+template <size_t special_bit>
+constexpr ResourceMask OneRegOneSpecial(size_t reg) {
+  return ResourceMask::Bit(reg).Union(ResourceMask::Bit(special_bit));
+}
+
+// NOTE: Working around gcc bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61484 .
+// This should be a two-dimensional array, kSingleRegMasks[][32], and each line should be
+// enclosed in an extra { }. However, gcc issues a bogus "error: array must be initialized
+// with a brace-enclosed initializer" for that, so we flatten this to a one-dimensional array.
+constexpr ResourceMask kSingleRegMasks[] = {
+#define DEFINE_LIST_32(fn) \
+    fn(0), fn(1), fn(2), fn(3), fn(4), fn(5), fn(6), fn(7),           \
+    fn(8), fn(9), fn(10), fn(11), fn(12), fn(13), fn(14), fn(15),     \
+    fn(16), fn(17), fn(18), fn(19), fn(20), fn(21), fn(22), fn(23),   \
+    fn(24), fn(25), fn(26), fn(27), fn(28), fn(29), fn(30), fn(31)
+    // NOTE: Each line is 512B of constant data, 3KiB in total.
+    DEFINE_LIST_32(ResourceMask::Bit),
+    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kHeapRef>),
+    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kLiteral>),
+    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kDalvikReg>),
+    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kFPStatus>),
+    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kCCode>),
+#undef DEFINE_LIST_32
+};
+
+constexpr size_t SingleRegMaskIndex(size_t main_index, size_t sub_index) {
+  return main_index * 32u + sub_index;
+}
+
+// The index 127 - bit is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
+COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kHeapRef, 0)].Equals(
+    OneRegOneSpecial<ResourceMask::kHeapRef>(0)), check_kSingleRegMasks_heap_ref_index);
+COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kLiteral, 0)].Equals(
+    OneRegOneSpecial<ResourceMask::kLiteral>(0)), check_kSingleRegMasks_literal_index);
+COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kDalvikReg, 0)].Equals(
+    OneRegOneSpecial<ResourceMask::kDalvikReg>(0)), check_kSingleRegMasks_dalvik_reg_index);
+COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kFPStatus, 0)].Equals(
+    OneRegOneSpecial<ResourceMask::kFPStatus>(0)), check_kSingleRegMasks_fp_status_index);
+COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kCCode, 0)].Equals(
+    OneRegOneSpecial<ResourceMask::kCCode>(0)), check_kSingleRegMasks_ccode_index);
+
+// NOTE: arraysize(kNoRegMasks) multiplied by 32 due to the gcc bug workaround, see above.
+COMPILE_ASSERT(arraysize(kSingleRegMasks) == arraysize(kNoRegMasks) * 32, check_arraysizes);
+
+constexpr ResourceMask kTwoRegsMasks[] = {
+#define TWO(a, b) ResourceMask::Bit(a).Union(ResourceMask::Bit(b))
+    // NOTE: 16 * 15 / 2 = 120 entries, 16 bytes each, 1920B in total.
+    TWO(0, 1),
+    TWO(0, 2), TWO(1, 2),
+    TWO(0, 3), TWO(1, 3), TWO(2, 3),
+    TWO(0, 4), TWO(1, 4), TWO(2, 4), TWO(3, 4),
+    TWO(0, 5), TWO(1, 5), TWO(2, 5), TWO(3, 5), TWO(4, 5),
+    TWO(0, 6), TWO(1, 6), TWO(2, 6), TWO(3, 6), TWO(4, 6), TWO(5, 6),
+    TWO(0, 7), TWO(1, 7), TWO(2, 7), TWO(3, 7), TWO(4, 7), TWO(5, 7), TWO(6, 7),
+    TWO(0, 8), TWO(1, 8), TWO(2, 8), TWO(3, 8), TWO(4, 8), TWO(5, 8), TWO(6, 8), TWO(7, 8),
+    TWO(0, 9), TWO(1, 9), TWO(2, 9), TWO(3, 9), TWO(4, 9), TWO(5, 9), TWO(6, 9), TWO(7, 9),
+        TWO(8, 9),
+    TWO(0, 10), TWO(1, 10), TWO(2, 10), TWO(3, 10), TWO(4, 10), TWO(5, 10), TWO(6, 10), TWO(7, 10),
+        TWO(8, 10), TWO(9, 10),
+    TWO(0, 11), TWO(1, 11), TWO(2, 11), TWO(3, 11), TWO(4, 11), TWO(5, 11), TWO(6, 11), TWO(7, 11),
+        TWO(8, 11), TWO(9, 11), TWO(10, 11),
+    TWO(0, 12), TWO(1, 12), TWO(2, 12), TWO(3, 12), TWO(4, 12), TWO(5, 12), TWO(6, 12), TWO(7, 12),
+        TWO(8, 12), TWO(9, 12), TWO(10, 12), TWO(11, 12),
+    TWO(0, 13), TWO(1, 13), TWO(2, 13), TWO(3, 13), TWO(4, 13), TWO(5, 13), TWO(6, 13), TWO(7, 13),
+        TWO(8, 13), TWO(9, 13), TWO(10, 13), TWO(11, 13), TWO(12, 13),
+    TWO(0, 14), TWO(1, 14), TWO(2, 14), TWO(3, 14), TWO(4, 14), TWO(5, 14), TWO(6, 14), TWO(7, 14),
+        TWO(8, 14), TWO(9, 14), TWO(10, 14), TWO(11, 14), TWO(12, 14), TWO(13, 14),
+    TWO(0, 15), TWO(1, 15), TWO(2, 15), TWO(3, 15), TWO(4, 15), TWO(5, 15), TWO(6, 15), TWO(7, 15),
+        TWO(8, 15), TWO(9, 15), TWO(10, 15), TWO(11, 15), TWO(12, 15), TWO(13, 15), TWO(14, 15),
+#undef TWO
+};
+COMPILE_ASSERT(arraysize(kTwoRegsMasks) == 16 * 15 / 2, check_arraysize_kTwoRegsMasks);
+
+constexpr size_t TwoRegsIndex(size_t higher, size_t lower) {
+  return (higher * (higher - 1)) / 2u + lower;
+}
+
+constexpr bool CheckTwoRegsMask(size_t higher, size_t lower) {
+  return ResourceMask::Bit(lower).Union(ResourceMask::Bit(higher)).Equals(
+      kTwoRegsMasks[TwoRegsIndex(higher, lower)]);
+}
+
+constexpr bool CheckTwoRegsMaskLine(size_t line, size_t lower = 0u) {
+  return (lower == line) ||
+      (CheckTwoRegsMask(line, lower) && CheckTwoRegsMaskLine(line, lower + 1u));
+}
+
+constexpr bool CheckTwoRegsMaskTable(size_t lines) {
+  return lines == 0 ||
+      (CheckTwoRegsMaskLine(lines - 1) && CheckTwoRegsMaskTable(lines - 1u));
+}
+
+COMPILE_ASSERT(CheckTwoRegsMaskTable(16), check_two_regs_masks_table);
+
+}  // anonymous namespace
+
+const ResourceMask* ResourceMaskCache::GetMask(const ResourceMask& mask) {
+  // Instead of having a deduplication map, we shall just use pre-defined constexpr
+  // masks for the common cases. At most one of these special bits is allowed:
+  constexpr ResourceMask kAllowedSpecialBits = ResourceMask::Bit(ResourceMask::kFPStatus)
+      .Union(ResourceMask::Bit(ResourceMask::kCCode))
+      .Union(kEncodeHeapRef).Union(kEncodeLiteral).Union(kEncodeDalvikReg);
+  const ResourceMask* res = nullptr;
+  // Limit to low 32 regs and the kAllowedSpecialBits.
+  if ((mask.masks_[0] >> 32) == 0u && (mask.masks_[1] & ~kAllowedSpecialBits.masks_[1]) == 0u) {
+    // Check if it's only up to two registers.
+    uint32_t low_regs = static_cast<uint32_t>(mask.masks_[0]);
+    uint32_t low_regs_without_lowest = low_regs & (low_regs - 1u);
+    if (low_regs_without_lowest == 0u && IsPowerOfTwo(mask.masks_[1])) {
+      // 0 or 1 register, 0 or 1 bit from kAllowedSpecialBits. Use a pre-defined mask.
+      size_t index = (mask.masks_[1] != 0u) ? CLZ(mask.masks_[1]) : 0u;
+      DCHECK_LT(index, arraysize(kNoRegMasks));
+      res = (low_regs != 0) ? &kSingleRegMasks[SingleRegMaskIndex(index, CTZ(low_regs))]
+                            : &kNoRegMasks[index];
+    } else if (IsPowerOfTwo(low_regs_without_lowest) && mask.masks_[1] == 0u) {
+      // 2 registers and no other flags. Use predefined mask if higher reg is < 16.
+      if (low_regs_without_lowest < (1u << 16)) {
+        res = &kTwoRegsMasks[TwoRegsIndex(CTZ(low_regs_without_lowest), CTZ(low_regs))];
+      }
+    }
+  } else if (mask.Equals(kEncodeAll)) {
+    res = &kEncodeAll;
+  }
+  if (res != nullptr) {
+    DCHECK(res->Equals(mask))
+        << "(" << std::hex << std::setw(16) << mask.masks_[0]
+        << ", "<< std::hex << std::setw(16) << mask.masks_[1]
+        << ") != (" << std::hex << std::setw(16) << res->masks_[0]
+        << ", "<< std::hex << std::setw(16) << res->masks_[1] << ")";
+    return res;
+  }
+
+  // TODO: Deduplicate. (At least the most common masks.)
+  void* mem = allocator_->Alloc(sizeof(ResourceMask), kArenaAllocLIRResourceMask);
+  return new (mem) ResourceMask(mask);
+}
+
+}  // namespace art
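
A short sketch of the intended cache behavior (assuming ArenaPool and ArenaAllocator from compiler/utils/arena_allocator.h): common masks resolve to the pre-defined constants, so repeated lookups return the same pointer and never touch the arena; only unusual masks are arena-allocated.

ArenaPool pool;
ArenaAllocator arena(&pool);
ResourceMaskCache cache(&arena);

// Condition-code mask: served from kNoRegMasks, identical pointer every time.
const ResourceMask* cc1 = cache.GetMask(ResourceMask::Bit(ResourceMask::kCCode));
const ResourceMask* cc2 = cache.GetMask(ResourceMask::Bit(ResourceMask::kCCode));
DCHECK_EQ(cc1, cc2);

// Three registers is not a pre-defined case and falls through to the arena.
ResourceMask three_regs =
    ResourceMask::Bit(1).Union(ResourceMask::Bit(2)).Union(ResourceMask::Bit(3));
DCHECK(cache.GetMask(three_regs)->Equals(three_regs));
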
diff --git a/compiler/dex/quick/resource_mask.h b/compiler/dex/quick/resource_mask.h
new file mode 100644
index 0000000..12ce98a
--- /dev/null
+++ b/compiler/dex/quick/resource_mask.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_RESOURCE_MASK_H_
+#define ART_COMPILER_DEX_QUICK_RESOURCE_MASK_H_
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "dex/reg_storage.h"
+
+namespace art {
+
+class ArenaAllocator;
+
+/**
+ * @brief Resource mask for LIR insn uses or defs.
+ * @details Def/Use mask used for checking dependencies between LIR insns in local
+ * optimizations such as load hoisting.
+ */
+class ResourceMask {
+ private:
+  constexpr ResourceMask(uint64_t mask1, uint64_t mask2)
+      : masks_{ mask1, mask2 } {  // NOLINT
+  }
+
+ public:
+  /*
+   * Def/Use encoding in 128-bit use_mask/def_mask.  Low positions used for target-specific
+   * registers (and typically use the register number as the position).  High positions
+   * reserved for common and abstract resources.
+   */
+  enum ResourceBit {
+    kMustNotAlias = 127,
+    kHeapRef = 126,         // Default memory reference type.
+    kLiteral = 125,         // Literal pool memory reference.
+    kDalvikReg = 124,       // Dalvik v_reg memory reference.
+    kFPStatus = 123,
+    kCCode = 122,
+    kLowestCommonResource = kCCode,
+    kHighestCommonResource = kMustNotAlias
+  };
+
+  // Default-constructible.
+  constexpr ResourceMask()
+    : masks_ { 0u, 0u } {
+  }
+
+  // Copy-constructible and copyable.
+  ResourceMask(const ResourceMask& other) = default;
+  ResourceMask& operator=(const ResourceMask& other) = default;
+
+  static constexpr ResourceMask RawMask(uint64_t mask1, uint64_t mask2) {
+    return ResourceMask(mask1, mask2);
+  }
+
+  static constexpr ResourceMask Bit(size_t bit) {
+    return ResourceMask(bit >= 64u ? 0u : UINT64_C(1) << bit,
+                        bit >= 64u ? UINT64_C(1) << (bit - 64u) : 0u);
+  }
+
+  // Two consecutive bits. The start_bit must be even.
+  static constexpr ResourceMask TwoBits(size_t start_bit) {
+    return
+        DCHECK_CONSTEXPR((start_bit & 1u) == 0u, << start_bit << " isn't even", Bit(0))
+        ResourceMask(start_bit >= 64u ? 0u : UINT64_C(3) << start_bit,
+                     start_bit >= 64u ? UINT64_C(3) << (start_bit - 64u) : 0u);
+  }
+
+  static constexpr ResourceMask NoBits() {
+    return ResourceMask(UINT64_C(0), UINT64_C(0));
+  }
+
+  static constexpr ResourceMask AllBits() {
+    return ResourceMask(~UINT64_C(0), ~UINT64_C(0));
+  }
+
+  constexpr ResourceMask Union(const ResourceMask& other) const {
+    return ResourceMask(masks_[0] | other.masks_[0], masks_[1] | other.masks_[1]);
+  }
+
+  constexpr ResourceMask Intersection(const ResourceMask& other) const {
+    return ResourceMask(masks_[0] & other.masks_[0], masks_[1] & other.masks_[1]);
+  }
+
+  constexpr ResourceMask Without(const ResourceMask& other) const {
+    return ResourceMask(masks_[0] & ~other.masks_[0], masks_[1] & ~other.masks_[1]);
+  }
+
+  constexpr bool Equals(const ResourceMask& other) const {
+    return masks_[0] == other.masks_[0] && masks_[1] == other.masks_[1];
+  }
+
+  constexpr bool Intersects(const ResourceMask& other) const {
+    return (masks_[0] & other.masks_[0]) != 0u || (masks_[1] & other.masks_[1]) != 0u;
+  }
+
+  void SetBit(size_t bit) {
+    DCHECK_LE(bit, kHighestCommonResource);
+    masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
+  }
+
+  constexpr bool HasBit(size_t bit) const {
+    return (masks_[bit / 64u] & (UINT64_C(1) << (bit & 63u))) != 0u;
+  }
+
+  ResourceMask& SetBits(const ResourceMask& other) {
+    masks_[0] |= other.masks_[0];
+    masks_[1] |= other.masks_[1];
+    return *this;
+  }
+
+  ResourceMask& ClearBits(const ResourceMask& other) {
+    masks_[0] &= ~other.masks_[0];
+    masks_[1] &= ~other.masks_[1];
+    return *this;
+  }
+
+ private:
+  uint64_t masks_[2];
+
+  friend class ResourceMaskCache;
+};
+
+constexpr ResourceMask kEncodeNone = ResourceMask::NoBits();
+constexpr ResourceMask kEncodeAll = ResourceMask::AllBits();
+constexpr ResourceMask kEncodeHeapRef = ResourceMask::Bit(ResourceMask::kHeapRef);
+constexpr ResourceMask kEncodeLiteral = ResourceMask::Bit(ResourceMask::kLiteral);
+constexpr ResourceMask kEncodeDalvikReg = ResourceMask::Bit(ResourceMask::kDalvikReg);
+constexpr ResourceMask kEncodeMem = kEncodeLiteral.Union(kEncodeDalvikReg).Union(
+    kEncodeHeapRef).Union(ResourceMask::Bit(ResourceMask::kMustNotAlias));
+
+class ResourceMaskCache {
+ public:
+  explicit ResourceMaskCache(ArenaAllocator* allocator)
+      : allocator_(allocator) {
+  }
+
+  const ResourceMask* GetMask(const ResourceMask& mask);
+
+ private:
+  ArenaAllocator* allocator_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_RESOURCE_MASK_H_
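
A small sketch of the mask algebra above as it is used for dependency checks between instructions (register numbers are illustrative):

// Masks for a hypothetical store to a Dalvik vreg and a later load from one.
ResourceMask store_def = kEncodeDalvikReg.Union(ResourceMask::Bit(2));
ResourceMask load_use = kEncodeDalvikReg.Union(ResourceMask::Bit(7));

// The load depends on the store: both touch Dalvik vreg memory.
DCHECK(store_def.Intersects(load_use));
DCHECK(load_use.HasBit(ResourceMask::kDalvikReg));

// A literal-pool reference does not alias the Dalvik vreg store.
DCHECK(!store_def.Intersects(kEncodeLiteral));
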
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 0a8193a..d37ee67 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1541,7 +1541,9 @@
                   << " delta: " << delta << " old delta: " << lir->operands[0];
             }
             lir->opcode = kX86Jcc32;
-            SetupResourceMasks(lir);
+            lir->flags.size = GetInsnSize(lir);
+            DCHECK(lir->u.m.def_mask->Equals(kEncodeAll));
+            DCHECK(lir->u.m.use_mask->Equals(kEncodeAll));
             res = kRetryAll;
           }
           if (kVerbosePcFixup) {
@@ -1605,7 +1607,9 @@
               LOG(INFO) << "Retry for JMP growth at " << lir->offset;
             }
             lir->opcode = kX86Jmp32;
-            SetupResourceMasks(lir);
+            lir->flags.size = GetInsnSize(lir);
+            DCHECK(lir->u.m.def_mask->Equals(kEncodeAll));
+            DCHECK(lir->u.m.use_mask->Equals(kEncodeAll));
             res = kRetryAll;
           }
           lir->operands[0] = delta;
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 61c9f4f..6ae553d 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -99,7 +99,7 @@
   RegLocation LocCReturnDouble();
   RegLocation LocCReturnFloat();
   RegLocation LocCReturnWide();
-  uint64_t GetRegMaskCommon(RegStorage reg);
+  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
   void AdjustSpillMask();
   void ClobberCallerSave();
   void FreeCallTemps();
@@ -113,12 +113,13 @@
   int AssignInsnOffsets();
   void AssignOffsets();
   AssemblerStatus AssembleInstructions(CodeOffset start_addr);
-  void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-  void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
+  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
   const char* GetTargetInstFmt(int opcode);
   const char* GetTargetInstName(int opcode);
   std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-  uint64_t GetPCUseDefEncoding();
+  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
   uint64_t GetTargetInstFlags(int opcode);
   int GetInsnSize(LIR* lir);
   bool IsUnconditionalBranch(LIR* lir);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index c3580f7..ced6400 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -147,6 +147,9 @@
   // Update the in-register state of source.
   rl_src = UpdateLocWide(rl_src);
 
+  // All memory accesses below reference dalvik regs.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
   // If the source is in physical register, then put it in its location on stack.
   if (rl_src.location == kLocPhysReg) {
     RegisterInfo* reg_info = GetRegInfo(rl_src.reg);
@@ -191,15 +194,12 @@
      * right class. So we call EvalLoc(Wide) first which will ensure that it will get moved to the
      * correct register class.
      */
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
     if (is_double) {
-      rl_result = EvalLocWide(rl_dest, kFPReg, true);
-
       LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64);
 
       StoreFinalValueWide(rl_dest, rl_result);
     } else {
-      rl_result = EvalLoc(rl_dest, kFPReg, true);
-
       Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
 
       StoreFinalValue(rl_dest, rl_result);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index a050a05..4a77df2 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -794,34 +794,61 @@
     RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_rBX, rs_rCX);
     LoadValueDirectWideFixed(rl_src_expected, r_tmp1);
     LoadValueDirectWideFixed(rl_src_new_value, r_tmp2);
-    NewLIR1(kX86Push32R, rs_rDI.GetReg());
-    MarkTemp(rs_rDI);
-    LockTemp(rs_rDI);
-    NewLIR1(kX86Push32R, rs_rSI.GetReg());
-    MarkTemp(rs_rSI);
-    LockTemp(rs_rSI);
-    const int push_offset = 4 /* push edi */ + 4 /* push esi */;
-    int srcObjSp = IsInReg(this, rl_src_obj, rs_rSI) ? 0
-                : (IsInReg(this, rl_src_obj, rs_rDI) ? 4
-                : (SRegOffset(rl_src_obj.s_reg_low) + push_offset));
     // FIXME: needs 64-bit update.
-    LoadWordDisp(TargetReg(kSp), srcObjSp, rs_rDI);
-    int srcOffsetSp = IsInReg(this, rl_src_offset, rs_rSI) ? 0
-                   : (IsInReg(this, rl_src_offset, rs_rDI) ? 4
-                   : (SRegOffset(rl_src_offset.s_reg_low) + push_offset));
-    LoadWordDisp(TargetReg(kSp), srcOffsetSp, rs_rSI);
-    NewLIR4(kX86LockCmpxchg64A, rs_rDI.GetReg(), rs_rSI.GetReg(), 0, 0);
+    const bool obj_in_di = IsInReg(this, rl_src_obj, rs_rDI);
+    const bool obj_in_si = IsInReg(this, rl_src_obj, rs_rSI);
+    DCHECK(!obj_in_si || !obj_in_di);
+    const bool off_in_di = IsInReg(this, rl_src_offset, rs_rDI);
+    const bool off_in_si = IsInReg(this, rl_src_offset, rs_rSI);
+    DCHECK(!off_in_si || !off_in_di);
+    // If obj/offset is in a reg, use it. Otherwise, use the reg not taken by the other value.
+    RegStorage rs_obj = obj_in_di ? rs_rDI : obj_in_si ? rs_rSI : !off_in_di ? rs_rDI : rs_rSI;
+    RegStorage rs_off = off_in_si ? rs_rSI : off_in_di ? rs_rDI : !obj_in_si ? rs_rSI : rs_rDI;
+    bool push_di = (!obj_in_di && !off_in_di) && (rs_obj == rs_rDI || rs_off == rs_rDI);
+    bool push_si = (!obj_in_si && !off_in_si) && (rs_obj == rs_rSI || rs_off == rs_rSI);
+    if (push_di) {
+      NewLIR1(kX86Push32R, rs_rDI.GetReg());
+      MarkTemp(rs_rDI);
+      LockTemp(rs_rDI);
+    }
+    if (push_si) {
+      NewLIR1(kX86Push32R, rs_rSI.GetReg());
+      MarkTemp(rs_rSI);
+      LockTemp(rs_rSI);
+    }
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+    const size_t push_offset = (push_si ? 4u : 0u) + (push_di ? 4u : 0u);
+    if (!obj_in_si && !obj_in_di) {
+      LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
+      // Dalvik register annotation in LoadBaseIndexedDisp() used the wrong offset. Fix it.
+      DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
+      int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
+      AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
+    }
+    if (!off_in_si && !off_in_di) {
+      LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
+      // Dalvik register annotation in LoadBaseIndexedDisp() used the wrong offset. Fix it.
+      DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
+      int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
+      AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
+    }
+    NewLIR4(kX86LockCmpxchg64A, rs_obj.GetReg(), rs_off.GetReg(), 0, 0);
 
     // After a store we need to insert barrier in case of potential load. Since the
     // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
     GenMemBarrier(kStoreLoad);
 
-    FreeTemp(rs_rSI);
-    UnmarkTemp(rs_rSI);
-    NewLIR1(kX86Pop32R, rs_rSI.GetReg());
-    FreeTemp(rs_rDI);
-    UnmarkTemp(rs_rDI);
-    NewLIR1(kX86Pop32R, rs_rDI.GetReg());
+
+    if (push_si) {
+      FreeTemp(rs_rSI);
+      UnmarkTemp(rs_rSI);
+      NewLIR1(kX86Pop32R, rs_rSI.GetReg());
+    }
+    if (push_di) {
+      FreeTemp(rs_rDI);
+      UnmarkTemp(rs_rDI);
+      NewLIR1(kX86Pop32R, rs_rDI.GetReg());
+    }
     FreeCallTemps();
   } else {
     // EAX must hold expected for CMPXCHG. Neither rl_new_value, nor r_ptr may be in EAX.
@@ -885,11 +912,11 @@
   // We don't know the proper offset for the value, so pick one that will force
   // 4 byte offset.  We will fix this up in the assembler later to have the right
   // value.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   LIR *res = RawLIR(current_dalvik_offset_, kX86Mov32RM, reg.GetReg(), reg.GetReg(), 256,
                     0, 0, target);
   res->target = target;
   res->flags.fixup = kFixupLoad;
-  SetMemRefType(res, true, kLiteral);
   store_method_addr_used_ = true;
   return res;
 }
@@ -1077,6 +1104,9 @@
 }
 
 void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
+  // All memory accesses below reference dalvik regs.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
   LIR *m;
   switch (val) {
     case 0:
@@ -1095,6 +1125,9 @@
 
 void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
+  // All memory accesses below reference dalvik regs.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
   if (Gen64Bit()) {
     if (rl_src1.is_const) {
       std::swap(rl_src1, rl_src2);
@@ -1346,6 +1379,7 @@
   int r_base = TargetReg(kSp).GetReg();
   int displacement = SRegOffset(rl_src.s_reg_low);
 
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   LIR *lir = NewLIR3(x86op, Gen64Bit() ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(), r_base, displacement + LOWORD_OFFSET);
   AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                           true /* is_load */, true /* is64bit */);
@@ -1379,6 +1413,7 @@
   int r_base = TargetReg(kSp).GetReg();
   int displacement = SRegOffset(rl_dest.s_reg_low);
 
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET,
                      Gen64Bit() ? rl_src.reg.GetReg() : rl_src.reg.GetLowReg());
   AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
@@ -2061,6 +2096,7 @@
       int r_base = TargetReg(kSp).GetReg();
       int displacement = SRegOffset(rl_dest.s_reg_low);
 
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
       X86OpCode x86op = GetOpcode(op, rl_dest, false, val);
       LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val);
       AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
@@ -2091,6 +2127,7 @@
     int r_base = TargetReg(kSp).GetReg();
     int displacement = SRegOffset(rl_dest.s_reg_low);
 
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     if (!IsNoOp(op, val_lo)) {
       X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
       LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val_lo);
@@ -2469,6 +2506,9 @@
     return;
   }
 
+  // If we generate any memory access below, it will reference a dalvik reg.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
   if (unary) {
     rl_lhs = LoadValue(rl_lhs, kCoreReg);
     rl_result = UpdateLocTyped(rl_dest, kCoreReg);
@@ -2620,6 +2660,7 @@
     NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
   } else {
     int displacement = SRegOffset(rl_src.s_reg_low);
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP.GetReg(),
                      displacement + LOWORD_OFFSET);
     AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
@@ -2670,6 +2711,7 @@
     rl_result = UpdateLocWideTyped(rl_dest, kCoreReg);
     if (rl_result.location != kLocPhysReg) {
       // Okay, we can do this into memory
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
       OpMemReg(op, rl_result, t_reg.GetReg());
     } else if (!rl_result.reg.IsFloat()) {
       // Can do this directly into the result register
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index ec165af..d1ba239 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -206,77 +206,70 @@
 /*
  * Decode the register id.
  */
-uint64_t X86Mir2Lir::GetRegMaskCommon(RegStorage reg) {
-  uint64_t seed;
-  int shift;
-  int reg_id;
-
-  reg_id = reg.GetRegNum();
-  /* Double registers in x86 are just a single FP register */
-  seed = 1;
-  /* FP register starts at bit position 16 */
-  shift = (reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0;
-  /* Expand the double register id into single offset */
-  shift += reg_id;
-  return (seed << shift);
+ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
+  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
+  return ResourceMask::Bit(
+      /* FP register starts at bit position 16 */
+      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
 }
 
-uint64_t X86Mir2Lir::GetPCUseDefEncoding() {
+ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
   /*
    * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
    * able to clean up some of the x86/Arm_Mips differences
    */
   LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
-  return 0ULL;
+  return kEncodeNone;
 }
 
-void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
+void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                          ResourceMask* use_mask, ResourceMask* def_mask) {
   DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
   DCHECK(!lir->flags.use_def_invalid);
 
   // X86-specific resource map setup here.
   if (flags & REG_USE_SP) {
-    lir->u.m.use_mask |= ENCODE_X86_REG_SP;
+    use_mask->SetBit(kX86RegSP);
   }
 
   if (flags & REG_DEF_SP) {
-    lir->u.m.def_mask |= ENCODE_X86_REG_SP;
+    def_mask->SetBit(kX86RegSP);
   }
 
   if (flags & REG_DEFA) {
-    SetupRegMask(&lir->u.m.def_mask, rs_rAX.GetReg());
+    SetupRegMask(def_mask, rs_rAX.GetReg());
   }
 
   if (flags & REG_DEFD) {
-    SetupRegMask(&lir->u.m.def_mask, rs_rDX.GetReg());
+    SetupRegMask(def_mask, rs_rDX.GetReg());
   }
   if (flags & REG_USEA) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
+    SetupRegMask(use_mask, rs_rAX.GetReg());
   }
 
   if (flags & REG_USEC) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
+    SetupRegMask(use_mask, rs_rCX.GetReg());
   }
 
   if (flags & REG_USED) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rDX.GetReg());
+    SetupRegMask(use_mask, rs_rDX.GetReg());
   }
 
   if (flags & REG_USEB) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rBX.GetReg());
+    SetupRegMask(use_mask, rs_rBX.GetReg());
   }
 
   // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
   if (lir->opcode == kX86RepneScasw) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
-    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
-    SetupRegMask(&lir->u.m.use_mask, rs_rDI.GetReg());
-    SetupRegMask(&lir->u.m.def_mask, rs_rDI.GetReg());
+    SetupRegMask(use_mask, rs_rAX.GetReg());
+    SetupRegMask(use_mask, rs_rCX.GetReg());
+    SetupRegMask(use_mask, rs_rDI.GetReg());
+    SetupRegMask(def_mask, rs_rDI.GetReg());
   }
 
   if (flags & USE_FP_STACK) {
-    lir->u.m.use_mask |= ENCODE_X86_FP_STACK;
-    lir->u.m.def_mask |= ENCODE_X86_FP_STACK;
+    use_mask->SetBit(kX86FPStack);
+    def_mask->SetBit(kX86FPStack);
   }
 }
 
@@ -368,40 +361,40 @@
   return buf;
 }
 
-void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) {
+void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
   char buf[256];
   buf[0] = 0;
 
-  if (mask == ENCODE_ALL) {
+  if (mask.Equals(kEncodeAll)) {
     strcpy(buf, "all");
   } else {
     char num[8];
     int i;
 
     for (i = 0; i < kX86RegEnd; i++) {
-      if (mask & (1ULL << i)) {
+      if (mask.HasBit(i)) {
         snprintf(num, arraysize(num), "%d ", i);
         strcat(buf, num);
       }
     }
 
-    if (mask & ENCODE_CCODE) {
+    if (mask.HasBit(ResourceMask::kCCode)) {
       strcat(buf, "cc ");
     }
     /* Memory bits */
-    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
+    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
       snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
                DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
                (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
     }
-    if (mask & ENCODE_LITERAL) {
+    if (mask.HasBit(ResourceMask::kLiteral)) {
       strcat(buf, "lit ");
     }
 
-    if (mask & ENCODE_HEAP_REF) {
+    if (mask.HasBit(ResourceMask::kHeapRef)) {
       strcat(buf, "heap ");
     }
-    if (mask & ENCODE_MUST_NOT_ALIAS) {
+    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
       strcat(buf, "noalias ");
     }
   }
@@ -551,7 +544,7 @@
   } else {
     // Mark as a scheduling barrier.
     DCHECK(!mem_barrier->flags.use_def_invalid);
-    mem_barrier->u.m.def_mask = ENCODE_ALL;
+    mem_barrier->u.m.def_mask = &kEncodeAll;
   }
   return ret;
 #else
@@ -822,6 +815,7 @@
     int r_base = TargetReg(kSp).GetReg();
     int displacement = SRegOffset(rl_dest.s_reg_low);
 
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
     AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                               false /* is_load */, true /* is64bit */);
@@ -1109,7 +1103,10 @@
       } else {
         // Load the start index from stack, remembering that we pushed EDI.
         int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
-        Load32Disp(rs_rX86_SP, displacement, rs_rBX);
+        {
+          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+          Load32Disp(rs_rX86_SP, displacement, rs_rBX);
+        }
         OpRegReg(kOpXor, rs_rDI, rs_rDI);
         OpRegReg(kOpCmp, rs_rBX, rs_rDI);
         OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);
@@ -1413,10 +1410,10 @@
   // We don't know the proper offset for the value, so pick one that will force
   // 4 byte offset.  We will fix this up in the assembler later to have the right
   // value.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(),  256 /* bogus */);
   load->flags.fixup = kFixupLoad;
   load->target = data_target;
-  SetMemRefType(load, true, kLiteral);
 }
 
 void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
@@ -1856,6 +1853,7 @@
    * end up half-promoted.  In those cases, we must flush the promoted
    * half to memory as well.
    */
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   for (int i = 0; i < cu_->num_ins; i++) {
     PromotionMap* v_map = &promotion_map_[start_vreg + i];
     RegStorage reg = RegStorage::InvalidReg();
@@ -1986,12 +1984,14 @@
       if (loc.wide) {
         loc = UpdateLocWide(loc);
         if (loc.location == kLocPhysReg) {
+          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
           StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
         }
         next_arg += 2;
       } else {
         loc = UpdateLoc(loc);
         if (loc.location == kLocPhysReg) {
+          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
           StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
         }
         next_arg++;
@@ -2008,6 +2008,8 @@
     int current_src_offset = start_offset;
     int current_dest_offset = outs_offset;
 
+    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     while (regs_left_to_pass_via_stack > 0) {
       // This is based on the knowledge that the stack itself is 16-byte aligned.
       bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
@@ -2045,6 +2047,7 @@
         bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
         bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
 
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
         if (src_is_16b_aligned) {
           ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
         } else if (src_is_8b_aligned) {
@@ -2074,8 +2077,7 @@
             AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
           } else {
             // Set barrier for 128-bit load.
-            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
-            ld1->u.m.def_mask = ENCODE_ALL;
+            ld1->u.m.def_mask = &kEncodeAll;
           }
         }
         if (st1 != nullptr) {
@@ -2085,8 +2087,7 @@
             AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
           } else {
             // Set barrier for 128-bit store.
-            SetMemRefType(st1, false /* is_load */, kDalvikReg);
-            st1->u.m.def_mask = ENCODE_ALL;
+            st1->u.m.def_mask = &kEncodeAll;
           }
         }
 
@@ -2123,20 +2124,23 @@
       if (!reg.Valid()) {
         int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
 
-        if (rl_arg.wide) {
-          if (rl_arg.location == kLocPhysReg) {
-            StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
+        {
+          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+          if (rl_arg.wide) {
+            if (rl_arg.location == kLocPhysReg) {
+              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
+            } else {
+              LoadValueDirectWideFixed(rl_arg, regWide);
+              StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
+            }
+            i++;
           } else {
-            LoadValueDirectWideFixed(rl_arg, regWide);
-            StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
-          }
-          i++;
-        } else {
-          if (rl_arg.location == kLocPhysReg) {
-            StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
-          } else {
-            LoadValueDirectFixed(rl_arg, regSingle);
-            StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
+            if (rl_arg.location == kLocPhysReg) {
+              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
+            } else {
+              LoadValueDirectFixed(rl_arg, regSingle);
+              StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
+            }
           }
         }
         call_state = next_call_insn(cu_, info, call_state, target_method,
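
For reference, a sketch of what the rewritten GetRegMaskCommon() yields on x86 (register numbers are illustrative; FP registers start at bit position 16, i.e. kX86FPReg0):

// Core register number 2 occupies bit 2 of the resource mask.
ResourceMask core_mask = ResourceMask::Bit(2);
// Float (or wide) register number 3 occupies bit kX86FPReg0 + 3.
ResourceMask fp_mask = ResourceMask::Bit(16 + 3);
// Distinct bits, so a core-register def never conflicts with an FP-register use.
DCHECK(!core_mask.Intersects(fp_mask));
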
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index d074d81..c72e8cd 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -376,7 +376,8 @@
       break;
   }
   LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
-  if (r_base == rs_rX86_SP) {
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK(r_base == rs_rX86_SP);
     AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
   }
   return l;
@@ -403,8 +404,10 @@
       break;
   }
   LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
-  AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
-  AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
+    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
+  }
   return l;
 }
 
@@ -427,7 +430,9 @@
       break;
   }
   LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
-  AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
+  }
   return l;
 }
 
@@ -575,11 +580,11 @@
         // We don't know the proper offset for the value, so pick one that will force
         // 4 byte offset.  We will fix this up in the assembler later to have the right
         // value.
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
         res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
                            kDouble);
         res->target = data_target;
         res->flags.fixup = kFixupLoad;
-        SetMemRefType(res, true, kLiteral);
         store_method_addr_used_ = true;
       } else {
         if (val_lo == 0) {
@@ -684,7 +689,8 @@
                         displacement + HIWORD_OFFSET);
       }
     }
-    if (r_base == rs_rX86_SP) {
+    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+      DCHECK(r_base == rs_rX86_SP);
       AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                               true /* is_load */, is64bit);
       if (pair) {
@@ -815,7 +821,8 @@
       store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
       store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
     }
-    if (r_base == rs_rX86_SP) {
+    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+      DCHECK(r_base == rs_rX86_SP);
       AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                               false /* is_load */, is64bit);
       if (pair) {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 5022529..f1b5811 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -111,9 +111,6 @@
   kX86RegEnd   = kX86FPStack,
 };
 
-#define ENCODE_X86_REG_SP           (1ULL << kX86RegSP)
-#define ENCODE_X86_FP_STACK         (1ULL << kX86FPStack)
-
 // FIXME: for 64-bit, perhaps add an X86_64NativeRegisterPool enum?
 enum X86NativeRegisterPool {
   r0             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index 925d4a2..da49524 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -32,10 +32,11 @@
 constexpr size_t Arena::kDefaultSize;
 
 template <bool kCount>
-const char* ArenaAllocatorStatsImpl<kCount>::kAllocNames[kNumArenaAllocKinds] = {
+const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
   "Misc       ",
   "BasicBlock ",
   "LIR        ",
+  "LIR masks  ",
   "MIR        ",
   "DataFlow   ",
   "GrowList   ",
@@ -101,6 +102,7 @@
        << num_allocations << ", avg size: " << bytes_allocated / num_allocations << "\n";
   }
   os << "===== Allocation by kind\n";
+  COMPILE_ASSERT(arraysize(kAllocNames) == kNumArenaAllocKinds, check_arraysize_kAllocNames);
   for (int i = 0; i < kNumArenaAllocKinds; i++) {
       os << kAllocNames[i] << std::setw(10) << alloc_stats_[i] << "\n";
   }
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index ac3938f..f4bcb1d 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -41,6 +41,7 @@
   kArenaAllocMisc,
   kArenaAllocBB,
   kArenaAllocLIR,
+  kArenaAllocLIRResourceMask,
   kArenaAllocMIR,
   kArenaAllocDFInfo,
   kArenaAllocGrowableArray,
@@ -92,7 +93,7 @@
   // TODO: Use std::array<size_t, kNumArenaAllocKinds> from C++11 when we upgrade the STL.
   size_t alloc_stats_[kNumArenaAllocKinds];  // Bytes used by various allocation kinds.
 
-  static const char* kAllocNames[kNumArenaAllocKinds];
+  static const char* const kAllocNames[];
 };
 
 typedef ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations> ArenaAllocatorStats;
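
The pattern used for kAllocNames above, in isolation (the enum and strings are illustrative): sizing the array from its initializer and asserting the size against the enum keeps the two in sync when a new allocation kind such as kArenaAllocLIRResourceMask is added.

enum ExampleAllocKind {
  kExampleAllocFoo,
  kExampleAllocBar,
  kNumExampleAllocKinds
};
static const char* const kExampleAllocNames[] = {
  "Foo ",
  "Bar ",
};
COMPILE_ASSERT(arraysize(kExampleAllocNames) == kNumExampleAllocKinds, check_example_names);
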