Merge "Do not visit null object from transaction logs."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 791e954..3295d86 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -17,6 +17,7 @@
 LOCAL_PATH := art
 
 TEST_COMMON_SRC_FILES := \
+	compiler/dex/local_value_numbering_test.cc \
 	compiler/driver/compiler_driver_test.cc \
 	compiler/elf_writer_test.cc \
 	compiler/image_test.cc \
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 9e83210..a3ea034 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -16,8 +16,120 @@
 
 #include "local_value_numbering.h"
 
+#include "mir_graph.h"
+
 namespace art {
 
+uint16_t LocalValueNumbering::GetFieldId(const DexFile* dex_file, uint16_t field_idx) {
+  FieldReference key = { dex_file, field_idx };
+  auto it = field_index_map_.find(key);
+  if (it != field_index_map_.end()) {
+    return it->second;
+  }
+  uint16_t id = field_index_map_.size();
+  field_index_map_.Put(key, id);
+  return id;
+}
+
+void LocalValueNumbering::AdvanceGlobalMemory() {
+  // See AdvanceMemoryVersion() for explanation.
+  global_memory_version_ = next_memory_version_;
+  ++next_memory_version_;
+}
+
+uint16_t LocalValueNumbering::GetMemoryVersion(uint16_t base, uint16_t field, uint16_t type) {
+  // See AdvanceMemoryVersion() for explanation.
+  MemoryVersionKey key = { base, field, type };
+  MemoryVersionMap::iterator it = memory_version_map_.find(key);
+  uint16_t memory_version = (it != memory_version_map_.end()) ? it->second : 0u;
+  if (base != NO_VALUE && non_aliasing_refs_.find(base) == non_aliasing_refs_.end()) {
+    // Check modifications by potentially aliased access.
+    MemoryVersionKey aliased_access_key = { NO_VALUE, field, type };
+    auto aa_it = memory_version_map_.find(aliased_access_key);
+    if (aa_it != memory_version_map_.end() && aa_it->second > memory_version) {
+      memory_version = aa_it->second;
+    }
+    memory_version = std::max(memory_version, global_memory_version_);
+  } else if (base != NO_VALUE) {
+    // Ignore global_memory_version_ for access via unique references.
+  } else {
+    memory_version = std::max(memory_version, global_memory_version_);
+  }
+  return memory_version;
+}
+
+uint16_t LocalValueNumbering::AdvanceMemoryVersion(uint16_t base, uint16_t field, uint16_t type) {
+  // When we read the same value from memory, we want to assign the same value name to it.
+  // However, we need to be careful not to assign the same value name if the memory location
+  // may have been written to between the reads. To avoid that we do "memory versioning".
+  //
+  // For each write to a memory location (instance field, static field, array element) we assign
+  // a new memory version number to the location identified by the value name of the base register,
+  // the field id and type, or "{ base, field, type }". For static fields the "base" is NO_VALUE
+  // since they are not accessed via a reference. For arrays the "field" is NO_VALUE since they
+  // don't have a field id.
+  //
+  // To account for the possibility of aliased access to the same memory location via different
+  // "base", we also store the memory version number with the key "{ NO_VALUE, field, type }"
+  // if "base" is an aliasing reference and check it in GetMemoryVersion() on reads via
+  // aliasing references. A global memory version is set for method calls as a method can
+  // potentially write to any memory location accessed via an aliasing reference.
+
+  uint16_t result = next_memory_version_;
+  ++next_memory_version_;
+  MemoryVersionKey key = { base, field, type };
+  memory_version_map_.Overwrite(key, result);
+  if (base != NO_VALUE && non_aliasing_refs_.find(base) == non_aliasing_refs_.end()) {
+    // Advance memory version for aliased access.
+    MemoryVersionKey aliased_access_key = { NO_VALUE, field, type };
+    memory_version_map_.Overwrite(aliased_access_key, result);
+  }
+  return result;
+}
+
+uint16_t LocalValueNumbering::MarkNonAliasingNonNull(MIR* mir) {
+  uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
+  SetOperandValue(mir->ssa_rep->defs[0], res);
+  DCHECK(null_checked_.find(res) == null_checked_.end());
+  null_checked_.insert(res);
+  non_aliasing_refs_.insert(res);
+  return res;
+}
+
+void LocalValueNumbering::MakeArgsAliasing(MIR* mir) {
+  for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
+    uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
+    non_aliasing_refs_.erase(reg);
+  }
+}
+
+void LocalValueNumbering::HandleNullCheck(MIR* mir, uint16_t reg) {
+  if (null_checked_.find(reg) != null_checked_.end()) {
+    if (cu_->verbose) {
+      LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+    }
+    mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+  } else {
+    null_checked_.insert(reg);
+  }
+}
+
+void LocalValueNumbering::HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index) {
+  if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
+    if (cu_->verbose) {
+      LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+    }
+    mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
+  }
+  // Use side effect to note range check completed.
+  (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
+}
+
+void LocalValueNumbering::HandlePutObject(MIR* mir) {
+  // If we're storing a non-aliasing reference, stop tracking it as non-aliasing now.
+  uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
+  non_aliasing_refs_.erase(base);
+}
 
 uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
   uint16_t res = NO_VALUE;
@@ -36,8 +148,6 @@
     case Instruction::CHECK_CAST:
     case Instruction::THROW:
     case Instruction::FILL_ARRAY_DATA:
-    case Instruction::FILLED_NEW_ARRAY:
-    case Instruction::FILLED_NEW_ARRAY_RANGE:
     case Instruction::PACKED_SWITCH:
     case Instruction::SPARSE_SWITCH:
     case Instruction::IF_EQ:
@@ -52,16 +162,6 @@
     case Instruction::IF_GEZ:
     case Instruction::IF_GTZ:
     case Instruction::IF_LEZ:
-    case Instruction::INVOKE_STATIC_RANGE:
-    case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_DIRECT:
-    case Instruction::INVOKE_DIRECT_RANGE:
-    case Instruction::INVOKE_VIRTUAL:
-    case Instruction::INVOKE_VIRTUAL_RANGE:
-    case Instruction::INVOKE_SUPER:
-    case Instruction::INVOKE_SUPER_RANGE:
-    case Instruction::INVOKE_INTERFACE:
-    case Instruction::INVOKE_INTERFACE_RANGE:
     case kMirOpFusedCmplFloat:
     case kMirOpFusedCmpgFloat:
     case kMirOpFusedCmplDouble:
@@ -70,25 +170,55 @@
       // Nothing defined - take no action.
       break;
 
-    case Instruction::MOVE_EXCEPTION:
+    case Instruction::FILLED_NEW_ARRAY:
+    case Instruction::FILLED_NEW_ARRAY_RANGE:
+      // Nothing defined but the result will be unique and non-null.
+      if (mir->next != nullptr && mir->next->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+        MarkNonAliasingNonNull(mir->next);
+        // The MOVE_RESULT_OBJECT will be processed next and we'll return the value name then.
+      }
+      MakeArgsAliasing(mir);
+      break;
+
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_DIRECT_RANGE:
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+    case Instruction::INVOKE_SUPER:
+    case Instruction::INVOKE_SUPER_RANGE:
+    case Instruction::INVOKE_INTERFACE:
+    case Instruction::INVOKE_INTERFACE_RANGE: {
+        // Nothing defined but handle the null check.
+        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
+        HandleNullCheck(mir, reg);
+      }
+      // Intentional fall-through.
+    case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_STATIC_RANGE:
+      AdvanceGlobalMemory();
+      MakeArgsAliasing(mir);
+      break;
+
     case Instruction::MOVE_RESULT:
     case Instruction::MOVE_RESULT_OBJECT:
     case Instruction::INSTANCE_OF:
+      // 1 result, treat as unique each time, use result s_reg - will be unique.
+      res = GetOperandValue(mir->ssa_rep->defs[0]);
+      SetOperandValue(mir->ssa_rep->defs[0], res);
+      break;
+    case Instruction::MOVE_EXCEPTION:
     case Instruction::NEW_INSTANCE:
     case Instruction::CONST_STRING:
     case Instruction::CONST_STRING_JUMBO:
     case Instruction::CONST_CLASS:
-    case Instruction::NEW_ARRAY: {
-        // 1 result, treat as unique each time, use result s_reg - will be unique.
-        uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
+    case Instruction::NEW_ARRAY:
+      // 1 result, treat as unique each time, use result s_reg - will be unique.
+      res = MarkNonAliasingNonNull(mir);
       break;
-    case Instruction::MOVE_RESULT_WIDE: {
-        // 1 wide result, treat as unique each time, use result s_reg - will be unique.
-        uint16_t res = GetOperandValueWide(mir->ssa_rep->defs[0]);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
+    case Instruction::MOVE_RESULT_WIDE:
+      // 1 wide result, treat as unique each time, use result s_reg - will be unique.
+      res = GetOperandValueWide(mir->ssa_rep->defs[0]);
+      SetOperandValueWide(mir->ssa_rep->defs[0], res);
       break;
 
     case kMirOpPhi:
@@ -104,35 +234,31 @@
     case Instruction::MOVE_OBJECT_16:
     case Instruction::MOVE_FROM16:
     case Instruction::MOVE_OBJECT_FROM16:
-    case kMirOpCopy: {
-        // Just copy value number of source to value number of resulit.
-        uint16_t res = GetOperandValue(mir->ssa_rep->uses[0]);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
+    case kMirOpCopy:
+      // Just copy value number of source to value number of result.
+      res = GetOperandValue(mir->ssa_rep->uses[0]);
+      SetOperandValue(mir->ssa_rep->defs[0], res);
       break;
 
     case Instruction::MOVE_WIDE:
     case Instruction::MOVE_WIDE_16:
-    case Instruction::MOVE_WIDE_FROM16: {
-        // Just copy value number of source to value number of result.
-        uint16_t res = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
+    case Instruction::MOVE_WIDE_FROM16:
+      // Just copy value number of source to value number of result.
+      res = GetOperandValueWide(mir->ssa_rep->uses[0]);
+      SetOperandValueWide(mir->ssa_rep->defs[0], res);
       break;
 
     case Instruction::CONST:
     case Instruction::CONST_4:
-    case Instruction::CONST_16: {
-        uint16_t res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
-                                   High16Bits(mir->dalvikInsn.vB >> 16), 0);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
+    case Instruction::CONST_16:
+      res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
+                        High16Bits(mir->dalvikInsn.vB >> 16), 0);
+      SetOperandValue(mir->ssa_rep->defs[0], res);
       break;
 
-    case Instruction::CONST_HIGH16: {
-        uint16_t res = LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
+    case Instruction::CONST_HIGH16:
+      res = LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
+      SetOperandValue(mir->ssa_rep->defs[0], res);
       break;
 
     case Instruction::CONST_WIDE_16:
@@ -145,8 +271,8 @@
         } else {
           high_res = LookupValue(Instruction::CONST, 0, 0, 2);
         }
-        uint16_t res = LookupValue(Instruction::CONST, low_res, high_res, 3);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
+        res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
 
@@ -157,7 +283,7 @@
                                        High16Bits(low_word), 1);
         uint16_t high_res = LookupValue(Instruction::CONST, Low16Bits(high_word),
                                        High16Bits(high_word), 2);
-        uint16_t res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+        res = LookupValue(Instruction::CONST, low_res, high_res, 3);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -165,7 +291,7 @@
     case Instruction::CONST_WIDE_HIGH16: {
         uint16_t low_res = LookupValue(Instruction::CONST, 0, 0, 1);
         uint16_t high_res = LookupValue(Instruction::CONST, 0, Low16Bits(mir->dalvikInsn.vB), 2);
-        uint16_t res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+        res = LookupValue(Instruction::CONST, low_res, high_res, 3);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -181,7 +307,7 @@
     case Instruction::FLOAT_TO_INT: {
         // res = op + 1 operand
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -192,7 +318,7 @@
     case Instruction::DOUBLE_TO_INT: {
         // res = op + 1 wide operand
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -205,7 +331,7 @@
     case Instruction::NEG_DOUBLE: {
         // wide res = op + 1 wide operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -216,7 +342,7 @@
     case Instruction::INT_TO_LONG: {
         // wide res = op + 1 operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -227,7 +353,7 @@
         // res = op + 2 wide operands
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -259,7 +385,7 @@
         // res = op + 2 operands
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -293,7 +419,7 @@
         // wide res = op + 2 wide operands
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -307,7 +433,7 @@
         // wide res = op + 1 wide operand + 1 operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -325,7 +451,7 @@
         // res = op + 2 operands
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -352,38 +478,25 @@
         // Same as res = op + 2 operands, except use vB as operand 2
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = LookupValue(Instruction::CONST, mir->dalvikInsn.vB, 0, 0);
-        uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
 
-    case Instruction::AGET_WIDE:
-    case Instruction::AGET:
     case Instruction::AGET_OBJECT:
+    case Instruction::AGET:
+    case Instruction::AGET_WIDE:
     case Instruction::AGET_BOOLEAN:
     case Instruction::AGET_BYTE:
     case Instruction::AGET_CHAR:
     case Instruction::AGET_SHORT: {
+        uint16_t type = opcode - Instruction::AGET;
         uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
-        if (null_checked_.find(array) != null_checked_.end()) {
-          if (cu_->verbose) {
-            LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
-          }
-          mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
-        } else {
-          null_checked_.insert(array);
-        }
+        HandleNullCheck(mir, array);
         uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
-        if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
-          if (cu_->verbose) {
-            LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
-          }
-          mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
-        }
-        // Use side effect to note range check completed.
-        (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
+        HandleRangeCheck(mir, array, index);
         // Establish value number for loaded register. Note use of memory version.
-        uint16_t memory_version = GetMemoryVersion(array, NO_VALUE);
+        uint16_t memory_version = GetMemoryVersion(array, NO_VALUE, type);
         uint16_t res = LookupValue(ARRAY_REF, array, index, memory_version);
         if (opcode == Instruction::AGET_WIDE) {
           SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -393,116 +506,113 @@
       }
       break;
 
-    case Instruction::APUT_WIDE:
-    case Instruction::APUT:
     case Instruction::APUT_OBJECT:
-    case Instruction::APUT_SHORT:
-    case Instruction::APUT_CHAR:
+      HandlePutObject(mir);
+      // Intentional fall-through.
+    case Instruction::APUT:
+    case Instruction::APUT_WIDE:
     case Instruction::APUT_BYTE:
-    case Instruction::APUT_BOOLEAN: {
+    case Instruction::APUT_BOOLEAN:
+    case Instruction::APUT_SHORT:
+    case Instruction::APUT_CHAR: {
+        uint16_t type = opcode - Instruction::APUT;
         int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
         int index_idx = array_idx + 1;
         uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
-        if (null_checked_.find(array) != null_checked_.end()) {
-          if (cu_->verbose) {
-            LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
-          }
-          mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
-        } else {
-          null_checked_.insert(array);
-        }
+        HandleNullCheck(mir, array);
         uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
-        if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
-          if (cu_->verbose) {
-            LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
-          }
-          mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
-        }
-        // Use side effect to note range check completed.
-        (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
+        HandleRangeCheck(mir, array, index);
         // Rev the memory version
-        AdvanceMemoryVersion(array, NO_VALUE);
+        AdvanceMemoryVersion(array, NO_VALUE, type);
       }
       break;
 
     case Instruction::IGET_OBJECT:
-    case Instruction::IGET_WIDE:
     case Instruction::IGET:
-    case Instruction::IGET_CHAR:
-    case Instruction::IGET_SHORT:
+    case Instruction::IGET_WIDE:
     case Instruction::IGET_BOOLEAN:
-    case Instruction::IGET_BYTE: {
+    case Instruction::IGET_BYTE:
+    case Instruction::IGET_CHAR:
+    case Instruction::IGET_SHORT: {
         uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
-        if (null_checked_.find(base) != null_checked_.end()) {
-          if (cu_->verbose) {
-            LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
-          }
-          mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
-        } else {
-          null_checked_.insert(base);
-        }
-        uint16_t field_ref = mir->dalvikInsn.vC;
-        uint16_t memory_version = GetMemoryVersion(base, field_ref);
+        HandleNullCheck(mir, base);
+        uint16_t memory_version;
+        uint16_t field_id;
+        // TODO: all gets treated as volatile.
+        // Volatile fields always get a new memory version; field id is irrelevant.
+        // Unresolved fields are always marked as volatile and handled the same way here.
+        field_id = 0u;
+        memory_version = next_memory_version_;
+        ++next_memory_version_;
         if (opcode == Instruction::IGET_WIDE) {
-          uint16_t res = LookupValue(Instruction::IGET_WIDE, base, field_ref, memory_version);
+          res = LookupValue(Instruction::IGET_WIDE, base, field_id, memory_version);
           SetOperandValueWide(mir->ssa_rep->defs[0], res);
         } else {
-          uint16_t res = LookupValue(Instruction::IGET, base, field_ref, memory_version);
+          res = LookupValue(Instruction::IGET, base, field_id, memory_version);
           SetOperandValue(mir->ssa_rep->defs[0], res);
         }
       }
       break;
 
-    case Instruction::IPUT_WIDE:
     case Instruction::IPUT_OBJECT:
+      HandlePutObject(mir);
+      // Intentional fall-through.
     case Instruction::IPUT:
+    case Instruction::IPUT_WIDE:
     case Instruction::IPUT_BOOLEAN:
     case Instruction::IPUT_BYTE:
     case Instruction::IPUT_CHAR:
     case Instruction::IPUT_SHORT: {
+        uint16_t type = opcode - Instruction::IPUT;
         int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
         uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
-        if (null_checked_.find(base) != null_checked_.end()) {
-          if (cu_->verbose) {
-            LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
-          }
-          mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
-        } else {
-          null_checked_.insert(base);
-        }
-        uint16_t field_ref = mir->dalvikInsn.vC;
-        AdvanceMemoryVersion(base, field_ref);
+        HandleNullCheck(mir, base);
+        // TODO: all puts treated as unresolved.
+        // Unresolved fields always alias with everything of the same type.
+        unresolved_ifield_version_[type] = next_memory_version_;
+        ++next_memory_version_;
       }
       break;
 
     case Instruction::SGET_OBJECT:
     case Instruction::SGET:
+    case Instruction::SGET_WIDE:
     case Instruction::SGET_BOOLEAN:
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
-    case Instruction::SGET_SHORT:
-    case Instruction::SGET_WIDE: {
-        uint16_t field_ref = mir->dalvikInsn.vB;
-        uint16_t memory_version = GetMemoryVersion(NO_VALUE, field_ref);
+    case Instruction::SGET_SHORT: {
+        uint16_t memory_version;
+        uint16_t field_id;
+        // TODO: all gets treated as volatile.
+        // Volatile fields always get a new memory version; field id is irrelevant.
+        // Unresolved fields are always marked as volatile and handled the same way here.
+        field_id = 0u;
+        memory_version = next_memory_version_;
+        ++next_memory_version_;
         if (opcode == Instruction::SGET_WIDE) {
-          uint16_t res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_ref, memory_version);
+          res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_id, memory_version);
           SetOperandValueWide(mir->ssa_rep->defs[0], res);
         } else {
-          uint16_t res = LookupValue(Instruction::SGET, NO_VALUE, field_ref, memory_version);
+          res = LookupValue(Instruction::SGET, NO_VALUE, field_id, memory_version);
           SetOperandValue(mir->ssa_rep->defs[0], res);
         }
       }
       break;
 
     case Instruction::SPUT_OBJECT:
+      HandlePutObject(mir);
+      // Intentional fall-through.
     case Instruction::SPUT:
+    case Instruction::SPUT_WIDE:
     case Instruction::SPUT_BOOLEAN:
     case Instruction::SPUT_BYTE:
     case Instruction::SPUT_CHAR:
-    case Instruction::SPUT_SHORT:
-    case Instruction::SPUT_WIDE: {
-        uint16_t field_ref = mir->dalvikInsn.vB;
-        AdvanceMemoryVersion(NO_VALUE, field_ref);
+    case Instruction::SPUT_SHORT: {
+        uint16_t type = opcode - Instruction::SPUT;
+        // TODO: all puts treated as unresolved.
+        // Unresolved fields always alias with everything of the same type.
+        unresolved_sfield_version_[type] = next_memory_version_;
+        ++next_memory_version_;
       }
       break;
   }
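
For readers following the AdvanceMemoryVersion() comment above, here is a small standalone sketch, for illustration only, of the { base, field, type } versioning idea. It uses std::map/std::set in place of ART's SafeMap; the class name, kNoValue and the method names are illustrative stand-ins, not part of the patch.

#include <algorithm>
#include <cstdint>
#include <map>
#include <set>
#include <tuple>

// Illustrative stand-in for the LVN memory-version bookkeeping.
class MemoryVersions {
 public:
  static constexpr uint16_t kNoValue = 0xffffu;  // Stand-in for NO_VALUE.

  // A write via "base" bumps { base, field, type }; if "base" may alias other
  // references, the shared key { kNoValue, field, type } is bumped as well so
  // that later reads via other bases observe a new version.
  uint16_t Advance(uint16_t base, uint16_t field, uint16_t type) {
    uint16_t version = next_version_++;
    map_[Key(base, field, type)] = version;
    if (base != kNoValue && non_aliasing_.count(base) == 0u) {
      map_[Key(kNoValue, field, type)] = version;
    }
    return version;
  }

  // A read returns the newest version that could have written the location.
  uint16_t Get(uint16_t base, uint16_t field, uint16_t type) const {
    uint16_t version = Lookup(Key(base, field, type));
    if (base == kNoValue || non_aliasing_.count(base) == 0u) {
      version = std::max(version, Lookup(Key(kNoValue, field, type)));
      version = std::max(version, global_version_);  // Set by AdvanceGlobal().
    }
    return version;
  }

  void AdvanceGlobal() { global_version_ = next_version_++; }  // E.g. after an invoke.
  void MarkNonAliasing(uint16_t base) { non_aliasing_.insert(base); }

 private:
  using KeyType = std::tuple<uint16_t, uint16_t, uint16_t>;

  static KeyType Key(uint16_t base, uint16_t field, uint16_t type) {
    return std::make_tuple(base, field, type);
  }

  uint16_t Lookup(const KeyType& key) const {
    auto it = map_.find(key);
    return it != map_.end() ? it->second : 0u;
  }

  uint16_t next_version_ = 1u;
  uint16_t global_version_ = 0u;
  std::map<KeyType, uint16_t> map_;
  std::set<uint16_t> non_aliasing_;
};

With this scheme, two reads of the same field through the same non-escaped base see equal versions, and therefore equal value names, unless an aliasing write or an invoke intervenes, which is what the LVN tests added below exercise.
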
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 33ca8f1..348bedc 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -24,16 +24,78 @@
 
 namespace art {
 
-// Key is s_reg, value is value name.
-typedef SafeMap<uint16_t, uint16_t> SregValueMap;
-// Key is concatenation of quad, value is value name.
-typedef SafeMap<uint64_t, uint16_t> ValueMap;
-// Key represents a memory address, value is generation.
-typedef SafeMap<uint32_t, uint16_t> MemoryVersionMap;
+class DexFile;
 
 class LocalValueNumbering {
+ private:
+  // Field types correspond to the ordering of GET/PUT instructions; this order is the same
+  // for IGET, IPUT, SGET, SPUT, AGET and APUT:
+  // op         0
+  // op_WIDE    1
+  // op_OBJECT  2
+  // op_BOOLEAN 3
+  // op_BYTE    4
+  // op_CHAR    5
+  // op_SHORT   6
+  static constexpr size_t kFieldTypeCount = 7;
+
+  // FieldReference represents either a unique resolved field or all unresolved fields together.
+  struct FieldReference {
+    const DexFile* dex_file;
+    uint16_t field_idx;
+  };
+
+  struct FieldReferenceComparator {
+    bool operator()(const FieldReference& lhs, const FieldReference& rhs) const {
+      if (lhs.field_idx != rhs.field_idx) {
+        return lhs.field_idx < rhs.field_idx;
+      }
+      return lhs.dex_file < rhs.dex_file;
+    }
+  };
+
+  struct MemoryVersionKey {
+    uint16_t base;
+    uint16_t field_id;
+    uint16_t type;
+  };
+
+  struct MemoryVersionKeyComparator {
+    bool operator()(const MemoryVersionKey& lhs, const MemoryVersionKey& rhs) const {
+      if (lhs.base != rhs.base) {
+        return lhs.base < rhs.base;
+      }
+      if (lhs.field_id != rhs.field_id) {
+        return lhs.field_id < rhs.field_id;
+      }
+      return lhs.type < rhs.type;
+    }
+  };
+
+  // Key is s_reg, value is value name.
+  typedef SafeMap<uint16_t, uint16_t> SregValueMap;
+  // Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
+  typedef SafeMap<uint64_t, uint16_t> ValueMap;
+  // Key represents a memory address, value is generation.
+  typedef SafeMap<MemoryVersionKey, uint16_t, MemoryVersionKeyComparator> MemoryVersionMap;
+  // Maps field key to field id for resolved fields.
+  typedef SafeMap<FieldReference, uint32_t, FieldReferenceComparator> FieldIndexMap;
+
  public:
-  explicit LocalValueNumbering(CompilationUnit* cu) : cu_(cu) {}
+  explicit LocalValueNumbering(CompilationUnit* cu)
+      : cu_(cu),
+        sreg_value_map_(),
+        sreg_wide_value_map_(),
+        value_map_(),
+        next_memory_version_(1u),
+        global_memory_version_(0u),
+        memory_version_map_(),
+        field_index_map_(),
+        non_aliasing_refs_(),
+        null_checked_() {
+    std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
+    std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
+  }
 
   static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
     return (static_cast<uint64_t>(op) << 48 | static_cast<uint64_t>(operand1) << 32 |
@@ -59,29 +121,6 @@
     return (it != value_map_.end());
   };
 
-  uint16_t GetMemoryVersion(uint16_t base, uint16_t field) {
-    uint32_t key = (base << 16) | field;
-    uint16_t res;
-    MemoryVersionMap::iterator it = memory_version_map_.find(key);
-    if (it == memory_version_map_.end()) {
-      res = 0;
-      memory_version_map_.Put(key, res);
-    } else {
-      res = it->second;
-    }
-    return res;
-  };
-
-  void AdvanceMemoryVersion(uint16_t base, uint16_t field) {
-    uint32_t key = (base << 16) | field;
-    MemoryVersionMap::iterator it = memory_version_map_.find(key);
-    if (it == memory_version_map_.end()) {
-      memory_version_map_.Put(key, 0);
-    } else {
-      it->second++;
-    }
-  };
-
   void SetOperandValue(uint16_t s_reg, uint16_t value) {
     SregValueMap::iterator it = sreg_value_map_.find(s_reg);
     if (it != sreg_value_map_.end()) {
@@ -129,11 +168,28 @@
   uint16_t GetValueNumber(MIR* mir);
 
  private:
+  uint16_t GetFieldId(const DexFile* dex_file, uint16_t field_idx);
+  void AdvanceGlobalMemory();
+  uint16_t GetMemoryVersion(uint16_t base, uint16_t field, uint16_t type);
+  uint16_t AdvanceMemoryVersion(uint16_t base, uint16_t field, uint16_t type);
+  uint16_t MarkNonAliasingNonNull(MIR* mir);
+  void MakeArgsAliasing(MIR* mir);
+  void HandleNullCheck(MIR* mir, uint16_t reg);
+  void HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index);
+  void HandlePutObject(MIR* mir);
+
   CompilationUnit* const cu_;
   SregValueMap sreg_value_map_;
   SregValueMap sreg_wide_value_map_;
   ValueMap value_map_;
+  uint16_t next_memory_version_;
+  uint16_t global_memory_version_;
+  uint16_t unresolved_sfield_version_[kFieldTypeCount];
+  uint16_t unresolved_ifield_version_[kFieldTypeCount];
   MemoryVersionMap memory_version_map_;
+  FieldIndexMap field_index_map_;
+  // Value names of references to objects that cannot be reached through a different value name.
+  std::set<uint16_t> non_aliasing_refs_;
   std::set<uint16_t> null_checked_;
 };
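
A self-contained illustration of the field-type indexing described in the comment above: it assumes the seven *GET/*PUT variants are consecutive in Instruction::Code, so subtracting the base opcode yields an index into arrays such as unresolved_ifield_version_. The Op enum below is hypothetical shorthand, not the real enum.

#include <cassert>
#include <cstdint>

enum class Op : uint16_t {  // Hypothetical stand-in for the Instruction::Code ordering.
  kIget, kIgetWide, kIgetObject, kIgetBoolean, kIgetByte, kIgetChar, kIgetShort,
};

static constexpr size_t kFieldTypeCount = 7;

static constexpr uint16_t FieldType(Op opcode) {
  return static_cast<uint16_t>(opcode) - static_cast<uint16_t>(Op::kIget);
}

int main() {
  static_assert(FieldType(Op::kIgetWide) == 1u, "op_WIDE maps to type 1");
  static_assert(FieldType(Op::kIgetShort) == 6u, "op_SHORT maps to type 6");
  uint16_t unresolved_ifield_version[kFieldTypeCount] = {};
  // An unresolved IPUT_CHAR-style store would bump the version shared by all
  // instance fields of type CHAR:
  unresolved_ifield_version[FieldType(Op::kIgetChar)] = 1u;
  assert(unresolved_ifield_version[5] == 1u);
  return 0;
}
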
 
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
new file mode 100644
index 0000000..6ab6c51
--- /dev/null
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+
+#include "local_value_numbering.h"
+#include "compiler_internals.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+class LocalValueNumberingTest : public testing::Test {
+ protected:
+  struct IFieldDef {
+    uint16_t field_idx;
+    uintptr_t declaring_dex_file;
+    uint16_t declaring_field_idx;
+    bool is_volatile;
+  };
+
+  struct SFieldDef {
+    uint16_t field_idx;
+    uintptr_t declaring_dex_file;
+    uint16_t declaring_field_idx;
+    bool is_volatile;
+  };
+
+  struct MIRDef {
+    static constexpr size_t kMaxSsaDefs = 2;
+    static constexpr size_t kMaxSsaUses = 3;
+
+    Instruction::Code opcode;
+    int64_t value;
+    uint32_t field_annotation;
+    size_t num_uses;
+    int32_t uses[kMaxSsaUses];
+    size_t num_defs;
+    int32_t defs[kMaxSsaDefs];
+  };
+
+#define DEF_CONST(opcode, reg, value) \
+    { opcode, value, 0u, 0, { }, 1, { reg } }
+#define DEF_CONST_WIDE(opcode, reg, value) \
+    { opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_IGET(opcode, reg, obj, field_annotation) \
+    { opcode, 0u, field_annotation, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(opcode, reg, obj, field_annotation) \
+    { opcode, 0u, field_annotation, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(opcode, reg, obj, field_annotation) \
+    { opcode, 0u, field_annotation, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(opcode, reg, obj, field_annotation) \
+    { opcode, 0u, field_annotation, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(opcode, reg, field_annotation) \
+    { opcode, 0u, field_annotation, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(opcode, reg, field_annotation) \
+    { opcode, 0u, field_annotation, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(opcode, reg, field_annotation) \
+    { opcode, 0u, field_annotation, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(opcode, reg, field_annotation) \
+    { opcode, 0u, field_annotation, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_INVOKE1(opcode, reg) \
+    { opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_UNIQUE_REF(opcode, reg) \
+    { opcode, 0u, 0u, 0, { }, 1, { reg } }  // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
+
+  void DoPrepareIFields(const IFieldDef* defs, size_t count) {
+    // Empty for now: the LVN does not look up field info yet; all gets are
+    // treated as volatile and all puts as unresolved (see the TODOs in
+    // local_value_numbering.cc).
+  }
+
+  template <size_t count>
+  void PrepareIFields(const IFieldDef (&defs)[count]) {
+    DoPrepareIFields(defs, count);
+  }
+
+  void DoPrepareSFields(const SFieldDef* defs, size_t count) {
+    // Empty for now, same as DoPrepareIFields() above.
+  }
+
+  template <size_t count>
+  void PrepareSFields(const SFieldDef (&defs)[count]) {
+    DoPrepareSFields(defs, count);
+  }
+
+  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
+    mir_count_ = count;
+    mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, ArenaAllocator::kAllocMIR));
+    ssa_reps_.resize(count);
+    for (size_t i = 0u; i != count; ++i) {
+      const MIRDef* def = &defs[i];
+      MIR* mir = &mirs_[i];
+      mir->dalvikInsn.opcode = def->opcode;
+      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
+      mir->dalvikInsn.vB_wide = def->value;
+      mir->ssa_rep = &ssa_reps_[i];
+      mir->ssa_rep->num_uses = def->num_uses;
+      mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
+      mir->ssa_rep->fp_use = nullptr;  // Not used by LVN.
+      mir->ssa_rep->num_defs = def->num_defs;
+      mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by LVN.
+      mir->ssa_rep->fp_def = nullptr;  // Not used by LVN.
+      mir->dalvikInsn.opcode = def->opcode;
+      mir->offset = i;  // LVN uses offset only for debug output.
+      mir->width = 1u;  // Not used by LVN.
+      mir->optimization_flags = 0u;
+
+      if (i != 0u) {
+        mirs_[i - 1u].next = mir;
+      }
+    }
+    mirs_[count - 1u].next = nullptr;
+  }
+
+  template <size_t count>
+  void PrepareMIRs(const MIRDef (&defs)[count]) {
+    DoPrepareMIRs(defs, count);
+  }
+
+  void PerformLVN() {
+    value_names_.resize(mir_count_);
+    for (size_t i = 0; i != mir_count_; ++i) {
+      value_names_[i] = lvn_.GetValueNumber(&mirs_[i]);
+    }
+  }
+
+  LocalValueNumberingTest() : pool_(), cu_(&pool_), mir_count_(0u), mirs_(nullptr), lvn_(&cu_) {
+    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+  }
+
+  ArenaPool pool_;
+  CompilationUnit cu_;
+  size_t mir_count_;
+  MIR* mirs_;
+  std::vector<SSARepresentation> ssa_reps_;
+  std::vector<uint16_t> value_names_;
+  LocalValueNumbering lvn_;
+};
+
+#if 0  // TODO: re-enable when LVN is handling memory igets.
+TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false }
+  };
+  static const MIRDef mirs[] = {
+      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+      DEF_IGET(Instruction::IGET, 1u, 10u, 0u),
+      DEF_INVOKE1(Instruction::INVOKE_VIRTUAL, 11u),
+      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 4u);
+  EXPECT_EQ(value_names_[0], value_names_[1]);
+  EXPECT_NE(value_names_[0], value_names_[3]);
+  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
+}
+#endif
+
+TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },
+      { 2u, 1u, 2u, false },
+  };
+  static const MIRDef mirs[] = {
+      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),  // May alias.
+      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
+      DEF_IGET(Instruction::IGET, 3u,  0u, 1u),
+      DEF_IGET(Instruction::IGET, 4u,  2u, 1u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 5u);
+  EXPECT_NE(value_names_[0], value_names_[2]);
+  EXPECT_NE(value_names_[3], value_names_[4]);
+  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[1].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[3].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[4].optimization_flags, 0u);
+}
+
+#if 0  // TODO: re-enable when LVN is handling memory igets.
+TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 10u),
+      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),  // No aliasing since 10u is unique.
+      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 4u);
+  EXPECT_EQ(value_names_[1], value_names_[3]);
+  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
+}
+#endif
+
+#if 0  // TODO: re-enable when LVN is handling memory igets.
+TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 11u),
+      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),  // No aliasing since 11u is unique.
+      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 4u);
+  EXPECT_EQ(value_names_[1], value_names_[3]);
+  EXPECT_EQ(mirs_[1].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
+}
+#endif
+
+#if 0  // TODO: re-enable when LVN is handling memory igets.
+TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 10u),
+      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+      DEF_INVOKE1(Instruction::INVOKE_VIRTUAL, 11u),  // 10u still unique.
+      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
+      DEF_INVOKE1(Instruction::INVOKE_VIRTUAL, 10u),  // 10u not unique anymore.
+      DEF_IGET(Instruction::IGET, 3u, 10u, 0u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 6u);
+  EXPECT_EQ(value_names_[1], value_names_[3]);
+  EXPECT_NE(value_names_[1], value_names_[5]);
+  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
+}
+#endif
+
+TEST_F(LocalValueNumberingTest, TestVolatile) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },
+      { 2u, 1u, 2u, true },
+  };
+  static const MIRDef mirs[] = {
+      DEF_IGET(Instruction::IGET, 0u, 10u, 1u),  // Volatile.
+      DEF_IGET(Instruction::IGET, 1u,  0u, 0u),  // Non-volatile.
+      DEF_IGET(Instruction::IGET, 2u, 10u, 1u),  // Volatile.
+      DEF_IGET(Instruction::IGET, 3u,  2u, 1u),  // Non-volatile.
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 4u);
+  EXPECT_NE(value_names_[0], value_names_[2]);  // A volatile load always gets a new value name.
+  EXPECT_NE(value_names_[1], value_names_[3]);  // Their bases differ because of the volatile loads.
+  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[1].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[3].optimization_flags, 0u);
+}
+
+}  // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d304db9..d844aac 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -917,6 +917,8 @@
   size_t num_non_special_compiler_temps_;
   size_t max_available_non_special_compiler_temps_;
   size_t max_available_special_compiler_temps_;
+
+  friend class LocalValueNumberingTest;
 };
 
 }  // namespace art
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index 4f8739a..b60f296 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -116,7 +116,7 @@
 }
 
 void PassDriver::DispatchPass(CompilationUnit* c_unit, const Pass* curPass) {
-  LOG(DEBUG) << "Dispatching " << curPass->GetName();
+  VLOG(compiler) << "Dispatching " << curPass->GetName();
 
   DataFlowAnalysisMode mode = curPass->GetTraversal();
 
@@ -145,7 +145,7 @@
     case kNoNodes:
       break;
     default:
-      LOG(DEBUG) << "Iterator mode not handled in dispatcher: " << mode;
+      LOG(FATAL) << "Iterator mode not handled in dispatcher: " << mode;
       break;
   }
 }
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index ce339bf..fd26cf6 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1047,7 +1047,7 @@
     if (kSaveDexInput) {
       for (size_t i = 0; i < dex_files.size(); ++i) {
         const DexFile* dex_file = dex_files[i];
-        std::string tmp_file_name(StringPrintf("/data/local/tmp/dex2oat.%d.%d.dex", getpid(), i));
+        std::string tmp_file_name(StringPrintf("/data/local/tmp/dex2oat.%d.%zu.dex", getpid(), i));
         UniquePtr<File> tmp_file(OS::CreateEmptyFile(tmp_file_name.c_str()));
         if (tmp_file.get() == nullptr) {
             PLOG(ERROR) << "Failed to open file " << tmp_file_name << ". Try: adb shell chmod 777 /data/local/tmp";
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index b3fce5a..7cbeb29 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -29,8 +29,6 @@
 #include "object_utils.h"
 #include "runtime.h"
 
-
-
 namespace art {
 
 // Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
@@ -448,8 +446,11 @@
         }
         ++cur_reg_;
         break;
-      case Primitive::kPrimNot:
-        sf_->SetVRegReference(cur_reg_, *reinterpret_cast<mirror::Object**>(GetParamAddress()));
+      case Primitive::kPrimNot: {
+          StackReference<mirror::Object>* stack_ref =
+              reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
+          sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
+        }
         break;
       case Primitive::kPrimBoolean:  // Fall-through.
       case Primitive::kPrimByte:     // Fall-through.
@@ -515,6 +516,7 @@
     JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
     // Pop transition.
     self->PopManagedStackFragment(fragment);
+    // No need to restore the args since the method has already been run by the interpreter.
     return result.GetJ();
   }
 }
@@ -533,8 +535,10 @@
     Primitive::Type type = GetParamPrimitiveType();
     switch (type) {
       case Primitive::kPrimNot: {
-        mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
-        val.l = soa_->AddLocalReference<jobject>(obj);
+        StackReference<mirror::Object>* stack_ref =
+            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
+        val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
+        references_.push_back(std::make_pair(val.l, stack_ref));
         break;
       }
       case Primitive::kPrimLong:  // Fall-through.
@@ -551,7 +555,7 @@
       case Primitive::kPrimShort:    // Fall-through.
       case Primitive::kPrimInt:      // Fall-through.
       case Primitive::kPrimFloat:
-        val.i =  *reinterpret_cast<jint*>(GetParamAddress());
+        val.i = *reinterpret_cast<jint*>(GetParamAddress());
         break;
       case Primitive::kPrimVoid:
         LOG(FATAL) << "UNREACHABLE";
@@ -561,10 +565,18 @@
     args_->push_back(val);
   }
 
+  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    // Fixup any references which may have changed.
+    for (const auto& pair : references_) {
+      pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
+    }
+  }
+
  private:
   ScopedObjectAccessUnchecked* soa_;
   std::vector<jvalue>* args_;
-
+  // References which we must update when exiting in case the GC moved the objects.
+  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
   DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
 };
 
@@ -617,6 +629,8 @@
   self->EndAssertNoThreadSuspension(old_cause);
   JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
                                                rcvr_jobj, interface_method_jobj, args);
+  // Restore references which might have moved.
+  local_ref_visitor.FixupReferences();
   return result.GetJ();
 }
 
@@ -630,23 +644,25 @@
 
   virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (IsParamAReference()) {
-      mirror::Object** param_address = reinterpret_cast<mirror::Object**>(GetParamAddress());
+      StackReference<mirror::Object>* stack_ref =
+          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
       jobject reference =
-          soa_->AddLocalReference<jobject>(*param_address);
-      references_.push_back(std::make_pair(reference, param_address));
+          soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
+      references_.push_back(std::make_pair(reference, stack_ref));
     }
   }
 
   void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Fixup any references which may have changed.
-    for (std::pair<jobject, mirror::Object**>& it : references_) {
-      *it.second = soa_->Decode<mirror::Object*>(it.first);
+    for (const auto& pair : references_) {
+      pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
     }
   }
 
  private:
   ScopedObjectAccessUnchecked* soa_;
-  std::vector<std::pair<jobject, mirror::Object**> > references_;
+  // References which we must update when exiting in case the GC moved the objects.
+  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
   DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
 };
 
diff --git a/runtime/native/dalvik_system_Zygote.cc b/runtime/native/dalvik_system_Zygote.cc
index 29c0bc0..4d009db 100644
--- a/runtime/native/dalvik_system_Zygote.cc
+++ b/runtime/native/dalvik_system_Zygote.cc
@@ -217,10 +217,6 @@
 
 static void DropCapabilitiesBoundingSet() {
   for (int i = 0; prctl(PR_CAPBSET_READ, i, 0, 0, 0) >= 0; i++) {
-    if (i == CAP_NET_RAW) {
-      // Don't break /system/bin/ping
-      continue;
-    }
     int rc = prctl(PR_CAPBSET_DROP, i, 0, 0, 0);
     if (rc == -1) {
       if (errno == EINVAL) {
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index e2846dd..57065ef 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -9,5 +9,7 @@
 wideGetterSetterTest passes
 wideIdentityTest passes
 returnConstantTest passes
+LVNTests.testNPE1 passes
+LVNTests.testNPE2 passes
 longDivTest passes
 longModTest passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index 3307e50..6829388 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -40,6 +40,8 @@
         wideGetterSetterTest();
         wideIdentityTest();
         returnConstantTest();
+        LVNTests.testNPE1();
+        LVNTests.testNPE2();
         ZeroTests.longDivTest();
         ZeroTests.longModTest();
     }
@@ -8440,6 +8442,52 @@
 
     public long wideIdent5(int a6, int a5, int a4, int a3, int a2, long a1) {
         return a1;
-  }
+    }
+}
 
+class LVNTests {
+    private LVNTests link = null;
+    private int value = 0;
+
+    private void setLink(LVNTests l) {
+        link = l;
+    }
+
+    private static void causeNPE1(LVNTests lhs, LVNTests rhs) {
+        LVNTests link1 = lhs.link;
+        rhs.link = null;
+        LVNTests link2 = lhs.link;
+        int value1 = link1.value;
+        int value2 = link2.value;
+        System.out.println("LVNTests.testNPE1 fails with " + value1 + " and " + value2);
+    }
+
+    public static void testNPE1() {
+        LVNTests t = new LVNTests();
+        t.link = new LVNTests();
+        try {
+          causeNPE1(t, t);
+        } catch (NullPointerException e) {
+          System.out.println("LVNTests.testNPE1 passes");
+        }
+    }
+
+    private static void causeNPE2(LVNTests lhs, LVNTests rhs) {
+      LVNTests link1 = lhs.link;
+      rhs.setLink(null);
+      LVNTests link2 = lhs.link;
+      int value1 = link1.value;
+      int value2 = link2.value;
+      System.out.println("LVNTests.testNPE2 fails with " + value1 + " and " + value2);
+    }
+
+    public static void testNPE2() {
+        LVNTests t = new LVNTests();
+        t.link = new LVNTests();
+        try {
+          causeNPE2(t, t);
+        } catch (NullPointerException e) {
+          System.out.println("LVNTests.testNPE2 passes");
+        }
+    }
 }