Merge "Revert "Add support for LZ4 compressed image files""
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 7dbfd7c..4c3f66a 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1138,8 +1138,8 @@
   void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
     if (!has_deoptimization_on_constant_subscripts_ && array_get->IsInLoop()) {
       HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
-      if (loop->IsLoopInvariant(array_get->InputAt(0), false) &&
-          loop->IsLoopInvariant(array_get->InputAt(1), false)) {
+      if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
+          loop->IsDefinedOutOfTheLoop(array_get->InputAt(1))) {
         SideEffects loop_effects = side_effects_.GetLoopEffects(loop->GetHeader());
         if (!array_get->GetSideEffects().MayDependOn(loop_effects)) {
           HoistToPreheaderOrDeoptBlock(loop, array_get);
@@ -1349,7 +1349,7 @@
    * by handling the null check under the hood of the array length operation.
    */
   bool CanHandleLength(HLoopInformation* loop, HInstruction* length, bool needs_taken_test) {
-    if (loop->IsLoopInvariant(length, false)) {
+    if (loop->IsDefinedOutOfTheLoop(length)) {
       return true;
     } else if (length->IsArrayLength() && length->GetBlock()->GetLoopInformation() == loop) {
       if (CanHandleNullCheck(loop, length->InputAt(0), needs_taken_test)) {
@@ -1365,11 +1365,11 @@
    * by generating a deoptimization test.
    */
   bool CanHandleNullCheck(HLoopInformation* loop, HInstruction* check, bool needs_taken_test) {
-    if (loop->IsLoopInvariant(check, false)) {
+    if (loop->IsDefinedOutOfTheLoop(check)) {
       return true;
     } else if (check->IsNullCheck() && check->GetBlock()->GetLoopInformation() == loop) {
       HInstruction* array = check->InputAt(0);
-      if (loop->IsLoopInvariant(array, false)) {
+      if (loop->IsDefinedOutOfTheLoop(array)) {
         // Generate: if (array == null) deoptimize;
         HBasicBlock* block = TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
         HInstruction* cond =
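
Note: the bounds-check-elimination call sites above keep their meaning under the
rename, since any input defined outside the loop's block set can be materialized
in the preheader (or deoptimization block). A minimal sketch of the loop shape
this targets, with illustrative names that are not part of the patch:

    // Both ArrayGet inputs (array `a`, subscript `j`) are defined before the
    // loop, so the load can be hoisted to the preheader when the loop's side
    // effects cannot clobber it.
    int HoistableSum(const int* a, int j, int n) {
      int sum = 0;
      for (int i = 0; i < n; ++i) {
        sum += a[j];  // Loop-invariant load: hoisting candidate.
      }
      return sum;
    }
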
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index fdf8cc9..0b7fdf8 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -705,7 +705,8 @@
       return loop_it->second;
     }
   }
-  if (loop->IsLoopInvariant(instruction, true)) {
+  if (loop->IsDefinedOutOfTheLoop(instruction)) {
+    DCHECK(instruction->GetBlock()->Dominates(loop->GetPreHeader()));
     InductionInfo* info = CreateInvariantFetch(instruction);
     AssignInfo(loop, instruction, info);
     return info;
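
Note: IsDefinedOutOfTheLoop drops the dominance guarantee that
IsLoopInvariant(instruction, /*must_dominate=*/ true) used to bundle in, which
is why the DCHECK above re-asserts it. A sketch of the resulting pattern for
callers that need dominance (mirroring the code above, not a new API):

    if (loop->IsDefinedOutOfTheLoop(instruction) &&
        instruction->GetBlock()->Dominates(loop->GetPreHeader())) {
      // Safe to treat `instruction` as an invariant fetch for this loop.
    }
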
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 680f89f..389ada7 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -725,7 +725,7 @@
          // The instruction is a store in the loop, so the loop must do a write.
           DCHECK(side_effects_.GetLoopEffects(loop_info->GetHeader()).DoesAnyWrite());
 
-          if (loop_info->IsLoopInvariant(original_ref, false)) {
+          if (loop_info->IsDefinedOutOfTheLoop(original_ref)) {
             DCHECK(original_ref->GetBlock()->Dominates(loop_info->GetPreHeader()));
             // Keep the store since its value may be needed at the loop header.
             possibly_redundant = false;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 9b26de4..461be25 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -587,15 +587,8 @@
   return other.blocks_.IsBitSet(header_->GetBlockId());
 }
 
-bool HLoopInformation::IsLoopInvariant(HInstruction* instruction, bool must_dominate) const {
-  HLoopInformation* other_loop = instruction->GetBlock()->GetLoopInformation();
-  if (other_loop != this && (other_loop == nullptr || !other_loop->IsIn(*this))) {
-    if (must_dominate) {
-      return instruction->GetBlock()->Dominates(GetHeader());
-    }
-    return true;
-  }
-  return false;
+bool HLoopInformation::IsDefinedOutOfTheLoop(HInstruction* instruction) const {
+  return !blocks_.IsBitSet(instruction->GetBlock()->GetBlockId());
 }
 
 size_t HLoopInformation::GetLifetimeEnd() const {
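
Note: the replacement is a constant-time bit test. Because blocks_ also covers
the blocks of nested loops, one membership test answers "not in this loop or any
loop nested inside it", which the old code computed by walking loop information.
A standalone sketch with illustrative types (std::bitset standing in for
ArenaBitVector):

    #include <bitset>

    // One bit per basic-block id; set bits are the blocks of the loop and of
    // all loops nested inside it.
    bool IsDefinedOutOfTheLoopSketch(const std::bitset<256>& loop_blocks,
                                     size_t def_block_id) {
      return !loop_blocks.test(def_block_id);  // No loop-nest walk needed.
    }
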
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9d3c88c..3e38e9f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -564,11 +564,8 @@
   // Note that `other` *must* be populated before entering this function.
   bool IsIn(const HLoopInformation& other) const;
 
-  // Returns true if instruction is not defined within this loop or any loop nested inside
-  // this loop. If must_dominate is set, only definitions that actually dominate the loop
-  // header can be invariant. Otherwise, any definition outside the loop, including
-  // definitions that appear after the loop, is invariant.
-  bool IsLoopInvariant(HInstruction* instruction, bool must_dominate) const;
+  // Returns true if instruction is not defined within this loop.
+  bool IsDefinedOutOfTheLoop(HInstruction* instruction) const;
 
   const ArenaBitVector& GetBlocks() const { return blocks_; }
 
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 39e5259..48465e6 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -57,7 +57,7 @@
       __ LoadFromOffset(kLoadWord, PC, R0, offset.Int32Value());
       break;
     case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (R0).
-      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset(4).Int32Value());
       __ LoadFromOffset(kLoadWord, PC, IP, offset.Int32Value());
       break;
     case kQuickAbi:  // R9 holds Thread*.
@@ -91,7 +91,7 @@
     case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
       __ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),
                       Arm64ManagedRegister::FromXRegister(X0),
-                      Offset(JNIEnvExt::SelfOffset().Int32Value()));
+                      Offset(JNIEnvExt::SelfOffset(8).Int32Value()));
 
       __ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),
                 Arm64ManagedRegister::FromXRegister(IP0));
@@ -126,7 +126,7 @@
       __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
       break;
     case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
-      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset(4).Int32Value());
       __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
       break;
     case kQuickAbi:  // S1 holds Thread*.
@@ -158,7 +158,7 @@
       __ LoadFromOffset(kLoadDoubleword, T9, A0, offset.Int32Value());
       break;
     case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
-      __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset(8).Int32Value());
       __ LoadFromOffset(kLoadDoubleword, T9, T9, offset.Int32Value());
       break;
     case kQuickAbi:  // Fall-through.
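
Note: each generator above targets one fixed ISA, so the target pointer width is
passed as a literal (4 for ARM and MIPS32, 8 for ARM64 and MIPS64). Deriving it
from the host via sizeof(void*) would be wrong when cross-compiling, e.g. when
emitting 32-bit trampolines from a 64-bit dex2oat. A hedged sketch of deriving
the width from the instruction set (hypothetical helper, written against ART's
InstructionSet enum):

    static size_t TargetPointerSize(InstructionSet isa) {
      switch (isa) {
        case kArm:
        case kThumb2:
        case kMips:
        case kX86:
          return 4;  // 32-bit targets.
        case kArm64:
        case kMips64:
        case kX86_64:
          return 8;  // 64-bit targets.
        default:
          LOG(FATAL) << "Unknown ISA";
          UNREACHABLE();
      }
    }
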
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index d13526b..2d0ae63 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -344,8 +344,11 @@
     segment_state_.all = new_state;
   }
 
-  static Offset SegmentStateOffset() {
-    return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
+  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
+    // Note: Currently segment_state_ is at offset 0. We test the expected value in
+    //       jni_internal_test to make sure it stays correct. We do not use OFFSETOF_MEMBER,
+    //       as that is not pointer-size-safe.
+    return Offset(0);
   }
 
   // Release pages past the end of the table that may have previously held references.
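
Note: the hardcoded Offset(0) replaces OFFSETOF_MEMBER because a host-computed
offsetof reflects the host's pointer sizes, not the target's. The failure mode
behind b/26071368 is easiest to see on a hypothetical struct (not ART's layout):

    #include <cstdint>

    struct Example {
      void* ptr;       // 8 bytes on a 64-bit host, 4 bytes on a 32-bit target.
      uint32_t state;  // Host offsetof says 8; the 32-bit target image uses 4.
    };
    // Baking the host value into code generated for the 32-bit target would
    // read the wrong word.
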
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index dab1040..aa25f67 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -105,9 +105,32 @@
   stacked_local_ref_cookies.pop_back();
 }
 
-Offset JNIEnvExt::SegmentStateOffset() {
-  return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) +
-                IndirectReferenceTable::SegmentStateOffset().Int32Value());
+// Note: the offset code is brittle, as we can't use OFFSETOF_MEMBER or offsetof easily. Thus, there
+//       are tests in jni_internal_test to match the results against the actual values.
+
+// This encodes the knowledge of the structure and layout of the JNIEnv fields.
+static size_t JNIEnvSize(size_t pointer_size) {
+  // A single pointer.
+  return pointer_size;
+}
+
+Offset JNIEnvExt::SegmentStateOffset(size_t pointer_size) {
+  size_t locals_offset = JNIEnvSize(pointer_size) +
+                         2 * pointer_size +          // Thread* self + JavaVMExt* vm.
+                         4 +                         // local_ref_cookie.
+                         (pointer_size - 4);         // Padding to pointer alignment.
+  size_t irt_segment_state_offset =
+      IndirectReferenceTable::SegmentStateOffset(pointer_size).Int32Value();
+  return Offset(locals_offset + irt_segment_state_offset);
+}
+
+Offset JNIEnvExt::LocalRefCookieOffset(size_t pointer_size) {
+  return Offset(JNIEnvSize(pointer_size) +
+                2 * pointer_size);          // Thread* self + JavaVMExt* vm.
+}
+
+Offset JNIEnvExt::SelfOffset(size_t pointer_size) {
+  return Offset(JNIEnvSize(pointer_size));
 }
 
 // Use some defining part of the caller's frame as the identifying mark for the JNI segment.
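
Note: plugging in concrete pointer sizes makes the encoded layout easy to check
by hand. A minimal re-derivation, assuming (as the patch pins down) that the
IRT's segment_state_ stays at offset 0:

    #include <cassert>
    #include <cstddef>

    // JNIEnv functions pointer, then Thread* self + JavaVMExt* vm, then the
    // u4 local_ref_cookie plus padding, then the IRT's segment_state_ at 0.
    static size_t SegmentStateOffsetSketch(size_t ps) {
      return ps + 2 * ps + 4 + (ps - 4);
    }

    int main() {
      assert(SegmentStateOffsetSketch(4) == 16);  // 32-bit targets.
      assert(SegmentStateOffsetSketch(8) == 32);  // 64-bit targets.
      return 0;
    }
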
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index 3828ff0..2f8decf 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -50,15 +50,9 @@
   T AddLocalReference(mirror::Object* obj)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static Offset SegmentStateOffset();
-
-  static Offset LocalRefCookieOffset() {
-    return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
-  }
-
-  static Offset SelfOffset() {
-    return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
-  }
+  static Offset SegmentStateOffset(size_t pointer_size);
+  static Offset LocalRefCookieOffset(size_t pointer_size);
+  static Offset SelfOffset(size_t pointer_size);
 
   jobject NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
   void DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index d1687d7..b41d16b 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -18,7 +18,9 @@
 
 #include "art_method-inl.h"
 #include "common_compiler_test.h"
+#include "indirect_reference_table.h"
 #include "java_vm_ext.h"
+#include "jni_env_ext.h"
 #include "mirror/string-inl.h"
 #include "scoped_thread_state_change.h"
 #include "ScopedLocalRef.h"
@@ -2261,4 +2263,41 @@
   env_->DeleteGlobalRef(global_ref);
 }
 
+// Test the computation of IndirectReferenceTable offsets. b/26071368.
+TEST_F(JniInternalTest, IndirectReferenceTableOffsets) {
+  // The segment_state_ field is private, and we want to avoid a friend declaration. So we
+  // check by modifying the memory directly.
+  // The parameters don't really matter here.
+  IndirectReferenceTable irt(5, 5, IndirectRefKind::kGlobal, true);
+  uint32_t old_state = irt.GetSegmentState();
+
+  // Write some new state directly. We invert parts of old_state to ensure a new value.
+  uint32_t new_state = old_state ^ 0x07705005;
+  ASSERT_NE(old_state, new_state);
+
+  uint8_t* base = reinterpret_cast<uint8_t*>(&irt);
+  int32_t segment_state_offset =
+      IndirectReferenceTable::SegmentStateOffset(sizeof(void*)).Int32Value();
+  *reinterpret_cast<uint32_t*>(base + segment_state_offset) = new_state;
+
+  // Read and compare.
+  EXPECT_EQ(new_state, irt.GetSegmentState());
+}
+
+// Test the computation of JNIEnvExt offsets. b/26071368.
+TEST_F(JniInternalTest, JNIEnvExtOffsets) {
+  EXPECT_EQ(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie),
+            JNIEnvExt::LocalRefCookieOffset(sizeof(void*)).Int32Value());
+
+  EXPECT_EQ(OFFSETOF_MEMBER(JNIEnvExt, self), JNIEnvExt::SelfOffset(sizeof(void*)).Int32Value());
+
+  // segment_state_ is private in the IndirectReferenceTable, so this test isn't as strong as
+  // we'd like it to be.
+  int32_t segment_state_now =
+      OFFSETOF_MEMBER(JNIEnvExt, locals) +
+      IndirectReferenceTable::SegmentStateOffset(sizeof(void*)).Int32Value();
+  int32_t segment_state_computed = JNIEnvExt::SegmentStateOffset(sizeof(void*)).Int32Value();
+  EXPECT_EQ(segment_state_now, segment_state_computed);
+}
+
 }  // namespace art
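
Note: the tests above only exercise the running process's own width
(sizeof(void*)); the cross-width values can still be checked symbolically, since
they do not depend on host layout. A hedged sketch of such an addition
(hypothetical test name, expected values derived from the layout math above):

    TEST_F(JniInternalTest, JNIEnvExtCrossWidthOffsets) {
      EXPECT_EQ(4, JNIEnvExt::SelfOffset(4).Int32Value());
      EXPECT_EQ(8, JNIEnvExt::SelfOffset(8).Int32Value());
      EXPECT_EQ(12, JNIEnvExt::LocalRefCookieOffset(4).Int32Value());
      EXPECT_EQ(24, JNIEnvExt::LocalRefCookieOffset(8).Int32Value());
      EXPECT_EQ(16, JNIEnvExt::SegmentStateOffset(4).Int32Value());
      EXPECT_EQ(32, JNIEnvExt::SegmentStateOffset(8).Int32Value());
    }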