Merge "ART: Refactor some TI test code for reuse"
diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h
index b617387..555baf6 100644
--- a/compiler/intrinsics_list.h
+++ b/compiler/intrinsics_list.h
@@ -108,8 +108,10 @@
   V(StringCompareTo, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I") \
   V(StringEquals, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z") \
   V(StringGetCharsNoCheck, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "getCharsNoCheck", "(II[CI)V") \
-  V(StringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(I)I") \
-  V(StringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(II)I") \
+  V(StringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "indexOf", "(I)I") \
+  V(StringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "indexOf", "(II)I") \
+  V(StringStringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(Ljava/lang/String;)I") \
+  V(StringStringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(Ljava/lang/String;I)I") \
   V(StringIsEmpty, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "isEmpty", "()Z") \
   V(StringLength, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "length", "()I") \
   V(StringNewStringFromBytes, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringFactory;", "newStringFromBytes", "([BIII)Ljava/lang/String;") \
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a5f248d..8b450e1 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1375,30 +1375,4 @@
   return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
 }
 
-void CodeGenerator::EmitJitRoots(uint8_t* code,
-                                 Handle<mirror::ObjectArray<mirror::Object>> roots,
-                                 const uint8_t* roots_data,
-                                 Handle<mirror::DexCache> outer_dex_cache) {
-  DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
-  StackHandleScope<1> hs(Thread::Current());
-  MutableHandle<mirror::DexCache> h_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  size_t index = 0;
-  for (auto& entry : jit_string_roots_) {
-    const DexFile& entry_dex_file = *entry.first.dex_file;
-    // Avoid the expensive FindDexCache call by checking if the string is
-    // in the compiled method's dex file.
-    h_dex_cache.Assign(IsSameDexFile(*outer_dex_cache->GetDexFile(), entry_dex_file)
-        ? outer_dex_cache.Get()
-        : class_linker->FindDexCache(hs.Self(), entry_dex_file));
-    mirror::String* string = class_linker->LookupString(
-        entry_dex_file, entry.first.string_index, h_dex_cache);
-    DCHECK(string != nullptr) << "JIT roots require strings to have been loaded";
-    roots->Set(index, string);
-    entry.second = index;
-    ++index;
-  }
-  EmitJitRootPatches(code, roots_data);
-}
-
 }  // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 212d571..a81f24e 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -31,7 +31,6 @@
 #include "nodes.h"
 #include "optimizing_compiler_stats.h"
 #include "stack_map_stream.h"
-#include "string_reference.h"
 #include "utils/label.h"
 
 namespace art {
@@ -332,17 +331,6 @@
 
   void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
   size_t ComputeStackMapsSize();
-  size_t GetNumberOfJitRoots() const {
-    return jit_string_roots_.size();
-  }
-
-  // Fills the `literals` array with literals collected during code generation.
-  // Also emits literal patches.
-  void EmitJitRoots(uint8_t* code,
-                    Handle<mirror::ObjectArray<mirror::Object>> roots,
-                    const uint8_t* roots_data,
-                    Handle<mirror::DexCache> outer_dex_cache)
-      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsLeafMethod() const {
     return is_leaf_;
@@ -579,8 +567,6 @@
         fpu_callee_save_mask_(fpu_callee_save_mask),
         stack_map_stream_(graph->GetArena()),
         block_order_(nullptr),
-        jit_string_roots_(StringReferenceValueComparator(),
-                          graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         disasm_info_(nullptr),
         stats_(stats),
         graph_(graph),
@@ -647,12 +633,6 @@
     return current_slow_path_;
   }
 
-  // Emit the patches associated with JIT roots. Only applies to JIT-compiled code.
-  virtual void EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
-                                  const uint8_t* roots_data ATTRIBUTE_UNUSED) {
-    DCHECK_EQ(jit_string_roots_.size(), 0u);
-  }
-
   // Frame size required for this method.
   uint32_t frame_size_;
   uint32_t core_spill_mask_;
@@ -678,11 +658,6 @@
   // The order to use for code generation.
   const ArenaVector<HBasicBlock*>* block_order_;
 
-  // Maps a StringReference (dex_file, string_index) to the index in the literal table.
-  // Entries are initially added with a 0 index, and `EmitJitRoots` will compute all the
-  // indices.
-  ArenaSafeMap<StringReference, size_t, StringReferenceValueComparator> jit_string_roots_;
-
   DisassemblyInformation* disasm_info_;
 
  private:
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index aedba2c..b6d7b80 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5886,9 +5886,6 @@
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
-    case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      return HLoadString::LoadKind::kDexCacheViaMethod;
     case HLoadString::LoadKind::kDexCacheViaMethod:
       break;
   }
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 267fdd0..97224e0 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4383,9 +4383,6 @@
       break;
     case HLoadString::LoadKind::kDexCacheViaMethod:
       break;
-    case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      return HLoadString::LoadKind::kDexCacheViaMethod;
   }
   return desired_string_load_kind;
 }
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 56def31..f19e2fe 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5205,11 +5205,6 @@
     case HLoadString::LoadKind::kDexCacheViaMethod:
       fallback_load = false;
       break;
-    case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      // TODO: implement.
-      fallback_load = true;
-      break;
   }
   if (fallback_load) {
     desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6fde7c6..4e343f1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6220,9 +6220,6 @@
       break;
     case HLoadString::LoadKind::kDexCacheViaMethod:
       break;
-    case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      return HLoadString::LoadKind::kDexCacheViaMethod;
   }
   return desired_string_load_kind;
 }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index df77362..f3ee7cf 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1266,8 +1266,7 @@
         simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-        fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-        jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+        fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
 }
 
@@ -5633,9 +5632,6 @@
       break;
     case HLoadString::LoadKind::kDexCacheViaMethod:
       break;
-    case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      break;
   }
   return desired_string_load_kind;
 }
@@ -5665,14 +5661,6 @@
   }
 }
 
-Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index) {
-  jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
-  // Add a patch entry and return the label.
-  jit_string_patches_.emplace_back(dex_file, dex_index);
-  PatchInfo<Label>* info = &jit_string_patches_.back();
-  return &info->label;
-}
-
 void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
   LocationSummary* locations = load->GetLocations();
   Location out_loc = locations->Out();
@@ -5704,15 +5692,6 @@
       __ Bind(slow_path->GetExitLabel());
       return;
     }
-    case HLoadString::LoadKind::kJitTableAddress: {
-      Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ true);
-      Label* fixup_label =
-          codegen_->NewJitRootStringPatch(load->GetDexFile(), load->GetStringIndex());
-      // /* GcRoot<mirror::String> */ out = *address
-      GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kEmitCompilerReadBarrier);
-      return;
-    }
     default:
       break;
   }
@@ -7098,20 +7077,6 @@
   }
 }
 
-void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
-  for (const PatchInfo<Label>& info : jit_string_patches_) {
-    const auto& it = jit_string_roots_.find(StringReference(&info.dex_file, info.index));
-    DCHECK(it != jit_string_roots_.end());
-    size_t index_in_table = it->second;
-    uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
-    uintptr_t address =
-        reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
-    typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
-    reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
-       dchecked_integral_cast<uint32_t>(address);
-  }
-}
-
 #undef __
 
 }  // namespace x86_64
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index bc78b8c..5a6dc54 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -411,14 +411,11 @@
   void RecordTypePatch(HLoadClass* load_class);
   Label* NewStringBssEntryPatch(HLoadString* load_string);
   Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
-  Label* NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index);
 
   void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
 
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
 
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
-
   const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
     return isa_features_;
   }
@@ -604,9 +601,6 @@
   // Fixups for jump tables need to be handled specially.
   ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
 
-  // Patches for string literals in JIT compiled code.
-  ArenaDeque<PatchInfo<Label>> jit_string_patches_;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
 };
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index e06fdee..85b461d 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -106,6 +106,7 @@
   void SimplifyFP2Int(HInvoke* invoke);
   void SimplifyStringCharAt(HInvoke* invoke);
   void SimplifyStringIsEmptyOrLength(HInvoke* invoke);
+  void SimplifyNPEOnArgN(HInvoke* invoke, size_t n);
   void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
 
   OptimizingCompilerStats* stats_;
@@ -1858,6 +1859,16 @@
   invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, replacement);
 }
 
+// This method should only be used on intrinsics whose sole way of throwing an
+// exception is raising an NPE when the nth argument is null. If that argument
+// is provably non-null, we can clear the flag.
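+// For example, "abc".indexOf(s) can only throw when `s` is null; once `s` is known to be
+// non-null, the invoke has no remaining throwing path and becomes a candidate for LICM and DCE.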
+void InstructionSimplifierVisitor::SimplifyNPEOnArgN(HInvoke* invoke, size_t n) {
+  HInstruction* arg = invoke->InputAt(n);
+  if (!arg->CanBeNull()) {
+    invoke->SetCanThrow(false);
+  }
+}
+
 void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) {
   uint32_t dex_pc = invoke->GetDexPc();
   HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc);
@@ -1911,6 +1922,10 @@
     case Intrinsics::kStringLength:
       SimplifyStringIsEmptyOrLength(instruction);
       break;
+    case Intrinsics::kStringStringIndexOf:
+    case Intrinsics::kStringStringIndexOfAfter:
+      SimplifyNPEOnArgN(instruction, 1);  // 0th argument (the receiver) has its own HNullCheck
+      break;
     case Intrinsics::kUnsafeLoadFence:
       SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny);
       break;
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 93a2340..0c39223 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2600,6 +2600,9 @@
 UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARM, LongLowestOneBit)
 
+UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOf)
+UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOfAfter)
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 47e6d96..b9424a3 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2788,6 +2788,9 @@
 UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARM64, LongLowestOneBit)
 
+UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf)
+UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOfAfter)
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 6ff0ca4..e5240a2 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2679,6 +2679,9 @@
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, LongLowestOneBit)
 
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter)
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 5239f8f..7c81588 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2495,6 +2495,9 @@
 UNIMPLEMENTED_INTRINSIC(MIPS, MathTan)
 UNIMPLEMENTED_INTRINSIC(MIPS, MathTanh)
 
+UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf)
+UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter)
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 1d153e2..2d4f417 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1947,6 +1947,9 @@
 UNIMPLEMENTED_INTRINSIC(MIPS64, MathTan)
 UNIMPLEMENTED_INTRINSIC(MIPS64, MathTanh)
 
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf)
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter)
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 43682c5..bac98d5 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3323,6 +3323,9 @@
 UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
 
+UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf)
+UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter)
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index de2606c..01577f7 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2992,6 +2992,9 @@
 UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
 
+UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf)
+UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter)
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 4f2e257..9155322 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1432,10 +1432,10 @@
   AddInstruction(new (GetGraph()->GetArena()) HGoto(new_block->GetDexPc()));
 
   for (HBasicBlock* successor : GetSuccessors()) {
-    new_block->successors_.push_back(successor);
     successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
   }
-  successors_.clear();
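+  // Move the whole successor list at once; swap() preserves the order and leaves
+  // this block's list empty, as checked below.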
+  new_block->successors_.swap(successors_);
+  DCHECK(successors_.empty());
   AddSuccessor(new_block);
 
   GetGraph()->AddBlock(new_block);
@@ -1449,10 +1449,10 @@
   HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc());
 
   for (HBasicBlock* predecessor : GetPredecessors()) {
-    new_block->predecessors_.push_back(predecessor);
     predecessor->successors_[predecessor->GetSuccessorIndexOf(this)] = new_block;
   }
-  predecessors_.clear();
+  new_block->predecessors_.swap(predecessors_);
+  DCHECK(predecessors_.empty());
   AddPredecessor(new_block);
 
   GetGraph()->AddBlock(new_block);
@@ -1477,16 +1477,16 @@
   new_block->instructions_.SetBlockOfInstructions(new_block);
 
   for (HBasicBlock* successor : GetSuccessors()) {
-    new_block->successors_.push_back(successor);
     successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
   }
-  successors_.clear();
+  new_block->successors_.swap(successors_);
+  DCHECK(successors_.empty());
 
   for (HBasicBlock* dominated : GetDominatedBlocks()) {
     dominated->dominator_ = new_block;
-    new_block->dominated_blocks_.push_back(dominated);
   }
-  dominated_blocks_.clear();
+  new_block->dominated_blocks_.swap(dominated_blocks_);
+  DCHECK(dominated_blocks_.empty());
   return new_block;
 }
 
@@ -1504,16 +1504,16 @@
 
   new_block->instructions_.SetBlockOfInstructions(new_block);
   for (HBasicBlock* successor : GetSuccessors()) {
-    new_block->successors_.push_back(successor);
     successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
   }
-  successors_.clear();
+  new_block->successors_.swap(successors_);
+  DCHECK(successors_.empty());
 
   for (HBasicBlock* dominated : GetDominatedBlocks()) {
     dominated->dominator_ = new_block;
-    new_block->dominated_blocks_.push_back(dominated);
   }
-  dominated_blocks_.clear();
+  new_block->dominated_blocks_.swap(dominated_blocks_);
+  DCHECK(dominated_blocks_.empty());
   return new_block;
 }
 
@@ -1852,17 +1852,19 @@
 
   // Update links to the successors of `other`.
   successors_.clear();
-  while (!other->successors_.empty()) {
-    HBasicBlock* successor = other->GetSuccessors()[0];
-    successor->ReplacePredecessor(other, this);
+  for (HBasicBlock* successor : other->GetSuccessors()) {
+    successor->predecessors_[successor->GetPredecessorIndexOf(other)] = this;
   }
+  successors_.swap(other->successors_);
+  DCHECK(other->successors_.empty());
 
   // Update the dominator tree.
   RemoveDominatedBlock(other);
   for (HBasicBlock* dominated : other->GetDominatedBlocks()) {
-    dominated_blocks_.push_back(dominated);
     dominated->SetDominator(this);
   }
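+  // Bulk-append the dominated blocks rather than pushing them one at a time.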
+  dominated_blocks_.insert(
+      dominated_blocks_.end(), other->dominated_blocks_.begin(), other->dominated_blocks_.end());
   other->dominated_blocks_.clear();
   other->dominator_ = nullptr;
 
@@ -1889,16 +1891,18 @@
 
   // Update links to the successors of `other`.
   successors_.clear();
-  while (!other->successors_.empty()) {
-    HBasicBlock* successor = other->GetSuccessors()[0];
-    successor->ReplacePredecessor(other, this);
+  for (HBasicBlock* successor : other->GetSuccessors()) {
+    successor->predecessors_[successor->GetPredecessorIndexOf(other)] = this;
   }
+  successors_.swap(other->successors_);
+  DCHECK(other->successors_.empty());
 
   // Update the dominator tree.
   for (HBasicBlock* dominated : other->GetDominatedBlocks()) {
-    dominated_blocks_.push_back(dominated);
     dominated->SetDominator(this);
   }
+  dominated_blocks_.insert(
+      dominated_blocks_.end(), other->dominated_blocks_.begin(), other->dominated_blocks_.end());
   other->dominated_blocks_.clear();
   other->dominator_ = nullptr;
   other->graph_ = nullptr;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 0e449e3..883ac65 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1956,7 +1956,7 @@
 
   bool IsRemovable() const {
     return
-        !HasSideEffects() &&
+        !DoesAnyWrite() &&
         !CanThrow() &&
         !IsSuspendCheck() &&
         !IsControlFlow() &&
@@ -3782,6 +3782,8 @@
     return GetEnvironment()->IsFromInlinedInvoke();
   }
 
+  void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
+
   bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
 
   bool CanBeMoved() const OVERRIDE { return IsIntrinsic(); }
@@ -3840,8 +3842,6 @@
     SetPackedFlag<kFlagCanThrow>(true);
   }
 
-  void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
-
   uint32_t number_of_arguments_;
   ArtMethod* const resolved_method_;
   ArenaVector<HUserRecord<HInstruction*>> inputs_;
@@ -5688,10 +5688,7 @@
     // all other types are unavailable.
     kDexCacheViaMethod,
 
-    // Load from the root table associated with the JIT compiled method.
-    kJitTableAddress,
-
-    kLast = kJitTableAddress,
+    kLast = kDexCacheViaMethod,
   };
 
   HLoadString(HCurrentMethod* current_method,
@@ -5749,8 +5746,7 @@
     LoadKind load_kind = GetLoadKind();
     if (load_kind == LoadKind::kBootImageLinkTimeAddress ||
         load_kind == LoadKind::kBootImageLinkTimePcRelative ||
-        load_kind == LoadKind::kBootImageAddress ||
-        load_kind == LoadKind::kJitTableAddress) {
+        load_kind == LoadKind::kBootImageAddress) {
       return false;
     }
     return !IsInDexCache();
@@ -5803,8 +5799,7 @@
     return load_kind == LoadKind::kBootImageLinkTimeAddress ||
         load_kind == LoadKind::kBootImageLinkTimePcRelative ||
         load_kind == LoadKind::kBssEntry ||
-        load_kind == LoadKind::kDexCacheViaMethod ||
-        load_kind == LoadKind::kJitTableAddress;
+        load_kind == LoadKind::kDexCacheViaMethod;
   }
 
   static bool HasAddress(LoadKind load_kind) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1add660..6f84cdc 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -117,7 +117,6 @@
 
   size_t GetSize() const { return size_; }
   const ArenaVector<uint8_t>& GetMemory() const { return memory_; }
-  uint8_t* GetData() { return memory_.data(); }
 
  private:
   ArenaVector<uint8_t> memory_;
@@ -1127,7 +1126,7 @@
                                     jit::JitCodeCache* code_cache,
                                     ArtMethod* method,
                                     bool osr) {
-  StackHandleScope<3> hs(self);
+  StackHandleScope<2> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       method->GetDeclaringClass()->GetClassLoader()));
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
@@ -1173,43 +1172,22 @@
   }
 
   size_t stack_map_size = codegen->ComputeStackMapsSize();
-  size_t number_of_roots = codegen->GetNumberOfJitRoots();
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
-  // will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is
-  // executed, this array is not needed.
-  Handle<mirror::ObjectArray<mirror::Object>> roots(
-      hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
-          self, class_linker->GetClassRoot(ClassLinker::kObjectArrayClass), number_of_roots)));
-  if (roots.Get() == nullptr) {
-    // Out of memory, just clear the exception to avoid any Java exception uncaught problems.
-    DCHECK(self->IsExceptionPending());
-    self->ClearException();
-    return false;
-  }
-  uint8_t* stack_map_data = nullptr;
-  uint8_t* roots_data = nullptr;
-  code_cache->ReserveData(
-      self, stack_map_size, number_of_roots, method, &stack_map_data, &roots_data);
-  if (stack_map_data == nullptr || roots_data == nullptr) {
+  uint8_t* stack_map_data = code_cache->ReserveData(self, stack_map_size, method);
+  if (stack_map_data == nullptr) {
     return false;
   }
   MaybeRecordStat(MethodCompilationStat::kCompiled);
   codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
-  codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data, dex_cache);
-
   const void* code = code_cache->CommitCode(
       self,
       method,
       stack_map_data,
-      roots_data,
       codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
       codegen->GetCoreSpillMask(),
       codegen->GetFpuSpillMask(),
       code_allocator.GetMemory().data(),
       code_allocator.GetSize(),
-      osr,
-      roots);
+      osr);
 
   if (code == nullptr) {
     code_cache->ClearData(self, stack_map_data);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 15254ed..63e4ca6 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -281,8 +281,7 @@
         : hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
 
     if (codegen_->GetCompilerOptions().IsBootImage()) {
-      // Compiling boot image. Resolve the string and allocate it if needed, to ensure
-      // the string will be added to the boot image.
+      // Compiling boot image. Resolve the string and allocate it if needed.
       DCHECK(!runtime->UseJitCompilation());
       mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
       CHECK(string != nullptr);
@@ -298,14 +297,10 @@
     } else if (runtime->UseJitCompilation()) {
       // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
       // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
-      mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache);
-      if (string != nullptr) {
-        if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
-          desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
-          address = reinterpret_cast64<uint64_t>(string);
-        } else {
-          desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
-        }
+      mirror::String* string = dex_cache->GetResolvedString(string_index);
+      if (string != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
+        desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
+        address = reinterpret_cast64<uint64_t>(string);
       }
     } else {
       // AOT app compilation. Try to lookup the string without allocating if not found.
@@ -327,7 +322,6 @@
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadString::LoadKind::kBssEntry:
     case HLoadString::LoadKind::kDexCacheViaMethod:
-    case HLoadString::LoadKind::kJitTableAddress:
       load_string->SetLoadKindWithStringReference(load_kind, dex_file, string_index);
       break;
     case HLoadString::LoadKind::kBootImageAddress:
diff --git a/runtime/image.cc b/runtime/image.cc
index 299d5fd..bd5ba93 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '1', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '2', '\0' };
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2fbf5ef..a26d850 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -80,18 +80,8 @@
 
   std::string error_str;
   // Map name specific for android_os_Debug.cpp accounting.
-  // Map in low 4gb to simplify accessing root tables for x86_64.
-  // We could do PC-relative addressing to avoid this problem, but that
-  // would require reserving code and data area before submitting, which
-  // means more windows for the code memory to be RWX.
   MemMap* data_map = MemMap::MapAnonymous(
-      "data-code-cache", nullptr,
-      max_capacity,
-      kProtAll,
-      /* low_4gb */ true,
-      /* reuse */ false,
-      &error_str,
-      use_ashmem);
+      "data-code-cache", nullptr, max_capacity, kProtAll, false, false, &error_str, use_ashmem);
   if (data_map == nullptr) {
     std::ostringstream oss;
     oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
@@ -207,40 +197,34 @@
 
 uint8_t* JitCodeCache::CommitCode(Thread* self,
                                   ArtMethod* method,
-                                  uint8_t* stack_map,
-                                  uint8_t* roots_data,
+                                  const uint8_t* vmap_table,
                                   size_t frame_size_in_bytes,
                                   size_t core_spill_mask,
                                   size_t fp_spill_mask,
                                   const uint8_t* code,
                                   size_t code_size,
-                                  bool osr,
-                                  Handle<mirror::ObjectArray<mirror::Object>> roots) {
+                                  bool osr) {
   uint8_t* result = CommitCodeInternal(self,
                                        method,
-                                       stack_map,
-                                       roots_data,
+                                       vmap_table,
                                        frame_size_in_bytes,
                                        core_spill_mask,
                                        fp_spill_mask,
                                        code,
                                        code_size,
-                                       osr,
-                                       roots);
+                                       osr);
   if (result == nullptr) {
     // Retry.
     GarbageCollectCache(self);
     result = CommitCodeInternal(self,
                                 method,
-                                stack_map,
-                                roots_data,
+                                vmap_table,
                                 frame_size_in_bytes,
                                 core_spill_mask,
                                 fp_spill_mask,
                                 code,
                                 code_size,
-                                osr,
-                                roots);
+                                osr);
   }
   return result;
 }
@@ -259,66 +243,20 @@
   return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
 }
 
-static uint32_t ComputeRootTableSize(uint32_t number_of_roots) {
-  return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
-}
-
-static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
-  // The length of the table is stored just before the stack map (and therefore at the end of
-  // the table itself), in order to be able to fetch it from a `stack_map` pointer.
-  return reinterpret_cast<const uint32_t*>(stack_map)[-1];
-}
-
-static void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror::Object>> roots)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
-  uint32_t length = roots->GetLength();
-  // Put all roots in `roots_data`.
-  for (uint32_t i = 0; i < length; ++i) {
-    gc_roots[i] = GcRoot<mirror::Object>(roots->Get(i));
-  }
-  // Store the length of the table at the end. This will allow fetching it from a `stack_map`
-  // pointer.
-  reinterpret_cast<uint32_t*>(gc_roots + length)[0] = length;
-}
-
-static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
-  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-  uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
-  uint32_t roots = GetNumberOfRoots(data);
-  if (number_of_roots != nullptr) {
-    *number_of_roots = roots;
-  }
-  return data - ComputeRootTableSize(roots);
-}
-
-void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
-  MutexLock mu(Thread::Current(), lock_);
-  for (const auto& entry : method_code_map_) {
-    uint32_t number_of_roots = 0;
-    uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
-    GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
-    for (uint32_t i = 0; i < number_of_roots; ++i) {
-      // This does not need a read barrier because this is called by GC.
-      mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
-      DCHECK(object->IsString());
-      mirror::Object* new_string = visitor->IsMarked(object);
-      // We know the string is marked because it's a strongly-interned string that
-      // is always alive.
-      // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
-      // out of the weak access/creation pause. b/32167580
-      DCHECK(new_string != nullptr);
-      roots[i] = GcRoot<mirror::Object>(new_string);
-    }
-  }
-}
-
 void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
   uintptr_t allocation = FromCodeToAllocation(code_ptr);
+  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
   // Notify native debugger that we are about to remove the code.
   // It does nothing if we are not using native debugger.
   DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
-  FreeData(GetRootTable(code_ptr));
+
+  // Use the offset directly to bypass the sanity check that the method was
+  // compiled with the optimizing compiler.
+  // TODO(ngeoffray): Clean up.
+  if (method_header->vmap_table_offset_ != 0) {
+    const uint8_t* data = method_header->code_ - method_header->vmap_table_offset_;
+    FreeData(const_cast<uint8_t*>(data));
+  }
   FreeCode(reinterpret_cast<uint8_t*>(allocation));
 }
 
@@ -370,16 +308,13 @@
 
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
-                                          uint8_t* stack_map,
-                                          uint8_t* roots_data,
+                                          const uint8_t* vmap_table,
                                           size_t frame_size_in_bytes,
                                           size_t core_spill_mask,
                                           size_t fp_spill_mask,
                                           const uint8_t* code,
                                           size_t code_size,
-                                          bool osr,
-                                          Handle<mirror::ObjectArray<mirror::Object>> roots) {
-  DCHECK(stack_map != nullptr);
+                                          bool osr) {
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   // Ensure the header ends up at expected instruction alignment.
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -403,7 +338,7 @@
       std::copy(code, code + code_size, code_ptr);
       method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       new (method_header) OatQuickMethodHeader(
-          code_ptr - stack_map,
+          (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
           frame_size_in_bytes,
           core_spill_mask,
           fp_spill_mask,
@@ -418,8 +353,6 @@
   {
     MutexLock mu(self, lock_);
     method_code_map_.Put(code_ptr, method);
-    // Fill the root table before updating the entry point.
-    FillRootTable(roots_data, roots);
     if (osr) {
       number_of_osr_compilations_++;
       osr_code_map_.Put(method, code_ptr);
@@ -475,14 +408,8 @@
   FreeData(reinterpret_cast<uint8_t*>(data));
 }
 
-void JitCodeCache::ReserveData(Thread* self,
-                               size_t stack_map_size,
-                               size_t number_of_roots,
-                               ArtMethod* method,
-                               uint8_t** stack_map_data,
-                               uint8_t** roots_data) {
-  size_t table_size = ComputeRootTableSize(number_of_roots);
-  size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
+uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size, ArtMethod* method) {
+  size = RoundUp(size, sizeof(void*));
   uint8_t* result = nullptr;
 
   {
@@ -509,8 +436,7 @@
               << " for stack maps of "
               << ArtMethod::PrettyMethod(method);
   }
-  *roots_data = result;
-  *stack_map_data = result + table_size;
+  return result;
 }
 
 class MarkCodeVisitor FINAL : public StackVisitor {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a97ef68..e15c93a 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -92,15 +92,13 @@
   // Allocate and write code and its metadata to the code cache.
   uint8_t* CommitCode(Thread* self,
                       ArtMethod* method,
-                      uint8_t* stack_map,
-                      uint8_t* roots_data,
+                      const uint8_t* vmap_table,
                       size_t frame_size_in_bytes,
                       size_t core_spill_mask,
                       size_t fp_spill_mask,
                       const uint8_t* code,
                       size_t code_size,
-                      bool osr,
-                      Handle<mirror::ObjectArray<mirror::Object>> roots)
+                      bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
@@ -110,14 +108,8 @@
   // Return true if the code cache contains this method.
   bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
 
-  // Allocate a region of data that contain `size` bytes, and potentially space
-  // for storing `number_of_roots` roots. Returns null if there is no more room.
-  void ReserveData(Thread* self,
-                   size_t size,
-                   size_t number_of_roots,
-                   ArtMethod* method,
-                   uint8_t** stack_map_data,
-                   uint8_t** roots_data)
+  // Reserve a region of data of at least `size` bytes. Returns null if there is no more room.
+  uint8_t* ReserveData(Thread* self, size_t size, ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
@@ -196,10 +188,6 @@
 
   bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);
 
-  void SweepRootTables(IsMarkedVisitor* visitor)
-      REQUIRES(!lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
  private:
   // Take ownership of maps.
   JitCodeCache(MemMap* code_map,
@@ -213,15 +201,13 @@
   // allocation fails. Return null if the allocation fails.
   uint8_t* CommitCodeInternal(Thread* self,
                               ArtMethod* method,
-                              uint8_t* stack_map,
-                              uint8_t* roots_data,
+                              const uint8_t* vmap_table,
                               size_t frame_size_in_bytes,
                               size_t core_spill_mask,
                               size_t fp_spill_mask,
                               const uint8_t* code,
                               size_t code_size,
-                              bool osr,
-                              Handle<mirror::ObjectArray<mirror::Object>> roots)
+                              bool osr)
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 4afca7d..ee5002f 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -67,11 +67,6 @@
     return data;
   }
 
-  uint8_t* GetOptimizedCodeInfoPtr() {
-    DCHECK(IsOptimized());
-    return code_ - vmap_table_offset_;
-  }
-
   CodeInfo GetOptimizedCodeInfo() const {
     return CodeInfo(GetOptimizedCodeInfoPtr());
   }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d90e896..ca65c2b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -81,7 +81,6 @@
 #include "intern_table.h"
 #include "interpreter/interpreter.h"
 #include "jit/jit.h"
-#include "jit/jit_code_cache.h"
 #include "jni_internal.h"
 #include "linear_alloc.h"
 #include "mirror/array.h"
@@ -492,14 +491,6 @@
   GetMonitorList()->SweepMonitorList(visitor);
   GetJavaVM()->SweepJniWeakGlobals(visitor);
   GetHeap()->SweepAllocationRecords(visitor);
-  if (GetJit() != nullptr) {
-    // Visit JIT literal tables. Objects in these tables are classes and strings
-    // and only classes can be affected by class unloading. The strings always
-    // stay alive as they are strongly interned.
-    // TODO: Move this closer to CleanupClassLoaders, to avoid blocking weak accesses
-    // from mutators. See b/32167580.
-    GetJit()->GetCodeCache()->SweepRootTables(visitor);
-  }
 
   // All other generic system-weak holders.
   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
diff --git a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
index 5d4aa56..af43973 100644
--- a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
+++ b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -196,7 +196,7 @@
   const-class v0, LMain;
   if-ne v0, v2, :exit
   :other_loop_entry
-  const-class v1, LIrreducibleLoop;
+  const-class v1, Ljava/lang/Class;  # LoadClass that can throw
   goto :loop_entry
   :exit
   return-object v0
@@ -250,7 +250,7 @@
   const/4 v0, 0
   if-ne p0, v0, :other_loop_entry
   :loop_entry
-  const-class v1, LIrreducibleLoop;
+  const-class v1, Ljava/lang/Class;  # LoadClass that can throw
   if-ne v0, p0, :exit
   :other_loop_entry
   sub-int v1, p0, p0
@@ -286,7 +286,7 @@
 .method public static licm3(III)I
   .registers 4
   :loop_entry
-  const-class v0, LIrreducibleLoop;
+  const-class v0, Ljava/lang/Class;  # LoadClass that can throw
   if-ne p1, p2, :exit
   goto :loop_body
 
diff --git a/test/624-checker-stringops/expected.txt b/test/624-checker-stringops/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/624-checker-stringops/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/624-checker-stringops/info.txt b/test/624-checker-stringops/info.txt
new file mode 100644
index 0000000..64344ac
--- /dev/null
+++ b/test/624-checker-stringops/info.txt
@@ -0,0 +1 @@
+Verify some properties of string operations represented by intrinsics.
diff --git a/test/624-checker-stringops/src/Main.java b/test/624-checker-stringops/src/Main.java
new file mode 100644
index 0000000..34e8283
--- /dev/null
+++ b/test/624-checker-stringops/src/Main.java
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests properties of some string operations represented by intrinsics.
+ */
+public class Main {
+
+  static final String ABC = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  static final String XYZ = "XYZ";
+
+  //
+  // Variant intrinsics remain in the loop, but invariant references are hoisted out of the loop.
+  //
+  /// CHECK-START: int Main.liveIndexOf() licm (before)
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOf            loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOfAfter       loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOf      loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOfAfter loop:{{B\d+}} outer_loop:none
+  //
+  /// CHECK-START: int Main.liveIndexOf() licm (after)
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOf            loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOfAfter       loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOf      loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOfAfter loop:none
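+  //
+  // Note: the String-argument variants operate on provably non-null, loop-invariant values,
+  // so after simplification they can no longer throw and LICM is free to hoist them; the
+  // char variants depend on the loop variable `c` and must stay in their loops.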
+  static int liveIndexOf() {
+    int k = ABC.length() + XYZ.length();  // does LoadString before loops
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += ABC.indexOf(c);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += ABC.indexOf(c, 4);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += ABC.indexOf(XYZ);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += ABC.indexOf(XYZ, 2);
+    }
+    return k;
+  }
+
+  //
+  // All dead intrinsics can be removed completely.
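+  // (Their results are unused and, once simplified, the calls neither write nor throw.)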
+  //
+  /// CHECK-START: int Main.deadIndexOf() dead_code_elimination$initial (before)
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOf            loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOfAfter       loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOf      loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOfAfter loop:{{B\d+}} outer_loop:none
+  //
+  /// CHECK-START: int Main.deadIndexOf() dead_code_elimination$initial (after)
+  /// CHECK-NOT: InvokeVirtual intrinsic:StringIndexOf
+  /// CHECK-NOT: InvokeVirtual intrinsic:StringIndexOfAfter
+  /// CHECK-NOT: InvokeVirtual intrinsic:StringStringIndexOf
+  /// CHECK-NOT: InvokeVirtual intrinsic:StringStringIndexOfAfter
+  static int deadIndexOf() {
+    int k = ABC.length() + XYZ.length();  // does LoadString before loops
+    for (char c = 'A'; c <= 'Z'; c++) {
+      int d = ABC.indexOf(c);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      int d = ABC.indexOf(c, 4);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      int d = ABC.indexOf(XYZ);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      int d = ABC.indexOf(XYZ, 2);
+    }
+    return k;
+  }
+
+  //
+  // Explicit null check on the receiver and implicit null check on the argument prevent hoisting.
+  //
+  /// CHECK-START: int Main.indexOfExceptions(java.lang.String, java.lang.String) licm (after)
+  /// CHECK-DAG: <<String:l\d+>> NullCheck                                                         loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:                 InvokeVirtual [<<String>>,{{l\d+}}] intrinsic:StringStringIndexOf loop:<<Loop>>      outer_loop:none
+  static int indexOfExceptions(String s, String t) {
+    int k = 0;
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += s.indexOf(t);
+    }
+    return k;
+  }
+
+  public static void main(String[] args) {
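+    // 1865 = 29 (string lengths) + 325 (sum of indexOf(c)) + 315 (sum of indexOf(c, 4))
+    //        + 598 + 598 (26 iterations times index 23 of "XYZ" in ABC).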
+    expectEquals(1865, liveIndexOf());
+    expectEquals(29, deadIndexOf());
+    try {
+      indexOfExceptions(null, XYZ);
+      throw new Error("Expected: NPE");
+    } catch (NullPointerException e) {
+    }
+    try {
+      indexOfExceptions(ABC, null);
+      throw new Error("Expected: NPE");
+    } catch (NullPointerException e) {
+    }
+    expectEquals(598, indexOfExceptions(ABC, XYZ));
+
+    System.out.println("passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}