Merge "Add support for .bss section in oat files."
diff --git a/Android.mk b/Android.mk
index 447cf66..c740a0d 100644
--- a/Android.mk
+++ b/Android.mk
@@ -281,6 +281,11 @@
 test-art-target-interpreter: test-art-target-run-test-interpreter
 	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
 
+# All target tests that run solely on the jit.
+.PHONY: test-art-target-jit
+test-art-target-jit: test-art-target-run-test-jit
+	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
 # Primary target architecture variants:
 .PHONY: test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX)
 test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(ART_PHONY_TEST_TARGET_SUFFIX) \
@@ -299,6 +304,10 @@
 test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX)
 	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
 
+.PHONY: test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(ART_PHONY_TEST_TARGET_SUFFIX)
+	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
 # Secondary target architecture variants:
 ifdef TARGET_2ND_ARCH
 .PHONY: test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
@@ -317,6 +326,10 @@
 .PHONY: test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
 test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
 	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
+.PHONY: test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
 endif
 
 endif  # art_test_bother
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 22ecb01..4d2fa41 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -64,10 +64,6 @@
     core_compile_options += --compiler-filter=interpret-only
     core_infix := -interpreter
   endif
-  ifeq ($(1),jit)
-    core_compile_options += --compiler-filter=interpret-only
-    core_infix := -jit
-  endif
   ifeq ($(1),default)
     # Default has no infix, no compile options.
   endif
@@ -147,14 +143,12 @@
 $(eval $(call create-core-oat-host-rule-combination,default,,))
 $(eval $(call create-core-oat-host-rule-combination,optimizing,,))
 $(eval $(call create-core-oat-host-rule-combination,interpreter,,))
-$(eval $(call create-core-oat-host-rule-combination,jit,,))
 
 valgrindHOST_CORE_IMG_OUTS :=
 valgrindHOST_CORE_OAT_OUTS :=
 $(eval $(call create-core-oat-host-rule-combination,default,valgrind,32))
 $(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32))
 $(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32))
-$(eval $(call create-core-oat-host-rule-combination,jit,valgrind,32))
 
 valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS)
 
@@ -184,10 +178,6 @@
     core_compile_options += --compiler-filter=interpret-only
     core_infix := -interpreter
   endif
-  ifeq ($(1),jit)
-    core_compile_options += --compiler-filter=interpret-only
-    core_infix := -jit
-  endif
   ifeq ($(1),default)
     # Default has no infix, no compile options.
   endif
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 966a92d..8348626 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -587,6 +587,9 @@
     case Instruction::MOVE_FROM16:
     case Instruction::MOVE_OBJECT_FROM16:
       StoreValue(rl_dest, rl_src[0]);
+      if (rl_src[0].is_const && (mir_graph_->ConstantValue(rl_src[0]) == 0)) {
+        Workaround7250540(rl_dest, RegStorage::InvalidReg());
+      }
       break;
 
     case Instruction::MOVE_WIDE:
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 2577391..0283791 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -76,7 +76,7 @@
       false,
       false,
       false,
-      true,  // pic
+      false,  // pic
       nullptr,
       pass_manager_options,
       nullptr));
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 2a57fdc..ba5f7d8 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -386,7 +386,9 @@
           compiler_options);
     }
     case kArm64: {
-      return new arm64::CodeGeneratorARM64(graph, compiler_options);
+      return new arm64::CodeGeneratorARM64(graph,
+          *isa_features.AsArm64InstructionSetFeatures(),
+          compiler_options);
     }
     case kMips:
       return nullptr;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 729bab7..c21084a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -16,6 +16,7 @@
 
 #include "code_generator_arm64.h"
 
+#include "arch/arm64/instruction_set_features_arm64.h"
 #include "common_arm64.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -397,7 +398,9 @@
   return next_location;
 }
 
-CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph, const CompilerOptions& compiler_options)
+CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
+                                       const Arm64InstructionSetFeatures& isa_features,
+                                       const CompilerOptions& compiler_options)
     : CodeGenerator(graph,
                     kNumberOfAllocatableRegisters,
                     kNumberOfAllocatableFPRegisters,
@@ -408,7 +411,8 @@
       block_labels_(nullptr),
       location_builder_(graph, this),
       instruction_visitor_(graph, this),
-      move_resolver_(graph->GetArena(), this) {
+      move_resolver_(graph->GetArena(), this),
+      isa_features_(isa_features) {
   // Save the link register (containing the return address) to mimic Quick.
   AddAllocatedRegister(LocationFrom(lr));
 }
@@ -998,9 +1002,10 @@
   UseScratchRegisterScope temps(GetVIXLAssembler());
   Register temp = temps.AcquireW();
   size_t status_offset = mirror::Class::StatusOffset().SizeValue();
+  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
 
   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
-  if (kUseAcquireRelease) {
+  if (use_acquire_release) {
     // TODO(vixl): Let the MacroAssembler handle MemOperand.
     __ Add(temp, class_reg, status_offset);
     __ Ldar(temp, HeapOperand(temp));
@@ -1689,9 +1694,10 @@
 
 void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
   MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
+  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
 
   if (instruction->IsVolatile()) {
-    if (kUseAcquireRelease) {
+    if (use_acquire_release) {
       // NB: LoadAcquire will record the pc info if needed.
       codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
     } else {
@@ -1718,9 +1724,10 @@
   CPURegister value = InputCPURegisterAt(instruction, 1);
   Offset offset = instruction->GetFieldOffset();
   Primitive::Type field_type = instruction->GetFieldType();
+  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
 
   if (instruction->IsVolatile()) {
-    if (kUseAcquireRelease) {
+    if (use_acquire_release) {
       codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
       codegen_->MaybeRecordImplicitNullCheck(instruction);
     } else {
@@ -2437,9 +2444,10 @@
 
 void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
   MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
+  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
 
   if (instruction->IsVolatile()) {
-    if (kUseAcquireRelease) {
+    if (use_acquire_release) {
       // NB: LoadAcquire will record the pc info if needed.
       codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
     } else {
@@ -2464,9 +2472,10 @@
   CPURegister value = InputCPURegisterAt(instruction, 1);
   Offset offset = instruction->GetFieldOffset();
   Primitive::Type field_type = instruction->GetFieldType();
+  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
 
   if (instruction->IsVolatile()) {
-    if (kUseAcquireRelease) {
+    if (use_acquire_release) {
       codegen_->StoreRelease(field_type, value, HeapOperand(cls, offset));
     } else {
       GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index afb7fc3..48961d6 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -32,10 +32,6 @@
 
 class CodeGeneratorARM64;
 
-// TODO: Tune the use of Load-Acquire, Store-Release vs Data Memory Barriers.
-// For now we prefer the use of load-acquire, store-release over explicit memory barriers.
-static constexpr bool kUseAcquireRelease = true;
-
 // Use a local definition to prevent copying mistakes.
 static constexpr size_t kArm64WordSize = kArm64PointerSize;
 
@@ -195,7 +191,9 @@
 
 class CodeGeneratorARM64 : public CodeGenerator {
  public:
-  CodeGeneratorARM64(HGraph* graph, const CompilerOptions& compiler_options);
+  CodeGeneratorARM64(HGraph* graph,
+                     const Arm64InstructionSetFeatures& isa_features,
+                     const CompilerOptions& compiler_options);
   virtual ~CodeGeneratorARM64() {}
 
   void GenerateFrameEntry() OVERRIDE;
@@ -273,6 +271,10 @@
     return InstructionSet::kArm64;
   }
 
+  const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const {
+    return isa_features_;
+  }
+
   void Initialize() OVERRIDE {
     HGraph* graph = GetGraph();
     int length = graph->GetBlocks().Size();
@@ -317,6 +319,7 @@
   InstructionCodeGeneratorARM64 instruction_visitor_;
   ParallelMoveResolverARM64 move_resolver_;
   Arm64Assembler assembler_;
+  const Arm64InstructionSetFeatures& isa_features_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
 };
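
Note: the header change above swaps the compile-time kUseAcquireRelease constant
for a per-codegen query of the injected Arm64InstructionSetFeatures. A minimal
standalone sketch of that pattern, with hypothetical names rather than the real
ART types:

    #include <iostream>

    // Stand-in for Arm64InstructionSetFeatures; the real class derives its
    // answer from CPU detection or build flags.
    struct IsaFeatures {
      bool PreferAcquireRelease() const { return false; }
    };

    class CodeGen {
     public:
      explicit CodeGen(const IsaFeatures& isa_features) : isa_features_(isa_features) {}

      void EmitVolatileLoad() const {
        // The decision is made per instruction at code-emission time, not
        // baked into the compiler at its own build time.
        if (isa_features_.PreferAcquireRelease()) {
          std::cout << "ldar w0, [x1]\n";          // load-acquire
        } else {
          std::cout << "ldr w0, [x1]\ndmb ish\n";  // plain load + barrier
        }
      }

     private:
      const IsaFeatures& isa_features_;  // held by reference, as in CodeGeneratorARM64
    };

    int main() {
      IsaFeatures features;
      CodeGen codegen(features);
      codegen.EmitVolatileLoad();  // prints the barrier variant given the stub above
      return 0;
    }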
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e0e0b4c..868fc5b 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -18,6 +18,7 @@
 
 #include "arch/instruction_set.h"
 #include "arch/arm/instruction_set_features_arm.h"
+#include "arch/arm64/instruction_set_features_arm64.h"
 #include "base/macros.h"
 #include "builder.h"
 #include "code_generator_arm.h"
@@ -115,9 +116,9 @@
     Run(allocator, codegenX86, has_result, expected);
   }
 
-  std::unique_ptr<const ArmInstructionSetFeatures> features(
+  std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
       ArmInstructionSetFeatures::FromCppDefines());
-  TestCodeGeneratorARM codegenARM(graph, *features.get(), compiler_options);
+  TestCodeGeneratorARM codegenARM(graph, *features_arm.get(), compiler_options);
   codegenARM.CompileBaseline(&allocator, true);
   if (kRuntimeISA == kArm || kRuntimeISA == kThumb2) {
     Run(allocator, codegenARM, has_result, expected);
@@ -129,7 +130,9 @@
     Run(allocator, codegenX86_64, has_result, expected);
   }
 
-  arm64::CodeGeneratorARM64 codegenARM64(graph, compiler_options);
+  std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
+      Arm64InstructionSetFeatures::FromCppDefines());
+  arm64::CodeGeneratorARM64 codegenARM64(graph, *features_arm64.get(), compiler_options);
   codegenARM64.CompileBaseline(&allocator, true);
   if (kRuntimeISA == kArm64) {
     Run(allocator, codegenARM64, has_result, expected);
@@ -166,7 +169,9 @@
                                     compiler_options);
     RunCodeOptimized(&codegenARM, graph, hook_before_codegen, has_result, expected);
   } else if (kRuntimeISA == kArm64) {
-    arm64::CodeGeneratorARM64 codegenARM64(graph, compiler_options);
+    arm64::CodeGeneratorARM64 codegenARM64(graph,
+                                           *Arm64InstructionSetFeatures::FromCppDefines(),
+                                           compiler_options);
     RunCodeOptimized(&codegenARM64, graph, hook_before_codegen, has_result, expected);
   } else if (kRuntimeISA == kX86) {
     x86::CodeGeneratorX86 codegenX86(graph, compiler_options);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 8874edc..1ddff8a 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -16,6 +16,7 @@
 
 #include "intrinsics_arm64.h"
 
+#include "arch/arm64/instruction_set_features_arm64.h"
 #include "code_generator_arm64.h"
 #include "common_arm64.h"
 #include "entrypoints/quick/quick_entrypoints.h"
@@ -682,10 +683,11 @@
   Register base = WRegisterFrom(locations->InAt(1));    // Object pointer.
   Register offset = XRegisterFrom(locations->InAt(2));  // Long offset.
   Register trg = RegisterFrom(locations->Out(), type);
+  bool use_acquire_release = codegen->GetInstructionSetFeatures().PreferAcquireRelease();
 
   MemOperand mem_op(base.X(), offset);
   if (is_volatile) {
-    if (kUseAcquireRelease) {
+    if (use_acquire_release) {
       codegen->LoadAcquire(invoke, trg, mem_op);
     } else {
       codegen->Load(type, trg, mem_op);
@@ -792,11 +794,12 @@
   Register base = WRegisterFrom(locations->InAt(1));    // Object pointer.
   Register offset = XRegisterFrom(locations->InAt(2));  // Long offset.
   Register value = RegisterFrom(locations->InAt(3), type);
+  bool use_acquire_release = codegen->GetInstructionSetFeatures().PreferAcquireRelease();
 
   MemOperand mem_op(base.X(), offset);
 
   if (is_volatile || is_ordered) {
-    if (kUseAcquireRelease) {
+    if (use_acquire_release) {
       codegen->StoreRelease(type, value, mem_op);
     } else {
       __ Dmb(InnerShareable, BarrierAll);
@@ -856,10 +859,7 @@
 }
 
 static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) {
-  // TODO: Currently we use acquire-release load-stores in the CAS loop. One could reasonably write
-  //       a version relying on simple exclusive load-stores and barriers instead.
-  static_assert(kUseAcquireRelease, "Non-acquire-release inlined CAS not implemented, yet.");
-
+  bool use_acquire_release = codegen->GetInstructionSetFeatures().PreferAcquireRelease();
   vixl::MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
 
   Register out = WRegisterFrom(locations->Out());                  // Boolean result.
@@ -889,15 +889,23 @@
   // result = tmp_value != 0;
 
   vixl::Label loop_head, exit_loop;
-  __ Bind(&loop_head);
-
-  __ Ldaxr(tmp_value, MemOperand(tmp_ptr));
-  __ Cmp(tmp_value, expected);
-  __ B(&exit_loop, ne);
-
-  __ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
-  __ Cbnz(tmp_32, &loop_head);
-
+  if (use_acquire_release) {
+    __ Bind(&loop_head);
+    __ Ldaxr(tmp_value, MemOperand(tmp_ptr));
+    __ Cmp(tmp_value, expected);
+    __ B(&exit_loop, ne);
+    __ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
+    __ Cbnz(tmp_32, &loop_head);
+  } else {
+    __ Dmb(InnerShareable, BarrierWrites);
+    __ Bind(&loop_head);
+    __ Ldxr(tmp_value, MemOperand(tmp_ptr));
+    __ Cmp(tmp_value, expected);
+    __ B(&exit_loop, ne);
+    __ Stxr(tmp_32, value, MemOperand(tmp_ptr));
+    __ Cbnz(tmp_32, &loop_head);
+    __ Dmb(InnerShareable, BarrierAll);
+  }
   __ Bind(&exit_loop);
   __ Cset(out, eq);
 }
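
Note: both branches above implement a strong compare-and-swap. Ldaxr/Stlxr carry
acquire-release semantics directly, while the Ldxr/Stxr variant brackets the loop
with explicit Dmb barriers instead. As a portable analogy (this is not the
emitted code), the same semantics can be written with std::atomic:

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<int> field{42};
      int expected = 42;
      // Succeeds and stores 7 only if 'field' still holds 42. The boolean
      // result plays the role of the Cset(out, eq) at the end of GenCas.
      bool success = field.compare_exchange_strong(expected, 7,
                                                   std::memory_order_acq_rel,
                                                   std::memory_order_acquire);
      std::printf("success=%d value=%d\n", success, field.load());
      return 0;
    }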
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0b1f14d..22665ea 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1052,6 +1052,13 @@
     runtime_options.push_back(
         std::make_pair("imageinstructionset", GetInstructionSetString(instruction_set_)));
 
+    // Only allow the runtime to run without a boot image if we are compiling one ourselves.
+    // When we compile an app we don't want fallback mode: it would abort, since we do not push
+    // a boot classpath (it might have been stripped during preopting, anyway).
+    if (!image_) {
+      runtime_options.push_back(std::make_pair("-Xno-dex-file-fallback", nullptr));
+    }
+
     if (!CreateRuntime(runtime_options)) {
       return false;
     }
@@ -1637,9 +1644,13 @@
   }
 
   void LogCompletionTime() {
+    // Note: when creation of a runtime fails, e.g., when trying to compile an app without a
+    //       boot image, there won't be a Runtime::Current().
     LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_)
               << " (threads: " << thread_count_ << ") "
-              << driver_->GetMemoryUsageString(kIsDebugBuild || VLOG_IS_ON(compiler));
+              << ((Runtime::Current() != nullptr) ?
+                  driver_->GetMemoryUsageString(kIsDebugBuild || VLOG_IS_ON(compiler)) :
+                  "");
   }
 
   std::unique_ptr<CompilerOptions> compiler_options_;
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 7442c70..3d8a567 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -143,27 +143,31 @@
   { kITypeMask, 61u << kOpcodeShift, "sdc1", "tO", },
 
   // Floating point.
-  { kFpMask,                kCop1 | 0, "add", "fdst" },
-  { kFpMask,                kCop1 | 1, "sub", "fdst" },
-  { kFpMask,                kCop1 | 2, "mul", "fdst" },
-  { kFpMask,                kCop1 | 3, "div", "fdst" },
-  { kFpMask | (0x1f << 16), kCop1 | 4, "sqrt", "fdst" },
-  { kFpMask | (0x1f << 16), kCop1 | 5, "abs", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 6, "mov", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 7, "neg", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 8, "round.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 9, "trunc.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 10, "ceil.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 11, "floor.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 12, "round.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 13, "trunc.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 14, "ceil.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 15, "floor.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 32, "cvt.s", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 33, "cvt.d", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 36, "cvt.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 37, "cvt.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 38, "cvt.ps", "fds" },
+  { kFpMask | (0x1f << 21), kCop1 | (0x00 << 21) | 0, "mfc1", "Td" },
+  { kFpMask | (0x1f << 21), kCop1 | (0x03 << 21) | 0, "mfhc1", "Td" },
+  { kFpMask | (0x1f << 21), kCop1 | (0x04 << 21) | 0, "mtc1", "Td" },
+  { kFpMask | (0x1f << 21), kCop1 | (0x07 << 21) | 0, "mthc1", "Td" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 0, "add", "fadt" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 1, "sub", "fadt" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 2, "mul", "fadt" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 3, "div", "fadt" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 4, "sqrt", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 5, "abs", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 6, "mov", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 7, "neg", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 8, "round.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 9, "trunc.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 10, "ceil.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 11, "floor.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 12, "round.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 13, "trunc.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 14, "ceil.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 33, "cvt.d", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 36, "cvt.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 37, "cvt.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 38, "cvt.ps", "fad" },
 };
 
 static uint32_t ReadU32(const uint8_t* ptr) {
@@ -206,6 +210,7 @@
             break;
           case 'D': args << 'r' << rd; break;
           case 'd': args << 'f' << rd; break;
+          case 'a': args << 'f' << sa; break;
           case 'f':  // Floating point "fmt".
             {
               size_t fmt = (instruction >> 21) & 0x7;  // TODO: other fmts?
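
Note: the table rows above are matched by masking; an instruction decodes as the
first entry whose masked bits equal the entry's value. That is why the mfc1/mtc1
moves need masks covering the rs field while the arithmetic rows key off the fmt
field. A toy version of the matcher, with made-up encodings for illustration only:

    #include <cstdint>
    #include <cstdio>

    struct MipsInstruction {
      uint32_t mask;
      uint32_t value;
      const char* name;
    };

    // Two simplified rows in the same spirit as the real table.
    static const MipsInstruction kToyTable[] = {
      {0xffe0003fu, 0x44000000u, "mfc1"},  // COP1, rs == 0x00, funct == 0
      {0xfc00003fu, 0x46000000u, "add"},   // COP1, fmt-based arithmetic, funct == 0
    };

    const char* Decode(uint32_t instruction) {
      for (const MipsInstruction& entry : kToyTable) {
        if ((instruction & entry.mask) == entry.value) {
          return entry.name;  // first match wins, so order matters
        }
      }
      return "unknown";
    }

    int main() {
      std::printf("%s\n", Decode(0x44020000u));  // decodes as "mfc1" here
      return 0;
    }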
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index b0c66b3..f6bfee7 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -61,6 +61,15 @@
       return fix_cortex_a53_835769_;
   }
 
+  // TODO: Tune this on a per-CPU basis. For now, we pessimistically assume
+  // that all ARM64 CPUs prefer explicit memory barriers over acquire-release.
+  //
+  // NOTE: This should not actually be the case! However, for now we want to
+  // exercise the explicit memory-barrier code paths in the Optimizing Compiler.
+  bool PreferAcquireRelease() const {
+    return false;
+  }
+
   virtual ~Arm64InstructionSetFeatures() {}
 
  protected:
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 027e59c..753107b 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -30,6 +30,8 @@
   EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
   EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str());
   EXPECT_EQ(arm64_features->AsBitmap(), 3U);
+  // See the comments in instruction_set_features_arm64.h.
+  EXPECT_FALSE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
 }
 
 }  // namespace art
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 92e0f07..69fe874 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -142,11 +142,11 @@
   std::ostringstream os;
   const uint16_t* insn = reinterpret_cast<const uint16_t*>(this);
   for (size_t i = 0; i < inst_length; i++) {
-    os << StringPrintf("%02x%02x", (uint8_t)(insn[i] & 0x00FF),
-                       (uint8_t)((insn[i] & 0xFF00)>>8)) << " ";
+    os << StringPrintf("%02x%02x", static_cast<uint8_t>(insn[i] & 0x00FF),
+                       static_cast<uint8_t>((insn[i] & 0xFF00) >> 8)) << " ";
   }
   for (size_t i = inst_length; i < instr_code_units; i++) {
-    os << "       ";
+    os << "     ";
   }
   return os.str();
 }
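
Note: the padding fix above follows from the dump format. Each 16-bit code unit
prints as four hex digits plus one trailing space, i.e. exactly five characters,
so the filler for an absent unit must also be five characters (the old
seven-space string misaligned the columns). A quick standalone check:

    #include <cstdio>

    int main() {
      std::printf("[%02x%02x ]\n", 0x0f, 0x12);  // prints [0f12 ] -- 5 chars wide
      std::printf("[%s]\n", "     ");            // prints [     ] -- also 5 chars
      return 0;
    }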
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index a29558e..3ab7f30 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -16,6 +16,8 @@
 
 #include "interpreter_common.h"
 
+#include <cmath>
+
 #include "mirror/array-inl.h"
 
 namespace art {
@@ -839,6 +841,23 @@
   result->SetL(found);
 }
 
+// Common helper for class-loading cutouts in an unstarted runtime. We call Runtime methods that
+// rely on Java code to wrap errors in the correct exception class (i.e., NoClassDefFoundError
+// into ClassNotFoundException), so we need to do the same here. The one exception type that must
+// not be wrapped is InternalError, as it signals an initialization abort.
+static void CheckExceptionGenerateClassNotFound(Thread* self)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (self->IsExceptionPending()) {
+    // If it is not an InternalError, wrap it.
+    std::string type(PrettyTypeOf(self->GetException(nullptr)));
+    if (type != "java.lang.InternalError") {
+      self->ThrowNewWrappedException(self->GetCurrentLocationForThrow(),
+                                     "Ljava/lang/ClassNotFoundException;",
+                                     "ClassNotFoundException");
+    }
+  }
+}
+
 static void UnstartedRuntimeInvoke(Thread* self,  const DexFile::CodeItem* code_item,
                                    ShadowFrame* shadow_frame,
                                    JValue* result, size_t arg_offset) {
@@ -846,18 +865,34 @@
   // problems in core libraries.
   std::string name(PrettyMethod(shadow_frame->GetMethod()));
   if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
-    // TODO: Support for the other variants that take more arguments should also be added.
     mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
     StackHandleScope<1> hs(self);
     Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
     UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result, name,
-                              true, true);
-  } else if (name == "java.lang.Class java.lang.VMClassLoader.loadClass(java.lang.String, boolean)") {
+                              true, false);
+    CheckExceptionGenerateClassNotFound(self);
+  } else if (name == "java.lang.Class java.lang.Class.forName(java.lang.String, boolean, java.lang.ClassLoader)") {
     mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
-    StackHandleScope<1> hs(self);
+    bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
+    mirror::ClassLoader* class_loader =
+        down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
+    StackHandleScope<2> hs(self);
     Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
-    UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result, name,
-                              false, true);
+    Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+    UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, initialize_class,
+                              false);
+    CheckExceptionGenerateClassNotFound(self);
+  } else if (name == "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)") {
+    mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+    bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
+    mirror::ClassLoader* class_loader =
+        down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
+    StackHandleScope<2> hs(self);
+    Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+    Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+    UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, initialize_class,
+                              false);
+    CheckExceptionGenerateClassNotFound(self);
   } else if (name == "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") {
     mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
     mirror::ClassLoader* class_loader =
@@ -866,17 +901,47 @@
     Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
     Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
     UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, false, false);
+    // This might have an exception pending, but the semantics are to just return null.
+    if (self->IsExceptionPending()) {
+      // If it is an InternalError, keep it. See CheckExceptionGenerateClassNotFound.
+      std::string type(PrettyTypeOf(self->GetException(nullptr)));
+      if (type != "java.lang.InternalError") {
+        self->ClearException();
+      }
+    }
   } else if (name == "java.lang.Class java.lang.Void.lookupType()") {
     result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
   } else if (name == "java.lang.Object java.lang.Class.newInstance()") {
+    StackHandleScope<2> hs(self);
     Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
-    ArtMethod* c = klass->FindDeclaredDirectMethod("<init>", "()V");
-    CHECK(c != NULL);
-    StackHandleScope<1> hs(self);
-    Handle<Object> obj(hs.NewHandle(klass->AllocObject(self)));
-    CHECK(obj.Get() != NULL);
-    EnterInterpreterFromInvoke(self, c, obj.Get(), NULL, NULL);
-    result->SetL(obj.Get());
+    Handle<Class> h_klass(hs.NewHandle(klass));
+    // There are two situations in which we'll abort this run:
+    //  1) the class isn't yet initialized and initialization fails, or
+    //  2) we can't find the default constructor (we postpone that exception to runtime).
+    // Note that 2) could likely be handled here, but for safety we abort the transaction.
+    bool ok = false;
+    if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
+      ArtMethod* c = h_klass->FindDeclaredDirectMethod("<init>", "()V");
+      if (c != nullptr) {
+        Handle<Object> obj(hs.NewHandle(klass->AllocObject(self)));
+        CHECK(obj.Get() != nullptr);  // We don't expect OOM at compile-time.
+        EnterInterpreterFromInvoke(self, c, obj.Get(), nullptr, nullptr);
+        result->SetL(obj.Get());
+        ok = true;
+      } else {
+        self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
+                                 "Could not find default constructor for '%s'",
+                                 PrettyClass(h_klass.Get()).c_str());
+      }
+    }
+    if (!ok) {
+      std::string error_msg = StringPrintf("Failed in Class.newInstance for '%s' with %s",
+                                           PrettyClass(h_klass.Get()).c_str(),
+                                           PrettyTypeOf(self->GetException(nullptr)).c_str());
+      self->ThrowNewWrappedException(self->GetCurrentLocationForThrow(),
+                                     "Ljava/lang/InternalError;",
+                                     error_msg.c_str());
+    }
   } else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
     // Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
     // going the reflective Dex way.
@@ -949,12 +1014,67 @@
                                "Unimplemented System.arraycopy for type '%s'",
                                PrettyDescriptor(ctype).c_str());
     }
-  } else  if (name == "java.lang.Object java.lang.ThreadLocal.get()") {
+  } else if (name == "long java.lang.Double.doubleToRawLongBits(double)") {
+    double in = shadow_frame->GetVRegDouble(arg_offset);
+    result->SetJ(bit_cast<int64_t>(in));
+  } else if (name == "double java.lang.Math.ceil(double)") {
+    double in = shadow_frame->GetVRegDouble(arg_offset);
+    double out;
+    // Special cases:
+    // 1) NaN, infinity, +0, -0 -> out := in. All are guaranteed by cmath.
+    // 2) -1 < in < 0 -> out := -0.
+    if (-1.0 < in && in < 0) {
+      out = -0.0;
+    } else {
+      out = ceil(in);
+    }
+    result->SetD(out);
+  } else if (name == "java.lang.Object java.lang.ThreadLocal.get()") {
     std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
+    bool ok = false;
     if (caller == "java.lang.String java.lang.IntegralToString.convertInt(java.lang.AbstractStringBuilder, int)") {
       // Allocate non-threadlocal buffer.
       result->SetL(mirror::CharArray::Alloc(self, 11));
-    } else {
+      ok = true;
+    } else if (caller == "java.lang.RealToString java.lang.RealToString.getInstance()") {
+      // Note: RealToString is implemented and used differently from IntegralToString. Conversion
+      // is done on an actual RealToString instance (the conversion method is an instance method),
+      // so it is less clear whether returning a new object each time is correct. The caller needs
+      // to be inspected by hand to see whether it (incorrectly) stores the object for later use.
+      // See also b/19548084 for a possible rewrite that would bring this in line with
+      // IntegralToString.
+      if (shadow_frame->GetLink()->GetLink() != nullptr) {
+        std::string caller2(PrettyMethod(shadow_frame->GetLink()->GetLink()->GetMethod()));
+        if (caller2 == "java.lang.String java.lang.Double.toString(double)") {
+          // Allocate new object.
+          mirror::Class* real_to_string_class =
+              shadow_frame->GetLink()->GetMethod()->GetDeclaringClass();
+          mirror::Object* real_to_string_obj = real_to_string_class->AllocObject(self);
+          if (real_to_string_obj != nullptr) {
+            mirror::ArtMethod* init_method =
+                real_to_string_class->FindDirectMethod("<init>", "()V");
+            if (init_method == nullptr) {
+              real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
+            }
+            JValue invoke_result;
+            // One arg, this.
+            uint32_t args = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(real_to_string_obj));
+            init_method->Invoke(self, &args, 4, &invoke_result, init_method->GetShorty());
+            if (!self->IsExceptionPending()) {
+              result->SetL(real_to_string_obj);
+              ok = true;
+            }
+          }
+
+          if (!ok) {
+            // We'll abort, so clear exception.
+            self->ClearException();
+          }
+        }
+      }
+    }
+
+    if (!ok) {
       self->ThrowNewException(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
                               "Unimplemented ThreadLocal.get");
     }
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 6afc373..851eceb 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1264,14 +1264,6 @@
     return;
   }
 
-#if !defined(HAVE_ANDROID_OS)
-  if (GetTid() != tid) {
-    // TODO: dumping of other threads is disabled to avoid crashes during stress testing.
-    //       b/15446488.
-    return;
-  }
-#endif
-
   std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
   if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
     os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
@@ -1519,14 +1511,16 @@
 std::string DexFilenameToOdexFilename(const std::string& location, const InstructionSet isa) {
   // location = /foo/bar/baz.jar
   // odex_location = /foo/bar/<isa>/baz.odex
-
-  CHECK_GE(location.size(), 4U) << location;  // must be at least .123
   std::string odex_location(location);
   InsertIsaDirectory(isa, &odex_location);
-  size_t dot_index = odex_location.size() - 3 - 1;  // 3=dex or zip or apk
-  CHECK_EQ('.', odex_location[dot_index]) << location;
+  size_t dot_index = odex_location.rfind('.');
+
+  // The location must have an extension, otherwise it's not clear what we
+  // should return.
+  CHECK_NE(dot_index, std::string::npos) << odex_location;
+  CHECK_EQ(std::string::npos, odex_location.find('/', dot_index)) << odex_location;
+
   odex_location.resize(dot_index + 1);
-  CHECK_EQ('.', odex_location[odex_location.size()-1]) << location << " " << odex_location;
   odex_location += "odex";
   return odex_location;
 }
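
Note: the new extension handling above is small enough to lift out. A
self-contained version of the same logic, with a hypothetical helper name:

    #include <cassert>
    #include <string>

    // Mirrors the body of DexFilenameToOdexFilename after the ISA directory
    // has been inserted; the helper name is illustrative only.
    std::string ReplaceExtensionWithOdex(std::string odex_location) {
      size_t dot_index = odex_location.rfind('.');
      // The location must have an extension...
      assert(dot_index != std::string::npos);
      // ...and the '.' must be in the file name, not in a directory component.
      assert(odex_location.find('/', dot_index) == std::string::npos);
      odex_location.resize(dot_index + 1);  // keep everything up to and including '.'
      return odex_location + "odex";
    }

    // ReplaceExtensionWithOdex("/foo/bar/arm/baz.funnyext") == "/foo/bar/arm/baz.odex"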
diff --git a/runtime/utils.h b/runtime/utils.h
index 698d686..9d04d35 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -516,8 +516,9 @@
 // Returns the system location for an image
 std::string GetSystemImageFilename(const char* location, InstructionSet isa);
 
-// Returns an .odex file name next adjacent to the dex location.
+// Returns an .odex file name adjacent to the dex location.
 // For example, for "/foo/bar/baz.jar", return "/foo/bar/<isa>/baz.odex".
+// The dex location must include a directory component and have an extension.
 // Note: does not support multidex location strings.
 std::string DexFilenameToOdexFilename(const std::string& location, InstructionSet isa);
 
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index a3dd13c..5465762 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -374,6 +374,8 @@
 TEST_F(UtilsTest, DexFilenameToOdexFilename) {
   EXPECT_STREQ("/foo/bar/arm/baz.odex",
                DexFilenameToOdexFilename("/foo/bar/baz.jar", kArm).c_str());
+  EXPECT_STREQ("/foo/bar/arm/baz.odex",
+               DexFilenameToOdexFilename("/foo/bar/baz.funnyext", kArm).c_str());
 }
 
 TEST_F(UtilsTest, ExecSuccess) {
diff --git a/test/134-reg-promotion/expected.txt b/test/134-reg-promotion/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/134-reg-promotion/expected.txt
diff --git a/test/134-reg-promotion/info.txt b/test/134-reg-promotion/info.txt
new file mode 100644
index 0000000..6eff7eb
--- /dev/null
+++ b/test/134-reg-promotion/info.txt
@@ -0,0 +1,4 @@
+Test that a vreg value that was defined by a const 0 and is used in both ref
+and float operations is flushed to all home locations.
+
+See: b/19417710, b/7250540 & b.android.com/147187
diff --git a/test/134-reg-promotion/smali/Test.smali b/test/134-reg-promotion/smali/Test.smali
new file mode 100644
index 0000000..6a35c45
--- /dev/null
+++ b/test/134-reg-promotion/smali/Test.smali
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+.method public static run()V
+   .registers 3
+   new-instance v2, Ljava/lang/String;
+   invoke-direct {v2}, Ljava/lang/String;-><init>()V
+   const/4 v0, 0
+   move v1, v0
+   :start
+   invoke-static {}, LMain;->blowup()V
+   if-ne v1, v0, :end
+   const/4 v2, 1
+   invoke-static {v2}, Ljava/lang/Integer;->toString(I)Ljava/lang/String;
+   move v2, v0
+   # The following call types v2 as a float.
+   invoke-static {v2}, Ljava/lang/Float;->isNaN(F)Z
+   const/4 v1, 1
+   goto :start
+   :end
+   return-void
+.end method
diff --git a/test/134-reg-promotion/src/Main.java b/test/134-reg-promotion/src/Main.java
new file mode 100644
index 0000000..d45ec66
--- /dev/null
+++ b/test/134-reg-promotion/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+    static char [][] holder;
+    static boolean sawOome;
+
+    static void blowup() {
+        try {
+            for (int i = 0; i < holder.length; ++i) {
+                holder[i] = new char[1024 * 1024];
+            }
+        } catch (OutOfMemoryError oome) {
+            sawOome = true;
+        }
+    }
+
+    public static void main(String args[]) throws Exception {
+        Class<?> c = Class.forName("Test");
+        Method m = c.getMethod("run", (Class[]) null);
+        for (int i = 0; i < 10; i++) {
+            holder = new char[128 * 1024][];
+            m.invoke(null, (Object[]) null);
+            holder = null;
+        }
+    }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index e5a07d4..c764414 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -569,32 +569,38 @@
       endif
     endif
   endif
+  ifeq ($(4),jit)
+    # Use interpreter image for JIT.
+    image_suffix := interpreter
+  else
+    image_suffix := $(4)
+  endif
   ifeq ($(9),no-image)
     test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES
     run_test_options += --no-image
     # Add the core dependency. This is required for pre-building.
     ifeq ($(1),host)
-      prereq_rule += $(HOST_CORE_IMAGE_$(4)_no-pic_$(12))
+      prereq_rule += $(HOST_CORE_IMAGE_$(image_suffix)_no-pic_$(12))
     else
-      prereq_rule += $(TARGET_CORE_IMAGE_$(4)_no-pic_$(12))
+      prereq_rule += $(TARGET_CORE_IMAGE_$(image_suffix)_no-pic_$(12))
     endif
   else
     ifeq ($(9),image)
       test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
       # Add the core dependency.
       ifeq ($(1),host)
-        prereq_rule += $(HOST_CORE_IMAGE_$(4)_no-pic_$(12))
+        prereq_rule += $(HOST_CORE_IMAGE_$(image_suffix)_no-pic_$(12))
       else
-        prereq_rule += $(TARGET_CORE_IMAGE_$(4)_no-pic_$(12))
+        prereq_rule += $(TARGET_CORE_IMAGE_$(image_suffix)_no-pic_$(12))
       endif
     else
       ifeq ($(9),picimage)
         test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
         run_test_options += --pic-image
         ifeq ($(1),host)
-          prereq_rule += $(HOST_CORE_IMAGE_$(4)_pic_$(12))
+          prereq_rule += $(HOST_CORE_IMAGE_$(image_suffix)_pic_$(12))
         else
-          prereq_rule += $(TARGET_CORE_IMAGE_$(4)_pic_$(12))
+          prereq_rule += $(TARGET_CORE_IMAGE_$(image_suffix)_pic_$(12))
         endif
       else
         $$(error found $(9) expected $(IMAGE_TYPES))
diff --git a/test/run-test b/test/run-test
index 8bc4151..52f5e0c 100755
--- a/test/run-test
+++ b/test/run-test
@@ -195,6 +195,7 @@
         shift
     elif [ "x$1" = "x--jit" ]; then
         run_args="${run_args} --jit"
+        image_suffix="-interpreter"
         shift
     elif [ "x$1" = "x--optimizing" ]; then
         run_args="${run_args} -Xcompiler-option --compiler-backend=Optimizing"