Merge changes I207718d6,I700ef52e

* changes:
  ART: Restrict some checks in the verifier to P+
  ART: Add API level to verifier
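
The bulk of the diff below is mechanical: the Makefile rules switch from a hardcoded adb on PATH to an $(ADB) variable resolved from the built host tool (and list that tool as a build dependency), while the C++ headers swap the OVERRIDE/FINAL macros for the standard override/final keywords. A minimal sketch of the Makefile pattern follows, using a hypothetical rule name; the two variable definitions are taken verbatim from the build/Android.common.mk hunk in this diff:

    # Use the freshly built host adb rather than whatever adb is on PATH.
    ADB_EXECUTABLE := $(HOST_OUT_EXECUTABLES)/adb
    ADB := $(ADB_EXECUTABLE)

    # Depending on the executable ensures it is built before the recipe
    # shells out to it. (Recipe lines need a leading tab in a real makefile;
    # the rule name here is illustrative only.)
    .PHONY: example-device-rule
    example-device-rule: $(ADB_EXECUTABLE)
	$(ADB) wait-for-device
	$(ADB) shell echo device ready
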
diff --git a/Android.mk b/Android.mk
index 19c65a1..7852be5 100644
--- a/Android.mk
+++ b/Android.mk
@@ -41,18 +41,18 @@
 
 .PHONY: clean-oat-target
 clean-oat-target:
-	adb root
-	adb wait-for-device remount
-	adb shell rm -rf $(ART_TARGET_NATIVETEST_DIR)
-	adb shell rm -rf $(ART_TARGET_TEST_DIR)
-	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*/*
-	adb shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$(DEX2OAT_TARGET_ARCH)
-	adb shell rm -rf system/app/$(DEX2OAT_TARGET_ARCH)
+	$(ADB) root
+	$(ADB) wait-for-device remount
+	$(ADB) shell rm -rf $(ART_TARGET_NATIVETEST_DIR)
+	$(ADB) shell rm -rf $(ART_TARGET_TEST_DIR)
+	$(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*/*
+	$(ADB) shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$(DEX2OAT_TARGET_ARCH)
+	$(ADB) shell rm -rf system/app/$(DEX2OAT_TARGET_ARCH)
 ifdef TARGET_2ND_ARCH
-	adb shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
-	adb shell rm -rf system/app/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
+	$(ADB) shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
+	$(ADB) shell rm -rf system/app/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
 endif
-	adb shell rm -rf data/run-test/test-*/dalvik-cache/*
+	$(ADB) shell rm -rf data/run-test/test-*/dalvik-cache/*
 
 ########################################################################
 # cpplint rules to style check art source files
@@ -92,7 +92,7 @@
 # test rules
 
 # All the dependencies that must be built ahead of sync-ing them onto the target device.
-TEST_ART_TARGET_SYNC_DEPS :=
+TEST_ART_TARGET_SYNC_DEPS := $(ADB_EXECUTABLE)
 
 include $(art_path)/build/Android.common_test.mk
 include $(art_path)/build/Android.gtest.mk
@@ -100,14 +100,14 @@
 
 # Make sure /system is writable on the device.
 TEST_ART_ADB_ROOT_AND_REMOUNT := \
-    (adb root && \
-     adb wait-for-device remount && \
-     ((adb shell touch /system/testfile && \
-       (adb shell rm /system/testfile || true)) || \
-      (adb disable-verity && \
-       adb reboot && \
-       adb wait-for-device root && \
-       adb wait-for-device remount)))
+    ($(ADB) root && \
+     $(ADB) wait-for-device remount && \
+     (($(ADB) shell touch /system/testfile && \
+       ($(ADB) shell rm /system/testfile || true)) || \
+      ($(ADB) disable-verity && \
+       $(ADB) reboot && \
+       $(ADB) wait-for-device root && \
+       $(ADB) wait-for-device remount)))
 
 # Sync test files to the target, depends upon all things that must be pushed to the target.
 .PHONY: test-art-target-sync
@@ -121,25 +121,25 @@
 ifeq ($(ART_TEST_CHROOT),)
 test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
 	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
-	adb sync system && adb sync data
+	$(ADB) sync system && $(ADB) sync data
 else
 # TEST_ART_ADB_ROOT_AND_REMOUNT is not needed here, as we are only
 # pushing things to the chroot dir, which is expected to be under
 # /data on the device.
 test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
-	adb wait-for-device
-	adb push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)/
-	adb push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
+	$(ADB) wait-for-device
+	$(ADB) push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)/
+	$(ADB) push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
 endif
 else
 test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
 	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
-	adb wait-for-device
-	adb push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)$(ART_TEST_ANDROID_ROOT)
+	$(ADB) wait-for-device
+	$(ADB) push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)$(ART_TEST_ANDROID_ROOT)
 # Push the contents of the `data` dir into `$(ART_TEST_CHROOT)/data` on the device (note
 # that $(ART_TEST_CHROOT) can be empty).  If `$(ART_TEST_CHROOT)/data` already exists on
 # the device, it is not overwritten, but its content is updated.
-	adb push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
+	$(ADB) push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
 endif
 endif
 
@@ -493,90 +493,90 @@
 
 .PHONY: use-art
 use-art:
-	adb root
-	adb wait-for-device shell stop
-	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
-	adb shell start
+	$(ADB) root
+	$(ADB) wait-for-device shell stop
+	$(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+	$(ADB) shell start
 
 .PHONY: use-artd
 use-artd:
-	adb root
-	adb wait-for-device shell stop
-	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
-	adb shell start
+	$(ADB) root
+	$(ADB) wait-for-device shell stop
+	$(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
+	$(ADB) shell start
 
 .PHONY: use-dalvik
 use-dalvik:
-	adb root
-	adb wait-for-device shell stop
-	adb shell setprop persist.sys.dalvik.vm.lib.2 libdvm.so
-	adb shell start
+	$(ADB) root
+	$(ADB) wait-for-device shell stop
+	$(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libdvm.so
+	$(ADB) shell start
 
 .PHONY: use-art-full
 use-art-full:
-	adb root
-	adb wait-for-device shell stop
-	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-filter \"\"
-	adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
-	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
-	adb shell setprop dalvik.vm.usejit false
-	adb shell start
+	$(ADB) root
+	$(ADB) wait-for-device shell stop
+	$(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+	$(ADB) shell setprop dalvik.vm.dex2oat-filter \"\"
+	$(ADB) shell setprop dalvik.vm.image-dex2oat-filter \"\"
+	$(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+	$(ADB) shell setprop dalvik.vm.usejit false
+	$(ADB) shell start
 
 .PHONY: use-artd-full
 use-artd-full:
-	adb root
-	adb wait-for-device shell stop
-	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-filter \"\"
-	adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
-	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
-	adb shell setprop dalvik.vm.usejit false
-	adb shell start
+	$(ADB) root
+	$(ADB) wait-for-device shell stop
+	$(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+	$(ADB) shell setprop dalvik.vm.dex2oat-filter \"\"
+	$(ADB) shell setprop dalvik.vm.image-dex2oat-filter \"\"
+	$(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
+	$(ADB) shell setprop dalvik.vm.usejit false
+	$(ADB) shell start
 
 .PHONY: use-art-jit
 use-art-jit:
-	adb root
-	adb wait-for-device shell stop
-	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-filter "verify-at-runtime"
-	adb shell setprop dalvik.vm.image-dex2oat-filter "verify-at-runtime"
-	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
-	adb shell setprop dalvik.vm.usejit true
-	adb shell start
+	$(ADB) root
+	$(ADB) wait-for-device shell stop
+	$(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+	$(ADB) shell setprop dalvik.vm.dex2oat-filter "verify-at-runtime"
+	$(ADB) shell setprop dalvik.vm.image-dex2oat-filter "verify-at-runtime"
+	$(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+	$(ADB) shell setprop dalvik.vm.usejit true
+	$(ADB) shell start
 
 .PHONY: use-art-interpret-only
 use-art-interpret-only:
-	adb root
-	adb wait-for-device shell stop
-	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
-	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
-	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
-	adb shell setprop dalvik.vm.usejit false
-	adb shell start
+	$(ADB) root
+	$(ADB) wait-for-device shell stop
+	$(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+	$(ADB) shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+	$(ADB) shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
+	$(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+	$(ADB) shell setprop dalvik.vm.usejit false
+	$(ADB) shell start
 
 .PHONY: use-artd-interpret-only
 use-artd-interpret-only:
-	adb root
-	adb wait-for-device shell stop
-	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
-	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
-	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
-	adb shell setprop dalvik.vm.usejit false
-	adb shell start
+	$(ADB) root
+	$(ADB) wait-for-device shell stop
+	$(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+	$(ADB) shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+	$(ADB) shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
+	$(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
+	$(ADB) shell setprop dalvik.vm.usejit false
+	$(ADB) shell start
 
 .PHONY: use-art-verify-none
 use-art-verify-none:
-	adb root
-	adb wait-for-device shell stop
-	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-filter "verify-none"
-	adb shell setprop dalvik.vm.image-dex2oat-filter "verify-none"
-	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
-	adb shell setprop dalvik.vm.usejit false
-	adb shell start
+	$(ADB) root
+	$(ADB) wait-for-device shell stop
+	$(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
+	$(ADB) shell setprop dalvik.vm.dex2oat-filter "verify-none"
+	$(ADB) shell setprop dalvik.vm.image-dex2oat-filter "verify-none"
+	$(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+	$(ADB) shell setprop dalvik.vm.usejit false
+	$(ADB) shell start
 
 ########################################################################
 
diff --git a/adbconnection/adbconnection.h b/adbconnection/adbconnection.h
index 04e39bf..c51f981 100644
--- a/adbconnection/adbconnection.h
+++ b/adbconnection/adbconnection.h
@@ -46,12 +46,12 @@
       : connection_(connection) {}
 
   // Begin running the debugger.
-  void StartDebugger() OVERRIDE;
+  void StartDebugger() override;
 
   // The debugger should begin shutting down since the runtime is ending.
-  void StopDebugger() OVERRIDE;
+  void StopDebugger() override;
 
-  bool IsDebuggerConfigured() OVERRIDE;
+  bool IsDebuggerConfigured() override;
 
  private:
   AdbConnectionState* connection_;
diff --git a/build/Android.common.mk b/build/Android.common.mk
index a6a9f0f..316ce64 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -92,4 +92,7 @@
   2ND_ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES)
 endif
 
+ADB_EXECUTABLE := $(HOST_OUT_EXECUTABLES)/adb
+ADB := $(ADB_EXECUTABLE)
+
 endif # ART_ANDROID_COMMON_MK
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 81c82b7..20f20c9 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -447,7 +447,8 @@
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
     $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
+    $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
+    $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
 
 $$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe)
 $$(gtest_rule): PRIVATE_MAYBE_CHROOT_COMMAND := $$(maybe_chroot_command)
@@ -460,14 +461,14 @@
 
 .PHONY: $$(gtest_rule)
 $$(gtest_rule): test-art-target-sync
-	$(hide) adb shell touch $$(PRIVATE_GTEST_WITNESS)
-	$(hide) adb shell rm $$(PRIVATE_GTEST_WITNESS)
-	$(hide) adb shell $$(PRIVATE_MAYBE_CHROOT_COMMAND) chmod 755 $$(PRIVATE_TARGET_EXE)
+	$(hide) $(ADB) shell touch $$(PRIVATE_GTEST_WITNESS)
+	$(hide) $(ADB) shell rm $$(PRIVATE_GTEST_WITNESS)
+	$(hide) $(ADB) shell $$(PRIVATE_MAYBE_CHROOT_COMMAND) chmod 755 $$(PRIVATE_TARGET_EXE)
 	$(hide) $$(call ART_TEST_SKIP,$$@) && \
-	  (adb shell "$$(PRIVATE_MAYBE_CHROOT_COMMAND) env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
+	  ($(ADB) shell "$$(PRIVATE_MAYBE_CHROOT_COMMAND) env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
 	       ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) $$(PRIVATE_TARGET_EXE) \
 	     && touch $$(PRIVATE_GTEST_WITNESS)" \
-	   && (adb pull $$(PRIVATE_GTEST_WITNESS) /tmp/ && $$(call ART_TEST_PASSED,$$@)) \
+	   && ($(ADB) pull $$(PRIVATE_GTEST_WITNESS) /tmp/ && $$(call ART_TEST_PASSED,$$@)) \
 	   || $$(call ART_TEST_FAILED,$$@))
 	$(hide) rm -f /tmp/$$@-$$$$PPID
 
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index a33d537..42c6a5f 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -558,13 +558,12 @@
 TEST_F(CmdlineParserTest, MultipleArguments) {
   EXPECT_TRUE(IsResultSuccessful(parser_->Parse(
       "-help -XX:ForegroundHeapGrowthMultiplier=0.5 "
-      "-Xnodex2oat -Xmethod-trace -XX:LargeObjectSpace=map")));
+      "-Xmethod-trace -XX:LargeObjectSpace=map")));
 
   auto&& map = parser_->ReleaseArgumentsMap();
-  EXPECT_EQ(5u, map.Size());
+  EXPECT_EQ(4u, map.Size());
   EXPECT_KEY_VALUE(map, M::Help, Unit{});
   EXPECT_KEY_VALUE(map, M::ForegroundHeapGrowthMultiplier, 0.5);
-  EXPECT_KEY_VALUE(map, M::Dex2Oat, false);
   EXPECT_KEY_VALUE(map, M::MethodTrace, Unit{});
   EXPECT_KEY_VALUE(map, M::LargeObjectSpace, gc::space::LargeObjectSpaceType::kMap);
 }  //  TEST_F
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 366489c..e6d1564 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -57,9 +57,9 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
-  void SetUp() OVERRIDE;
+  void SetUp() override;
 
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE;
+  void SetUpRuntimeOptions(RuntimeOptions* options) override;
 
   Compiler::Kind GetCompilerKind() const;
   void SetCompilerKind(Compiler::Kind compiler_kind);
@@ -73,7 +73,7 @@
     return CompilerFilter::kDefaultCompilerFilter;
   }
 
-  void TearDown() OVERRIDE;
+  void TearDown() override;
 
   void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index f880280..864ce58 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -104,7 +104,7 @@
   uint32_t packed_fields_;
 };
 
-class CompiledMethod FINAL : public CompiledCode {
+class CompiledMethod final : public CompiledCode {
  public:
   // Constructs a CompiledMethod.
   // Note: Consider using the static allocation methods below that will allocate the CompiledMethod
diff --git a/compiler/debug/dwarf/debug_abbrev_writer.h b/compiler/debug/dwarf/debug_abbrev_writer.h
index cccca25..63a049b 100644
--- a/compiler/debug/dwarf/debug_abbrev_writer.h
+++ b/compiler/debug/dwarf/debug_abbrev_writer.h
@@ -37,7 +37,7 @@
 // determines all the attributes and their format.
 // It is possible to think of them as type definitions.
 template <typename Vector = std::vector<uint8_t>>
-class DebugAbbrevWriter FINAL : private Writer<Vector> {
+class DebugAbbrevWriter final : private Writer<Vector> {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
 
  public:
diff --git a/compiler/debug/dwarf/debug_info_entry_writer.h b/compiler/debug/dwarf/debug_info_entry_writer.h
index 89d16f2..b198178 100644
--- a/compiler/debug/dwarf/debug_info_entry_writer.h
+++ b/compiler/debug/dwarf/debug_info_entry_writer.h
@@ -42,7 +42,7 @@
  *   EndTag();
  */
 template <typename Vector = std::vector<uint8_t>>
-class DebugInfoEntryWriter FINAL : private Writer<Vector> {
+class DebugInfoEntryWriter final : private Writer<Vector> {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
 
  public:
diff --git a/compiler/debug/dwarf/debug_line_opcode_writer.h b/compiler/debug/dwarf/debug_line_opcode_writer.h
index b4a4d63..bb4e87f 100644
--- a/compiler/debug/dwarf/debug_line_opcode_writer.h
+++ b/compiler/debug/dwarf/debug_line_opcode_writer.h
@@ -31,7 +31,7 @@
 //  * Keep track of current state and convert absolute values to deltas.
 //  * Divide by header-defined factors as appropriate.
 template<typename Vector = std::vector<uint8_t>>
-class DebugLineOpCodeWriter FINAL : private Writer<Vector> {
+class DebugLineOpCodeWriter final : private Writer<Vector> {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
 
  public:
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 8a07e9c..b7117bd 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -26,7 +26,7 @@
 class DexFile;
 class VerificationResults;
 
-class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
+class QuickCompilerCallbacks final : public CompilerCallbacks {
  public:
   explicit QuickCompilerCallbacks(CompilerCallbacks::CallbackMode mode)
       : CompilerCallbacks(mode), dex_files_(nullptr) {}
@@ -34,20 +34,20 @@
   ~QuickCompilerCallbacks() { }
 
   void MethodVerified(verifier::MethodVerifier* verifier)
-      REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) override;
 
-  void ClassRejected(ClassReference ref) OVERRIDE;
+  void ClassRejected(ClassReference ref) override;
 
   // We are running in an environment where we can call patchoat safely so we should.
-  bool IsRelocationPossible() OVERRIDE {
+  bool IsRelocationPossible() override {
     return true;
   }
 
-  verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE {
+  verifier::VerifierDeps* GetVerifierDeps() const override {
     return verifier_deps_.get();
   }
 
-  void SetVerifierDeps(verifier::VerifierDeps* deps) OVERRIDE {
+  void SetVerifierDeps(verifier::VerifierDeps* deps) override {
     verifier_deps_.reset(deps);
   }
 
@@ -55,18 +55,18 @@
     verification_results_ = verification_results;
   }
 
-  ClassStatus GetPreviousClassState(ClassReference ref) OVERRIDE;
+  ClassStatus GetPreviousClassState(ClassReference ref) override;
 
   void SetDoesClassUnloading(bool does_class_unloading, CompilerDriver* compiler_driver)
-      OVERRIDE {
+      override {
     does_class_unloading_ = does_class_unloading;
     compiler_driver_ = compiler_driver;
     DCHECK(!does_class_unloading || compiler_driver_ != nullptr);
   }
 
-  void UpdateClassState(ClassReference ref, ClassStatus state) OVERRIDE;
+  void UpdateClassState(ClassReference ref, ClassStatus state) override;
 
-  bool CanUseOatStatusForVerification(mirror::Class* klass) OVERRIDE
+  bool CanUseOatStatusForVerification(mirror::Class* klass) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetDexFiles(const std::vector<const DexFile*>* dex_files) {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 3f28aa7..f6afe2c 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -971,7 +971,7 @@
  public:
   ResolveCatchBlockExceptionsClassVisitor() : classes_() {}
 
-  virtual bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
     classes_.push_back(c);
     return true;
   }
@@ -1034,7 +1034,7 @@
   explicit RecordImageClassesVisitor(HashSet<std::string>* image_classes)
       : image_classes_(image_classes) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     std::string temp;
     image_classes_->insert(klass->GetDescriptor(&temp));
     return true;
@@ -1210,7 +1210,7 @@
         : data_(data),
           hs_(hs) {}
 
-    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string temp;
       StringPiece name(klass->GetDescriptor(&temp));
       if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
@@ -1475,7 +1475,7 @@
           end_(end),
           fn_(fn) {}
 
-    void Run(Thread* self) OVERRIDE {
+    void Run(Thread* self) override {
       while (true) {
         const size_t index = manager_->NextIndex();
         if (UNLIKELY(index >= end_)) {
@@ -1486,7 +1486,7 @@
       }
     }
 
-    void Finalize() OVERRIDE {
+    void Finalize() override {
       delete this;
     }
 
@@ -1568,7 +1568,7 @@
   explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
       : manager_(manager) {}
 
-  void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+  void Visit(size_t class_def_index) override REQUIRES(!Locks::mutator_lock_) {
     ScopedTrace trace(__FUNCTION__);
     Thread* const self = Thread::Current();
     jobject jclass_loader = manager_->GetClassLoader();
@@ -1667,7 +1667,7 @@
  public:
   explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) {
   }
-  void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+  void Visit(size_t type_idx) override REQUIRES(!Locks::mutator_lock_) {
   // Class derived values are more complicated, they require the linker and loader.
     ScopedObjectAccess soa(Thread::Current());
     ClassLinker* class_linker = manager_->GetClassLinker();
@@ -1890,7 +1890,7 @@
        log_level_(log_level),
        sdk_version_(Runtime::Current()->GetTargetSdkVersion()) {}
 
-  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+  void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) override {
     ScopedTrace trace(__FUNCTION__);
     ScopedObjectAccess soa(Thread::Current());
     const DexFile& dex_file = *manager_->GetDexFile();
@@ -2024,7 +2024,7 @@
  public:
   explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
 
-  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+  void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) override {
     ScopedTrace trace(__FUNCTION__);
     ScopedObjectAccess soa(Thread::Current());
     const DexFile& dex_file = *manager_->GetDexFile();
@@ -2089,7 +2089,7 @@
  public:
   explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
 
-  void Visit(size_t class_def_index) OVERRIDE {
+  void Visit(size_t class_def_index) override {
     ScopedTrace trace(__FUNCTION__);
     jobject jclass_loader = manager_->GetClassLoader();
     const DexFile& dex_file = *manager_->GetDexFile();
@@ -2474,7 +2474,7 @@
   explicit InitializeArrayClassesAndCreateConflictTablesVisitor(VariableSizedHandleScope& hs)
       : hs_(hs) {}
 
-  virtual bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE
+  bool operator()(ObjPtr<mirror::Class> klass) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
       return true;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 2eeb439..fe1568d 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -186,7 +186,7 @@
 
 class CompilerDriverProfileTest : public CompilerDriverTest {
  protected:
-  ProfileCompilationInfo* GetProfileCompilationInfo() OVERRIDE {
+  ProfileCompilationInfo* GetProfileCompilationInfo() override {
     ScopedObjectAccess soa(Thread::Current());
     std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
 
@@ -200,7 +200,7 @@
     return &profile_info_;
   }
 
-  CompilerFilter::Filter GetCompilerFilter() const OVERRIDE {
+  CompilerFilter::Filter GetCompilerFilter() const override {
     // Use a profile based filter.
     return CompilerFilter::kSpeedProfile;
   }
@@ -278,7 +278,7 @@
 // which will be used for OatClass.
 class CompilerDriverVerifyTest : public CompilerDriverTest {
  protected:
-  CompilerFilter::Filter GetCompilerFilter() const OVERRIDE {
+  CompilerFilter::Filter GetCompilerFilter() const override {
     return CompilerFilter::kVerify;
   }
 
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 601c914..34aceba 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -43,7 +43,7 @@
 enum class InstructionSet;
 class InstructionSetFeatures;
 
-class CompilerOptions FINAL {
+class CompilerOptions final {
  public:
   // Guide heuristics to determine whether to compile method if profile data not available.
   static const size_t kDefaultHugeMethodThreshold = 10000;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 3cb4a65..92b9543 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -221,12 +221,12 @@
 
 class JniCompilerTest : public CommonCompilerTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonCompilerTest::SetUp();
     check_generic_jni_ = false;
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     android::ResetNativeLoader();
     CommonCompilerTest::TearDown();
   }
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 249f202..b327898 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -25,24 +25,24 @@
 
 constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
 
-class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class ArmManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k32) {}
-  ~ArmManagedRuntimeCallingConvention() OVERRIDE {}
+  ~ArmManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   ManagedRegisterEntrySpills entry_spills_;
@@ -50,37 +50,37 @@
   DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
 };
 
-class ArmJniCallingConvention FINAL : public JniCallingConvention {
+class ArmJniCallingConvention final : public JniCallingConvention {
  public:
   ArmJniCallingConvention(bool is_static,
                           bool is_synchronized,
                           bool is_critical_native,
                           const char* shorty);
-  ~ArmJniCallingConvention() OVERRIDE {}
+  ~ArmJniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  void Next() OVERRIDE;  // Override default behavior for AAPCS
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  void Next() override;  // Override default behavior for AAPCS
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // AAPCS mandates return values are extended.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return false;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   // Padding to ensure longs and doubles are not split in AAPCS
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 5618942..ed0ddeb 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -25,24 +25,24 @@
 
 constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
 
-class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class Arm64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k64) {}
-  ~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
+  ~Arm64ManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   ManagedRegisterEntrySpills entry_spills_;
@@ -50,36 +50,36 @@
   DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
 };
 
-class Arm64JniCallingConvention FINAL : public JniCallingConvention {
+class Arm64JniCallingConvention final : public JniCallingConvention {
  public:
   Arm64JniCallingConvention(bool is_static,
                             bool is_synchronized,
                             bool is_critical_native,
                             const char* shorty);
-  ~Arm64JniCallingConvention() OVERRIDE {}
+  ~Arm64JniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // aarch64 calling convention leaves upper bits undefined.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return true;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention);
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index ad3f118..165fc60 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -27,24 +27,24 @@
 static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k32),
               "Invalid frame pointer size");
 
-class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class MipsManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k32) {}
-  ~MipsManagedRuntimeCallingConvention() OVERRIDE {}
+  ~MipsManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   ManagedRegisterEntrySpills entry_spills_;
@@ -52,37 +52,37 @@
   DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
 };
 
-class MipsJniCallingConvention FINAL : public JniCallingConvention {
+class MipsJniCallingConvention final : public JniCallingConvention {
  public:
   MipsJniCallingConvention(bool is_static,
                            bool is_synchronized,
                            bool is_critical_native,
                            const char* shorty);
-  ~MipsJniCallingConvention() OVERRIDE {}
+  ~MipsJniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  void Next() OVERRIDE;  // Override default behavior for o32.
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  void Next() override;  // Override default behavior for o32.
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // Mips does not need to extend small return types.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return false;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   // Padding to ensure longs and doubles are not split in o32.
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index faedaef..d87f73a 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -27,24 +27,24 @@
 static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k64),
               "Invalid frame pointer size");
 
-class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class Mips64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k64) {}
-  ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
+  ~Mips64ManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   ManagedRegisterEntrySpills entry_spills_;
@@ -52,36 +52,36 @@
   DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
 };
 
-class Mips64JniCallingConvention FINAL : public JniCallingConvention {
+class Mips64JniCallingConvention final : public JniCallingConvention {
  public:
   Mips64JniCallingConvention(bool is_static,
                              bool is_synchronized,
                              bool is_critical_native,
                              const char* shorty);
-  ~Mips64JniCallingConvention() OVERRIDE {}
+  ~Mips64JniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // Mips64 does not need to extend small return types.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return false;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index be83cda..d0c6198 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -25,7 +25,7 @@
 
 constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
 
-class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class X86ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
@@ -33,17 +33,17 @@
                                         shorty,
                                         PointerSize::k32),
         gpr_arg_count_(0) {}
-  ~X86ManagedRuntimeCallingConvention() OVERRIDE {}
+  ~X86ManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   int gpr_arg_count_;
@@ -53,36 +53,36 @@
 };
 
 // Implements the x86 cdecl calling convention.
-class X86JniCallingConvention FINAL : public JniCallingConvention {
+class X86JniCallingConvention final : public JniCallingConvention {
  public:
   X86JniCallingConvention(bool is_static,
                           bool is_synchronized,
                           bool is_critical_native,
                           const char* shorty);
-  ~X86JniCallingConvention() OVERRIDE {}
+  ~X86JniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // x86 needs to extend small return types.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return true;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention);
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index cdba334..dfab41b 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -23,59 +23,59 @@
 namespace art {
 namespace x86_64 {
 
-class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class X86_64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k64) {}
-  ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
+  ~X86_64ManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
  private:
   ManagedRegisterEntrySpills entry_spills_;
   DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention);
 };
 
-class X86_64JniCallingConvention FINAL : public JniCallingConvention {
+class X86_64JniCallingConvention final : public JniCallingConvention {
  public:
   X86_64JniCallingConvention(bool is_static,
                              bool is_synchronized,
                              bool is_critical_native,
                              const char* shorty);
-  ~X86_64JniCallingConvention() OVERRIDE {}
+  ~X86_64JniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // x86-64 needs to extend small return types.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return true;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention);
diff --git a/compiler/linker/buffered_output_stream.h b/compiler/linker/buffered_output_stream.h
index 512409c..cb1c44b 100644
--- a/compiler/linker/buffered_output_stream.h
+++ b/compiler/linker/buffered_output_stream.h
@@ -26,17 +26,17 @@
 namespace art {
 namespace linker {
 
-class BufferedOutputStream FINAL : public OutputStream {
+class BufferedOutputStream final : public OutputStream {
  public:
   explicit BufferedOutputStream(std::unique_ptr<OutputStream> out);
 
-  ~BufferedOutputStream() OVERRIDE;
+  ~BufferedOutputStream() override;
 
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE;
+  bool WriteFully(const void* buffer, size_t byte_count) override;
 
-  off_t Seek(off_t offset, Whence whence) OVERRIDE;
+  off_t Seek(off_t offset, Whence whence) override;
 
-  bool Flush() OVERRIDE;
+  bool Flush() override;
 
  private:
   static const size_t kBufferSize = 8 * KB;
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 974c590..81ecc17 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -75,7 +75,7 @@
 // The debug sections are written last for easier stripping.
 //
 template <typename ElfTypes>
-class ElfBuilder FINAL {
+class ElfBuilder final {
  public:
   static constexpr size_t kMaxProgramHeaders = 16;
   // SHA-1 digest.  Not using SHA_DIGEST_LENGTH from openssl/sha.h to avoid
@@ -173,21 +173,21 @@
 
     // This function always succeeds to simplify code.
     // Use builder's Good() to check the actual status.
-    bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+    bool WriteFully(const void* buffer, size_t byte_count) override {
       CHECK(owner_->current_section_ == this);
       return owner_->stream_.WriteFully(buffer, byte_count);
     }
 
     // This function always succeeds to simplify code.
     // Use builder's Good() to check the actual status.
-    off_t Seek(off_t offset, Whence whence) OVERRIDE {
+    off_t Seek(off_t offset, Whence whence) override {
       // Forward the seek as-is and trust the caller to use it reasonably.
       return owner_->stream_.Seek(offset, whence);
     }
 
     // This function flushes the output and returns whether it succeeded.
     // If there was a previous failure, this does nothing and returns false, i.e. failed.
-    bool Flush() OVERRIDE {
+    bool Flush() override {
       return owner_->stream_.Flush();
     }
 
@@ -271,7 +271,7 @@
   };
 
   // Writer of .dynstr section.
-  class CachedStringSection FINAL : public CachedSection {
+  class CachedStringSection final : public CachedSection {
    public:
     CachedStringSection(ElfBuilder<ElfTypes>* owner,
                         const std::string& name,
@@ -295,7 +295,7 @@
   };
 
   // Writer of .strtab and .shstrtab sections.
-  class StringSection FINAL : public Section {
+  class StringSection final : public Section {
    public:
     StringSection(ElfBuilder<ElfTypes>* owner,
                   const std::string& name,
@@ -338,7 +338,7 @@
   };
 
   // Writer of .dynsym and .symtab sections.
-  class SymbolSection FINAL : public Section {
+  class SymbolSection final : public Section {
    public:
     SymbolSection(ElfBuilder<ElfTypes>* owner,
                   const std::string& name,
@@ -410,7 +410,7 @@
     std::vector<Elf_Sym> syms_;  // Buffered/cached content of the whole section.
   };
 
-  class AbiflagsSection FINAL : public Section {
+  class AbiflagsSection final : public Section {
    public:
     // Section with Mips abiflag info.
     static constexpr uint8_t MIPS_AFL_REG_NONE =         0;  // no registers
@@ -480,7 +480,7 @@
     } abiflags_;
   };
 
-  class BuildIdSection FINAL : public Section {
+  class BuildIdSection final : public Section {
    public:
     BuildIdSection(ElfBuilder<ElfTypes>* owner,
                    const std::string& name,
diff --git a/compiler/linker/error_delaying_output_stream.h b/compiler/linker/error_delaying_output_stream.h
index 659f1dc..cadd71c 100644
--- a/compiler/linker/error_delaying_output_stream.h
+++ b/compiler/linker/error_delaying_output_stream.h
@@ -27,7 +27,7 @@
 namespace linker {
 
 // OutputStream wrapper that delays reporting an error until Flush().
-class ErrorDelayingOutputStream FINAL : public OutputStream {
+class ErrorDelayingOutputStream final : public OutputStream {
  public:
   explicit ErrorDelayingOutputStream(OutputStream* output)
       : OutputStream(output->GetLocation()),
@@ -37,7 +37,7 @@
 
   // This function always succeeds to simplify code.
   // Use Good() to check the actual status of the output stream.
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+  bool WriteFully(const void* buffer, size_t byte_count) override {
     if (output_good_) {
       if (!output_->WriteFully(buffer, byte_count)) {
         PLOG(ERROR) << "Failed to write " << byte_count
@@ -51,7 +51,7 @@
 
   // This function always succeeds to simplify code.
   // Use Good() to check the actual status of the output stream.
-  off_t Seek(off_t offset, Whence whence) OVERRIDE {
+  off_t Seek(off_t offset, Whence whence) override {
     // We keep shadow copy of the offset so that we return
     // the expected value even if the output stream failed.
     off_t new_offset;
@@ -81,7 +81,7 @@
 
   // Flush the output and return whether all operations have succeeded.
   // Do nothing if we already have a pending error.
-  bool Flush() OVERRIDE {
+  bool Flush() override {
     if (output_good_) {
       output_good_ = output_->Flush();
     }
diff --git a/compiler/linker/file_output_stream.h b/compiler/linker/file_output_stream.h
index deb051f..1417132 100644
--- a/compiler/linker/file_output_stream.h
+++ b/compiler/linker/file_output_stream.h
@@ -24,17 +24,17 @@
 namespace art {
 namespace linker {
 
-class FileOutputStream FINAL : public OutputStream {
+class FileOutputStream final : public OutputStream {
  public:
   explicit FileOutputStream(File* file);
 
-  ~FileOutputStream() OVERRIDE {}
+  ~FileOutputStream() override {}
 
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE;
+  bool WriteFully(const void* buffer, size_t byte_count) override;
 
-  off_t Seek(off_t offset, Whence whence) OVERRIDE;
+  off_t Seek(off_t offset, Whence whence) override;
 
-  bool Flush() OVERRIDE;
+  bool Flush() override;
 
  private:
   File* const file_;
diff --git a/compiler/linker/output_stream_test.cc b/compiler/linker/output_stream_test.cc
index f93ea7a..bcb129c 100644
--- a/compiler/linker/output_stream_test.cc
+++ b/compiler/linker/output_stream_test.cc
@@ -106,20 +106,20 @@
     CheckingOutputStream()
         : OutputStream("dummy"),
           flush_called(false) { }
-    ~CheckingOutputStream() OVERRIDE {}
+    ~CheckingOutputStream() override {}
 
     bool WriteFully(const void* buffer ATTRIBUTE_UNUSED,
-                    size_t byte_count ATTRIBUTE_UNUSED) OVERRIDE {
+                    size_t byte_count ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
     }
 
-    off_t Seek(off_t offset ATTRIBUTE_UNUSED, Whence whence ATTRIBUTE_UNUSED) OVERRIDE {
+    off_t Seek(off_t offset ATTRIBUTE_UNUSED, Whence whence ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
     }
 
-    bool Flush() OVERRIDE {
+    bool Flush() override {
       flush_called = true;
       return true;
     }
diff --git a/compiler/linker/vector_output_stream.h b/compiler/linker/vector_output_stream.h
index 92caf59..0d34da6 100644
--- a/compiler/linker/vector_output_stream.h
+++ b/compiler/linker/vector_output_stream.h
@@ -26,13 +26,13 @@
 namespace art {
 namespace linker {
 
-class VectorOutputStream FINAL : public OutputStream {
+class VectorOutputStream final : public OutputStream {
  public:
   VectorOutputStream(const std::string& location, std::vector<uint8_t>* vector);
 
-  ~VectorOutputStream() OVERRIDE {}
+  ~VectorOutputStream() override {}
 
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+  bool WriteFully(const void* buffer, size_t byte_count) override {
     if (static_cast<size_t>(offset_) == vector_->size()) {
       const uint8_t* start = reinterpret_cast<const uint8_t*>(buffer);
       vector_->insert(vector_->end(), &start[0], &start[byte_count]);
@@ -46,9 +46,9 @@
     return true;
   }
 
-  off_t Seek(off_t offset, Whence whence) OVERRIDE;
+  off_t Seek(off_t offset, Whence whence) override;
 
-  bool Flush() OVERRIDE {
+  bool Flush() override {
     return true;
   }
 
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index dfefa52..1c3660c 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -388,10 +388,10 @@
     return induction_variable_->GetBlock();
   }
 
-  MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; }
+  MonotonicValueRange* AsMonotonicValueRange() override { return this; }
 
   // If it's certain that this value range fits in other_range.
-  bool FitsIn(ValueRange* other_range) const OVERRIDE {
+  bool FitsIn(ValueRange* other_range) const override {
     if (other_range == nullptr) {
       return true;
     }
@@ -402,7 +402,7 @@
   // Try to narrow this MonotonicValueRange given another range.
   // Ideally it will return a normal ValueRange. But due to
   // possible overflow/underflow, that may not be possible.
-  ValueRange* Narrow(ValueRange* range) OVERRIDE {
+  ValueRange* Narrow(ValueRange* range) override {
     if (range == nullptr) {
       return this;
     }
@@ -530,7 +530,7 @@
         induction_range_(induction_analysis),
         next_(nullptr) {}
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     DCHECK(!IsAddedBlock(block));
     first_index_bounds_check_map_.clear();
     // Visit phis and instructions using a safe iterator. The iteration protects
@@ -820,7 +820,7 @@
     }
   }
 
-  void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+  void VisitBoundsCheck(HBoundsCheck* bounds_check) override {
     HBasicBlock* block = bounds_check->GetBlock();
     HInstruction* index = bounds_check->InputAt(0);
     HInstruction* array_length = bounds_check->InputAt(1);
@@ -945,7 +945,7 @@
     return true;
   }
 
-  void VisitPhi(HPhi* phi) OVERRIDE {
+  void VisitPhi(HPhi* phi) override {
     if (phi->IsLoopHeaderPhi()
         && (phi->GetType() == DataType::Type::kInt32)
         && HasSameInputAtBackEdges(phi)) {
@@ -992,14 +992,14 @@
     }
   }
 
-  void VisitIf(HIf* instruction) OVERRIDE {
+  void VisitIf(HIf* instruction) override {
     if (instruction->InputAt(0)->IsCondition()) {
       HCondition* cond = instruction->InputAt(0)->AsCondition();
       HandleIf(instruction, cond->GetLeft(), cond->GetRight(), cond->GetCondition());
     }
   }
 
-  void VisitAdd(HAdd* add) OVERRIDE {
+  void VisitAdd(HAdd* add) override {
     HInstruction* right = add->GetRight();
     if (right->IsIntConstant()) {
       ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock());
@@ -1013,7 +1013,7 @@
     }
   }
 
-  void VisitSub(HSub* sub) OVERRIDE {
+  void VisitSub(HSub* sub) override {
     HInstruction* left = sub->GetLeft();
     HInstruction* right = sub->GetRight();
     if (right->IsIntConstant()) {
@@ -1115,19 +1115,19 @@
     }
   }
 
-  void VisitDiv(HDiv* div) OVERRIDE {
+  void VisitDiv(HDiv* div) override {
     FindAndHandlePartialArrayLength(div);
   }
 
-  void VisitShr(HShr* shr) OVERRIDE {
+  void VisitShr(HShr* shr) override {
     FindAndHandlePartialArrayLength(shr);
   }
 
-  void VisitUShr(HUShr* ushr) OVERRIDE {
+  void VisitUShr(HUShr* ushr) override {
     FindAndHandlePartialArrayLength(ushr);
   }
 
-  void VisitAnd(HAnd* instruction) OVERRIDE {
+  void VisitAnd(HAnd* instruction) override {
     if (instruction->GetRight()->IsIntConstant()) {
       int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue();
       if (constant > 0) {
@@ -1142,7 +1142,7 @@
     }
   }
 
-  void VisitRem(HRem* instruction) OVERRIDE {
+  void VisitRem(HRem* instruction) override {
     HInstruction* left = instruction->GetLeft();
     HInstruction* right = instruction->GetRight();
 
@@ -1202,7 +1202,7 @@
     }
   }
 
-  void VisitNewArray(HNewArray* new_array) OVERRIDE {
+  void VisitNewArray(HNewArray* new_array) override {
     HInstruction* len = new_array->GetLength();
     if (!len->IsIntConstant()) {
       HInstruction *left;
@@ -1240,7 +1240,7 @@
     * has occurred (see AddCompareWithDeoptimization()), since in those cases it would be
     * unsafe to hoist array references across their deoptimization instruction inside a loop.
     */
-  void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+  void VisitArrayGet(HArrayGet* array_get) override {
     if (!has_dom_based_dynamic_bce_ && array_get->IsInLoop()) {
       HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
       if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 92ab798..ef08877 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -34,7 +34,7 @@
         side_effects_(side_effects),
         induction_analysis_(induction_analysis) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kBoundsCheckEliminationPassName = "BCE";
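
The visitors touched above (VisitAdd, VisitSub, VisitBoundsCheck, ...) propagate integer value ranges so that a bounds check can be dropped once the index is proven to lie within [0, array length). A self-contained sketch of that idea with a toy Range type; it is not the pass's ValueRange/MonotonicValueRange classes:

#include <algorithm>

// Toy inclusive integer range, loosely mirroring the idea of the pass.
struct Range {
  int min;
  int max;

  // True when this range is certainly contained in other.
  bool FitsIn(const Range& other) const {
    return other.min <= min && max <= other.max;
  }

  // Conservative intersection ("narrowing") of two ranges.
  Range Narrow(const Range& other) const {
    return Range{std::max(min, other.min), std::min(max, other.max)};
  }
};

// A check 'i < length' with i in index_range can be eliminated when
// index_range.FitsIn(Range{0, length - 1}) holds.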
 
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index bdc395b..c6232ef 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -44,9 +44,9 @@
     GetGraph()->SetNumberOfCHAGuards(0);
   }
 
-  void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) OVERRIDE;
+  void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) override;
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+  void VisitBasicBlock(HBasicBlock* block) override;
 
  private:
   void RemoveGuard(HShouldDeoptimizeFlag* flag);
diff --git a/compiler/optimizing/cha_guard_optimization.h b/compiler/optimizing/cha_guard_optimization.h
index d2c5a34..440d51a 100644
--- a/compiler/optimizing/cha_guard_optimization.h
+++ b/compiler/optimizing/cha_guard_optimization.h
@@ -30,7 +30,7 @@
                                 const char* name = kCHAGuardOptimizationPassName)
       : HOptimization(graph, name) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kCHAGuardOptimizationPassName = "cha_guard_optimization";
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a460f77..d56f7aa 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -247,7 +247,7 @@
  public:
   explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
 
@@ -273,9 +273,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
@@ -285,16 +285,16 @@
  public:
   explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     arm64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
@@ -308,7 +308,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -349,7 +349,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM64"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathARM64"; }
 
  private:
   // The class this slow path will load.
@@ -363,7 +363,7 @@
   explicit LoadStringSlowPathARM64(HLoadString* instruction)
       : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -384,7 +384,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
@@ -394,7 +394,7 @@
  public:
   explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -408,9 +408,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
@@ -421,7 +421,7 @@
   SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeARM64(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -445,7 +445,7 @@
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathARM64"; }
 
  private:
   // If not null, the block to branch to after the suspend check.
@@ -462,7 +462,7 @@
   TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeARM64(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
 
     DCHECK(instruction_->IsCheckCast()
@@ -503,8 +503,8 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathARM64"; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -517,7 +517,7 @@
   explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
       : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -529,7 +529,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
@@ -539,7 +539,7 @@
  public:
   explicit ArraySetSlowPathARM64(HInstruction* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -570,7 +570,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
@@ -628,7 +628,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
@@ -754,7 +754,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
+  const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
 
  private:
   Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -794,7 +794,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
     DCHECK(locations->CanCall());
@@ -831,7 +831,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM64"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARM64"; }
 
  private:
   const Location out_;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 4f6a44f..2e7a20b 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -125,8 +125,8 @@
   vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
   vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }
 
-  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
-  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
 
  private:
   vixl::aarch64::Label entry_label_;
@@ -216,11 +216,11 @@
   InvokeDexCallingConventionVisitorARM64() {}
   virtual ~InvokeDexCallingConventionVisitorARM64() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type return_type) const OVERRIDE {
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type return_type) const override {
     return calling_convention.GetReturnLocation(return_type);
   }
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -232,22 +232,22 @@
  public:
   FieldAccessCallingConventionARM64() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return helpers::LocationFrom(vixl::aarch64::x1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return helpers::LocationFrom(vixl::aarch64::x0);
   }
-  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return helpers::LocationFrom(vixl::aarch64::x0);
   }
   Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
-                               bool is_instance) const OVERRIDE {
+                               bool is_instance) const override {
     return is_instance
         ? helpers::LocationFrom(vixl::aarch64::x2)
         : helpers::LocationFrom(vixl::aarch64::x1);
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return helpers::LocationFrom(vixl::aarch64::d0);
   }
 
@@ -260,7 +260,7 @@
   InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -268,7 +268,7 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -360,7 +360,7 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -368,7 +368,7 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -393,11 +393,11 @@
       : ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {}
 
  protected:
-  void PrepareForEmitNativeCode() OVERRIDE;
-  void FinishEmitNativeCode() OVERRIDE;
-  Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE;
-  void FreeScratchLocation(Location loc) OVERRIDE;
-  void EmitMove(size_t index) OVERRIDE;
+  void PrepareForEmitNativeCode() override;
+  void FinishEmitNativeCode() override;
+  Location AllocateScratchLocationFor(Location::Kind kind) override;
+  void FreeScratchLocation(Location loc) override;
+  void EmitMove(size_t index) override;
 
  private:
   Arm64Assembler* GetAssembler() const;
@@ -418,39 +418,39 @@
                      OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorARM64() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
 
   vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
   vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;
 
-  void Bind(HBasicBlock* block) OVERRIDE;
+  void Bind(HBasicBlock* block) override;
 
   vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
     block = FirstNonEmptyBlock(block);
     return &(block_labels_[block->GetBlockId()]);
   }
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return kArm64WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 2 * kArm64WordSize   // 16 bytes == 2 arm64 words for each spill
         : 1 * kArm64WordSize;  //  8 bytes == 1 arm64 words for each spill
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
     DCHECK(block_entry_label->IsBound());
     return block_entry_label->GetLocation();
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
-  Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
-  const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+  Arm64Assembler* GetAssembler() override { return &assembler_; }
+  const Arm64Assembler& GetAssembler() const override { return assembler_; }
   vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
 
   // Emit a write barrier.
@@ -462,12 +462,12 @@
 
   // Register allocation.
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
   // The number of registers that can be allocated. The register allocator may
   // decide to reserve and not use a few of them.
@@ -479,35 +479,35 @@
   static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
   static constexpr int kNumberOfAllocatableRegisterPairs = 0;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kArm64;
   }
 
   const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const;
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_.resize(GetGraph()->GetBlocks().size());
   }
 
   // We want to use the STP and LDP instructions to spill and restore registers for slow paths.
   // These instructions can only encode offsets that are multiples of the register size accessed.
-  uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; }
+  uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; }
 
   JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
     jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
     return jump_tables_.back().get();
   }
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
   // Code generation helpers.
   void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
   void Load(DataType::Type type,
             vixl::aarch64::CPURegister dst,
@@ -529,7 +529,7 @@
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -537,35 +537,35 @@
                                            HInstruction* instruction,
                                            SlowPathCode* slow_path);
 
-  ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+  ParallelMoveResolverARM64* GetMoveResolver() override { return &move_resolver_; }
 
-  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return false;
   }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                              DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+                              DataType::Type type ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL);
   }
 
@@ -652,13 +652,13 @@
   void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference);
   void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
 
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+  bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
   void EmitThunkCode(const linker::LinkerPatch& patch,
                      /*out*/ ArenaVector<uint8_t>* code,
-                     /*out*/ std::string* debug_name) OVERRIDE;
+                     /*out*/ std::string* debug_name) override;
 
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Generate a GC root reference load:
   //
@@ -765,10 +765,10 @@
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
-  void GenerateNop() OVERRIDE;
+  void GenerateNop() override;
 
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
  private:
   // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
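
Several hunks change OVERRIDE inside DECLARE_VISIT_INSTRUCTION, the X-macro that stamps out one Visit declaration per concrete instruction. A small sketch of that pattern with a hypothetical two-entry instruction list and toy visitor names, showing that override expands inside a macro exactly as it does written out:

struct HAdd {};
struct HSub {};

// One macro application per instruction kind (hypothetical list).
#define FOR_EACH_TOY_INSTRUCTION(M) \
  M(Add)                            \
  M(Sub)

class ToyGraphVisitor {
 public:
  virtual ~ToyGraphVisitor() {}
#define DECLARE_VISIT_INSTRUCTION(name) \
  virtual void Visit##name(H##name* instr) { (void)instr; }
  FOR_EACH_TOY_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
};

class ToyLocationsBuilder final : public ToyGraphVisitor {
 public:
  // override participates in the expansion just like any other token.
#define DECLARE_VISIT_INSTRUCTION(name) \
  void Visit##name(H##name* instr) override { (void)instr; }
  FOR_EACH_TOY_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
};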
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 8c5eafd..3580975 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -383,7 +383,7 @@
  public:
   explicit NullCheckSlowPathARMVIXL(HNullCheck* instruction) : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -397,9 +397,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARMVIXL);
@@ -410,16 +410,16 @@
   explicit DivZeroCheckSlowPathARMVIXL(HDivZeroCheck* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARMVIXL);
@@ -430,7 +430,7 @@
   SuspendCheckSlowPathARMVIXL(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeARMVIXL(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     arm_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
@@ -451,7 +451,7 @@
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathARMVIXL"; }
 
  private:
   // If not null, the block to branch to after the suspend check.
@@ -468,7 +468,7 @@
   explicit BoundsCheckSlowPathARMVIXL(HBoundsCheck* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
 
@@ -495,9 +495,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARMVIXL);
@@ -511,7 +511,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -549,7 +549,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathARMVIXL"; }
 
  private:
   // The class this slow path will load.
@@ -563,7 +563,7 @@
   explicit LoadStringSlowPathARMVIXL(HLoadString* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     DCHECK(instruction_->IsLoadString());
     DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
     LocationSummary* locations = instruction_->GetLocations();
@@ -585,7 +585,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARMVIXL);
@@ -596,7 +596,7 @@
   TypeCheckSlowPathARMVIXL(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeARMVIXL(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -640,9 +640,9 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathARMVIXL"; }
 
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -655,7 +655,7 @@
   explicit DeoptimizationSlowPathARMVIXL(HDeoptimize* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
         LocationSummary* locations = instruction_->GetLocations();
@@ -668,7 +668,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARMVIXL);
@@ -678,7 +678,7 @@
  public:
   explicit ArraySetSlowPathARMVIXL(HInstruction* instruction) : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -709,7 +709,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARMVIXL);
@@ -744,7 +744,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     vixl32::Register reg_out = RegisterFrom(out_);
@@ -868,7 +868,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierForHeapReferenceSlowPathARMVIXL";
   }
 
@@ -910,7 +910,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     vixl32::Register reg_out = RegisterFrom(out_);
     DCHECK(locations->CanCall());
@@ -936,7 +936,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARMVIXL"; }
 
  private:
   const Location out_;
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index cb131a7..33502d4 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -178,9 +178,9 @@
   InvokeDexCallingConventionVisitorARMVIXL() {}
   virtual ~InvokeDexCallingConventionVisitorARMVIXL() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConventionARMVIXL calling_convention;
@@ -193,25 +193,25 @@
  public:
   FieldAccessCallingConventionARMVIXL() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return helpers::LocationFrom(vixl::aarch32::r1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return helpers::LocationFrom(vixl::aarch32::r0);
   }
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
         ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
         : helpers::LocationFrom(vixl::aarch32::r0);
   }
-  Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
     return DataType::Is64BitType(type)
         ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
         : (is_instance
             ? helpers::LocationFrom(vixl::aarch32::r2)
             : helpers::LocationFrom(vixl::aarch32::r1));
   }
-  Location GetFpuLocation(DataType::Type type) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
         ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
         : helpers::LocationFrom(vixl::aarch32::s0);
@@ -229,8 +229,8 @@
   vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
   vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }
 
-  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
-  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
 
  private:
   vixl::aarch32::Label entry_label_;
@@ -244,10 +244,10 @@
   ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   ArmVIXLAssembler* GetAssembler() const;
 
@@ -266,7 +266,7 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -274,7 +274,7 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -304,7 +304,7 @@
   InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -312,7 +312,7 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -432,48 +432,48 @@
                        OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorARMVIXL() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
-  void Bind(HBasicBlock* block) OVERRIDE;
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
+  void Bind(HBasicBlock* block) override;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return static_cast<size_t>(kArmPointerSize);
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }
+  size_t GetFloatingPointSpillSlotSize() const override { return vixl::aarch32::kRegSizeInBytes; }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
 
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
 
-  ArmVIXLAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+  ArmVIXLAssembler* GetAssembler() override { return &assembler_; }
 
-  const ArmVIXLAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+  const ArmVIXLAssembler& GetAssembler() const override { return assembler_; }
 
   ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     vixl::aarch32::Label* block_entry_label = GetLabelOf(block);
     DCHECK(block_entry_label->IsBound());
     return block_entry_label->GetLocation();
   }
 
   void FixJumpTables();
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
-  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }
+  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
+  InstructionSet GetInstructionSet() const override { return InstructionSet::kThumb2; }
 
   const ArmInstructionSetFeatures& GetInstructionSetFeatures() const;
 
@@ -495,7 +495,7 @@
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -519,42 +519,42 @@
 
   vixl32::Label* GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label);
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_.resize(GetGraph()->GetBlocks().size());
   }
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
-  bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type) const override {
     return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64;
   }
 
-  void ComputeSpillMask() OVERRIDE;
+  void ComputeSpillMask() override;
 
   vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
-  void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+  void MoveFromReturnRegister(Location trg, DataType::Type type) override;
 
   // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
   // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
@@ -604,13 +604,13 @@
   void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference);
   void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
 
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+  bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
   void EmitThunkCode(const linker::LinkerPatch& patch,
                      /*out*/ ArenaVector<uint8_t>* code,
-                     /*out*/ std::string* debug_name) OVERRIDE;
+                     /*out*/ std::string* debug_name) override;
 
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Generate a GC root reference load:
   //
@@ -722,10 +722,10 @@
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
-  void GenerateNop() OVERRIDE;
+  void GenerateNop() override;
 
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
     jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index aed334b..d74a7a7 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -176,7 +176,7 @@
  public:
   explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
@@ -201,9 +201,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
@@ -213,16 +213,16 @@
  public:
   explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
     mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
@@ -236,7 +236,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -280,7 +280,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathMIPS"; }
 
  private:
   // The class this slow path will load.
@@ -294,7 +294,7 @@
   explicit LoadStringSlowPathMIPS(HLoadString* instruction)
       : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     DCHECK(instruction_->IsLoadString());
     DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
     LocationSummary* locations = instruction_->GetLocations();
@@ -318,7 +318,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
@@ -328,7 +328,7 @@
  public:
   explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -342,9 +342,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
@@ -355,7 +355,7 @@
   SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeMIPS(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
@@ -375,7 +375,7 @@
     return &return_label_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS"; }
 
   HBasicBlock* GetSuccessor() const {
     return successor_;
@@ -396,7 +396,7 @@
   explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
@@ -435,9 +435,9 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathMIPS"; }
 
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -450,7 +450,7 @@
   explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
     : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -462,7 +462,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
@@ -472,7 +472,7 @@
  public:
   explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -503,7 +503,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS);
@@ -533,9 +533,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -627,11 +627,11 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS";
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -798,7 +798,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     Register reg_out = out_.AsRegister<Register>();
@@ -922,7 +922,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
 
  private:
   Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -965,7 +965,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register reg_out = out_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -995,7 +995,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS"; }
 
  private:
   const Location out_;
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 4830ac9..bf95893 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -81,9 +81,9 @@
   InvokeDexCallingConventionVisitorMIPS() {}
   virtual ~InvokeDexCallingConventionVisitorMIPS() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -110,23 +110,23 @@
  public:
   FieldAccessCallingConventionMIPS() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return Location::RegisterLocation(A1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return Location::RegisterLocation(A0);
   }
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
         ? Location::RegisterPairLocation(V0, V1)
         : Location::RegisterLocation(V0);
   }
-  Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
     return DataType::Is64BitType(type)
         ? Location::RegisterPairLocation(A2, A3)
         : (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::FpuRegisterLocation(F0);
   }
 
@@ -139,10 +139,10 @@
   ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   void Exchange(int index1, int index2, bool double_slot);
   void ExchangeQuadSlots(int index1, int index2);
@@ -176,14 +176,14 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -210,14 +210,14 @@
   InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -374,35 +374,35 @@
                     OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorMIPS() {}
 
-  void ComputeSpillMask() OVERRIDE;
-  bool HasAllocatedCalleeSaveRegisters() const OVERRIDE;
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
+  void ComputeSpillMask() override;
+  bool HasAllocatedCalleeSaveRegisters() const override;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
 
-  void Bind(HBasicBlock* block) OVERRIDE;
+  void Bind(HBasicBlock* block) override;
 
   void MoveConstant(Location location, HConstant* c);
 
-  size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }
+  size_t GetWordSize() const override { return kMipsWordSize; }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 2 * kMipsDoublewordSize   // 16 bytes for each spill.
         : 1 * kMipsDoublewordSize;  //  8 bytes for each spill.
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     return assembler_.GetLabelLocation(GetLabelOf(block));
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
-  MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
-  const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+  MipsAssembler* GetAssembler() override { return &assembler_; }
+  const MipsAssembler& GetAssembler() const override { return assembler_; }
 
   // Emit linker patches.
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
@@ -493,20 +493,20 @@
 
   // Register allocation.
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
   void ClobberRA() {
     clobbered_ra_ = true;
   }
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }
+  InstructionSet GetInstructionSet() const override { return InstructionSet::kMips; }
 
   const MipsInstructionSetFeatures& GetInstructionSetFeatures() const;
 
@@ -514,25 +514,25 @@
     return CommonGetLabelOf<MipsLabel>(block_labels_, block);
   }
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_ = CommonInitializeLabels<MipsLabel>();
   }
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
   // Code generation helpers.
 
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
 
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void MoveConstant(Location destination, int32_t value) override;
 
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
   // Generate code to invoke a runtime entry point.
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -543,41 +543,41 @@
 
   void GenerateInvokeRuntime(int32_t entry_point_offset, bool direct);
 
-  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
 
-  bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type) const override {
     return type == DataType::Type::kInt64;
   }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                              DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+                              DataType::Type type ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
   }
 
-  void GenerateNop() OVERRIDE;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateNop() override;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
   // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 72318e9..7c89808 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -128,7 +128,7 @@
  public:
   explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -153,9 +153,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
@@ -166,16 +166,16 @@
   explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction)
       : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
     mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
@@ -189,7 +189,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -233,7 +233,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathMIPS64"; }
 
  private:
   // The class this slow path will load.
@@ -247,7 +247,7 @@
   explicit LoadStringSlowPathMIPS64(HLoadString* instruction)
       : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     DCHECK(instruction_->IsLoadString());
     DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
     LocationSummary* locations = instruction_->GetLocations();
@@ -274,7 +274,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
@@ -284,7 +284,7 @@
  public:
   explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -298,9 +298,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
@@ -311,7 +311,7 @@
   SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeMIPS64(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -331,7 +331,7 @@
     return &return_label_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS64"; }
 
   HBasicBlock* GetSuccessor() const {
     return successor_;
@@ -352,7 +352,7 @@
   explicit TypeCheckSlowPathMIPS64(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeMIPS64(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
 
     uint32_t dex_pc = instruction_->GetDexPc();
@@ -392,9 +392,9 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathMIPS64"; }
 
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -407,7 +407,7 @@
   explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
     : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
       LocationSummary* locations = instruction_->GetLocations();
@@ -419,7 +419,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
@@ -429,7 +429,7 @@
  public:
   explicit ArraySetSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -460,7 +460,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS64);
@@ -490,9 +490,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
     DCHECK(locations->CanCall());
@@ -583,11 +583,11 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS64";
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
     DCHECK(locations->CanCall());
@@ -744,7 +744,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
@@ -864,7 +864,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierForHeapReferenceSlowPathMIPS64";
   }
 
@@ -909,7 +909,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
     GpuRegister reg_out = out_.AsRegister<GpuRegister>();
@@ -938,7 +938,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS64"; }
 
  private:
   const Location out_;
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index fc0908b..ddc154d 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -79,9 +79,9 @@
   InvokeDexCallingConventionVisitorMIPS64() {}
   virtual ~InvokeDexCallingConventionVisitorMIPS64() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -108,22 +108,22 @@
  public:
   FieldAccessCallingConventionMIPS64() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return Location::RegisterLocation(A1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return Location::RegisterLocation(A0);
   }
-  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::RegisterLocation(V0);
   }
   Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
-                               bool is_instance) const OVERRIDE {
+                               bool is_instance) const override {
     return is_instance
         ? Location::RegisterLocation(A2)
         : Location::RegisterLocation(A1);
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::FpuRegisterLocation(F0);
   }
 
@@ -136,10 +136,10 @@
   ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   void Exchange(int index1, int index2, bool double_slot);
   void ExchangeQuadSlots(int index1, int index2);
@@ -173,14 +173,14 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -207,14 +207,14 @@
   InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -356,31 +356,31 @@
                       OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorMIPS64() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
 
-  void Bind(HBasicBlock* block) OVERRIDE;
+  void Bind(HBasicBlock* block) override;
 
-  size_t GetWordSize() const OVERRIDE { return kMips64DoublewordSize; }
+  size_t GetWordSize() const override { return kMips64DoublewordSize; }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 2 * kMips64DoublewordSize   // 16 bytes for each spill.
         : 1 * kMips64DoublewordSize;  //  8 bytes for each spill.
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     return assembler_.GetLabelLocation(GetLabelOf(block));
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
-  Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
-  const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+  Mips64Assembler* GetAssembler() override { return &assembler_; }
+  const Mips64Assembler& GetAssembler() const override { return assembler_; }
 
   // Emit linker patches.
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
@@ -471,17 +471,17 @@
 
   // Register allocation.
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; }
+  InstructionSet GetInstructionSet() const override { return InstructionSet::kMips64; }
 
   const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const;
 
@@ -489,22 +489,22 @@
     return CommonGetLabelOf<Mips64Label>(block_labels_, block);
   }
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_ = CommonInitializeLabels<Mips64Label>();
   }
 
   // We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
   // at aligned locations.
-  uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return kMips64DoublewordSize; }
+  uint32_t GetPreferredSlotsAlignment() const override { return kMips64DoublewordSize; }
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
   // Code generation helpers.
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
 
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void MoveConstant(Location destination, int32_t value) override;
 
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
 
   void SwapLocations(Location loc1, Location loc2, DataType::Type type);
@@ -513,7 +513,7 @@
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -523,39 +523,39 @@
 
   void GenerateInvokeRuntime(int32_t entry_point_offset);
 
-  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
 
-  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; }
+  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override { return false; }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                              DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+                              DataType::Type type ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
   }
 
-  void GenerateNop() OVERRIDE;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateNop() override;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
   // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index df00ec7..6a27081 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -72,7 +72,7 @@
  public:
   explicit NullCheckSlowPathX86(HNullCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -86,9 +86,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
@@ -98,16 +98,16 @@
  public:
   explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86);
@@ -118,7 +118,7 @@
   DivRemMinusOneSlowPathX86(HInstruction* instruction, Register reg, bool is_div)
       : SlowPathCode(instruction), reg_(reg), is_div_(is_div) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     __ Bind(GetEntryLabel());
     if (is_div_) {
       __ negl(reg_);
@@ -128,7 +128,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86"; }
+  const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86"; }
 
  private:
   Register reg_;
@@ -140,7 +140,7 @@
  public:
   explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
@@ -187,9 +187,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
@@ -200,7 +200,7 @@
   SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCode(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
@@ -224,7 +224,7 @@
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathX86"; }
 
  private:
   HBasicBlock* const successor_;
@@ -237,7 +237,7 @@
  public:
   explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -256,7 +256,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
@@ -270,7 +270,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -308,7 +308,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathX86"; }
 
  private:
   // The class this slow path will load.
@@ -322,7 +322,7 @@
   TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal)
       : SlowPathCode(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -375,8 +375,8 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86"; }
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathX86"; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -389,7 +389,7 @@
   explicit DeoptimizationSlowPathX86(HDeoptimize* instruction)
     : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -402,7 +402,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
@@ -412,7 +412,7 @@
  public:
   explicit ArraySetSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -443,7 +443,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
@@ -471,9 +471,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -558,9 +558,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -724,7 +724,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     Register reg_out = out_.AsRegister<Register>();
@@ -843,7 +843,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathX86"; }
 
  private:
   Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -883,7 +883,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register reg_out = out_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -909,7 +909,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86"; }
 
  private:
   const Location out_;
@@ -8100,7 +8100,7 @@
   HX86ComputeBaseMethodAddress* base_method_address_;
 
  private:
-  void Process(const MemoryRegion& region, int pos) OVERRIDE {
+  void Process(const MemoryRegion& region, int pos) override {
     // Patch the correct offset for the instruction.  The place to patch is the
     // last 4 bytes of the instruction.
     // The value to patch is the distance from the offset in the constant area
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index cb58e92..6154771 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -83,9 +83,9 @@
   InvokeDexCallingConventionVisitorX86() {}
   virtual ~InvokeDexCallingConventionVisitorX86() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -97,18 +97,18 @@
  public:
   FieldAccessCallingConventionX86() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return Location::RegisterLocation(ECX);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return Location::RegisterLocation(EAX);
   }
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
         ? Location::RegisterPairLocation(EAX, EDX)
         : Location::RegisterLocation(EAX);
   }
-  Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
     return DataType::Is64BitType(type)
         ? (is_instance
             ? Location::RegisterPairLocation(EDX, EBX)
@@ -117,7 +117,7 @@
             ? Location::RegisterLocation(EDX)
             : Location::RegisterLocation(ECX));
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::FpuRegisterLocation(XMM0);
   }
 
@@ -130,10 +130,10 @@
   ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   X86Assembler* GetAssembler() const;
 
@@ -155,14 +155,14 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -186,14 +186,14 @@
   InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -320,23 +320,23 @@
                    OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorX86() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
-  void Bind(HBasicBlock* block) OVERRIDE;
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
+  void Bind(HBasicBlock* block) override;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
   // Generate code to invoke a runtime entry point.
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -346,46 +346,46 @@
 
   void GenerateInvokeRuntime(int32_t entry_point_offset);
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return kX86WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 4 * kX86WordSize   // 16 bytes == 4 words for each spill
         : 2 * kX86WordSize;  //  8 bytes == 2 words for each spill
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE {
+  HGraphVisitor* GetLocationBuilder() override {
     return &location_builder_;
   }
 
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+  HGraphVisitor* GetInstructionVisitor() override {
     return &instruction_visitor_;
   }
 
-  X86Assembler* GetAssembler() OVERRIDE {
+  X86Assembler* GetAssembler() override {
     return &assembler_;
   }
 
-  const X86Assembler& GetAssembler() const OVERRIDE {
+  const X86Assembler& GetAssembler() const override {
     return assembler_;
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     return GetLabelOf(block)->Position();
   }
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  ParallelMoveResolverX86* GetMoveResolver() OVERRIDE {
+  ParallelMoveResolverX86* GetMoveResolver() override {
     return &move_resolver_;
   }
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kX86;
   }
 
@@ -399,25 +399,25 @@
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   // Generate a call to a static or direct method.
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   // Generate a call to a virtual method.
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
                                      uint32_t intrinsic_data);
@@ -442,16 +442,16 @@
                               dex::TypeIndex type_index,
                               Handle<mirror::Class> handle);
 
-  void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+  void MoveFromReturnRegister(Location trg, DataType::Type type) override;
 
   // Emit linker patches.
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
 
   void PatchJitRootUse(uint8_t* code,
                        const uint8_t* roots_data,
                        const PatchInfo<Label>& info,
                        uint64_t index_in_table) const;
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Emit a write barrier.
   void MarkGCCard(Register temp,
@@ -466,15 +466,15 @@
     return CommonGetLabelOf<Label>(block_labels_, block);
   }
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_ = CommonInitializeLabels<Label>();
   }
 
-  bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type) const override {
     return type == DataType::Type::kInt64;
   }
 
-  bool ShouldSplitLongMoves() const OVERRIDE { return true; }
+  bool ShouldSplitLongMoves() const override { return true; }
 
   Label* GetFrameEntryLabel() { return &frame_entry_label_; }
 
@@ -513,7 +513,7 @@
 
   Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
@@ -609,9 +609,9 @@
     }
   }
 
-  void GenerateNop() OVERRIDE;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateNop() override;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.
   // The correct value will be inserted when processing Assembler fixups.
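Note: the hunks above and below make the same mechanical change across the optimizing compiler: every use of ART's OVERRIDE macro on a virtual member function is replaced with the standard C++11 override specifier. A minimal sketch of what the rename amounts to, assuming the macro was simply defined as the keyword (the actual definition lives in ART's base macros header and is not part of this diff):

    // Assumption: the retired project macro expanded to the keyword, e.g.
    //   #define OVERRIDE override
    struct CodeGeneratorBase {
      virtual ~CodeGeneratorBase() {}
      virtual size_t GetWordSize() const = 0;
    };
    struct CodeGeneratorDemo : CodeGeneratorBase {
      // Before: size_t GetWordSize() const OVERRIDE { return 4u; }
      // After:  the contextual keyword is written directly; the compiler still
      //         rejects the declaration if it does not override a base method.
      size_t GetWordSize() const override { return 4u; }
    };

Under that assumption both spellings compile to the same declaration; the patch only removes the indirection through the macro.
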
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ae2a000..489652b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -71,7 +71,7 @@
  public:
   explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -85,9 +85,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
@@ -97,16 +97,16 @@
  public:
   explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     x86_64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64);
@@ -117,7 +117,7 @@
   DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, DataType::Type type, bool is_div)
       : SlowPathCode(at), cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     __ Bind(GetEntryLabel());
     if (type_ == DataType::Type::kInt32) {
       if (is_div_) {
@@ -137,7 +137,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86_64"; }
+  const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86_64"; }
 
  private:
   const CpuRegister cpu_reg_;
@@ -151,7 +151,7 @@
   SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCode(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -175,7 +175,7 @@
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathX86_64"; }
 
  private:
   HBasicBlock* const successor_;
@@ -189,7 +189,7 @@
   explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction)
     : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -236,9 +236,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
@@ -252,7 +252,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -291,7 +291,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86_64"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathX86_64"; }
 
  private:
   // The class this slow path will load.
@@ -304,7 +304,7 @@
  public:
   explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -326,7 +326,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
@@ -337,7 +337,7 @@
   TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal)
       : SlowPathCode(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
@@ -385,9 +385,9 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathX86_64"; }
 
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -400,7 +400,7 @@
   explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction)
       : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -413,7 +413,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
@@ -423,7 +423,7 @@
  public:
   explicit ArraySetSlowPathX86_64(HInstruction* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -454,7 +454,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
@@ -482,9 +482,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86_64"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86_64"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
     Register ref_reg = ref_cpu_reg.AsRegister();
@@ -573,11 +573,11 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierMarkAndUpdateFieldSlowPathX86_64";
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
     Register ref_reg = ref_cpu_reg.AsRegister();
@@ -745,7 +745,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     CpuRegister reg_out = out_.AsRegister<CpuRegister>();
@@ -864,7 +864,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierForHeapReferenceSlowPathX86_64";
   }
 
@@ -906,7 +906,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
@@ -931,7 +931,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86_64"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86_64"; }
 
  private:
   const Location out_;
@@ -7395,7 +7395,7 @@
   CodeGeneratorX86_64* codegen_;
 
  private:
-  void Process(const MemoryRegion& region, int pos) OVERRIDE {
+  void Process(const MemoryRegion& region, int pos) override {
     // Patch the correct offset for the instruction.  We use the address of the
     // 'next' instruction, which is 'pos' (patch the 4 bytes before).
     int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 5ba7f9c..f77a5c8 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -83,22 +83,22 @@
  public:
   FieldAccessCallingConventionX86_64() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return Location::RegisterLocation(RSI);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return Location::RegisterLocation(RDI);
   }
-  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::RegisterLocation(RAX);
   }
   Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, bool is_instance)
-      const OVERRIDE {
+      const override {
     return is_instance
         ? Location::RegisterLocation(RDX)
         : Location::RegisterLocation(RSI);
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::FpuRegisterLocation(XMM0);
   }
 
@@ -112,9 +112,9 @@
   InvokeDexCallingConventionVisitorX86_64() {}
   virtual ~InvokeDexCallingConventionVisitorX86_64() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -129,10 +129,10 @@
   ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   X86_64Assembler* GetAssembler() const;
 
@@ -157,14 +157,14 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -188,14 +188,14 @@
   InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -300,23 +300,23 @@
                   OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorX86_64() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
-  void Bind(HBasicBlock* block) OVERRIDE;
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
+  void Bind(HBasicBlock* block) override;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
   // Generate code to invoke a runtime entry point.
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -326,46 +326,46 @@
 
   void GenerateInvokeRuntime(int32_t entry_point_offset);
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return kX86_64WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 2 * kX86_64WordSize   // 16 bytes == 2 x86_64 words for each spill
         : 1 * kX86_64WordSize;  //  8 bytes == 1 x86_64 words for each spill
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE {
+  HGraphVisitor* GetLocationBuilder() override {
     return &location_builder_;
   }
 
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+  HGraphVisitor* GetInstructionVisitor() override {
     return &instruction_visitor_;
   }
 
-  X86_64Assembler* GetAssembler() OVERRIDE {
+  X86_64Assembler* GetAssembler() override {
     return &assembler_;
   }
 
-  const X86_64Assembler& GetAssembler() const OVERRIDE {
+  const X86_64Assembler& GetAssembler() const override {
     return assembler_;
   }
 
-  ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
+  ParallelMoveResolverX86_64* GetMoveResolver() override {
     return &move_resolver_;
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     return GetLabelOf(block)->Position();
   }
 
-  void SetupBlockedRegisters() const OVERRIDE;
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void SetupBlockedRegisters() const override;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
+  void Finalize(CodeAllocator* allocator) override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kX86_64;
   }
 
@@ -387,34 +387,34 @@
     return CommonGetLabelOf<Label>(block_labels_, block);
   }
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_ = CommonInitializeLabels<Label>();
   }
 
-  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return false;
   }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void RecordBootImageIntrinsicPatch(uint32_t intrinsic_data);
   void RecordBootImageRelRoPatch(uint32_t boot_image_offset);
@@ -434,14 +434,14 @@
   void LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference);
   void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
 
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
 
   void PatchJitRootUse(uint8_t* code,
                        const uint8_t* roots_data,
                        const PatchInfo<Label>& info,
                        uint64_t index_in_table) const;
 
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
@@ -565,7 +565,7 @@
   // Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
   void Store64BitValueToStack(Location dest, int64_t value);
 
-  void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+  void MoveFromReturnRegister(Location trg, DataType::Type type) override;
 
   // Assign a 64 bit constant to an address.
   void MoveInt64ToAddress(const Address& addr_low,
@@ -585,9 +585,9 @@
     }
   }
 
-  void GenerateNop() OVERRIDE;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateNop() override;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.
   // We will fix this up in the linker later to have the right value.
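Note: the DECLARE_VISIT_INSTRUCTION hunks in the two visitor classes above change the declarations that the FOR_EACH_CONCRETE_INSTRUCTION_* X-macros stamp out, so every generated Visit method now carries override. A rough sketch of the expansion, using a shortened, hypothetical instruction list in place of the real one (the real Visit definitions live in the corresponding .cc file, so only declarations are generated here):

    // Hypothetical two-entry stand-in for FOR_EACH_CONCRETE_INSTRUCTION_COMMON.
    class HAdd;
    class HArrayGet;
    class HDemoVisitorBase {
     public:
      virtual ~HDemoVisitorBase() {}
      virtual void VisitAdd(HAdd* instr) {}
      virtual void VisitArrayGet(HArrayGet* instr) {}
    };

    #define FOR_EACH_DEMO_INSTRUCTION(M) \
      M(Add, Instruction)                \
      M(ArrayGet, Instruction)

    // Same shape as the macro in the diff above.
    #define DECLARE_VISIT_INSTRUCTION(name, super) \
      void Visit##name(H##name* instr) override;

    class DemoVisitor : public HDemoVisitorBase {
     public:
      // Expands inside the class body to:
      //   void VisitAdd(HAdd* instr) override;
      //   void VisitArrayGet(HArrayGet* instr) override;
      FOR_EACH_DEMO_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
    };
    #undef DECLARE_VISIT_INSTRUCTION

Because the expansion happens in one place, the single-line macro edit updates every concrete instruction visitor for the back end at once.
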
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h
index 5db0b6d..8eb3a52 100644
--- a/compiler/optimizing/code_sinking.h
+++ b/compiler/optimizing/code_sinking.h
@@ -33,7 +33,7 @@
               const char* name = kCodeSinkingPassName)
       : HOptimization(graph, name, stats) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kCodeSinkingPassName = "code_sinking";
 
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 8c062f0..0289e9c 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -101,7 +101,7 @@
     AddAllocatedRegister(Location::RegisterLocation(arm::R7));
   }
 
-  void SetupBlockedRegisters() const OVERRIDE {
+  void SetupBlockedRegisters() const override {
     arm::CodeGeneratorARMVIXL::SetupBlockedRegisters();
     blocked_core_registers_[arm::R4] = true;
     blocked_core_registers_[arm::R6] = false;
@@ -109,7 +109,7 @@
   }
 
   void MaybeGenerateMarkingRegisterCheck(int code ATTRIBUTE_UNUSED,
-                                         Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
+                                         Location temp_loc ATTRIBUTE_UNUSED) override {
     // When turned on, the marking register checks in
     // CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck expects the
     // Thread Register and the Marking Register to be set to
@@ -141,7 +141,7 @@
       : arm64::CodeGeneratorARM64(graph, compiler_options) {}
 
   void MaybeGenerateMarkingRegisterCheck(int codem ATTRIBUTE_UNUSED,
-                                         Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
+                                         Location temp_loc ATTRIBUTE_UNUSED) override {
     // When turned on, the marking register checks in
     // CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck expect the
     // Thread Register and the Marking Register to be set to
@@ -161,7 +161,7 @@
     AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
   }
 
-  void SetupBlockedRegisters() const OVERRIDE {
+  void SetupBlockedRegisters() const override {
     x86::CodeGeneratorX86::SetupBlockedRegisters();
     // ebx is a callee-save register in C, but caller-save for ART.
     blocked_core_registers_[x86::EBX] = true;
@@ -183,7 +183,7 @@
   }
 
   size_t GetSize() const { return size_; }
-  ArrayRef<const uint8_t> GetMemory() const OVERRIDE {
+  ArrayRef<const uint8_t> GetMemory() const override {
     return ArrayRef<const uint8_t>(memory_.get(), size_);
   }
 
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index bb78c23..09e7cab 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -26,13 +26,13 @@
       : HGraphDelegateVisitor(graph) {}
 
  private:
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+  void VisitBasicBlock(HBasicBlock* block) override;
 
-  void VisitUnaryOperation(HUnaryOperation* inst) OVERRIDE;
-  void VisitBinaryOperation(HBinaryOperation* inst) OVERRIDE;
+  void VisitUnaryOperation(HUnaryOperation* inst) override;
+  void VisitBinaryOperation(HBinaryOperation* inst) override;
 
-  void VisitTypeConversion(HTypeConversion* inst) OVERRIDE;
-  void VisitDivZeroCheck(HDivZeroCheck* inst) OVERRIDE;
+  void VisitTypeConversion(HTypeConversion* inst) override;
+  void VisitDivZeroCheck(HDivZeroCheck* inst) override;
 
   DISALLOW_COPY_AND_ASSIGN(HConstantFoldingVisitor);
 };
@@ -47,24 +47,24 @@
  private:
   void VisitShift(HBinaryOperation* shift);
 
-  void VisitEqual(HEqual* instruction) OVERRIDE;
-  void VisitNotEqual(HNotEqual* instruction) OVERRIDE;
+  void VisitEqual(HEqual* instruction) override;
+  void VisitNotEqual(HNotEqual* instruction) override;
 
-  void VisitAbove(HAbove* instruction) OVERRIDE;
-  void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
-  void VisitBelow(HBelow* instruction) OVERRIDE;
-  void VisitBelowOrEqual(HBelowOrEqual* instruction) OVERRIDE;
+  void VisitAbove(HAbove* instruction) override;
+  void VisitAboveOrEqual(HAboveOrEqual* instruction) override;
+  void VisitBelow(HBelow* instruction) override;
+  void VisitBelowOrEqual(HBelowOrEqual* instruction) override;
 
-  void VisitAnd(HAnd* instruction) OVERRIDE;
-  void VisitCompare(HCompare* instruction) OVERRIDE;
-  void VisitMul(HMul* instruction) OVERRIDE;
-  void VisitOr(HOr* instruction) OVERRIDE;
-  void VisitRem(HRem* instruction) OVERRIDE;
-  void VisitShl(HShl* instruction) OVERRIDE;
-  void VisitShr(HShr* instruction) OVERRIDE;
-  void VisitSub(HSub* instruction) OVERRIDE;
-  void VisitUShr(HUShr* instruction) OVERRIDE;
-  void VisitXor(HXor* instruction) OVERRIDE;
+  void VisitAnd(HAnd* instruction) override;
+  void VisitCompare(HCompare* instruction) override;
+  void VisitMul(HMul* instruction) override;
+  void VisitOr(HOr* instruction) override;
+  void VisitRem(HRem* instruction) override;
+  void VisitShl(HShl* instruction) override;
+  void VisitShr(HShr* instruction) override;
+  void VisitSub(HSub* instruction) override;
+  void VisitUShr(HUShr* instruction) override;
+  void VisitXor(HXor* instruction) override;
 };
 
 
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index f4dbc80..72bd95b 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -41,7 +41,7 @@
  public:
   HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kConstantFoldingPassName = "constant_folding";
 
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 54bff22..3cb8bf2 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -34,7 +34,7 @@
         candidate_fence_targets_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
         stats_(stats) {}
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     // Visit all instructions in block.
     HGraphVisitor::VisitBasicBlock(block);
 
@@ -43,7 +43,7 @@
     MergeCandidateFences();
   }
 
-  void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE {
+  void VisitConstructorFence(HConstructorFence* constructor_fence) override {
     candidate_fences_.push_back(constructor_fence);
 
     for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) {
@@ -51,29 +51,29 @@
     }
   }
 
-  void VisitBoundType(HBoundType* bound_type) OVERRIDE {
+  void VisitBoundType(HBoundType* bound_type) override {
     VisitAlias(bound_type);
   }
 
-  void VisitNullCheck(HNullCheck* null_check) OVERRIDE {
+  void VisitNullCheck(HNullCheck* null_check) override {
     VisitAlias(null_check);
   }
 
-  void VisitSelect(HSelect* select) OVERRIDE {
+  void VisitSelect(HSelect* select) override {
     VisitAlias(select);
   }
 
-  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
     HInstruction* value = instruction->InputAt(1);
     VisitSetLocation(instruction, value);
   }
 
-  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
     HInstruction* value = instruction->InputAt(1);
     VisitSetLocation(instruction, value);
   }
 
-  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+  void VisitArraySet(HArraySet* instruction) override {
     HInstruction* value = instruction->InputAt(2);
     VisitSetLocation(instruction, value);
   }
@@ -83,46 +83,46 @@
     MergeCandidateFences();
   }
 
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+  void VisitInvokeInterface(HInvokeInterface* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+  void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+  void VisitClinitCheck(HClinitCheck* clinit) override {
     HandleInvoke(clinit);
   }
 
-  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.h b/compiler/optimizing/constructor_fence_redundancy_elimination.h
index 367d9f2..014b342 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.h
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.h
@@ -52,7 +52,7 @@
                                         const char* name = kCFREPassName)
       : HOptimization(graph, name, stats) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kCFREPassName = "constructor_fence_redundancy_elimination";
 
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 90caa53..799721a 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -32,7 +32,7 @@
   HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name)
       : HOptimization(graph, name, stats) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination";
 
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 293c1ab..63a370a 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -27,7 +27,7 @@
 
 class EmitSwapMipsTest : public OptimizingUnitTest {
  public:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     instruction_set_ = InstructionSet::kMips;
     instruction_set_features_ = MipsInstructionSetFeatures::FromCppDefines();
     OptimizingUnitTest::SetUp();
@@ -46,7 +46,7 @@
                                         GetAssemblyHeader()));
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     test_helper_.reset();
     codegen_.reset();
     graph_ = nullptr;
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 3a2bb7a..d085609 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -44,30 +44,30 @@
   // and return value pass along the observed graph sizes.
   size_t Run(bool pass_change = true, size_t last_size = 0);
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+  void VisitBasicBlock(HBasicBlock* block) override;
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE;
-  void VisitPhi(HPhi* phi) OVERRIDE;
+  void VisitInstruction(HInstruction* instruction) override;
+  void VisitPhi(HPhi* phi) override;
 
-  void VisitBinaryOperation(HBinaryOperation* op) OVERRIDE;
-  void VisitBooleanNot(HBooleanNot* instruction) OVERRIDE;
-  void VisitBoundType(HBoundType* instruction) OVERRIDE;
-  void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
-  void VisitCheckCast(HCheckCast* check) OVERRIDE;
-  void VisitCondition(HCondition* op) OVERRIDE;
-  void VisitConstant(HConstant* instruction) OVERRIDE;
-  void VisitDeoptimize(HDeoptimize* instruction) OVERRIDE;
-  void VisitIf(HIf* instruction) OVERRIDE;
-  void VisitInstanceOf(HInstanceOf* check) OVERRIDE;
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
-  void VisitLoadException(HLoadException* load) OVERRIDE;
-  void VisitNeg(HNeg* instruction) OVERRIDE;
-  void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE;
-  void VisitReturn(HReturn* ret) OVERRIDE;
-  void VisitReturnVoid(HReturnVoid* ret) OVERRIDE;
-  void VisitSelect(HSelect* instruction) OVERRIDE;
-  void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE;
-  void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
+  void VisitBinaryOperation(HBinaryOperation* op) override;
+  void VisitBooleanNot(HBooleanNot* instruction) override;
+  void VisitBoundType(HBoundType* instruction) override;
+  void VisitBoundsCheck(HBoundsCheck* check) override;
+  void VisitCheckCast(HCheckCast* check) override;
+  void VisitCondition(HCondition* op) override;
+  void VisitConstant(HConstant* instruction) override;
+  void VisitDeoptimize(HDeoptimize* instruction) override;
+  void VisitIf(HIf* instruction) override;
+  void VisitInstanceOf(HInstanceOf* check) override;
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
+  void VisitLoadException(HLoadException* load) override;
+  void VisitNeg(HNeg* instruction) override;
+  void VisitPackedSwitch(HPackedSwitch* instruction) override;
+  void VisitReturn(HReturn* ret) override;
+  void VisitReturnVoid(HReturnVoid* ret) override;
+  void VisitSelect(HSelect* instruction) override;
+  void VisitTryBoundary(HTryBoundary* try_boundary) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
 
   void CheckTypeCheckBitstringInput(HTypeCheckInstruction* check,
                                     size_t input_pos,
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index d65ad40..31db8c2 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -333,7 +333,7 @@
     return output_;
   }
 
-  void VisitParallelMove(HParallelMove* instruction) OVERRIDE {
+  void VisitParallelMove(HParallelMove* instruction) override {
     StartAttributeStream("liveness") << instruction->GetLifetimePosition();
     StringList moves;
     for (size_t i = 0, e = instruction->NumMoves(); i < e; ++i) {
@@ -346,36 +346,36 @@
     StartAttributeStream("moves") <<  moves;
   }
 
-  void VisitIntConstant(HIntConstant* instruction) OVERRIDE {
+  void VisitIntConstant(HIntConstant* instruction) override {
     StartAttributeStream() << instruction->GetValue();
   }
 
-  void VisitLongConstant(HLongConstant* instruction) OVERRIDE {
+  void VisitLongConstant(HLongConstant* instruction) override {
     StartAttributeStream() << instruction->GetValue();
   }
 
-  void VisitFloatConstant(HFloatConstant* instruction) OVERRIDE {
+  void VisitFloatConstant(HFloatConstant* instruction) override {
     StartAttributeStream() << instruction->GetValue();
   }
 
-  void VisitDoubleConstant(HDoubleConstant* instruction) OVERRIDE {
+  void VisitDoubleConstant(HDoubleConstant* instruction) override {
     StartAttributeStream() << instruction->GetValue();
   }
 
-  void VisitPhi(HPhi* phi) OVERRIDE {
+  void VisitPhi(HPhi* phi) override {
     StartAttributeStream("reg") << phi->GetRegNumber();
     StartAttributeStream("is_catch_phi") << std::boolalpha << phi->IsCatchPhi() << std::noboolalpha;
   }
 
-  void VisitMemoryBarrier(HMemoryBarrier* barrier) OVERRIDE {
+  void VisitMemoryBarrier(HMemoryBarrier* barrier) override {
     StartAttributeStream("kind") << barrier->GetBarrierKind();
   }
 
-  void VisitMonitorOperation(HMonitorOperation* monitor) OVERRIDE {
+  void VisitMonitorOperation(HMonitorOperation* monitor) override {
     StartAttributeStream("kind") << (monitor->IsEnter() ? "enter" : "exit");
   }
 
-  void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+  void VisitLoadClass(HLoadClass* load_class) override {
     StartAttributeStream("load_kind") << load_class->GetLoadKind();
     const char* descriptor = load_class->GetDexFile().GetTypeDescriptor(
         load_class->GetDexFile().GetTypeId(load_class->GetTypeIndex()));
@@ -386,19 +386,19 @@
         << load_class->NeedsAccessCheck() << std::noboolalpha;
   }
 
-  void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) OVERRIDE {
+  void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) override {
     StartAttributeStream("load_kind") << "RuntimeCall";
     StartAttributeStream("method_handle_index") << load_method_handle->GetMethodHandleIndex();
   }
 
-  void VisitLoadMethodType(HLoadMethodType* load_method_type) OVERRIDE {
+  void VisitLoadMethodType(HLoadMethodType* load_method_type) override {
     StartAttributeStream("load_kind") << "RuntimeCall";
     const DexFile& dex_file = load_method_type->GetDexFile();
     const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
     StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id);
   }
 
-  void VisitLoadString(HLoadString* load_string) OVERRIDE {
+  void VisitLoadString(HLoadString* load_string) override {
     StartAttributeStream("load_kind") << load_string->GetLoadKind();
   }
 
@@ -413,15 +413,15 @@
     }
   }
 
-  void VisitCheckCast(HCheckCast* check_cast) OVERRIDE {
+  void VisitCheckCast(HCheckCast* check_cast) override {
     HandleTypeCheckInstruction(check_cast);
   }
 
-  void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE {
+  void VisitInstanceOf(HInstanceOf* instance_of) override {
     HandleTypeCheckInstruction(instance_of);
   }
 
-  void VisitArrayLength(HArrayLength* array_length) OVERRIDE {
+  void VisitArrayLength(HArrayLength* array_length) override {
     StartAttributeStream("is_string_length") << std::boolalpha
         << array_length->IsStringLength() << std::noboolalpha;
     if (array_length->IsEmittedAtUseSite()) {
@@ -429,31 +429,31 @@
     }
   }
 
-  void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+  void VisitBoundsCheck(HBoundsCheck* bounds_check) override {
     StartAttributeStream("is_string_char_at") << std::boolalpha
         << bounds_check->IsStringCharAt() << std::noboolalpha;
   }
 
-  void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+  void VisitArrayGet(HArrayGet* array_get) override {
     StartAttributeStream("is_string_char_at") << std::boolalpha
         << array_get->IsStringCharAt() << std::noboolalpha;
   }
 
-  void VisitArraySet(HArraySet* array_set) OVERRIDE {
+  void VisitArraySet(HArraySet* array_set) override {
     StartAttributeStream("value_can_be_null") << std::boolalpha
         << array_set->GetValueCanBeNull() << std::noboolalpha;
     StartAttributeStream("needs_type_check") << std::boolalpha
         << array_set->NeedsTypeCheck() << std::noboolalpha;
   }
 
-  void VisitCompare(HCompare* compare) OVERRIDE {
+  void VisitCompare(HCompare* compare) override {
     ComparisonBias bias = compare->GetBias();
     StartAttributeStream("bias") << (bias == ComparisonBias::kGtBias
                                      ? "gt"
                                      : (bias == ComparisonBias::kLtBias ? "lt" : "none"));
   }
 
-  void VisitInvoke(HInvoke* invoke) OVERRIDE {
+  void VisitInvoke(HInvoke* invoke) override {
     StartAttributeStream("dex_file_index") << invoke->GetDexMethodIndex();
     ArtMethod* method = invoke->GetResolvedMethod();
     // We don't print signatures, which conflict with c1visualizer format.
@@ -470,12 +470,12 @@
                                           << std::noboolalpha;
   }
 
-  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override {
     VisitInvoke(invoke);
     StartAttributeStream("invoke_type") << invoke->GetInvokeType();
   }
 
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
     VisitInvoke(invoke);
     StartAttributeStream("method_load_kind") << invoke->GetMethodLoadKind();
     StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
@@ -484,96 +484,96 @@
     }
   }
 
-  void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
     VisitInvoke(invoke);
     StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
   }
 
-  void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+  void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override {
     VisitInvoke(invoke);
     StartAttributeStream("invoke_type") << "InvokePolymorphic";
   }
 
-  void VisitInstanceFieldGet(HInstanceFieldGet* iget) OVERRIDE {
+  void VisitInstanceFieldGet(HInstanceFieldGet* iget) override {
     StartAttributeStream("field_name") <<
         iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
                                                       /* with type */ false);
     StartAttributeStream("field_type") << iget->GetFieldType();
   }
 
-  void VisitInstanceFieldSet(HInstanceFieldSet* iset) OVERRIDE {
+  void VisitInstanceFieldSet(HInstanceFieldSet* iset) override {
     StartAttributeStream("field_name") <<
         iset->GetFieldInfo().GetDexFile().PrettyField(iset->GetFieldInfo().GetFieldIndex(),
                                                       /* with type */ false);
     StartAttributeStream("field_type") << iset->GetFieldType();
   }
 
-  void VisitStaticFieldGet(HStaticFieldGet* sget) OVERRIDE {
+  void VisitStaticFieldGet(HStaticFieldGet* sget) override {
     StartAttributeStream("field_name") <<
         sget->GetFieldInfo().GetDexFile().PrettyField(sget->GetFieldInfo().GetFieldIndex(),
                                                       /* with type */ false);
     StartAttributeStream("field_type") << sget->GetFieldType();
   }
 
-  void VisitStaticFieldSet(HStaticFieldSet* sset) OVERRIDE {
+  void VisitStaticFieldSet(HStaticFieldSet* sset) override {
     StartAttributeStream("field_name") <<
         sset->GetFieldInfo().GetDexFile().PrettyField(sset->GetFieldInfo().GetFieldIndex(),
                                                       /* with type */ false);
     StartAttributeStream("field_type") << sset->GetFieldType();
   }
 
-  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE {
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) override {
     StartAttributeStream("field_type") << field_access->GetFieldType();
   }
 
-  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE {
+  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) override {
     StartAttributeStream("field_type") << field_access->GetFieldType();
   }
 
-  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE {
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) override {
     StartAttributeStream("field_type") << field_access->GetFieldType();
   }
 
-  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE {
+  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) override {
     StartAttributeStream("field_type") << field_access->GetFieldType();
   }
 
-  void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
+  void VisitTryBoundary(HTryBoundary* try_boundary) override {
     StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
   }
 
-  void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE {
+  void VisitDeoptimize(HDeoptimize* deoptimize) override {
     StartAttributeStream("kind") << deoptimize->GetKind();
   }
 
-  void VisitVecOperation(HVecOperation* vec_operation) OVERRIDE {
+  void VisitVecOperation(HVecOperation* vec_operation) override {
     StartAttributeStream("packed_type") << vec_operation->GetPackedType();
   }
 
-  void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) OVERRIDE {
+  void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) override {
     StartAttributeStream("alignment") << vec_mem_operation->GetAlignment().ToString();
   }
 
-  void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
+  void VisitVecHalvingAdd(HVecHalvingAdd* hadd) override {
     VisitVecBinaryOperation(hadd);
     StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
   }
 
-  void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
+  void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) override {
     VisitVecOperation(instruction);
     StartAttributeStream("kind") << instruction->GetOpKind();
   }
 
 #if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
-  void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) OVERRIDE {
+  void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) override {
     StartAttributeStream("kind") << instruction->GetOpKind();
   }
 
-  void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) OVERRIDE {
+  void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) override {
     StartAttributeStream("kind") << instruction->GetOpKind();
   }
 
-  void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) OVERRIDE {
+  void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) override {
     StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind();
     if (HDataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())) {
       StartAttributeStream("shift") << instruction->GetShiftAmount();
@@ -814,7 +814,7 @@
     Flush();
   }
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     StartTag("block");
     PrintProperty("name", "B", block->GetBlockId());
     if (block->GetLifetimeStart() != kNoLifetime) {
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 75cfff2..bbf2265 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -31,7 +31,7 @@
                   const char* pass_name = kGlobalValueNumberingPassName)
       : HOptimization(graph, pass_name), side_effects_(side_effects) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kGlobalValueNumberingPassName = "GVN";
 
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 89fed2e..a48aa90 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -37,7 +37,7 @@
  public:
   explicit HInductionVarAnalysis(HGraph* graph, const char* name = kInductionPassName);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kInductionPassName = "induction_var_analysis";
 
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 2fdf6a1..6fd0c20 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -60,7 +60,7 @@
         handles_(handles),
         inline_stats_(nullptr) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kInlinerPassName = "inliner";
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f493b66..2757f7b 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -66,44 +66,44 @@
   bool TryCombineVecMultiplyAccumulate(HVecMul* mul);
 
   void VisitShift(HBinaryOperation* shift);
-  void VisitEqual(HEqual* equal) OVERRIDE;
-  void VisitNotEqual(HNotEqual* equal) OVERRIDE;
-  void VisitBooleanNot(HBooleanNot* bool_not) OVERRIDE;
-  void VisitInstanceFieldSet(HInstanceFieldSet* equal) OVERRIDE;
-  void VisitStaticFieldSet(HStaticFieldSet* equal) OVERRIDE;
-  void VisitArraySet(HArraySet* equal) OVERRIDE;
-  void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
-  void VisitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void VisitArrayLength(HArrayLength* instruction) OVERRIDE;
-  void VisitCheckCast(HCheckCast* instruction) OVERRIDE;
-  void VisitAbs(HAbs* instruction) OVERRIDE;
-  void VisitAdd(HAdd* instruction) OVERRIDE;
-  void VisitAnd(HAnd* instruction) OVERRIDE;
-  void VisitCondition(HCondition* instruction) OVERRIDE;
-  void VisitGreaterThan(HGreaterThan* condition) OVERRIDE;
-  void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) OVERRIDE;
-  void VisitLessThan(HLessThan* condition) OVERRIDE;
-  void VisitLessThanOrEqual(HLessThanOrEqual* condition) OVERRIDE;
-  void VisitBelow(HBelow* condition) OVERRIDE;
-  void VisitBelowOrEqual(HBelowOrEqual* condition) OVERRIDE;
-  void VisitAbove(HAbove* condition) OVERRIDE;
-  void VisitAboveOrEqual(HAboveOrEqual* condition) OVERRIDE;
-  void VisitDiv(HDiv* instruction) OVERRIDE;
-  void VisitMul(HMul* instruction) OVERRIDE;
-  void VisitNeg(HNeg* instruction) OVERRIDE;
-  void VisitNot(HNot* instruction) OVERRIDE;
-  void VisitOr(HOr* instruction) OVERRIDE;
-  void VisitShl(HShl* instruction) OVERRIDE;
-  void VisitShr(HShr* instruction) OVERRIDE;
-  void VisitSub(HSub* instruction) OVERRIDE;
-  void VisitUShr(HUShr* instruction) OVERRIDE;
-  void VisitXor(HXor* instruction) OVERRIDE;
-  void VisitSelect(HSelect* select) OVERRIDE;
-  void VisitIf(HIf* instruction) OVERRIDE;
-  void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
-  void VisitInvoke(HInvoke* invoke) OVERRIDE;
-  void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
-  void VisitVecMul(HVecMul* instruction) OVERRIDE;
+  void VisitEqual(HEqual* equal) override;
+  void VisitNotEqual(HNotEqual* equal) override;
+  void VisitBooleanNot(HBooleanNot* bool_not) override;
+  void VisitInstanceFieldSet(HInstanceFieldSet* equal) override;
+  void VisitStaticFieldSet(HStaticFieldSet* equal) override;
+  void VisitArraySet(HArraySet* equal) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
+  void VisitNullCheck(HNullCheck* instruction) override;
+  void VisitArrayLength(HArrayLength* instruction) override;
+  void VisitCheckCast(HCheckCast* instruction) override;
+  void VisitAbs(HAbs* instruction) override;
+  void VisitAdd(HAdd* instruction) override;
+  void VisitAnd(HAnd* instruction) override;
+  void VisitCondition(HCondition* instruction) override;
+  void VisitGreaterThan(HGreaterThan* condition) override;
+  void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) override;
+  void VisitLessThan(HLessThan* condition) override;
+  void VisitLessThanOrEqual(HLessThanOrEqual* condition) override;
+  void VisitBelow(HBelow* condition) override;
+  void VisitBelowOrEqual(HBelowOrEqual* condition) override;
+  void VisitAbove(HAbove* condition) override;
+  void VisitAboveOrEqual(HAboveOrEqual* condition) override;
+  void VisitDiv(HDiv* instruction) override;
+  void VisitMul(HMul* instruction) override;
+  void VisitNeg(HNeg* instruction) override;
+  void VisitNot(HNot* instruction) override;
+  void VisitOr(HOr* instruction) override;
+  void VisitShl(HShl* instruction) override;
+  void VisitShr(HShr* instruction) override;
+  void VisitSub(HSub* instruction) override;
+  void VisitUShr(HUShr* instruction) override;
+  void VisitXor(HXor* instruction) override;
+  void VisitSelect(HSelect* select) override;
+  void VisitIf(HIf* instruction) override;
+  void VisitInstanceOf(HInstanceOf* instruction) override;
+  void VisitInvoke(HInvoke* invoke) override;
+  void VisitDeoptimize(HDeoptimize* deoptimize) override;
+  void VisitVecMul(HVecMul* instruction) override;
 
   bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const;
 
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 2d134e0..982a24a 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -46,7 +46,7 @@
 
   static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
  private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 37fcdb9..24fbb6c 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -56,7 +56,7 @@
    * (2) Since statements can be removed in a "forward" fashion,
    *     the visitor should test if each statement is still there.
    */
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     // TODO: fragile iteration, provide more robust iterators?
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* instruction = it.Current();
@@ -66,15 +66,15 @@
     }
   }
 
-  void VisitAnd(HAnd* instruction) OVERRIDE;
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
-  void VisitArraySet(HArraySet* instruction) OVERRIDE;
-  void VisitMul(HMul* instruction) OVERRIDE;
-  void VisitOr(HOr* instruction) OVERRIDE;
-  void VisitShl(HShl* instruction) OVERRIDE;
-  void VisitShr(HShr* instruction) OVERRIDE;
-  void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
-  void VisitUShr(HUShr* instruction) OVERRIDE;
+  void VisitAnd(HAnd* instruction) override;
+  void VisitArrayGet(HArrayGet* instruction) override;
+  void VisitArraySet(HArraySet* instruction) override;
+  void VisitMul(HMul* instruction) override;
+  void VisitOr(HOr* instruction) override;
+  void VisitShl(HShl* instruction) override;
+  void VisitShr(HShr* instruction) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
+  void VisitUShr(HUShr* instruction) override;
 
   OptimizingCompilerStats* stats_;
 };
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index f1a16ef..fca9341 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -30,7 +30,7 @@
 
   static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 };
 
 }  // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index e0a6279..b536cb4 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -58,7 +58,7 @@
    * (2) Since statements can be removed in a "forward" fashion,
    *     the visitor should test if each statement is still there.
    */
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     // TODO: fragile iteration, provide more robust iterators?
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* instruction = it.Current();
@@ -69,18 +69,18 @@
   }
 
   // HInstruction visitors, sorted alphabetically.
-  void VisitAnd(HAnd* instruction) OVERRIDE;
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
-  void VisitArraySet(HArraySet* instruction) OVERRIDE;
-  void VisitMul(HMul* instruction) OVERRIDE;
-  void VisitOr(HOr* instruction) OVERRIDE;
-  void VisitShl(HShl* instruction) OVERRIDE;
-  void VisitShr(HShr* instruction) OVERRIDE;
-  void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
-  void VisitUShr(HUShr* instruction) OVERRIDE;
-  void VisitXor(HXor* instruction) OVERRIDE;
-  void VisitVecLoad(HVecLoad* instruction) OVERRIDE;
-  void VisitVecStore(HVecStore* instruction) OVERRIDE;
+  void VisitAnd(HAnd* instruction) override;
+  void VisitArrayGet(HArrayGet* instruction) override;
+  void VisitArraySet(HArraySet* instruction) override;
+  void VisitMul(HMul* instruction) override;
+  void VisitOr(HOr* instruction) override;
+  void VisitShl(HShl* instruction) override;
+  void VisitShr(HShr* instruction) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
+  void VisitUShr(HUShr* instruction) override;
+  void VisitXor(HXor* instruction) override;
+  void VisitVecLoad(HVecLoad* instruction) override;
+  void VisitVecStore(HVecStore* instruction) override;
 
   OptimizingCompilerStats* stats_;
 };
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 8659c1f..8d93c01 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -30,7 +30,7 @@
 
   static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 };
 
 }  // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index 3bdf90f..5d0c63b 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -39,8 +39,8 @@
   bool TryExtractArrayAccessIndex(HInstruction* access,
                                   HInstruction* index,
                                   DataType::Type packed_type);
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
-  void VisitArraySet(HArraySet* instruction) OVERRIDE;
+  void VisitArrayGet(HArrayGet* instruction) override;
+  void VisitArraySet(HArraySet* instruction) override;
 
   OptimizingCompilerStats* stats_;
   CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 94ef73d..b431334 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -35,7 +35,7 @@
 
   static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
  private:
   CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 993648f..06e2fbb 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -42,7 +42,7 @@
                        const char* name = kIntrinsicsRecognizerPassName)
       : HOptimization(graph, name, stats) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   // Static helper that recognizes intrinsic call. Returns true on success.
   // If it fails due to invoke type mismatch, wrong_invoke_type is set.
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index a657b58..1abfcb0 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -112,7 +112,7 @@
   explicit IntrinsicSlowPathARM64(HInvoke* invoke)
       : SlowPathCodeARM64(invoke), invoke_(invoke) { }
 
-  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen_in) override {
     CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
     __ Bind(GetEntryLabel());
 
@@ -145,7 +145,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathARM64"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPathARM64"; }
 
  private:
   // The instruction where this slow path is happening.
@@ -163,7 +163,7 @@
     DCHECK(kUseBakerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen_in) override {
     CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
@@ -216,7 +216,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathARM64"; }
+  const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathARM64"; }
 
  private:
   Location tmp_;
@@ -1006,9 +1006,9 @@
   explicit BakerReadBarrierCasSlowPathARM64(HInvoke* invoke)
       : SlowPathCodeARM64(invoke) {}
 
-  const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARM64"; }
+  const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARM64"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     Arm64Assembler* assembler = arm64_codegen->GetAssembler();
     MacroAssembler* masm = assembler->GetVIXLAssembler();
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 033a644..9c46efd 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -37,7 +37,7 @@
 
 class CodeGeneratorARM64;
 
-class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderARM64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
       : allocator_(allocator), codegen_(codegen) {}
@@ -45,7 +45,7 @@
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -63,14 +63,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
 };
 
-class IntrinsicCodeGeneratorARM64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorARM64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorARM64(CodeGeneratorARM64* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 74a779d..1127fb8 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -85,7 +85,7 @@
     return calling_convention_visitor.GetMethodLocation();
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     ArmVIXLAssembler* assembler = down_cast<ArmVIXLAssembler*>(codegen->GetAssembler());
     __ Bind(GetEntryLabel());
 
@@ -111,7 +111,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPath"; }
 
  private:
   // The instruction where this slow path is happening.
@@ -173,7 +173,7 @@
     DCHECK(kUseBakerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
     LocationSummary* locations = instruction_->GetLocations();
@@ -233,7 +233,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierSystemArrayCopySlowPathARMVIXL";
   }
 
@@ -969,9 +969,9 @@
   explicit BakerReadBarrierCasSlowPathARMVIXL(HInvoke* invoke)
       : SlowPathCodeARMVIXL(invoke) {}
 
-  const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARMVIXL"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
     __ Bind(GetEntryLabel());
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 9c02d0a..1fea776 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -27,14 +27,14 @@
 class ArmVIXLAssembler;
 class CodeGeneratorARMVIXL;
 
-class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderARMVIXL final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -54,14 +54,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL);
 };
 
-class IntrinsicCodeGeneratorARMVIXL FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorARMVIXL final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorARMVIXL(CodeGeneratorARMVIXL* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 01d9f96..771714b 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -108,7 +108,7 @@
  public:
   explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : SlowPathCodeMIPS(invoke), invoke_(invoke) { }
 
-  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen_in) override {
     CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in);
 
     __ Bind(GetEntryLabel());
@@ -137,7 +137,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPathMIPS"; }
 
  private:
   // The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index 1c1ba40..08d4e82 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -30,14 +30,14 @@
 class CodeGeneratorMIPS;
 class MipsAssembler;
 
-class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderMIPS final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -55,14 +55,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
 };
 
-class IntrinsicCodeGeneratorMIPS FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorMIPS final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorMIPS(CodeGeneratorMIPS* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 0bd69c6..4a1bd5b 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -97,7 +97,7 @@
   explicit IntrinsicSlowPathMIPS64(HInvoke* invoke)
      : SlowPathCodeMIPS64(invoke), invoke_(invoke) { }
 
-  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen_in) override {
     CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
 
     __ Bind(GetEntryLabel());
@@ -126,7 +126,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPathMIPS64"; }
 
  private:
   // The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 748b0b0..ca8bc8f 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -30,14 +30,14 @@
 class CodeGeneratorMIPS64;
 class Mips64Assembler;
 
-class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderMIPS64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -55,14 +55,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
 };
 
-class IntrinsicCodeGeneratorMIPS64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorMIPS64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorMIPS64(CodeGeneratorMIPS64* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index 8c69d9b..41947f1 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -47,7 +47,7 @@
     return calling_convention_visitor.GetMethodLocation();
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     Assembler* assembler = codegen->GetAssembler();
     assembler->Bind(GetEntryLabel());
 
@@ -73,7 +73,7 @@
     assembler->Jump(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPath"; }
 
  private:
   // The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 5c7be54..d33c0c3 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -82,7 +82,7 @@
     DCHECK(kUseBakerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
@@ -160,7 +160,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86);
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index e3555e7..ae150da 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -30,14 +30,14 @@
 class CodeGeneratorX86;
 class X86Assembler;
 
-class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderX86 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -55,14 +55,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
 };
 
-class IntrinsicCodeGeneratorX86 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorX86 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorX86(CodeGeneratorX86* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index b5afe93..ae88974 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -80,7 +80,7 @@
     DCHECK(kUseBakerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
@@ -118,7 +118,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86_64"; }
+  const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86_64);
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index 5cb601e..199cfed 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -30,14 +30,14 @@
 class CodeGeneratorX86_64;
 class X86_64Assembler;
 
-class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderX86_64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderX86_64(CodeGeneratorX86_64* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -55,14 +55,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
 };
 
-class IntrinsicCodeGeneratorX86_64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorX86_64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorX86_64(CodeGeneratorX86_64* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h
index f72d195..9cafddb 100644
--- a/compiler/optimizing/licm.h
+++ b/compiler/optimizing/licm.h
@@ -33,7 +33,7 @@
       : HOptimization(graph, name, stats),
         side_effects_(side_effects) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kLoopInvariantCodeMotionPassName = "licm";
 
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 769a3f1..08d9309 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -492,12 +492,12 @@
                             HeapLocation::kDeclaringClassDefIndexForArrays);
   }
 
-  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
     VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
     HeapLocation* location = VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
     has_heap_stores_ = true;
     if (location->GetReferenceInfo()->IsSingleton()) {
@@ -523,12 +523,12 @@
     }
   }
 
-  void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+  void VisitStaticFieldGet(HStaticFieldGet* instruction) override {
     VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
     VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
     has_heap_stores_ = true;
   }
@@ -536,7 +536,7 @@
   // We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses
   // since we cannot accurately track the fields.
 
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+  void VisitArrayGet(HArrayGet* instruction) override {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
     DataType::Type type = instruction->GetType();
@@ -544,7 +544,7 @@
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+  void VisitArraySet(HArraySet* instruction) override {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
     DataType::Type type = instruction->GetComponentType();
@@ -552,7 +552,7 @@
     has_heap_stores_ = true;
   }
 
-  void VisitVecLoad(HVecLoad* instruction) OVERRIDE {
+  void VisitVecLoad(HVecLoad* instruction) override {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
     DataType::Type type = instruction->GetPackedType();
@@ -560,7 +560,7 @@
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitVecStore(HVecStore* instruction) OVERRIDE {
+  void VisitVecStore(HVecStore* instruction) override {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
     DataType::Type type = instruction->GetPackedType();
@@ -568,7 +568,7 @@
     has_heap_stores_ = true;
   }
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     // Any new-instance or new-array cannot alias with references that
     // pre-exist the new-instance/new-array. We append entries into
     // ref_info_array_ which keeps track of the order of creation
@@ -580,7 +580,7 @@
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
+  void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) override {
     has_monitor_operations_ = true;
   }
 
@@ -605,7 +605,7 @@
     return heap_location_collector_;
   }
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kLoadStoreAnalysisPassName = "load_store_analysis";
 
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 28ac942..7f71745 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -107,7 +107,7 @@
         singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)) {
   }
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     // Populate the heap_values array for this block.
     // TODO: try to reuse the heap_values array from one predecessor if possible.
     if (block->IsLoopHeader()) {
@@ -656,13 +656,13 @@
     }
   }
 
-  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
     HInstruction* object = instruction->InputAt(0);
     const FieldInfo& field = instruction->GetFieldInfo();
     VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(object, &field));
   }
 
-  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
     HInstruction* object = instruction->InputAt(0);
     const FieldInfo& field = instruction->GetFieldInfo();
     HInstruction* value = instruction->InputAt(1);
@@ -670,24 +670,24 @@
     VisitSetLocation(instruction, idx, value);
   }
 
-  void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+  void VisitStaticFieldGet(HStaticFieldGet* instruction) override {
     HInstruction* cls = instruction->InputAt(0);
     const FieldInfo& field = instruction->GetFieldInfo();
     VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(cls, &field));
   }
 
-  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
     HInstruction* cls = instruction->InputAt(0);
     const FieldInfo& field = instruction->GetFieldInfo();
     size_t idx = heap_location_collector_.GetFieldHeapLocation(cls, &field);
     VisitSetLocation(instruction, idx, instruction->InputAt(1));
   }
 
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+  void VisitArrayGet(HArrayGet* instruction) override {
     VisitGetLocation(instruction, heap_location_collector_.GetArrayHeapLocation(instruction));
   }
 
-  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+  void VisitArraySet(HArraySet* instruction) override {
     size_t idx = heap_location_collector_.GetArrayHeapLocation(instruction);
     VisitSetLocation(instruction, idx, instruction->InputAt(2));
   }
@@ -743,15 +743,15 @@
     }
   }
 
-  void VisitReturn(HReturn* instruction) OVERRIDE {
+  void VisitReturn(HReturn* instruction) override {
     HandleExit(instruction->GetBlock());
   }
 
-  void VisitReturnVoid(HReturnVoid* return_void) OVERRIDE {
+  void VisitReturnVoid(HReturnVoid* return_void) override {
     HandleExit(return_void->GetBlock());
   }
 
-  void VisitThrow(HThrow* throw_instruction) OVERRIDE {
+  void VisitThrow(HThrow* throw_instruction) override {
     HandleExit(throw_instruction->GetBlock());
   }
 
@@ -777,35 +777,35 @@
     }
   }
 
-  void VisitInvoke(HInvoke* invoke) OVERRIDE {
+  void VisitInvoke(HInvoke* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+  void VisitClinitCheck(HClinitCheck* clinit) override {
     HandleInvoke(clinit);
   }
 
-  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+  void VisitNewInstance(HNewInstance* new_instance) override {
     ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_instance);
     if (ref_info == nullptr) {
       // new_instance isn't used for field accesses. No need to process it.
@@ -829,7 +829,7 @@
     }
   }
 
-  void VisitNewArray(HNewArray* new_array) OVERRIDE {
+  void VisitNewArray(HNewArray* new_array) override {
     ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_array);
     if (ref_info == nullptr) {
       // new_array isn't used for array accesses. No need to process it.
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 408386b..f7ba41a 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -35,7 +35,7 @@
         side_effects_(side_effects),
         lsa_(lsa) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
 
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index d355ced..2ae3683 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -87,14 +87,14 @@
   // Maximum number of instructions to be created as a result of full unrolling.
   static constexpr uint32_t kScalarHeuristicFullyUnrolledMaxInstrThreshold = 35;
 
-  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const override {
     return analysis_info->HasLongTypeInstructions() ||
            IsLoopTooBig(analysis_info,
                         kScalarHeuristicMaxBodySizeInstr,
                         kScalarHeuristicMaxBodySizeBlocks);
   }
 
-  uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const OVERRIDE {
+  uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const override {
     int64_t trip_count = analysis_info->GetTripCount();
     // Unroll only loops with known trip count.
     if (trip_count == LoopAnalysisInfo::kUnknownTripCount) {
@@ -108,9 +108,9 @@
     return desired_unrolling_factor;
   }
 
-  bool IsLoopPeelingEnabled() const OVERRIDE { return true; }
+  bool IsLoopPeelingEnabled() const override { return true; }
 
-  bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+  bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const override {
     int64_t trip_count = analysis_info->GetTripCount();
     // We assume that trip count is known.
     DCHECK_NE(trip_count, LoopAnalysisInfo::kUnknownTripCount);
@@ -144,7 +144,7 @@
   // Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
   static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeBlocks = 8;
 
-  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const OVERRIDE {
+  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const override {
     return IsLoopTooBig(loop_analysis_info,
                         kArm64ScalarHeuristicMaxBodySizeInstr,
                         kArm64ScalarHeuristicMaxBodySizeBlocks);
@@ -153,7 +153,7 @@
   uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
                                   int64_t trip_count,
                                   uint32_t max_peel,
-                                  uint32_t vector_length) const OVERRIDE {
+                                  uint32_t vector_length) const override {
     // Don't unroll with insufficient iterations.
     // TODO: Unroll loops with unknown trip count.
     DCHECK_NE(vector_length, 0u);
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 644b740..2b202fd 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -43,7 +43,7 @@
                     OptimizingCompilerStats* stats,
                     const char* name = kLoopOptimizationPassName);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kLoopOptimizationPassName = "loop_optimization";
 
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d88b036..5feffa0 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1529,12 +1529,12 @@
   private:                                                                \
   H##type& operator=(const H##type&) = delete;                            \
   public:                                                                 \
-  const char* DebugName() const OVERRIDE { return #type; }                \
-  HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE {             \
+  const char* DebugName() const override { return #type; }                \
+  HInstruction* Clone(ArenaAllocator* arena) const override {             \
     DCHECK(IsClonable());                                                 \
     return new (arena) H##type(*this->As##type());                        \
   }                                                                       \
-  void Accept(HGraphVisitor* visitor) OVERRIDE
+  void Accept(HGraphVisitor* visitor) override
 
 #define DECLARE_ABSTRACT_INSTRUCTION(type)                              \
   private:                                                              \
@@ -2595,7 +2595,7 @@
 class HVariableInputSizeInstruction : public HInstruction {
  public:
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
     return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
   }
 
@@ -2645,7 +2645,7 @@
   virtual ~HExpression() {}
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
     return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
   }
 
@@ -2667,7 +2667,7 @@
   virtual ~HExpression() {}
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
     return ArrayRef<HUserRecord<HInstruction*>>();
   }
 
@@ -2680,13 +2680,13 @@
 
 // Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
 // instruction that branches to the exit block.
-class HReturnVoid FINAL : public HExpression<0> {
+class HReturnVoid final : public HExpression<0> {
  public:
   explicit HReturnVoid(uint32_t dex_pc = kNoDexPc)
       : HExpression(kReturnVoid, SideEffects::None(), dex_pc) {
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   DECLARE_INSTRUCTION(ReturnVoid);
 
@@ -2696,14 +2696,14 @@
 
 // Represents dex's RETURN opcodes. A HReturn is a control flow
 // instruction that branches to the exit block.
-class HReturn FINAL : public HExpression<1> {
+class HReturn final : public HExpression<1> {
  public:
   explicit HReturn(HInstruction* value, uint32_t dex_pc = kNoDexPc)
       : HExpression(kReturn, SideEffects::None(), dex_pc) {
     SetRawInputAt(0, value);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   DECLARE_INSTRUCTION(Return);
 
@@ -2711,7 +2711,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Return);
 };
 
-class HPhi FINAL : public HVariableInputSizeInstruction {
+class HPhi final : public HVariableInputSizeInstruction {
  public:
   HPhi(ArenaAllocator* allocator,
        uint32_t reg_number,
@@ -2735,7 +2735,7 @@
     SetPackedFlag<kFlagCanBeNull>(true);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   // Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
   static DataType::Type ToPhiType(DataType::Type type) {
@@ -2755,7 +2755,7 @@
     SetPackedField<TypeField>(new_type);
   }
 
-  bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+  bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
   void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
 
   uint32_t GetRegNumber() const { return reg_number_; }
@@ -2813,13 +2813,13 @@
 // The exit instruction is the only instruction of the exit block.
 // Instructions aborting the method (HThrow and HReturn) must branch to the
 // exit block.
-class HExit FINAL : public HExpression<0> {
+class HExit final : public HExpression<0> {
  public:
   explicit HExit(uint32_t dex_pc = kNoDexPc)
       : HExpression(kExit, SideEffects::None(), dex_pc) {
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   DECLARE_INSTRUCTION(Exit);
 
@@ -2828,14 +2828,14 @@
 };
 
 // Jumps from one block to another.
-class HGoto FINAL : public HExpression<0> {
+class HGoto final : public HExpression<0> {
  public:
   explicit HGoto(uint32_t dex_pc = kNoDexPc)
       : HExpression(kGoto, SideEffects::None(), dex_pc) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool IsControlFlow() const override { return true; }
 
   HBasicBlock* GetSuccessor() const {
     return GetBlock()->GetSingleSuccessor();
@@ -2853,7 +2853,7 @@
       : HExpression(kind, type, SideEffects::None(), dex_pc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   // Is this constant -1 in the arithmetic sense?
   virtual bool IsMinusOne() const { return false; }
@@ -2872,15 +2872,15 @@
   DEFAULT_COPY_CONSTRUCTOR(Constant);
 };
 
-class HNullConstant FINAL : public HConstant {
+class HNullConstant final : public HConstant {
  public:
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  uint64_t GetValueAsUint64() const OVERRIDE { return 0; }
+  uint64_t GetValueAsUint64() const override { return 0; }
 
-  size_t ComputeHashCode() const OVERRIDE { return 0; }
+  size_t ComputeHashCode() const override { return 0; }
 
   // The null constant representation is a 0-bit pattern.
   virtual bool IsZeroBitPattern() const { return true; }
@@ -2900,25 +2900,25 @@
 
 // Constants of the type int. Those can be from Dex instructions, or
 // synthesized (for example with the if-eqz instruction).
-class HIntConstant FINAL : public HConstant {
+class HIntConstant final : public HConstant {
  public:
   int32_t GetValue() const { return value_; }
 
-  uint64_t GetValueAsUint64() const OVERRIDE {
+  uint64_t GetValueAsUint64() const override {
     return static_cast<uint64_t>(static_cast<uint32_t>(value_));
   }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsIntConstant()) << other->DebugName();
     return other->AsIntConstant()->value_ == value_;
   }
 
-  size_t ComputeHashCode() const OVERRIDE { return GetValue(); }
+  size_t ComputeHashCode() const override { return GetValue(); }
 
-  bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
-  bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; }
-  bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; }
-  bool IsOne() const OVERRIDE { return GetValue() == 1; }
+  bool IsMinusOne() const override { return GetValue() == -1; }
+  bool IsArithmeticZero() const override { return GetValue() == 0; }
+  bool IsZeroBitPattern() const override { return GetValue() == 0; }
+  bool IsOne() const override { return GetValue() == 1; }
 
   // Integer constants are used to encode Boolean values as well,
   // where 1 means true and 0 means false.
@@ -2946,23 +2946,23 @@
   ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
 };
 
-class HLongConstant FINAL : public HConstant {
+class HLongConstant final : public HConstant {
  public:
   int64_t GetValue() const { return value_; }
 
-  uint64_t GetValueAsUint64() const OVERRIDE { return value_; }
+  uint64_t GetValueAsUint64() const override { return value_; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsLongConstant()) << other->DebugName();
     return other->AsLongConstant()->value_ == value_;
   }
 
-  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
 
-  bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
-  bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; }
-  bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; }
-  bool IsOne() const OVERRIDE { return GetValue() == 1; }
+  bool IsMinusOne() const override { return GetValue() == -1; }
+  bool IsArithmeticZero() const override { return GetValue() == 0; }
+  bool IsZeroBitPattern() const override { return GetValue() == 0; }
+  bool IsOne() const override { return GetValue() == 1; }
 
   DECLARE_INSTRUCTION(LongConstant);
 
@@ -2980,25 +2980,25 @@
   friend class HGraph;
 };
 
-class HFloatConstant FINAL : public HConstant {
+class HFloatConstant final : public HConstant {
  public:
   float GetValue() const { return value_; }
 
-  uint64_t GetValueAsUint64() const OVERRIDE {
+  uint64_t GetValueAsUint64() const override {
     return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_));
   }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsFloatConstant()) << other->DebugName();
     return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64();
   }
 
-  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
 
-  bool IsMinusOne() const OVERRIDE {
+  bool IsMinusOne() const override {
     return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>((-1.0f));
   }
-  bool IsArithmeticZero() const OVERRIDE {
+  bool IsArithmeticZero() const override {
     return std::fpclassify(value_) == FP_ZERO;
   }
   bool IsArithmeticPositiveZero() const {
@@ -3007,10 +3007,10 @@
   bool IsArithmeticNegativeZero() const {
     return IsArithmeticZero() && std::signbit(value_);
   }
-  bool IsZeroBitPattern() const OVERRIDE {
+  bool IsZeroBitPattern() const override {
     return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(0.0f);
   }
-  bool IsOne() const OVERRIDE {
+  bool IsOne() const override {
     return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(1.0f);
   }
   bool IsNaN() const {
@@ -3039,23 +3039,23 @@
   friend class HGraph;
 };
 
-class HDoubleConstant FINAL : public HConstant {
+class HDoubleConstant final : public HConstant {
  public:
   double GetValue() const { return value_; }
 
-  uint64_t GetValueAsUint64() const OVERRIDE { return bit_cast<uint64_t, double>(value_); }
+  uint64_t GetValueAsUint64() const override { return bit_cast<uint64_t, double>(value_); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsDoubleConstant()) << other->DebugName();
     return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
   }
 
-  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
 
-  bool IsMinusOne() const OVERRIDE {
+  bool IsMinusOne() const override {
     return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((-1.0));
   }
-  bool IsArithmeticZero() const OVERRIDE {
+  bool IsArithmeticZero() const override {
     return std::fpclassify(value_) == FP_ZERO;
   }
   bool IsArithmeticPositiveZero() const {
@@ -3064,10 +3064,10 @@
   bool IsArithmeticNegativeZero() const {
     return IsArithmeticZero() && std::signbit(value_);
   }
-  bool IsZeroBitPattern() const OVERRIDE {
+  bool IsZeroBitPattern() const override {
     return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((0.0));
   }
-  bool IsOne() const OVERRIDE {
+  bool IsOne() const override {
     return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>(1.0);
   }
   bool IsNaN() const {
@@ -3098,15 +3098,15 @@
 
 // Conditional branch. A block ending with an HIf instruction must have
 // two successors.
-class HIf FINAL : public HExpression<1> {
+class HIf final : public HExpression<1> {
  public:
   explicit HIf(HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HExpression(kIf, SideEffects::None(), dex_pc) {
     SetRawInputAt(0, input);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool IsControlFlow() const override { return true; }
 
   HBasicBlock* IfTrueSuccessor() const {
     return GetBlock()->GetSuccessors()[0];
@@ -3128,7 +3128,7 @@
 // non-exceptional control flow.
 // Normal-flow successor is stored at index zero, exception handlers under
 // higher indices in no particular order.
-class HTryBoundary FINAL : public HExpression<0> {
+class HTryBoundary final : public HExpression<0> {
  public:
   enum class BoundaryKind {
     kEntry,
@@ -3141,7 +3141,7 @@
     SetPackedField<BoundaryKindField>(kind);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   // Returns the block's non-exceptional successor (index zero).
   HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors()[0]; }
@@ -3187,7 +3187,7 @@
 };
 
 // Deoptimize to interpreter, upon checking a condition.
-class HDeoptimize FINAL : public HVariableInputSizeInstruction {
+class HDeoptimize final : public HVariableInputSizeInstruction {
  public:
   // Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move
   // across.
@@ -3207,7 +3207,7 @@
     SetRawInputAt(0, cond);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   // Use this constructor when the `HDeoptimize` guards an instruction, and any user
   // that relies on the deoptimization to pass should have its input be the `HDeoptimize`
@@ -3233,15 +3233,15 @@
     SetRawInputAt(1, guard);
   }
 
-  bool CanBeMoved() const OVERRIDE { return GetPackedFlag<kFieldCanBeMoved>(); }
+  bool CanBeMoved() const override { return GetPackedFlag<kFieldCanBeMoved>(); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return (other->CanBeMoved() == CanBeMoved()) && (other->AsDeoptimize()->GetKind() == GetKind());
   }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   DeoptimizationKind GetDeoptimizationKind() const { return GetPackedField<DeoptimizeKindField>(); }
 
@@ -3281,7 +3281,7 @@
 // if it's true, starts to do deoptimization.
 // It has a 4-byte slot on stack.
 // TODO: allocate a register for this flag.
-class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
+class HShouldDeoptimizeFlag final : public HVariableInputSizeInstruction {
  public:
   // CHA guards are only optimized in a separate pass and it has no side effects
   // with regard to other passes.
@@ -3299,7 +3299,7 @@
   // further guard elimination/motion since a guard might have been used for justification
   // of the elimination of another guard. Therefore, we pretend this guard cannot be moved
   // to avoid other optimizations trying to move it.
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(ShouldDeoptimizeFlag);
 
@@ -3310,7 +3310,7 @@
 // Represents the ArtMethod that was passed as a first argument to
 // the method. It is used by instructions that depend on it, like
 // instructions that work with the dex cache.
-class HCurrentMethod FINAL : public HExpression<0> {
+class HCurrentMethod final : public HExpression<0> {
  public:
   explicit HCurrentMethod(DataType::Type type, uint32_t dex_pc = kNoDexPc)
       : HExpression(kCurrentMethod, type, SideEffects::None(), dex_pc) {
@@ -3324,7 +3324,7 @@
 
 // Fetches an ArtMethod from the virtual table or the interface method table
 // of a class.
-class HClassTableGet FINAL : public HExpression<1> {
+class HClassTableGet final : public HExpression<1> {
  public:
   enum class TableKind {
     kVTable,
@@ -3342,9 +3342,9 @@
     SetRawInputAt(0, cls);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return other->AsClassTableGet()->GetIndex() == index_ &&
         other->AsClassTableGet()->GetPackedFields() == GetPackedFields();
   }
@@ -3373,7 +3373,7 @@
 // PackedSwitch (jump table). A block ending with a PackedSwitch instruction will
 // have one successor for each entry in the switch table, and the final successor
 // will be the block containing the next Dex opcode.
-class HPackedSwitch FINAL : public HExpression<1> {
+class HPackedSwitch final : public HExpression<1> {
  public:
   HPackedSwitch(int32_t start_value,
                 uint32_t num_entries,
@@ -3385,9 +3385,9 @@
     SetRawInputAt(0, input);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   int32_t GetStartValue() const { return start_value_; }
 
@@ -3418,13 +3418,13 @@
   }
 
   // All of the UnaryOperation instructions are clonable.
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   HInstruction* GetInput() const { return InputAt(0); }
   DataType::Type GetResultType() const { return GetType(); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
@@ -3459,7 +3459,7 @@
   }
 
   // All of the BinaryOperation instructions are clonable.
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   HInstruction* GetLeft() const { return InputAt(0); }
   HInstruction* GetRight() const { return InputAt(1); }
@@ -3499,8 +3499,8 @@
     }
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
@@ -3581,7 +3581,7 @@
   ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); }
   void SetBias(ComparisonBias bias) { SetPackedField<ComparisonBiasField>(bias); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return GetPackedFields() == other->AsCondition()->GetPackedFields();
   }
 
@@ -3638,42 +3638,42 @@
 };
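
The mechanical change running through this hunk and the ones below swaps ART's OVERRIDE and FINAL helper macros for the plain C++11 keywords. A minimal standalone sketch of the equivalence, assuming the macros were simple aliases for the keywords (the hypothetical definitions appear only as comments):

    // Presumed historical aliases, not part of this change:
    // #define OVERRIDE override
    // #define FINAL final
    struct HFoo {
      virtual bool IsClonable() const { return false; }
      virtual ~HFoo() = default;
    };
    struct HBar final : HFoo {                           // 'final': no further subclassing.
      bool IsClonable() const override { return true; }  // 'override': the compiler checks the signature.
    };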
 
 // Instruction to check if two inputs are equal to each other.
-class HEqual FINAL : public HCondition {
+class HEqual final : public HCondition {
  public:
   HEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kEqual, first, second, dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
-                      HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HNullConstant* y ATTRIBUTE_UNUSED) const override {
     return MakeConstantCondition(true, GetDexPc());
   }
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HEqual instruction; evaluate it as
   // `Compare(x, y) == 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0),
                                  GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(Equal);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondEQ;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondNE;
   }
 
@@ -3684,42 +3684,42 @@
   template <typename T> static bool Compute(T x, T y) { return x == y; }
 };
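
The Evaluate overloads above are the hooks used for constant folding: when both inputs are constants, the condition collapses to a constant 0/1. A standalone sketch (not ART code) of the long case described in the comment, where a merged HCompare is evaluated as Compare(x, y) == 0:

    #include <cstdint>

    // Three-way compare as produced by the merged HCompare: -1, 0 or 1.
    static int Compare(int64_t x, int64_t y) { return x == y ? 0 : (x < y ? -1 : 1); }

    // Folding HEqual over two long constants.
    static bool FoldLongEqual(int64_t x, int64_t y) { return Compare(x, y) == 0; }
    // FoldLongEqual(3, 3) -> true; FoldLongEqual(3, 4) -> false.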
 
-class HNotEqual FINAL : public HCondition {
+class HNotEqual final : public HCondition {
  public:
   HNotEqual(HInstruction* first, HInstruction* second,
             uint32_t dex_pc = kNoDexPc)
       : HCondition(kNotEqual, first, second, dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
-                      HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HNullConstant* y ATTRIBUTE_UNUSED) const override {
     return MakeConstantCondition(false, GetDexPc());
   }
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HNotEqual instruction; evaluate it as
   // `Compare(x, y) != 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(NotEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondNE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondEQ;
   }
 
@@ -3730,36 +3730,36 @@
   template <typename T> static bool Compute(T x, T y) { return x != y; }
 };
 
-class HLessThan FINAL : public HCondition {
+class HLessThan final : public HCondition {
  public:
   HLessThan(HInstruction* first, HInstruction* second,
             uint32_t dex_pc = kNoDexPc)
       : HCondition(kLessThan, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HLessThan instruction; evaluate it as
   // `Compare(x, y) < 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(LessThan);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondLT;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondGE;
   }
 
@@ -3770,36 +3770,36 @@
   template <typename T> static bool Compute(T x, T y) { return x < y; }
 };
 
-class HLessThanOrEqual FINAL : public HCondition {
+class HLessThanOrEqual final : public HCondition {
  public:
   HLessThanOrEqual(HInstruction* first, HInstruction* second,
                    uint32_t dex_pc = kNoDexPc)
       : HCondition(kLessThanOrEqual, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HLessThanOrEqual instruction; evaluate it as
   // `Compare(x, y) <= 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(LessThanOrEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondLE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondGT;
   }
 
@@ -3810,35 +3810,35 @@
   template <typename T> static bool Compute(T x, T y) { return x <= y; }
 };
 
-class HGreaterThan FINAL : public HCondition {
+class HGreaterThan final : public HCondition {
  public:
   HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kGreaterThan, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HGreaterThan instruction; evaluate it as
   // `Compare(x, y) > 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(GreaterThan);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondGT;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondLE;
   }
 
@@ -3849,35 +3849,35 @@
   template <typename T> static bool Compute(T x, T y) { return x > y; }
 };
 
-class HGreaterThanOrEqual FINAL : public HCondition {
+class HGreaterThanOrEqual final : public HCondition {
  public:
   HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kGreaterThanOrEqual, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HGreaterThanOrEqual instruction; evaluate it as
   // `Compare(x, y) >= 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(GreaterThanOrEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondGE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondLT;
   }
 
@@ -3888,36 +3888,36 @@
   template <typename T> static bool Compute(T x, T y) { return x >= y; }
 };
 
-class HBelow FINAL : public HCondition {
+class HBelow final : public HCondition {
  public:
   HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kBelow, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
 
   DECLARE_INSTRUCTION(Below);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondB;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondAE;
   }
 
@@ -3930,36 +3930,36 @@
   }
 };
 
-class HBelowOrEqual FINAL : public HCondition {
+class HBelowOrEqual final : public HCondition {
  public:
   HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kBelowOrEqual, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
 
   DECLARE_INSTRUCTION(BelowOrEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondBE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondA;
   }
 
@@ -3972,36 +3972,36 @@
   }
 };
 
-class HAbove FINAL : public HCondition {
+class HAbove final : public HCondition {
  public:
   HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kAbove, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
 
   DECLARE_INSTRUCTION(Above);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondA;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondBE;
   }
 
@@ -4014,36 +4014,36 @@
   }
 };
 
-class HAboveOrEqual FINAL : public HCondition {
+class HAboveOrEqual final : public HCondition {
  public:
   HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kAboveOrEqual, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
 
   DECLARE_INSTRUCTION(AboveOrEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondAE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondB;
   }
 
@@ -4058,7 +4058,7 @@
 
 // Instruction to check how two inputs compare to each other.
 // Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
-class HCompare FINAL : public HBinaryOperation {
+class HCompare final : public HBinaryOperation {
  public:
   // Note that `comparison_type` is the type of comparison performed
   // between the comparison's inputs, not the type of the instantiated
@@ -4090,7 +4090,7 @@
     return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y);
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     // Note that there is no "cmp-int" Dex instruction so we shouldn't
     // reach this code path when processing a freshly built HIR
     // graph. However HCompare integer instructions can be synthesized
@@ -4098,17 +4098,17 @@
     // IntegerSignum intrinsics, so we have to handle this case.
     return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return GetPackedFields() == other->AsCompare()->GetPackedFields();
   }
 
@@ -4147,7 +4147,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Compare);
 };
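
The ComputeFP helper in HCompare, std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y), encodes the cmpg/cmpl distinction of the Dex compare instructions: when either operand is NaN, a gt-bias compare yields 1 and an lt-bias compare yields -1. A standalone sketch of that behaviour:

    #include <cmath>

    // Returns -1, 0 or 1; NaN operands resolve according to the bias.
    static int ComputeFP(double x, double y, bool gt_bias) {
      if (std::isunordered(x, y)) {
        return gt_bias ? 1 : -1;  // cmpg-style vs cmpl-style NaN handling.
      }
      return x == y ? 0 : (x < y ? -1 : 1);
    }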
 
-class HNewInstance FINAL : public HExpression<1> {
+class HNewInstance final : public HExpression<1> {
  public:
   HNewInstance(HInstruction* cls,
                uint32_t dex_pc,
@@ -4166,16 +4166,16 @@
     SetRawInputAt(0, cls);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   dex::TypeIndex GetTypeIndex() const { return type_index_; }
   const DexFile& GetDexFile() const { return dex_file_; }
 
   // Calls runtime so needs an environment.
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
   // Can throw an error when out of memory or if the class is not instantiable/accessible.
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   bool NeedsChecks() const {
     return entrypoint_ == kQuickAllocObjectWithChecks;
@@ -4183,7 +4183,7 @@
 
   bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
   QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; }
 
@@ -4237,7 +4237,7 @@
 
 class HInvoke : public HVariableInputSizeInstruction {
  public:
-  bool NeedsEnvironment() const OVERRIDE;
+  bool NeedsEnvironment() const override;
 
   void SetArgumentAt(size_t index, HInstruction* argument) {
     SetRawInputAt(index, argument);
@@ -4270,15 +4270,15 @@
 
   void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
 
-  bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
+  bool CanThrow() const override { return GetPackedFlag<kFlagCanThrow>(); }
 
   void SetAlwaysThrows(bool always_throws) { SetPackedFlag<kFlagAlwaysThrows>(always_throws); }
 
-  bool AlwaysThrows() const OVERRIDE { return GetPackedFlag<kFlagAlwaysThrows>(); }
+  bool AlwaysThrows() const override { return GetPackedFlag<kFlagAlwaysThrows>(); }
 
-  bool CanBeMoved() const OVERRIDE { return IsIntrinsic() && !DoesAnyWrite(); }
+  bool CanBeMoved() const override { return IsIntrinsic() && !DoesAnyWrite(); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return intrinsic_ != Intrinsics::kNone && intrinsic_ == other->AsInvoke()->intrinsic_;
   }
 
@@ -4344,7 +4344,7 @@
   uint32_t intrinsic_optimizations_;
 };
 
-class HInvokeUnresolved FINAL : public HInvoke {
+class HInvokeUnresolved final : public HInvoke {
  public:
   HInvokeUnresolved(ArenaAllocator* allocator,
                     uint32_t number_of_arguments,
@@ -4363,7 +4363,7 @@
                 invoke_type) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   DECLARE_INSTRUCTION(InvokeUnresolved);
 
@@ -4371,7 +4371,7 @@
   DEFAULT_COPY_CONSTRUCTOR(InvokeUnresolved);
 };
 
-class HInvokePolymorphic FINAL : public HInvoke {
+class HInvokePolymorphic final : public HInvoke {
  public:
   HInvokePolymorphic(ArenaAllocator* allocator,
                      uint32_t number_of_arguments,
@@ -4389,7 +4389,7 @@
                 kVirtual) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   DECLARE_INSTRUCTION(InvokePolymorphic);
 
@@ -4397,7 +4397,7 @@
   DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic);
 };
 
-class HInvokeCustom FINAL : public HInvoke {
+class HInvokeCustom final : public HInvoke {
  public:
   HInvokeCustom(ArenaAllocator* allocator,
                 uint32_t number_of_arguments,
@@ -4418,7 +4418,7 @@
 
   uint32_t GetCallSiteIndex() const { return call_site_index_; }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   DECLARE_INSTRUCTION(InvokeCustom);
 
@@ -4429,7 +4429,7 @@
   uint32_t call_site_index_;
 };
 
-class HInvokeStaticOrDirect FINAL : public HInvoke {
+class HInvokeStaticOrDirect final : public HInvoke {
  public:
   // Requirements of this method call regarding the class
   // initialization (clinit) check of its declaring class.
@@ -4518,7 +4518,7 @@
     SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   void SetDispatchInfo(const DispatchInfo& dispatch_info) {
     bool had_current_method_input = HasCurrentMethodInput();
@@ -4548,7 +4548,7 @@
   }
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
     ArrayRef<HUserRecord<HInstruction*>> input_records = HInvoke::GetInputRecords();
     if (kIsDebugBuild && IsStaticWithExplicitClinitCheck()) {
       DCHECK(!input_records.empty());
@@ -4566,13 +4566,13 @@
     return input_records;
   }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
     // We access the method via the dex cache so we can't do an implicit null check.
     // TODO: for intrinsics we can generate implicit null checks.
     return false;
   }
 
-  bool CanBeNull() const OVERRIDE {
+  bool CanBeNull() const override {
     return GetType() == DataType::Type::kReference && !IsStringInit();
   }
 
@@ -4587,7 +4587,7 @@
   MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
   CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
   bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE;
+  bool NeedsDexCacheOfDeclaringClass() const override;
   bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
   bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
   bool HasPcRelativeMethodLoadKind() const {
@@ -4688,7 +4688,7 @@
 std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs);
 std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs);
 
-class HInvokeVirtual FINAL : public HInvoke {
+class HInvokeVirtual final : public HInvoke {
  public:
   HInvokeVirtual(ArenaAllocator* allocator,
                  uint32_t number_of_arguments,
@@ -4709,9 +4709,9 @@
         vtable_index_(vtable_index) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool CanBeNull() const OVERRIDE {
+  bool CanBeNull() const override {
     switch (GetIntrinsic()) {
       case Intrinsics::kThreadCurrentThread:
       case Intrinsics::kStringBufferAppend:
@@ -4724,7 +4724,7 @@
     }
   }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     // TODO: Add implicit null checks in intrinsics.
     return (obj == InputAt(0)) && !IsIntrinsic();
   }
@@ -4741,7 +4741,7 @@
   const uint32_t vtable_index_;
 };
 
-class HInvokeInterface FINAL : public HInvoke {
+class HInvokeInterface final : public HInvoke {
  public:
   HInvokeInterface(ArenaAllocator* allocator,
                    uint32_t number_of_arguments,
@@ -4762,14 +4762,14 @@
         imt_index_(imt_index) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     // TODO: Add implicit null checks in intrinsics.
     return (obj == InputAt(0)) && !IsIntrinsic();
   }
 
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+  bool NeedsDexCacheOfDeclaringClass() const override {
     // The assembly stub currently needs it.
     return true;
   }
@@ -4786,7 +4786,7 @@
   const uint32_t imt_index_;
 };
 
-class HNeg FINAL : public HUnaryOperation {
+class HNeg final : public HUnaryOperation {
  public:
   HNeg(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HUnaryOperation(kNeg, result_type, input, dex_pc) {
@@ -4795,16 +4795,16 @@
 
   template <typename T> static T Compute(T x) { return -x; }
 
-  HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x) const override {
     return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x) const override {
     return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue()), GetDexPc());
   }
 
@@ -4814,7 +4814,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Neg);
 };
 
-class HNewArray FINAL : public HExpression<2> {
+class HNewArray final : public HExpression<2> {
  public:
   HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
       : HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
@@ -4822,15 +4822,15 @@
     SetRawInputAt(1, length);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   // Calls runtime so needs an environment.
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
   // May throw NegativeArraySizeException, OutOfMemoryError, etc.
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
   HLoadClass* GetLoadClass() const {
     DCHECK(InputAt(0)->IsLoadClass());
@@ -4847,7 +4847,7 @@
   DEFAULT_COPY_CONSTRUCTOR(NewArray);
 };
 
-class HAdd FINAL : public HBinaryOperation {
+class HAdd final : public HBinaryOperation {
  public:
   HAdd(DataType::Type result_type,
        HInstruction* left,
@@ -4856,23 +4856,23 @@
       : HBinaryOperation(kAdd, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x + y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -4883,7 +4883,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Add);
 };
 
-class HSub FINAL : public HBinaryOperation {
+class HSub final : public HBinaryOperation {
  public:
   HSub(DataType::Type result_type,
        HInstruction* left,
@@ -4894,19 +4894,19 @@
 
   template <typename T> static T Compute(T x, T y) { return x - y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -4917,7 +4917,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Sub);
 };
 
-class HMul FINAL : public HBinaryOperation {
+class HMul final : public HBinaryOperation {
  public:
   HMul(DataType::Type result_type,
        HInstruction* left,
@@ -4926,23 +4926,23 @@
       : HBinaryOperation(kMul, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x * y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -4953,7 +4953,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Mul);
 };
 
-class HDiv FINAL : public HBinaryOperation {
+class HDiv final : public HBinaryOperation {
  public:
   HDiv(DataType::Type result_type,
        HInstruction* left,
@@ -4978,19 +4978,19 @@
     return x / y;
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -5001,7 +5001,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Div);
 };
 
-class HRem FINAL : public HBinaryOperation {
+class HRem final : public HBinaryOperation {
  public:
   HRem(DataType::Type result_type,
        HInstruction* left,
@@ -5026,19 +5026,19 @@
     return std::fmod(x, y);
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -5049,7 +5049,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Rem);
 };
 
-class HMin FINAL : public HBinaryOperation {
+class HMin final : public HBinaryOperation {
  public:
   HMin(DataType::Type result_type,
        HInstruction* left,
@@ -5057,26 +5057,26 @@
        uint32_t dex_pc)
       : HBinaryOperation(kMin, result_type, left, right, SideEffects::None(), dex_pc) {}
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   // Evaluation for integral values.
   template <typename T> static T ComputeIntegral(T x, T y) {
     return (x <= y) ? x : y;
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // TODO: Evaluation for floating-point values.
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
 
   DECLARE_INSTRUCTION(Min);
 
@@ -5084,7 +5084,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Min);
 };
 
-class HMax FINAL : public HBinaryOperation {
+class HMax final : public HBinaryOperation {
  public:
   HMax(DataType::Type result_type,
        HInstruction* left,
@@ -5092,26 +5092,26 @@
        uint32_t dex_pc)
       : HBinaryOperation(kMax, result_type, left, right, SideEffects::None(), dex_pc) {}
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   // Evaluation for integral values.
   template <typename T> static T ComputeIntegral(T x, T y) {
     return (x >= y) ? x : y;
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // TODO: Evaluation for floating-point values.
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
 
   DECLARE_INSTRUCTION(Max);
 
@@ -5119,7 +5119,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Max);
 };
 
-class HAbs FINAL : public HUnaryOperation {
+class HAbs final : public HUnaryOperation {
  public:
   HAbs(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HUnaryOperation(kAbs, result_type, input, dex_pc) {}
@@ -5139,17 +5139,17 @@
     return bit_cast<T, S>(bits & std::numeric_limits<S>::max());
   }
 
-  HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x) const override {
     return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x) const override {
     return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         ComputeFP<float, int32_t>(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         ComputeFP<double, int64_t>(x->GetValue()), GetDexPc());
   }
@@ -5160,7 +5160,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Abs);
 };
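
ComputeFP in HAbs takes the absolute value of a float or double by clearing the sign bit: the value is bit-cast to the same-width signed integer type S and masked with std::numeric_limits<S>::max(). A standalone sketch of the same trick for float, using memcpy as a portable stand-in for bit_cast:

    #include <cstdint>
    #include <cstring>
    #include <limits>

    static float AbsFloat(float x) {
      int32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits &= std::numeric_limits<int32_t>::max();  // clear the sign bit (0x7fffffff).
      std::memcpy(&x, &bits, sizeof(bits));
      return x;
    }
    // AbsFloat(-2.5f) == 2.5f; the sign of -0.0f is stripped as well.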
 
-class HDivZeroCheck FINAL : public HExpression<1> {
+class HDivZeroCheck final : public HExpression<1> {
  public:
   // `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException`
   // constructor.
@@ -5169,15 +5169,15 @@
     SetRawInputAt(0, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DECLARE_INSTRUCTION(DivZeroCheck);
 
@@ -5185,7 +5185,7 @@
   DEFAULT_COPY_CONSTRUCTOR(DivZeroCheck);
 };
 
-class HShl FINAL : public HBinaryOperation {
+class HShl final : public HBinaryOperation {
  public:
   HShl(DataType::Type result_type,
        HInstruction* value,
@@ -5201,26 +5201,26 @@
     return value << (distance & max_shift_distance);
   }
 
-  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
   }
   HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
-                      HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HLongConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
     UNREACHABLE();
   }
   HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
-                      HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
-                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5231,7 +5231,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Shl);
 };
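
Compute in HShl masks the shift distance (distance & max_shift_distance) before shifting, matching the Dex/Java rule that only the low 5 bits of the distance apply to int shifts and the low 6 bits to long shifts. A standalone sketch, assuming the usual 0x1f/0x3f values for the two constants:

    #include <cstdint>

    static int32_t ShlInt(int32_t value, int32_t distance) {
      return value << (distance & 31);  // kMaxIntShiftDistance
    }
    static int64_t ShlLong(int64_t value, int32_t distance) {
      return value << (distance & 63);  // kMaxLongShiftDistance
    }
    // ShlInt(1, 33) == 2, since the effective distance is 33 & 31 == 1.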
 
-class HShr FINAL : public HBinaryOperation {
+class HShr final : public HBinaryOperation {
  public:
   HShr(DataType::Type result_type,
        HInstruction* value,
@@ -5247,26 +5247,26 @@
     return value >> (distance & max_shift_distance);
   }
 
-  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
   }
   HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
-                      HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HLongConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
     UNREACHABLE();
   }
   HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
-                      HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
-                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5277,7 +5277,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Shr);
 };
 
-class HUShr FINAL : public HBinaryOperation {
+class HUShr final : public HBinaryOperation {
  public:
   HUShr(DataType::Type result_type,
         HInstruction* value,
@@ -5295,26 +5295,26 @@
     return static_cast<T>(ux >> (distance & max_shift_distance));
   }
 
-  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
   }
   HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
-                      HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HLongConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
     UNREACHABLE();
   }
   HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
-                      HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
-                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5325,7 +5325,7 @@
   DEFAULT_COPY_CONSTRUCTOR(UShr);
 };
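
HUShr's Compute reinterprets the value as unsigned before shifting, so zeros rather than copies of the sign bit are shifted in; this corresponds to the Java >>> operator. A standalone sketch for the int case:

    #include <cstdint>

    static int32_t UShrInt(int32_t value, int32_t distance) {
      uint32_t ux = static_cast<uint32_t>(value);          // MakeUnsigned equivalent.
      return static_cast<int32_t>(ux >> (distance & 31));  // logical, not arithmetic, shift.
    }
    // UShrInt(-1, 28) == 15.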
 
-class HAnd FINAL : public HBinaryOperation {
+class HAnd final : public HBinaryOperation {
  public:
   HAnd(DataType::Type result_type,
        HInstruction* left,
@@ -5334,25 +5334,25 @@
       : HBinaryOperation(kAnd, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x & y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5363,7 +5363,7 @@
   DEFAULT_COPY_CONSTRUCTOR(And);
 };
 
-class HOr FINAL : public HBinaryOperation {
+class HOr final : public HBinaryOperation {
  public:
   HOr(DataType::Type result_type,
       HInstruction* left,
@@ -5372,25 +5372,25 @@
       : HBinaryOperation(kOr, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x | y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5401,7 +5401,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Or);
 };
 
-class HXor FINAL : public HBinaryOperation {
+class HXor final : public HBinaryOperation {
  public:
   HXor(DataType::Type result_type,
        HInstruction* left,
@@ -5410,25 +5410,25 @@
       : HBinaryOperation(kXor, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x ^ y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5439,7 +5439,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Xor);
 };
 
-class HRor FINAL : public HBinaryOperation {
+class HRor final : public HBinaryOperation {
  public:
   HRor(DataType::Type result_type, HInstruction* value, HInstruction* distance)
       : HBinaryOperation(kRor, result_type, value, distance) {
@@ -5460,26 +5460,26 @@
     }
   }
 
-  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
   }
   HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
-                      HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HLongConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
     UNREACHABLE();
   }
   HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
-                      HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
-                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5492,7 +5492,7 @@
 
 // The value of a parameter in this method. Its location depends on
 // the calling convention.
-class HParameterValue FINAL : public HExpression<0> {
+class HParameterValue final : public HExpression<0> {
  public:
   HParameterValue(const DexFile& dex_file,
                   dex::TypeIndex type_index,
@@ -5512,7 +5512,7 @@
   uint8_t GetIndex() const { return index_; }
   bool IsThis() const { return GetPackedFlag<kFlagIsThis>(); }
 
-  bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+  bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
   void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
 
   DECLARE_INSTRUCTION(ParameterValue);
@@ -5535,30 +5535,30 @@
   const uint8_t index_;
 };
 
-class HNot FINAL : public HUnaryOperation {
+class HNot final : public HUnaryOperation {
  public:
   HNot(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HUnaryOperation(kNot, result_type, input, dex_pc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
   template <typename T> static T Compute(T x) { return ~x; }
 
-  HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x) const override {
     return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x) const override {
     return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
-  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5569,14 +5569,14 @@
   DEFAULT_COPY_CONSTRUCTOR(Not);
 };
 
-class HBooleanNot FINAL : public HUnaryOperation {
+class HBooleanNot final : public HUnaryOperation {
  public:
   explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HUnaryOperation(kBooleanNot, DataType::Type::kBool, input, dex_pc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
@@ -5585,18 +5585,18 @@
     return !x;
   }
 
-  HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x) const override {
     return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for long values";
     UNREACHABLE();
   }
-  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
-  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5607,7 +5607,7 @@
   DEFAULT_COPY_CONSTRUCTOR(BooleanNot);
 };
 
-class HTypeConversion FINAL : public HExpression<1> {
+class HTypeConversion final : public HExpression<1> {
  public:
   // Instantiate a type conversion of `input` to `result_type`.
   HTypeConversion(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
@@ -5621,9 +5621,9 @@
   DataType::Type GetInputType() const { return GetInput()->GetType(); }
   DataType::Type GetResultType() const { return GetType(); }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
@@ -5639,7 +5639,7 @@
 
 static constexpr uint32_t kNoRegNumber = -1;
 
-class HNullCheck FINAL : public HExpression<1> {
+class HNullCheck final : public HExpression<1> {
  public:
   // `HNullCheck` can trigger GC, as it may call the `NullPointerException`
   // constructor.
@@ -5648,17 +5648,17 @@
     SetRawInputAt(0, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
   DECLARE_INSTRUCTION(NullCheck);
 
@@ -5703,7 +5703,7 @@
   const DexFile& dex_file_;
 };
 
-class HInstanceFieldGet FINAL : public HExpression<1> {
+class HInstanceFieldGet final : public HExpression<1> {
  public:
   HInstanceFieldGet(HInstruction* value,
                     ArtField* field,
@@ -5728,19 +5728,19 @@
     SetRawInputAt(0, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return !IsVolatile(); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     const HInstanceFieldGet* other_get = other->AsInstanceFieldGet();
     return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
   }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
   }
 
-  size_t ComputeHashCode() const OVERRIDE {
+  size_t ComputeHashCode() const override {
     return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
   }
 
@@ -5765,7 +5765,7 @@
   const FieldInfo field_info_;
 };
 
-class HInstanceFieldSet FINAL : public HExpression<2> {
+class HInstanceFieldSet final : public HExpression<2> {
  public:
   HInstanceFieldSet(HInstruction* object,
                     HInstruction* value,
@@ -5792,9 +5792,9 @@
     SetRawInputAt(1, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
   }
 
@@ -5820,7 +5820,7 @@
   const FieldInfo field_info_;
 };
 
-class HArrayGet FINAL : public HExpression<2> {
+class HArrayGet final : public HExpression<2> {
  public:
   HArrayGet(HInstruction* array,
             HInstruction* index,
@@ -5846,12 +5846,12 @@
     SetRawInputAt(1, index);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
     // TODO: We can be smarter here.
     // Currently, unless the array is the result of NewArray, the array access is always
     // preceded by some form of null NullCheck necessary for the bounds check, usually
@@ -5911,7 +5911,7 @@
                 "Too many packed fields.");
 };
 
-class HArraySet FINAL : public HExpression<3> {
+class HArraySet final : public HExpression<3> {
  public:
   HArraySet(HInstruction* array,
             HInstruction* index,
@@ -5943,17 +5943,17 @@
     SetRawInputAt(2, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     // We call a runtime method to throw ArrayStoreException.
     return NeedsTypeCheck();
   }
 
   // Can throw ArrayStoreException.
-  bool CanThrow() const OVERRIDE { return NeedsTypeCheck(); }
+  bool CanThrow() const override { return NeedsTypeCheck(); }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
     // TODO: Same as for ArrayGet.
     return false;
   }
@@ -6030,7 +6030,7 @@
       BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>;
 };
 
-class HArrayLength FINAL : public HExpression<1> {
+class HArrayLength final : public HExpression<1> {
  public:
   HArrayLength(HInstruction* array, uint32_t dex_pc, bool is_string_length = false)
       : HExpression(kArrayLength, DataType::Type::kInt32, SideEffects::None(), dex_pc) {
@@ -6040,12 +6040,12 @@
     SetRawInputAt(0, array);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     return obj == InputAt(0);
   }
 
@@ -6068,7 +6068,7 @@
                 "Too many packed fields.");
 };
 
-class HBoundsCheck FINAL : public HExpression<2> {
+class HBoundsCheck final : public HExpression<2> {
  public:
   // `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
   // constructor.
@@ -6083,15 +6083,15 @@
     SetRawInputAt(1, length);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
 
@@ -6106,16 +6106,16 @@
   static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
 };
 
-class HSuspendCheck FINAL : public HExpression<0> {
+class HSuspendCheck final : public HExpression<0> {
  public:
   explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc)
       : HExpression(kSuspendCheck, SideEffects::CanTriggerGC(), dex_pc),
         slow_path_(nullptr) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     return true;
   }
 
@@ -6141,7 +6141,7 @@
       : HExpression<0>(kNativeDebugInfo, SideEffects::None(), dex_pc) {
   }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     return true;
   }
 
@@ -6154,7 +6154,7 @@
 /**
  * Instruction to load a Class object.
  */
-class HLoadClass FINAL : public HInstruction {
+class HLoadClass final : public HInstruction {
  public:
   // Determines how to load the Class.
   enum class LoadKind {
@@ -6217,7 +6217,7 @@
     SetPackedFlag<kFlagValidLoadedClassRTI>(false);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   void SetLoadKind(LoadKind load_kind);
 
@@ -6231,15 +6231,15 @@
            GetLoadKind() == LoadKind::kBssEntry;
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   bool InstructionDataEquals(const HInstruction* other) const;
 
-  size_t ComputeHashCode() const OVERRIDE { return type_index_.index_; }
+  size_t ComputeHashCode() const override { return type_index_.index_; }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     return CanCallRuntime();
   }
 
@@ -6257,7 +6257,7 @@
            GetLoadKind() == LoadKind::kBssEntry;
   }
 
-  bool CanThrow() const OVERRIDE {
+  bool CanThrow() const override {
     return NeedsAccessCheck() ||
            MustGenerateClinitCheck() ||
            // If the class is in the boot image, the lookup in the runtime call cannot throw.
@@ -6284,7 +6284,7 @@
   dex::TypeIndex GetTypeIndex() const { return type_index_; }
   const DexFile& GetDexFile() const { return dex_file_; }
 
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+  bool NeedsDexCacheOfDeclaringClass() const override {
     return GetLoadKind() == LoadKind::kRuntimeCall;
   }
 
@@ -6311,7 +6311,7 @@
   void AddSpecialInput(HInstruction* special_input);
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
     return ArrayRef<HUserRecord<HInstruction*>>(
         &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
   }
@@ -6392,7 +6392,7 @@
   special_input->AddUseAt(this, 0);
 }
 
-class HLoadString FINAL : public HInstruction {
+class HLoadString final : public HInstruction {
  public:
   // Determines how to load the String.
   enum class LoadKind {
@@ -6436,7 +6436,7 @@
     SetPackedField<LoadKindField>(LoadKind::kRuntimeCall);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   void SetLoadKind(LoadKind load_kind);
 
@@ -6466,15 +6466,15 @@
     string_ = str;
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
+  bool InstructionDataEquals(const HInstruction* other) const override;
 
-  size_t ComputeHashCode() const OVERRIDE { return string_index_.index_; }
+  size_t ComputeHashCode() const override { return string_index_.index_; }
 
   // Will call the runtime if we need to load the string through
   // the dex cache and the string is not guaranteed to be there yet.
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     LoadKind load_kind = GetLoadKind();
     if (load_kind == LoadKind::kBootImageLinkTimePcRelative ||
         load_kind == LoadKind::kBootImageRelRo ||
@@ -6485,12 +6485,12 @@
     return true;
   }
 
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+  bool NeedsDexCacheOfDeclaringClass() const override {
     return GetLoadKind() == LoadKind::kRuntimeCall;
   }
 
-  bool CanBeNull() const OVERRIDE { return false; }
-  bool CanThrow() const OVERRIDE { return NeedsEnvironment(); }
+  bool CanBeNull() const override { return false; }
+  bool CanThrow() const override { return NeedsEnvironment(); }
 
   static SideEffects SideEffectsForArchRuntimeCalls() {
     return SideEffects::CanTriggerGC();
@@ -6499,7 +6499,7 @@
   void AddSpecialInput(HInstruction* special_input);
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
     return ArrayRef<HUserRecord<HInstruction*>>(
         &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
   }
@@ -6561,7 +6561,7 @@
   special_input->AddUseAt(this, 0);
 }
 
-class HLoadMethodHandle FINAL : public HInstruction {
+class HLoadMethodHandle final : public HInstruction {
  public:
   HLoadMethodHandle(HCurrentMethod* current_method,
                     uint16_t method_handle_idx,
@@ -6577,12 +6577,12 @@
   }
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
     return ArrayRef<HUserRecord<HInstruction*>>(
         &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   uint16_t GetMethodHandleIndex() const { return method_handle_idx_; }
 
@@ -6605,7 +6605,7 @@
   const DexFile& dex_file_;
 };
 
-class HLoadMethodType FINAL : public HInstruction {
+class HLoadMethodType final : public HInstruction {
  public:
   HLoadMethodType(HCurrentMethod* current_method,
                   dex::ProtoIndex proto_index,
@@ -6621,12 +6621,12 @@
   }
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() final {
     return ArrayRef<HUserRecord<HInstruction*>>(
         &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   dex::ProtoIndex GetProtoIndex() const { return proto_index_; }
 
@@ -6652,7 +6652,7 @@
 /**
  * Performs an initialization check on its Class object input.
  */
-class HClinitCheck FINAL : public HExpression<1> {
+class HClinitCheck final : public HExpression<1> {
  public:
   HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
       : HExpression(
@@ -6663,17 +6663,17 @@
     SetRawInputAt(0, constant);
   }
   // TODO: Make ClinitCheck clonable.
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     // May call runtime to initialize the class.
     return true;
   }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   HLoadClass* GetLoadClass() const {
     DCHECK(InputAt(0)->IsLoadClass());
@@ -6687,7 +6687,7 @@
   DEFAULT_COPY_CONSTRUCTOR(ClinitCheck);
 };
 
-class HStaticFieldGet FINAL : public HExpression<1> {
+class HStaticFieldGet final : public HExpression<1> {
  public:
   HStaticFieldGet(HInstruction* cls,
                   ArtField* field,
@@ -6713,15 +6713,15 @@
   }
 
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return !IsVolatile(); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     const HStaticFieldGet* other_get = other->AsStaticFieldGet();
     return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
   }
 
-  size_t ComputeHashCode() const OVERRIDE {
+  size_t ComputeHashCode() const override {
     return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
   }
 
@@ -6746,7 +6746,7 @@
   const FieldInfo field_info_;
 };
 
-class HStaticFieldSet FINAL : public HExpression<2> {
+class HStaticFieldSet final : public HExpression<2> {
  public:
   HStaticFieldSet(HInstruction* cls,
                   HInstruction* value,
@@ -6773,7 +6773,7 @@
     SetRawInputAt(1, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
   const FieldInfo& GetFieldInfo() const { return field_info_; }
   MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
   DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
@@ -6797,7 +6797,7 @@
   const FieldInfo field_info_;
 };
 
-class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
+class HUnresolvedInstanceFieldGet final : public HExpression<1> {
  public:
   HUnresolvedInstanceFieldGet(HInstruction* obj,
                               DataType::Type field_type,
@@ -6811,9 +6811,9 @@
     SetRawInputAt(0, obj);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DataType::Type GetFieldType() const { return GetType(); }
   uint32_t GetFieldIndex() const { return field_index_; }
@@ -6827,7 +6827,7 @@
   const uint32_t field_index_;
 };
 
-class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> {
+class HUnresolvedInstanceFieldSet final : public HExpression<2> {
  public:
   HUnresolvedInstanceFieldSet(HInstruction* obj,
                               HInstruction* value,
@@ -6842,9 +6842,9 @@
     SetRawInputAt(1, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
   uint32_t GetFieldIndex() const { return field_index_; }
@@ -6867,7 +6867,7 @@
   const uint32_t field_index_;
 };
 
-class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
+class HUnresolvedStaticFieldGet final : public HExpression<0> {
  public:
   HUnresolvedStaticFieldGet(DataType::Type field_type,
                             uint32_t field_index,
@@ -6879,9 +6879,9 @@
         field_index_(field_index) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DataType::Type GetFieldType() const { return GetType(); }
   uint32_t GetFieldIndex() const { return field_index_; }
@@ -6895,7 +6895,7 @@
   const uint32_t field_index_;
 };
 
-class HUnresolvedStaticFieldSet FINAL : public HExpression<1> {
+class HUnresolvedStaticFieldSet final : public HExpression<1> {
  public:
   HUnresolvedStaticFieldSet(HInstruction* value,
                             DataType::Type field_type,
@@ -6908,9 +6908,9 @@
     SetRawInputAt(0, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
   uint32_t GetFieldIndex() const { return field_index_; }
@@ -6934,13 +6934,13 @@
 };
 
 // Implement the move-exception DEX instruction.
-class HLoadException FINAL : public HExpression<0> {
+class HLoadException final : public HExpression<0> {
  public:
   explicit HLoadException(uint32_t dex_pc = kNoDexPc)
       : HExpression(kLoadException, DataType::Type::kReference, SideEffects::None(), dex_pc) {
   }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
   DECLARE_INSTRUCTION(LoadException);
 
@@ -6950,7 +6950,7 @@
 
 // Implicit part of move-exception which clears thread-local exception storage.
 // Must not be removed because the runtime expects the TLS to get cleared.
-class HClearException FINAL : public HExpression<0> {
+class HClearException final : public HExpression<0> {
  public:
   explicit HClearException(uint32_t dex_pc = kNoDexPc)
       : HExpression(kClearException, SideEffects::AllWrites(), dex_pc) {
@@ -6962,20 +6962,20 @@
   DEFAULT_COPY_CONSTRUCTOR(ClearException);
 };
 
-class HThrow FINAL : public HExpression<1> {
+class HThrow final : public HExpression<1> {
  public:
   HThrow(HInstruction* exception, uint32_t dex_pc)
       : HExpression(kThrow, SideEffects::CanTriggerGC(), dex_pc) {
     SetRawInputAt(0, exception);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
-  bool AlwaysThrows() const OVERRIDE { return true; }
+  bool AlwaysThrows() const override { return true; }
 
   DECLARE_INSTRUCTION(Throw);
 
@@ -7062,10 +7062,10 @@
     return static_cast<uint32_t>(mask->AsIntConstant()->GetValue());
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsInstanceOf() || other->IsCheckCast()) << other->DebugName();
     return GetPackedFields() == down_cast<const HTypeCheckInstruction*>(other)->GetPackedFields();
   }
@@ -7110,7 +7110,7 @@
   Handle<mirror::Class> klass_;
 };
 
-class HInstanceOf FINAL : public HTypeCheckInstruction {
+class HInstanceOf final : public HTypeCheckInstruction {
  public:
   HInstanceOf(HInstruction* object,
               HInstruction* target_class_or_null,
@@ -7132,9 +7132,9 @@
                               bitstring_mask,
                               SideEffectsForArchRuntimeCalls(check_kind)) {}
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     return CanCallRuntime(GetTypeCheckKind());
   }
 
@@ -7153,7 +7153,7 @@
   DEFAULT_COPY_CONSTRUCTOR(InstanceOf);
 };
 
-class HBoundType FINAL : public HExpression<1> {
+class HBoundType final : public HExpression<1> {
  public:
   explicit HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HExpression(kBoundType, DataType::Type::kReference, SideEffects::None(), dex_pc),
@@ -7164,8 +7164,8 @@
     SetRawInputAt(0, input);
   }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
-  bool IsClonable() const OVERRIDE { return true; }
+  bool InstructionDataEquals(const HInstruction* other) const override;
+  bool IsClonable() const override { return true; }
 
   // {Get,Set}Upper* should only be used in reference type propagation.
   const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
@@ -7177,7 +7177,7 @@
     SetPackedFlag<kFlagCanBeNull>(can_be_null);
   }
 
-  bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+  bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
 
   DECLARE_INSTRUCTION(BoundType);
 
@@ -7201,7 +7201,7 @@
   ReferenceTypeInfo upper_bound_;
 };
 
-class HCheckCast FINAL : public HTypeCheckInstruction {
+class HCheckCast final : public HTypeCheckInstruction {
  public:
   HCheckCast(HInstruction* object,
              HInstruction* target_class_or_null,
@@ -7223,13 +7223,13 @@
                               bitstring_mask,
                               SideEffects::CanTriggerGC()) {}
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override {
     // Instruction may throw a CheckCastError.
     return true;
   }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   DECLARE_INSTRUCTION(CheckCast);
 
@@ -7263,7 +7263,7 @@
 };
 std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
 
-class HMemoryBarrier FINAL : public HExpression<0> {
+class HMemoryBarrier final : public HExpression<0> {
  public:
   explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc)
       : HExpression(kMemoryBarrier,
@@ -7272,7 +7272,7 @@
     SetPackedField<BarrierKindField>(barrier_kind);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); }
 
@@ -7348,7 +7348,7 @@
 // * CompilerDriver::RequiresConstructorBarrier
 // * QuasiAtomic::ThreadFenceForConstructor
 //
-class HConstructorFence FINAL : public HVariableInputSizeInstruction {
+class HConstructorFence final : public HVariableInputSizeInstruction {
                                   // A fence has variable inputs because the inputs can be removed
                                   // after prepare_for_register_allocation phase.
                                   // (TODO: In the future a fence could freeze multiple objects
@@ -7445,7 +7445,7 @@
   DEFAULT_COPY_CONSTRUCTOR(ConstructorFence);
 };
 
-class HMonitorOperation FINAL : public HExpression<1> {
+class HMonitorOperation final : public HExpression<1> {
  public:
   enum class OperationKind {
     kEnter,
@@ -7462,9 +7462,9 @@
   }
 
   // Instruction may go into runtime, so we need an environment.
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE {
+  bool CanThrow() const override {
     // Verifier guarantees that monitor-exit cannot throw.
     // This is important because it allows the HGraphBuilder to remove
     // a dead throw-catch loop generated for `synchronized` blocks/methods.
@@ -7490,7 +7490,7 @@
   using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>;
 };
 
-class HSelect FINAL : public HExpression<3> {
+class HSelect final : public HExpression<3> {
  public:
   HSelect(HInstruction* condition,
           HInstruction* true_value,
@@ -7508,17 +7508,17 @@
     SetRawInputAt(2, condition);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
   HInstruction* GetFalseValue() const { return InputAt(0); }
   HInstruction* GetTrueValue() const { return InputAt(1); }
   HInstruction* GetCondition() const { return InputAt(2); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool CanBeNull() const OVERRIDE {
+  bool CanBeNull() const override {
     return GetTrueValue()->CanBeNull() || GetFalseValue()->CanBeNull();
   }
 
@@ -7606,7 +7606,7 @@
 
 static constexpr size_t kDefaultNumberOfMoves = 4;
 
-class HParallelMove FINAL : public HExpression<0> {
+class HParallelMove final : public HExpression<0> {
  public:
   explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc)
       : HExpression(kParallelMove, SideEffects::None(), dex_pc),
@@ -7668,7 +7668,7 @@
 // never used across anything that can trigger GC.
 // The result of this instruction is not a pointer in the sense of `DataType::Type::kreference`.
 // So we represent it by the type `DataType::Type::kInt`.
-class HIntermediateAddress FINAL : public HExpression<2> {
+class HIntermediateAddress final : public HExpression<2> {
  public:
   HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
       : HExpression(kIntermediateAddress,
@@ -7682,12 +7682,12 @@
     SetRawInputAt(1, offset);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool IsActualObject() const OVERRIDE { return false; }
+  bool IsActualObject() const override { return false; }
 
   HInstruction* GetBaseAddress() const { return InputAt(0); }
   HInstruction* GetOffset() const { return InputAt(1); }
@@ -7760,7 +7760,7 @@
 
   // Visit functions that delegate to to super class.
 #define DECLARE_VISIT_INSTRUCTION(name, super)                                        \
-  void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); }
+  void Visit##name(H##name* instr) override { Visit##super(instr); }
 
   FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
@@ -7782,7 +7782,7 @@
   explicit CloneAndReplaceInstructionVisitor(HGraph* graph)
       : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {}
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     if (instruction->IsClonable()) {
       ReplaceInstrOrPhiByClone(instruction);
       instr_replaced_by_clones_count_++;
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index 05b27a7..4993f57 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -30,7 +30,7 @@
                     kNoDexPc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(MipsComputeBaseMethodAddress);
 
@@ -39,7 +39,7 @@
 };
 
 // Mips version of HPackedSwitch that holds a pointer to the base method address.
-class HMipsPackedSwitch FINAL : public HExpression<2> {
+class HMipsPackedSwitch final : public HExpression<2> {
  public:
   HMipsPackedSwitch(int32_t start_value,
                     int32_t num_entries,
@@ -53,7 +53,7 @@
     SetRawInputAt(1, method_base);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   int32_t GetStartValue() const { return start_value_; }
 
@@ -91,7 +91,7 @@
 //
 // Note: as the instruction doesn't involve base array address into computations it has no side
 // effects.
-class HIntermediateArrayAddressIndex FINAL : public HExpression<2> {
+class HIntermediateArrayAddressIndex final : public HExpression<2> {
  public:
   HIntermediateArrayAddressIndex(HInstruction* index, HInstruction* shift, uint32_t dex_pc)
       : HExpression(kIntermediateArrayAddressIndex,
@@ -102,11 +102,11 @@
     SetRawInputAt(1, shift);
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool IsActualObject() const OVERRIDE { return false; }
+  bool IsActualObject() const override { return false; }
 
   HInstruction* GetIndex() const { return InputAt(0); }
   HInstruction* GetShift() const { return InputAt(1); }
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 29358e1..7dcac17 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -24,7 +24,7 @@
 
 namespace art {
 
-class HMultiplyAccumulate FINAL : public HExpression<3> {
+class HMultiplyAccumulate final : public HExpression<3> {
  public:
   HMultiplyAccumulate(DataType::Type type,
                       InstructionKind op,
@@ -39,14 +39,14 @@
     SetRawInputAt(kInputMulRightIndex, mul_right);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   static constexpr int kInputAccumulatorIndex = 0;
   static constexpr int kInputMulLeftIndex = 1;
   static constexpr int kInputMulRightIndex = 2;
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return op_kind_ == other->AsMultiplyAccumulate()->op_kind_;
   }
 
@@ -62,7 +62,7 @@
   const InstructionKind op_kind_;
 };
 
-class HBitwiseNegatedRight FINAL : public HBinaryOperation {
+class HBitwiseNegatedRight final : public HBinaryOperation {
  public:
   HBitwiseNegatedRight(DataType::Type result_type,
                        InstructionKind op,
@@ -97,21 +97,21 @@
     }
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -145,7 +145,7 @@
 //
 // Note: as the instruction doesn't involve base array address into computations it has no side
 // effects (in comparison of HIntermediateAddress).
-class HIntermediateAddressIndex FINAL : public HExpression<3> {
+class HIntermediateAddressIndex final : public HExpression<3> {
  public:
   HIntermediateAddressIndex(
       HInstruction* index, HInstruction* offset, HInstruction* shift, uint32_t dex_pc)
@@ -158,12 +158,12 @@
     SetRawInputAt(2, shift);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool IsActualObject() const OVERRIDE { return false; }
+  bool IsActualObject() const override { return false; }
 
   HInstruction* GetIndex() const { return InputAt(0); }
   HInstruction* GetOffset() const { return InputAt(1); }
@@ -175,7 +175,7 @@
   DEFAULT_COPY_CONSTRUCTOR(IntermediateAddressIndex);
 };
 
-class HDataProcWithShifterOp FINAL : public HExpression<2> {
+class HDataProcWithShifterOp final : public HExpression<2> {
  public:
   enum OpKind {
     kLSL,   // Logical shift left.
@@ -212,9 +212,9 @@
     SetRawInputAt(1, right);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other_instr) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other_instr) const override {
     const HDataProcWithShifterOp* other = other_instr->AsDataProcWithShifterOp();
     return instr_kind_ == other->instr_kind_ &&
         op_kind_ == other->op_kind_ &&
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 95fb5ab..c7539f2 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -117,12 +117,12 @@
   // Note: For newly introduced vector instructions HScheduler${ARCH}::IsSchedulingBarrier must be
   // altered to return true if the instruction might reside outside the SIMD loop body since SIMD
   // registers are not kept alive across vector loop boundaries (yet).
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   // Tests if all data of a vector node (vector length and packed type) is equal.
   // Each concrete implementation that adds more fields should test equality of
   // those fields in its own method *and* call all super methods.
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecOperation());
     const HVecOperation* o = other->AsVecOperation();
     return GetVectorLength() == o->GetVectorLength() && GetPackedType() == o->GetPackedType();
@@ -280,7 +280,7 @@
   HInstruction* GetArray() const { return InputAt(0); }
   HInstruction* GetIndex() const { return InputAt(1); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecMemoryOperation());
     const HVecMemoryOperation* o = other->AsVecMemoryOperation();
     return HVecOperation::InstructionDataEquals(o) && GetAlignment() == o->GetAlignment();
@@ -315,7 +315,7 @@
 
 // Replicates the given scalar into a vector,
 // viz. replicate(x) = [ x, .. , x ].
-class HVecReplicateScalar FINAL : public HVecUnaryOperation {
+class HVecReplicateScalar final : public HVecUnaryOperation {
  public:
   HVecReplicateScalar(ArenaAllocator* allocator,
                       HInstruction* scalar,
@@ -329,7 +329,7 @@
 
   // A replicate needs to stay in place, since SIMD registers are not
   // kept alive across vector loop boundaries (yet).
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(VecReplicateScalar);
 
@@ -341,7 +341,7 @@
 // viz. extract[ x1, .. , xn ] = x_i.
 //
 // TODO: for now only i == 1 case supported.
-class HVecExtractScalar FINAL : public HVecUnaryOperation {
+class HVecExtractScalar final : public HVecUnaryOperation {
  public:
   HVecExtractScalar(ArenaAllocator* allocator,
                     HInstruction* input,
@@ -361,7 +361,7 @@
 
   // An extract needs to stay in place, since SIMD registers are not
   // kept alive across vector loop boundaries (yet).
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(VecExtractScalar);
 
@@ -372,7 +372,7 @@
 // Reduces the given vector into the first element as sum/min/max,
 // viz. sum-reduce[ x1, .. , xn ] = [ y, ---- ], where y = sum xi
 // and the "-" denotes "don't care" (implementation dependent).
-class HVecReduce FINAL : public HVecUnaryOperation {
+class HVecReduce final : public HVecUnaryOperation {
  public:
   enum ReductionKind {
     kSum = 1,
@@ -393,9 +393,9 @@
 
   ReductionKind GetKind() const { return kind_; }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecReduce());
     const HVecReduce* o = other->AsVecReduce();
     return HVecOperation::InstructionDataEquals(o) && GetKind() == o->GetKind();
@@ -412,7 +412,7 @@
 
 // Converts every component in the vector,
 // viz. cnv[ x1, .. , xn ]  = [ cnv(x1), .. , cnv(xn) ].
-class HVecCnv FINAL : public HVecUnaryOperation {
+class HVecCnv final : public HVecUnaryOperation {
  public:
   HVecCnv(ArenaAllocator* allocator,
           HInstruction* input,
@@ -427,7 +427,7 @@
   DataType::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); }
   DataType::Type GetResultType() const { return GetPackedType(); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecCnv);
 
@@ -437,7 +437,7 @@
 
 // Negates every component in the vector,
 // viz. neg[ x1, .. , xn ]  = [ -x1, .. , -xn ].
-class HVecNeg FINAL : public HVecUnaryOperation {
+class HVecNeg final : public HVecUnaryOperation {
  public:
   HVecNeg(ArenaAllocator* allocator,
           HInstruction* input,
@@ -448,7 +448,7 @@
     DCHECK(HasConsistentPackedTypes(input, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecNeg);
 
@@ -459,7 +459,7 @@
 // Takes absolute value of every component in the vector,
 // viz. abs[ x1, .. , xn ]  = [ |x1|, .. , |xn| ]
 // for signed operand x.
-class HVecAbs FINAL : public HVecUnaryOperation {
+class HVecAbs final : public HVecUnaryOperation {
  public:
   HVecAbs(ArenaAllocator* allocator,
           HInstruction* input,
@@ -470,7 +470,7 @@
     DCHECK(HasConsistentPackedTypes(input, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecAbs);
 
@@ -481,7 +481,7 @@
 // Bitwise- or boolean-nots every component in the vector,
 // viz. not[ x1, .. , xn ]  = [ ~x1, .. , ~xn ], or
 //      not[ x1, .. , xn ]  = [ !x1, .. , !xn ] for boolean.
-class HVecNot FINAL : public HVecUnaryOperation {
+class HVecNot final : public HVecUnaryOperation {
  public:
   HVecNot(ArenaAllocator* allocator,
           HInstruction* input,
@@ -492,7 +492,7 @@
     DCHECK(input->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecNot);
 
@@ -506,7 +506,7 @@
 
 // Adds every component in the two vectors,
 // viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ].
-class HVecAdd FINAL : public HVecBinaryOperation {
+class HVecAdd final : public HVecBinaryOperation {
  public:
   HVecAdd(ArenaAllocator* allocator,
           HInstruction* left,
@@ -519,7 +519,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecAdd);
 
@@ -530,7 +530,7 @@
 // Adds every component in the two vectors using saturation arithmetic,
 // viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 +_sat y1, .. , xn +_sat yn ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecSaturationAdd FINAL : public HVecBinaryOperation {
+class HVecSaturationAdd final : public HVecBinaryOperation {
  public:
   HVecSaturationAdd(ArenaAllocator* allocator,
                     HInstruction* left,
@@ -544,7 +544,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecSaturationAdd);
 
@@ -556,7 +556,7 @@
 // rounded   [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
 // truncated [ x1, .. , xn ] hadd  [ y1, .. , yn ] = [ (x1 + y1)     >> 1, .. , (xn + yn )    >> 1 ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecHalvingAdd FINAL : public HVecBinaryOperation {
+class HVecHalvingAdd final : public HVecBinaryOperation {
  public:
   HVecHalvingAdd(ArenaAllocator* allocator,
                  HInstruction* left,
@@ -574,9 +574,9 @@
 
   bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecHalvingAdd());
     const HVecHalvingAdd* o = other->AsVecHalvingAdd();
     return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded();
@@ -596,7 +596,7 @@
 
 // Subtracts every component in the two vectors,
 // viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ].
-class HVecSub FINAL : public HVecBinaryOperation {
+class HVecSub final : public HVecBinaryOperation {
  public:
   HVecSub(ArenaAllocator* allocator,
           HInstruction* left,
@@ -609,7 +609,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecSub);
 
@@ -620,7 +620,7 @@
 // Subtracts every component in the two vectors using saturation arithmetic,
 // viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 -_sat y1, .. , xn -_sat yn ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecSaturationSub FINAL : public HVecBinaryOperation {
+class HVecSaturationSub final : public HVecBinaryOperation {
  public:
   HVecSaturationSub(ArenaAllocator* allocator,
                     HInstruction* left,
@@ -634,7 +634,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecSaturationSub);
 
@@ -644,7 +644,7 @@
 
 // Multiplies every component in the two vectors,
 // viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ].
-class HVecMul FINAL : public HVecBinaryOperation {
+class HVecMul final : public HVecBinaryOperation {
  public:
   HVecMul(ArenaAllocator* allocator,
           HInstruction* left,
@@ -657,7 +657,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecMul);
 
@@ -667,7 +667,7 @@
 
 // Divides every component in the two vectors,
 // viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ].
-class HVecDiv FINAL : public HVecBinaryOperation {
+class HVecDiv final : public HVecBinaryOperation {
  public:
   HVecDiv(ArenaAllocator* allocator,
           HInstruction* left,
@@ -680,7 +680,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecDiv);
 
@@ -691,7 +691,7 @@
 // Takes minimum of every component in the two vectors,
 // viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecMin FINAL : public HVecBinaryOperation {
+class HVecMin final : public HVecBinaryOperation {
  public:
   HVecMin(ArenaAllocator* allocator,
           HInstruction* left,
@@ -704,7 +704,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecMin);
 
@@ -715,7 +715,7 @@
 // Takes maximum of every component in the two vectors,
 // viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecMax FINAL : public HVecBinaryOperation {
+class HVecMax final : public HVecBinaryOperation {
  public:
   HVecMax(ArenaAllocator* allocator,
           HInstruction* left,
@@ -728,7 +728,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecMax);
 
@@ -738,7 +738,7 @@
 
 // Bitwise-ands every component in the two vectors,
 // viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ].
-class HVecAnd FINAL : public HVecBinaryOperation {
+class HVecAnd final : public HVecBinaryOperation {
  public:
   HVecAnd(ArenaAllocator* allocator,
           HInstruction* left,
@@ -750,7 +750,7 @@
     DCHECK(left->IsVecOperation() && right->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecAnd);
 
@@ -760,7 +760,7 @@
 
 // Bitwise-and-nots every component in the two vectors,
 // viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ].
-class HVecAndNot FINAL : public HVecBinaryOperation {
+class HVecAndNot final : public HVecBinaryOperation {
  public:
   HVecAndNot(ArenaAllocator* allocator,
              HInstruction* left,
@@ -773,7 +773,7 @@
     DCHECK(left->IsVecOperation() && right->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecAndNot);
 
@@ -783,7 +783,7 @@
 
 // Bitwise-ors every component in the two vectors,
 // viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ].
-class HVecOr FINAL : public HVecBinaryOperation {
+class HVecOr final : public HVecBinaryOperation {
  public:
   HVecOr(ArenaAllocator* allocator,
          HInstruction* left,
@@ -795,7 +795,7 @@
     DCHECK(left->IsVecOperation() && right->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecOr);
 
@@ -805,7 +805,7 @@
 
 // Bitwise-xors every component in the two vectors,
 // viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ].
-class HVecXor FINAL : public HVecBinaryOperation {
+class HVecXor final : public HVecBinaryOperation {
  public:
   HVecXor(ArenaAllocator* allocator,
           HInstruction* left,
@@ -817,7 +817,7 @@
     DCHECK(left->IsVecOperation() && right->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecXor);
 
@@ -827,7 +827,7 @@
 
 // Logically shifts every component in the vector left by the given distance,
 // viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ].
-class HVecShl FINAL : public HVecBinaryOperation {
+class HVecShl final : public HVecBinaryOperation {
  public:
   HVecShl(ArenaAllocator* allocator,
           HInstruction* left,
@@ -839,7 +839,7 @@
     DCHECK(HasConsistentPackedTypes(left, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecShl);
 
@@ -849,7 +849,7 @@
 
 // Arithmetically shifts every component in the vector right by the given distance,
 // viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ].
-class HVecShr FINAL : public HVecBinaryOperation {
+class HVecShr final : public HVecBinaryOperation {
  public:
   HVecShr(ArenaAllocator* allocator,
           HInstruction* left,
@@ -861,7 +861,7 @@
     DCHECK(HasConsistentPackedTypes(left, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecShr);
 
@@ -871,7 +871,7 @@
 
 // Logically shifts every component in the vector right by the given distance,
 // viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ].
-class HVecUShr FINAL : public HVecBinaryOperation {
+class HVecUShr final : public HVecBinaryOperation {
  public:
   HVecUShr(ArenaAllocator* allocator,
            HInstruction* left,
@@ -883,7 +883,7 @@
     DCHECK(HasConsistentPackedTypes(left, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecUShr);
 
@@ -898,7 +898,7 @@
 // Assigns the given scalar elements to a vector,
 // viz. set( array(x1, .. , xn) ) = [ x1, .. ,            xn ] if n == m,
 //      set( array(x1, .. , xm) ) = [ x1, .. , xm, 0, .. , 0 ] if m <  n.
-class HVecSetScalars FINAL : public HVecOperation {
+class HVecSetScalars final : public HVecOperation {
  public:
   HVecSetScalars(ArenaAllocator* allocator,
                  HInstruction* scalars[],
@@ -921,7 +921,7 @@
 
   // Setting scalars needs to stay in place, since SIMD registers are not
   // kept alive across vector loop boundaries (yet).
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(VecSetScalars);
 
@@ -934,7 +934,7 @@
 // For floating point types, Java rounding behavior must be preserved; the products are rounded to
 // the proper precision before being added. "Fused" multiply-add operations available on several
 // architectures are not usable since they would violate Java language rules.
-class HVecMultiplyAccumulate FINAL : public HVecOperation {
+class HVecMultiplyAccumulate final : public HVecOperation {
  public:
   HVecMultiplyAccumulate(ArenaAllocator* allocator,
                          InstructionKind op,
@@ -964,9 +964,9 @@
     SetRawInputAt(2, mul_right);
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecMultiplyAccumulate());
     const HVecMultiplyAccumulate* o = other->AsVecMultiplyAccumulate();
     return HVecOperation::InstructionDataEquals(o) && GetOpKind() == o->GetOpKind();
@@ -989,7 +989,7 @@
 // viz. SAD([ a1, .. , am ], [ x1, .. , xn ], [ y1, .. , yn ]) =
 //          [ a1 + sum abs(xi-yi), .. , am + sum abs(xj-yj) ],
 //      for m <= n, non-overlapping sums, and signed operands x, y.
-class HVecSADAccumulate FINAL : public HVecOperation {
+class HVecSADAccumulate final : public HVecOperation {
  public:
   HVecSADAccumulate(ArenaAllocator* allocator,
                     HInstruction* accumulator,
@@ -1023,7 +1023,7 @@
 
 // Loads a vector from memory, viz. load(mem, 1)
 // yields the vector [ mem(1), .. , mem(n) ].
-class HVecLoad FINAL : public HVecMemoryOperation {
+class HVecLoad final : public HVecMemoryOperation {
  public:
   HVecLoad(ArenaAllocator* allocator,
            HInstruction* base,
@@ -1047,9 +1047,9 @@
 
   bool IsStringCharAt() const { return GetPackedFlag<kFieldIsStringCharAt>(); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecLoad());
     const HVecLoad* o = other->AsVecLoad();
     return HVecMemoryOperation::InstructionDataEquals(o) && IsStringCharAt() == o->IsStringCharAt();
@@ -1069,7 +1069,7 @@
 
 // Stores a vector to memory, viz. store(m, 1, [x1, .. , xn] )
 // sets mem(1) = x1, .. , mem(n) = xn.
-class HVecStore FINAL : public HVecMemoryOperation {
+class HVecStore final : public HVecMemoryOperation {
  public:
   HVecStore(ArenaAllocator* allocator,
             HInstruction* base,
@@ -1093,7 +1093,7 @@
   }
 
   // A store needs to stay in place.
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(VecStore);
 
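
The vector nodes above all share one contract worth spelling out: CanBeMoved() returns true for pure value computations, and false for anything that must stay anchored in place (HVecSetScalars and HVecStore). Below is a minimal stand-alone sketch of that pattern with the plain C++11 final/override keywords; it uses made-up class names, is illustrative only, and is not ART code.

#include <iostream>

// Illustrative only (not ART code): the final/override + CanBeMoved() pattern
// used by the vector nodes above.
struct Node {
  virtual ~Node() = default;
  virtual bool CanBeMoved() const { return false; }
};

// A pure value computation: a candidate for code motion or deduplication.
struct VecAddLike final : Node {
  bool CanBeMoved() const override { return true; }
};

// A store must stay in place, exactly like HVecStore above.
struct VecStoreLike final : Node {
  bool CanBeMoved() const override { return false; }
};

int main() {
  VecAddLike add;
  VecStoreLike store;
  std::cout << add.CanBeMoved() << " " << store.CanBeMoved() << "\n";  // prints "1 0"
  return 0;
}
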
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index d1e7f68..a551104 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -20,7 +20,7 @@
 namespace art {
 
 // Compute the address of the method for X86 Constant area support.
-class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
+class HX86ComputeBaseMethodAddress final : public HExpression<0> {
  public:
   // Treat the value as an int32_t, but it is really a 32 bit native pointer.
   HX86ComputeBaseMethodAddress()
@@ -30,7 +30,7 @@
                     kNoDexPc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
 
@@ -39,7 +39,7 @@
 };
 
 // Load a constant value from the constant table.
-class HX86LoadFromConstantTable FINAL : public HExpression<2> {
+class HX86LoadFromConstantTable final : public HExpression<2> {
  public:
   HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base,
                             HConstant* constant)
@@ -66,7 +66,7 @@
 };
 
 // Version of HNeg with access to the constant table for FP types.
-class HX86FPNeg FINAL : public HExpression<2> {
+class HX86FPNeg final : public HExpression<2> {
  public:
   HX86FPNeg(DataType::Type result_type,
             HInstruction* input,
@@ -89,7 +89,7 @@
 };
 
 // X86 version of HPackedSwitch that holds a pointer to the base method address.
-class HX86PackedSwitch FINAL : public HExpression<2> {
+class HX86PackedSwitch final : public HExpression<2> {
  public:
   HX86PackedSwitch(int32_t start_value,
                    int32_t num_entries,
@@ -103,7 +103,7 @@
     SetRawInputAt(1, method_base);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   int32_t GetStartValue() const { return start_value_; }
 
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 04301f5..be1f7ea 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -133,7 +133,7 @@
       return memory_.data();
     }
 
-    ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+    ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
 
    private:
     std::vector<uint8_t> memory_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f52b96d..0a74705 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -74,7 +74,7 @@
 /**
  * Used by the code generator, to allocate the code in a vector.
  */
-class CodeVectorAllocator FINAL : public CodeAllocator {
+class CodeVectorAllocator final : public CodeAllocator {
  public:
   explicit CodeVectorAllocator(ArenaAllocator* allocator)
       : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
@@ -84,7 +84,7 @@
     return &memory_[0];
   }
 
-  ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+  ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
   uint8_t* GetData() { return memory_.data(); }
 
  private:
@@ -264,12 +264,12 @@
   PassObserver* const pass_observer_;
 };
 
-class OptimizingCompiler FINAL : public Compiler {
+class OptimizingCompiler final : public Compiler {
  public:
   explicit OptimizingCompiler(CompilerDriver* driver);
-  ~OptimizingCompiler() OVERRIDE;
+  ~OptimizingCompiler() override;
 
-  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE;
+  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
 
   CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                           uint32_t access_flags,
@@ -278,29 +278,29 @@
                           uint32_t method_idx,
                           Handle<mirror::ClassLoader> class_loader,
                           const DexFile& dex_file,
-                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;
+                          Handle<mirror::DexCache> dex_cache) const override;
 
   CompiledMethod* JniCompile(uint32_t access_flags,
                              uint32_t method_idx,
                              const DexFile& dex_file,
-                             Handle<mirror::DexCache> dex_cache) const OVERRIDE;
+                             Handle<mirror::DexCache> dex_cache) const override;
 
-  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
+  uintptr_t GetEntryPointOf(ArtMethod* method) const override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
         InstructionSetPointerSize(GetCompilerDriver()->GetCompilerOptions().GetInstructionSet())));
   }
 
-  void Init() OVERRIDE;
+  void Init() override;
 
-  void UnInit() const OVERRIDE;
+  void UnInit() const override;
 
   bool JitCompile(Thread* self,
                   jit::JitCodeCache* code_cache,
                   ArtMethod* method,
                   bool osr,
                   jit::JitLogger* jit_logger)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index e6e069f..5fadcab 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -58,7 +58,7 @@
   virtual ~ParallelMoveResolverWithSwap() {}
 
   // Resolve a set of parallel moves, emitting assembler instructions.
-  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
+  void EmitNativeCode(HParallelMove* parallel_move) override;
 
  protected:
   class ScratchRegisterScope : public ValueObject {
@@ -133,7 +133,7 @@
   virtual ~ParallelMoveResolverNoSwap() {}
 
   // Resolve a set of parallel moves, emitting assembler instructions.
-  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
+  void EmitNativeCode(HParallelMove* parallel_move) override;
 
  protected:
   // Called at the beginning of EmitNativeCode(). A subclass may put some architecture dependent
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index be35201..399a6d8 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -56,7 +56,7 @@
   explicit TestParallelMoveResolverWithSwap(ArenaAllocator* allocator)
       : ParallelMoveResolverWithSwap(allocator) {}
 
-  void EmitMove(size_t index) OVERRIDE {
+  void EmitMove(size_t index) override {
     MoveOperands* move = moves_[index];
     if (!message_.str().empty()) {
       message_ << " ";
@@ -68,7 +68,7 @@
     message_ << ")";
   }
 
-  void EmitSwap(size_t index) OVERRIDE {
+  void EmitSwap(size_t index) override {
     MoveOperands* move = moves_[index];
     if (!message_.str().empty()) {
       message_ << " ";
@@ -80,8 +80,8 @@
     message_ << ")";
   }
 
-  void SpillScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
-  void RestoreScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
+  void SpillScratch(int reg ATTRIBUTE_UNUSED) override {}
+  void RestoreScratch(int reg ATTRIBUTE_UNUSED) override {}
 
   std::string GetMessage() const {
     return  message_.str();
@@ -99,13 +99,13 @@
   explicit TestParallelMoveResolverNoSwap(ArenaAllocator* allocator)
       : ParallelMoveResolverNoSwap(allocator), scratch_index_(kScratchRegisterStartIndexForTest) {}
 
-  void PrepareForEmitNativeCode() OVERRIDE {
+  void PrepareForEmitNativeCode() override {
     scratch_index_ = kScratchRegisterStartIndexForTest;
   }
 
-  void FinishEmitNativeCode() OVERRIDE {}
+  void FinishEmitNativeCode() override {}
 
-  Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE {
+  Location AllocateScratchLocationFor(Location::Kind kind) override {
     if (kind == Location::kStackSlot || kind == Location::kFpuRegister ||
         kind == Location::kRegister) {
       kind = Location::kRegister;
@@ -125,9 +125,9 @@
     return scratch;
   }
 
-  void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) OVERRIDE {}
+  void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) override {}
 
-  void EmitMove(size_t index) OVERRIDE {
+  void EmitMove(size_t index) override {
     MoveOperands* move = moves_[index];
     if (!message_.str().empty()) {
       message_ << " ";
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index a7e97a1..05208ff 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -58,7 +58,7 @@
     DCHECK(base_ != nullptr);
   }
 
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
     // If this is an invoke with PC-relative load kind,
     // we need to add the base as the special input.
     if (invoke->HasPcRelativeMethodLoadKind() &&
@@ -70,7 +70,7 @@
     }
   }
 
-  void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+  void VisitLoadClass(HLoadClass* load_class) override {
     HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
     switch (load_kind) {
       case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
@@ -86,7 +86,7 @@
     }
   }
 
-  void VisitLoadString(HLoadString* load_string) OVERRIDE {
+  void VisitLoadString(HLoadString* load_string) override {
     HLoadString::LoadKind load_kind = load_string->GetLoadKind();
     switch (load_kind) {
       case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
@@ -102,7 +102,7 @@
     }
   }
 
-  void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+  void VisitPackedSwitch(HPackedSwitch* switch_insn) override {
     if (switch_insn->GetNumEntries() <=
         InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
       return;
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index 6dd1ee0..872370b 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -34,7 +34,7 @@
 
   static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
  private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 41f2f77..4b07d5b 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -42,53 +42,53 @@
   }
 
  private:
-  void VisitAdd(HAdd* add) OVERRIDE {
+  void VisitAdd(HAdd* add) override {
     BinaryFP(add);
   }
 
-  void VisitSub(HSub* sub) OVERRIDE {
+  void VisitSub(HSub* sub) override {
     BinaryFP(sub);
   }
 
-  void VisitMul(HMul* mul) OVERRIDE {
+  void VisitMul(HMul* mul) override {
     BinaryFP(mul);
   }
 
-  void VisitDiv(HDiv* div) OVERRIDE {
+  void VisitDiv(HDiv* div) override {
     BinaryFP(div);
   }
 
-  void VisitCompare(HCompare* compare) OVERRIDE {
+  void VisitCompare(HCompare* compare) override {
     BinaryFP(compare);
   }
 
-  void VisitReturn(HReturn* ret) OVERRIDE {
+  void VisitReturn(HReturn* ret) override {
     HConstant* value = ret->InputAt(0)->AsConstant();
     if ((value != nullptr && DataType::IsFloatingPointType(value->GetType()))) {
       ReplaceInput(ret, value, 0, true);
     }
   }
 
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+  void VisitInvokeInterface(HInvokeInterface* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+  void VisitLoadClass(HLoadClass* load_class) override {
     if (load_class->HasPcRelativeLoadKind()) {
       HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_class);
       load_class->AddSpecialInput(method_address);
     }
   }
 
-  void VisitLoadString(HLoadString* load_string) OVERRIDE {
+  void VisitLoadString(HLoadString* load_string) override {
     if (load_string->HasPcRelativeLoadKind()) {
       HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_string);
       load_string->AddSpecialInput(method_address);
@@ -102,31 +102,31 @@
     }
   }
 
-  void VisitEqual(HEqual* cond) OVERRIDE {
+  void VisitEqual(HEqual* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitNotEqual(HNotEqual* cond) OVERRIDE {
+  void VisitNotEqual(HNotEqual* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitLessThan(HLessThan* cond) OVERRIDE {
+  void VisitLessThan(HLessThan* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitLessThanOrEqual(HLessThanOrEqual* cond) OVERRIDE {
+  void VisitLessThanOrEqual(HLessThanOrEqual* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitGreaterThan(HGreaterThan* cond) OVERRIDE {
+  void VisitGreaterThan(HGreaterThan* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) OVERRIDE {
+  void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitNeg(HNeg* neg) OVERRIDE {
+  void VisitNeg(HNeg* neg) override {
     if (DataType::IsFloatingPointType(neg->GetType())) {
       // We need to replace the HNeg with a HX86FPNeg in order to address the constant area.
       HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg);
@@ -141,7 +141,7 @@
     }
   }
 
-  void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+  void VisitPackedSwitch(HPackedSwitch* switch_insn) override {
     if (switch_insn->GetNumEntries() <=
         InstructionCodeGeneratorX86::kPackedSwitchJumpTableThreshold) {
       return;
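
In the visitor above, every FP-capable binary opcode forwards to the shared BinaryFP() helper, so the constant-area fixup logic is written exactly once. A trivial stand-alone sketch of that funnelling style follows; the visitor and helper names are made up and it is not ART code.

#include <iostream>
#include <string>

// Sketch only: many opcode-specific Visit methods, one shared helper.
class BinaryFPFixupVisitor {
 public:
  void VisitAdd() { BinaryFP("add"); }
  void VisitSub() { BinaryFP("sub"); }
  void VisitMul() { BinaryFP("mul"); }
  void VisitDiv() { BinaryFP("div"); }

 private:
  void BinaryFP(const std::string& op) {
    // In the real pass this is where a PC-relative base input would be attached
    // to a floating-point constant operand, if the operation has one.
    std::cout << "fixup candidate: " << op << "\n";
  }
};

int main() {
  BinaryFPFixupVisitor visitor;
  visitor.VisitAdd();
  visitor.VisitSub();
  visitor.VisitMul();
  visitor.VisitDiv();
  return 0;
}
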
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index db56b7f..3b470a6 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -34,7 +34,7 @@
 
   static constexpr const char* kPcRelativeFixupsX86PassName  = "pc_relative_fixups_x86";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
  private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index 2978add..a8ab256 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -43,18 +43,18 @@
       "prepare_for_register_allocation";
 
  private:
-  void VisitCheckCast(HCheckCast* check_cast) OVERRIDE;
-  void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE;
-  void VisitNullCheck(HNullCheck* check) OVERRIDE;
-  void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
-  void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
-  void VisitBoundType(HBoundType* bound_type) OVERRIDE;
-  void VisitArraySet(HArraySet* instruction) OVERRIDE;
-  void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
-  void VisitCondition(HCondition* condition) OVERRIDE;
-  void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE;
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
-  void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
+  void VisitCheckCast(HCheckCast* check_cast) override;
+  void VisitInstanceOf(HInstanceOf* instance_of) override;
+  void VisitNullCheck(HNullCheck* check) override;
+  void VisitDivZeroCheck(HDivZeroCheck* check) override;
+  void VisitBoundsCheck(HBoundsCheck* check) override;
+  void VisitBoundType(HBoundType* bound_type) override;
+  void VisitArraySet(HArraySet* instruction) override;
+  void VisitClinitCheck(HClinitCheck* check) override;
+  void VisitCondition(HCondition* condition) override;
+  void VisitConstructorFence(HConstructorFence* constructor_fence) override;
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
+  void VisitDeoptimize(HDeoptimize* deoptimize) override;
 
   bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
   bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index c6579dc..8ef9ce4 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -33,7 +33,7 @@
     PrintString(": ");
   }
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     PrintPreInstruction(instruction);
     PrintString(instruction->DebugName());
     PrintPostInstruction(instruction);
@@ -70,7 +70,7 @@
     PrintNewLine();
   }
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     PrintString("BasicBlock ");
     PrintInt(block->GetBlockId());
     const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
@@ -108,15 +108,15 @@
   explicit StringPrettyPrinter(HGraph* graph)
       : HPrettyPrinter(graph), str_(""), current_block_(nullptr) { }
 
-  void PrintInt(int value) OVERRIDE {
+  void PrintInt(int value) override {
     str_ += android::base::StringPrintf("%d", value);
   }
 
-  void PrintString(const char* value) OVERRIDE {
+  void PrintString(const char* value) override {
     str_ += value;
   }
 
-  void PrintNewLine() OVERRIDE {
+  void PrintNewLine() override {
     str_ += '\n';
   }
 
@@ -124,12 +124,12 @@
 
   std::string str() const { return str_; }
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     current_block_ = block;
     HPrettyPrinter::VisitBasicBlock(block);
   }
 
-  void VisitGoto(HGoto* gota) OVERRIDE {
+  void VisitGoto(HGoto* gota) override {
     PrintString("  ");
     PrintInt(gota->GetId());
     PrintString(": Goto ");
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 0d62248..a9d5902 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -94,26 +94,26 @@
     worklist_.reserve(kDefaultWorklistSize);
   }
 
-  void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE;
-  void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
-  void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
-  void VisitInstanceOf(HInstanceOf* load_class) OVERRIDE;
-  void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
-  void VisitLoadMethodHandle(HLoadMethodHandle* instr) OVERRIDE;
-  void VisitLoadMethodType(HLoadMethodType* instr) OVERRIDE;
-  void VisitLoadString(HLoadString* instr) OVERRIDE;
-  void VisitLoadException(HLoadException* instr) OVERRIDE;
-  void VisitNewArray(HNewArray* instr) OVERRIDE;
-  void VisitParameterValue(HParameterValue* instr) OVERRIDE;
-  void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
-  void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
-  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
-  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE;
-  void VisitInvoke(HInvoke* instr) OVERRIDE;
-  void VisitArrayGet(HArrayGet* instr) OVERRIDE;
-  void VisitCheckCast(HCheckCast* instr) OVERRIDE;
-  void VisitBoundType(HBoundType* instr) OVERRIDE;
-  void VisitNullCheck(HNullCheck* instr) OVERRIDE;
+  void VisitDeoptimize(HDeoptimize* deopt) override;
+  void VisitNewInstance(HNewInstance* new_instance) override;
+  void VisitLoadClass(HLoadClass* load_class) override;
+  void VisitInstanceOf(HInstanceOf* load_class) override;
+  void VisitClinitCheck(HClinitCheck* clinit_check) override;
+  void VisitLoadMethodHandle(HLoadMethodHandle* instr) override;
+  void VisitLoadMethodType(HLoadMethodType* instr) override;
+  void VisitLoadString(HLoadString* instr) override;
+  void VisitLoadException(HLoadException* instr) override;
+  void VisitNewArray(HNewArray* instr) override;
+  void VisitParameterValue(HParameterValue* instr) override;
+  void VisitInstanceFieldGet(HInstanceFieldGet* instr) override;
+  void VisitStaticFieldGet(HStaticFieldGet* instr) override;
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) override;
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) override;
+  void VisitInvoke(HInvoke* instr) override;
+  void VisitArrayGet(HArrayGet* instr) override;
+  void VisitCheckCast(HCheckCast* instr) override;
+  void VisitBoundType(HBoundType* instr) override;
+  void VisitNullCheck(HNullCheck* instr) override;
   void VisitPhi(HPhi* phi);
 
   void VisitBasicBlock(HBasicBlock* block);
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index d36d592..7c6a048 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -40,7 +40,7 @@
   // Visit a single instruction.
   void Visit(HInstruction* instruction);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   // Returns true if klass is admissible to the propagation: non-null and resolved.
   // For an array type, we also check if the component type is admissible.
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 3072c92..16131e1 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -90,9 +90,9 @@
                               CodeGenerator* codegen,
                               const SsaLivenessAnalysis& analysis,
                               bool iterative_move_coalescing = true);
-  ~RegisterAllocatorGraphColor() OVERRIDE;
+  ~RegisterAllocatorGraphColor() override;
 
-  void AllocateRegisters() OVERRIDE;
+  void AllocateRegisters() override;
 
   bool Validate(bool log_fatal_on_failure);
 
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 216fb57..1e00003 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -312,7 +312,7 @@
 
   for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) {
     HInstruction* safepoint = safepoints_[safepoint_index - 1u];
-    size_t safepoint_position = safepoint->GetLifetimePosition();
+    size_t safepoint_position = SafepointPosition::ComputePosition(safepoint);
 
     // Test that safepoints are ordered in the optimal way.
     DCHECK(safepoint_index == safepoints_.size() ||
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index 36788b7..4d445c7 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -42,11 +42,11 @@
   RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
                               CodeGenerator* codegen,
                               const SsaLivenessAnalysis& analysis);
-  ~RegisterAllocatorLinearScan() OVERRIDE;
+  ~RegisterAllocatorLinearScan() override;
 
-  void AllocateRegisters() OVERRIDE;
+  void AllocateRegisters() override;
 
-  bool Validate(bool log_fatal_on_failure) OVERRIDE {
+  bool Validate(bool log_fatal_on_failure) override {
     processing_core_registers_ = true;
     if (!ValidateInternal(log_fatal_on_failure)) {
       return false;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 7144775..db6a760 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -40,7 +40,7 @@
 
 class RegisterAllocatorTest : public OptimizingUnitTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     // This test is using the x86 ISA.
     OverrideInstructionSetFeatures(InstructionSet::kX86, "default");
     OptimizingUnitTest::SetUp();
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index fd48d84..48e80f5 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -339,7 +339,7 @@
         last_visited_latency_(0),
         last_visited_internal_latency_(0) {}
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Error visiting " << instruction->DebugName() << ". "
         "Architecture-specific scheduling latency visitors must handle all instructions"
         " (potentially by overriding the generic `VisitInstruction()`.";
@@ -392,7 +392,7 @@
   }
 
   SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
-                                         const SchedulingGraph& graph) OVERRIDE {
+                                         const SchedulingGraph& graph) override {
     UNUSED(graph);
     DCHECK(!nodes->empty());
     size_t select = rand_r(&seed_) % nodes->size();
@@ -412,9 +412,9 @@
  public:
   CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
 
-  void Reset() OVERRIDE { prev_select_ = nullptr; }
+  void Reset() override { prev_select_ = nullptr; }
   SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
-                                         const SchedulingGraph& graph) OVERRIDE;
+                                         const SchedulingGraph& graph) override;
 
  protected:
   SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate,
@@ -492,7 +492,7 @@
         codegen_(cg),
         instruction_set_(instruction_set) {}
 
-  bool Run() OVERRIDE {
+  bool Run() override {
     return Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
   }
 
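
The RandomSchedulingNodeSelector above picks a candidate with rand_r(&seed_) % nodes->size(). One common way to then remove the chosen element from a worklist in O(1) is to swap it with the back and pop; the stand-alone sketch below shows that idiom with a hypothetical PopRandom() helper over plain ints, and is not the ART implementation.

#include <stdlib.h>   // rand_r (POSIX)
#include <iostream>
#include <utility>
#include <vector>

// Sketch only: draw a random element from the worklist and remove it in O(1).
int PopRandom(std::vector<int>* nodes, unsigned int* seed) {
  size_t select = rand_r(seed) % nodes->size();  // uniform index, as in the hunk above
  std::swap((*nodes)[select], nodes->back());    // O(1) removal; order is not preserved
  int chosen = nodes->back();
  nodes->pop_back();
  return chosen;
}

int main() {
  std::vector<int> worklist = {10, 20, 30, 40};
  unsigned int seed = 42;
  while (!worklist.empty()) {
    std::cout << PopRandom(&worklist, &seed) << "\n";
  }
  return 0;
}
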
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 2f36948..875593b 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -100,7 +100,7 @@
   M(DataProcWithShifterOp, unused)
 
 #define DECLARE_VISIT_INSTRUCTION(type, unused)  \
-  void Visit##type(H##type* instruction) OVERRIDE;
+  void Visit##type(H##type* instruction) override;
 
   FOR_EACH_SCHEDULED_ARM_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -140,9 +140,9 @@
   HSchedulerARM(SchedulingNodeSelector* selector,
                 SchedulingLatencyVisitorARM* arm_latency_visitor)
       : HScheduler(arm_latency_visitor, selector) {}
-  ~HSchedulerARM() OVERRIDE {}
+  ~HSchedulerARM() override {}
 
-  bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
+  bool IsSchedulable(const HInstruction* instruction) const override {
 #define CASE_INSTRUCTION_KIND(type, unused) case \
   HInstruction::InstructionKind::k##type:
     switch (instruction->GetKind()) {
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 0d2f8d9..7f6549d 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -118,7 +118,7 @@
   M(DataProcWithShifterOp, unused)
 
 #define DECLARE_VISIT_INSTRUCTION(type, unused)  \
-  void Visit##type(H##type* instruction) OVERRIDE;
+  void Visit##type(H##type* instruction) override;
 
   FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -136,9 +136,9 @@
  public:
   explicit HSchedulerARM64(SchedulingNodeSelector* selector)
       : HScheduler(&arm64_latency_visitor_, selector) {}
-  ~HSchedulerARM64() OVERRIDE {}
+  ~HSchedulerARM64() override {}
 
-  bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
+  bool IsSchedulable(const HInstruction* instruction) const override {
 #define CASE_INSTRUCTION_KIND(type, unused) case \
   HInstruction::InstructionKind::k##type:
     switch (instruction->GetKind()) {
@@ -160,7 +160,7 @@
   // SIMD&FP registers are callee saved) so don't reorder such vector instructions.
   //
   // TODO: remove this when a proper support of SIMD registers is introduced to the compiler.
-  bool IsSchedulingBarrier(const HInstruction* instr) const OVERRIDE {
+  bool IsSchedulingBarrier(const HInstruction* instr) const override {
     return HScheduler::IsSchedulingBarrier(instr) ||
            instr->IsVecReduce() ||
            instr->IsVecExtractScalar() ||
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index d24d226..2889166 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -68,7 +68,7 @@
                    OptimizingCompilerStats* stats,
                    const char* name = kSelectGeneratorPassName);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kSelectGeneratorPassName = "select_generator";
 
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index cbac361..dc55eea 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -37,7 +37,7 @@
       : HOptimization(graph, name),
         codegen_(codegen) { }
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kSharpeningPassName = "sharpening";
 
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index cebd4ad..92d0b08 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -60,7 +60,7 @@
  * A live range contains the start and end of a range where an instruction or a temporary
  * is live.
  */
-class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> {
+class LiveRange final : public ArenaObject<kArenaAllocSsaLiveness> {
  public:
   LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
     DCHECK_LT(start, end);
@@ -230,12 +230,25 @@
       : instruction_(instruction),
         next_(nullptr) {}
 
+  static size_t ComputePosition(HInstruction* instruction) {
+    // We special case instructions emitted at use site, as their
+    // safepoint position needs to be at their use.
+    if (instruction->IsEmittedAtUseSite()) {
+      // Currently only applies to implicit null checks, which are emitted
+      // at the next instruction.
+      DCHECK(instruction->IsNullCheck()) << instruction->DebugName();
+      return instruction->GetLifetimePosition() + 2;
+    } else {
+      return instruction->GetLifetimePosition();
+    }
+  }
+
   void SetNext(SafepointPosition* next) {
     next_ = next;
   }
 
   size_t GetPosition() const {
-    return instruction_->GetLifetimePosition();
+    return ComputePosition(instruction_);
   }
 
   SafepointPosition* GetNext() const {
@@ -922,7 +935,7 @@
     if (first_safepoint_ == nullptr) {
       first_safepoint_ = last_safepoint_ = safepoint;
     } else {
-      DCHECK_LT(last_safepoint_->GetPosition(), safepoint->GetPosition());
+      DCHECK_LE(last_safepoint_->GetPosition(), safepoint->GetPosition());
       last_safepoint_->SetNext(safepoint);
       last_safepoint_ = safepoint;
     }
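
The new SafepointPosition::ComputePosition() above is the substantive change in this file: an instruction emitted at its use site (currently only implicit null checks) reports the lifetime position of the following instruction, i.e. its own position plus 2, since consecutive instructions are two lifetime positions apart in this numbering. That also appears to be why the ordering check above is relaxed from DCHECK_LT to DCHECK_LE: the adjusted position of a folded null check can now coincide with the position of the safepoint that follows it. A self-contained sketch of the dispatch, with hypothetical types and not ART code:

#include <cassert>
#include <cstddef>

// Sketch only: mirror of the emitted-at-use-site special case above.
struct Instr {
  size_t lifetime_position;
  bool emitted_at_use_site;  // e.g. an implicit null check folded into its user
};

size_t ComputeSafepointPosition(const Instr& instr) {
  // An instruction emitted at its use site takes the safepoint position of the
  // next instruction; consecutive instructions are 2 lifetime positions apart.
  return instr.emitted_at_use_site ? instr.lifetime_position + 2
                                   : instr.lifetime_position;
}

int main() {
  Instr null_check{10, /*emitted_at_use_site=*/true};   // folded into the access below
  Instr array_get{12, /*emitted_at_use_site=*/false};
  assert(ComputeSafepointPosition(null_check) == ComputeSafepointPosition(array_get));
  return 0;
}
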
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index a683c69..4b52553 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -29,7 +29,7 @@
 
 class SsaLivenessAnalysisTest : public OptimizingUnitTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     OptimizingUnitTest::SetUp();
     graph_ = CreateGraph();
     codegen_ = CodeGenerator::Create(graph_, *compiler_options_);
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index ee859e8..c5cc752 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -31,7 +31,7 @@
   explicit SsaDeadPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaDeadPhiEliminationPassName) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   void MarkDeadPhis();
   void EliminateDeadPhis();
@@ -53,7 +53,7 @@
   explicit SsaRedundantPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaRedundantPhiEliminationPassName) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
 
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 85ed06e..e679893 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -38,15 +38,15 @@
  public:
   explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
 
-  void PrintInt(int value) OVERRIDE {
+  void PrintInt(int value) override {
     str_ += android::base::StringPrintf("%d", value);
   }
 
-  void PrintString(const char* value) OVERRIDE {
+  void PrintString(const char* value) override {
     str_ += value;
   }
 
-  void PrintNewLine() OVERRIDE {
+  void PrintNewLine() override {
     str_ += '\n';
   }
 
@@ -54,7 +54,7 @@
 
   std::string str() const { return str_; }
 
-  void VisitIntConstant(HIntConstant* constant) OVERRIDE {
+  void VisitIntConstant(HIntConstant* constant) override {
     PrintPreInstruction(constant);
     str_ += constant->DebugName();
     str_ += " ";
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index f0069c0..b1abcf6 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -31,7 +31,7 @@
         do_implicit_null_checks_(do_implicit_null_checks) {}
 
  private:
-  void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE {
+  void VisitBoundsCheck(HBoundsCheck* check) override {
     // Replace the length by the array itself, so that we can do compares to memory.
     HArrayLength* array_len = check->InputAt(1)->AsArrayLength();
 
diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h
index b254000..3f4178d 100644
--- a/compiler/optimizing/x86_memory_gen.h
+++ b/compiler/optimizing/x86_memory_gen.h
@@ -31,7 +31,7 @@
                              CodeGenerator* codegen,
                              OptimizingCompilerStats* stats);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kX86MemoryOperandGenerationPassName =
           "x86_memory_operand_generation";
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index b0310f2..98c0191 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -39,7 +39,7 @@
 namespace art {
 namespace arm {
 
-class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
+class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
  public:
   // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
   // fewer system calls than a larger default capacity.
@@ -149,7 +149,7 @@
   using MacroAssembler::Vmov;
 };
 
-class ArmVIXLAssembler FINAL : public Assembler {
+class ArmVIXLAssembler final : public Assembler {
  private:
   class ArmException;
  public:
@@ -161,19 +161,19 @@
 
   virtual ~ArmVIXLAssembler() {}
   ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Size of generated code.
-  size_t CodeSize() const OVERRIDE;
-  const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+  size_t CodeSize() const override;
+  const uint8_t* CodeBufferBaseAddress() const override;
 
   // Copy instructions out of assembly buffer into the given region of memory.
-  void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;
+  void FinalizeInstructions(const MemoryRegion& region) override;
 
-  void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Bind(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
   }
-  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
   }
 
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 4bc5d69..674bf12 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -32,7 +32,7 @@
 namespace art {
 namespace arm {
 
-class ArmVIXLJNIMacroAssembler FINAL
+class ArmVIXLJNIMacroAssembler final
     : public JNIMacroAssemblerFwd<ArmVIXLAssembler, PointerSize::k32> {
  private:
   class ArmException;
@@ -42,7 +42,7 @@
         exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
 
   virtual ~ArmVIXLJNIMacroAssembler() {}
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   //
   // Overridden common assembler high-level functionality
@@ -52,109 +52,109 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack.
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines.
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister src) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
+                                ManagedRegister scratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest,
                      ManagedRegister src,
                      FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
+                     ManagedRegister scratch) override;
 
   // Load routines.
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
 
   void LoadFromThread(ManagedRegister dest,
                       ThreadOffset32 src,
-                      size_t size) OVERRIDE;
+                      size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
 
   void LoadRef(ManagedRegister dest,
                ManagedRegister base,
                MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override;
 
   // Copying routines.
-  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset32 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
+                            ManagedRegister scratch) override;
 
   void CopyRawPtrToThread(ThreadOffset32 thr_offs,
                           FrameOffset fr_offs,
-                          ManagedRegister scratch) OVERRIDE;
+                          ManagedRegister scratch) override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
 
   void Copy(FrameOffset dest,
             ManagedRegister src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base,
             Offset dest_offset,
             FrameOffset src,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             FrameOffset src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest,
             Offset dest_offset,
             ManagedRegister src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             Offset dest_offset,
             FrameOffset src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister mtr) OVERRIDE;
+  void GetCurrentThread(ManagedRegister mtr) override;
   void GetCurrentThread(FrameOffset dest_offset,
-                        ManagedRegister scratch) OVERRIDE;
+                        ManagedRegister scratch) override;
 
   // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
@@ -163,43 +163,43 @@
   void CreateHandleScopeEntry(ManagedRegister out_reg,
                               FrameOffset handlescope_offset,
                               ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off,
                               FrameOffset handlescope_offset,
                               ManagedRegister scratch,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // src holds a handle scope entry (Object**); load this into dst.
   void LoadReferenceFromHandleScope(ManagedRegister dst,
-                                    ManagedRegister src) OVERRIDE;
+                                    ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+  void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to an ExceptionSlowPath if it is.
   void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override;
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label) OVERRIDE;
+  void Jump(JNIMacroLabel* label) override;
   // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label) OVERRIDE;
+  void Bind(JNIMacroLabel* label) override;
 
-  void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+  void MemoryBarrier(ManagedRegister scratch) override;
 
   void EmitExceptionPoll(ArmVIXLJNIMacroAssembler::ArmException *exception);
   void Load(ArmManagedRegister dest, vixl32::Register base, int32_t offset, size_t size);
@@ -231,7 +231,7 @@
   friend class ArmVIXLAssemblerTest_VixlStoreToOffset_Test;
 };
 
-class ArmVIXLJNIMacroLabel FINAL
+class ArmVIXLJNIMacroLabel final
     : public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel,
                                  vixl32::Label,
                                  InstructionSet::kArm> {
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 8983af2..74537dd 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -61,7 +61,7 @@
   kStoreDWord
 };
 
-class Arm64Assembler FINAL : public Assembler {
+class Arm64Assembler final : public Assembler {
  public:
   explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
 
@@ -70,11 +70,11 @@
   vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
 
   // Finalize the code.
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Size of generated code.
-  size_t CodeSize() const OVERRIDE;
-  const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+  size_t CodeSize() const override;
+  const uint8_t* CodeBufferBaseAddress() const override;
 
   // Copy instructions out of assembly buffer into the given region of memory.
   void FinalizeInstructions(const MemoryRegion& region);
@@ -109,10 +109,10 @@
   // MaybeGenerateMarkingRegisterCheck and is passed to the BRK instruction.
   void GenerateMarkingRegisterCheck(vixl::aarch64::Register temp, int code = 0);
 
-  void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Bind(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM64";
   }
-  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
   }
 
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index f531b2a..45316ed 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -40,7 +40,7 @@
 namespace art {
 namespace arm64 {
 
-class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
+class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
  public:
   explicit Arm64JNIMacroAssembler(ArenaAllocator* allocator)
       : JNIMacroAssemblerFwd(allocator),
@@ -49,94 +49,94 @@
   ~Arm64JNIMacroAssembler();
 
   // Finalize the code.
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Emit code that will create an activation on the stack.
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack.
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines.
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister src) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
   void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+                                ManagedRegister scratch) override;
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
   void StoreSpanning(FrameOffset dest,
                      ManagedRegister src,
                      FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
+                     ManagedRegister scratch) override;
 
   // Load routines.
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
+  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
   void LoadRef(ManagedRegister dest,
                ManagedRegister base,
                MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+               bool unpoison_reference) override;
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
 
   // Copying routines.
-  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset64 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
+                            ManagedRegister scratch) override;
   void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      OVERRIDE;
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+      override;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
   void Copy(FrameOffset dest,
             ManagedRegister src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
   void Copy(ManagedRegister dest_base,
             Offset dest_offset,
             FrameOffset src,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
   void Copy(FrameOffset dest,
             FrameOffset src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
   void Copy(ManagedRegister dest,
             Offset dest_offset,
             ManagedRegister src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
   void Copy(FrameOffset dest,
             Offset dest_offset,
             FrameOffset src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
-  void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+            size_t size) override;
+  void MemoryBarrier(ManagedRegister scratch) override;
 
   // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
 
   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
@@ -145,40 +145,40 @@
   void CreateHandleScopeEntry(ManagedRegister out_reg,
                               FrameOffset handlescope_offset,
                               ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off,
                               FrameOffset handlescope_offset,
                               ManagedRegister scratch,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override;
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label) OVERRIDE;
+  void Jump(JNIMacroLabel* label) override;
   // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label) OVERRIDE;
+  void Bind(JNIMacroLabel* label) override;
 
  private:
   class Arm64Exception {
@@ -234,7 +234,7 @@
   ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
 };
 
-class Arm64JNIMacroLabel FINAL
+class Arm64JNIMacroLabel final
     : public JNIMacroLabelCommon<Arm64JNIMacroLabel,
                                  vixl::aarch64::Label,
                                  InstructionSet::kArm64> {
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 379a639..096410d 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -283,7 +283,7 @@
 
 // The purpose of this class is to ensure that we do not have to explicitly
 // call the AdvancePC method (which is good for convenience and correctness).
-class DebugFrameOpCodeWriterForAssembler FINAL
+class DebugFrameOpCodeWriterForAssembler final
     : public dwarf::DebugFrameOpCodeWriter<> {
  public:
   struct DelayedAdvancePC {
@@ -292,7 +292,7 @@
   };
 
   // This method is called by the opcode writers.
-  virtual void ImplicitlyAdvancePC() FINAL;
+  void ImplicitlyAdvancePC() final;
 
   explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
       : dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
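
The assembler.h hunk above leaves the behaviour of DebugFrameOpCodeWriterForAssembler unchanged: ImplicitlyAdvancePC is virtual by inheritance, so dropping the redundant `virtual` next to `final` is purely cosmetic. A simplified sketch of that hook pattern, with stand-in names rather than the real dwarf::DebugFrameOpCodeWriter interface:

#include <cstdio>

class OpCodeWriter {
 public:
  virtual ~OpCodeWriter() {}
  void EmitOpcode() {
    // Every opcode emission advances the PC implicitly, so callers never do it by hand.
    ImplicitlyAdvancePC();
  }
 protected:
  virtual void ImplicitlyAdvancePC() { std::printf("advance pc (base)\n"); }
};

class AssemblerOpCodeWriter final : public OpCodeWriter {
 protected:
  // Virtual by inheritance; `final` pins the behaviour down, as in the hunk above.
  void ImplicitlyAdvancePC() final { std::printf("delay and batch the advance\n"); }
};

int main() {
  AssemblerOpCodeWriter writer;
  writer.EmitOpcode();
  return 0;
}
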
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 7c800b3..9e23d11 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -737,7 +737,7 @@
  protected:
   AssemblerTest() {}
 
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     allocator_.reset(new ArenaAllocator(&pool_));
     assembler_.reset(CreateAssembler(allocator_.get()));
     test_helper_.reset(
@@ -753,7 +753,7 @@
     SetUpHelpers();
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     test_helper_.reset();  // Clean up the helper.
     assembler_.reset();
     allocator_.reset();
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index f5df926..e6130cf 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -259,19 +259,19 @@
 template <typename T, PointerSize kPointerSize>
 class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
  public:
-  void FinalizeCode() OVERRIDE {
+  void FinalizeCode() override {
     asm_.FinalizeCode();
   }
 
-  size_t CodeSize() const OVERRIDE {
+  size_t CodeSize() const override {
     return asm_.CodeSize();
   }
 
-  void FinalizeInstructions(const MemoryRegion& region) OVERRIDE {
+  void FinalizeInstructions(const MemoryRegion& region) override {
     asm_.FinalizeInstructions(region);
   }
 
-  DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE {
+  DebugFrameOpCodeWriterForAssembler& cfi() override {
     return asm_.cfi();
   }
 
@@ -299,7 +299,7 @@
   JNIMacroLabelCommon() : JNIMacroLabel(kIsa) {
   }
 
-  virtual ~JNIMacroLabelCommon() OVERRIDE {}
+  ~JNIMacroLabelCommon() override {}
 
  private:
   PlatformLabel label_;
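
JNIMacroAssemblerFwd above implements the JNI macro-assembler interface by forwarding each overridden call to the concrete assembler it wraps. A generic sketch of that forwarding idiom, using illustrative names rather than ART's actual types:

#include <cstddef>
#include <cstdio>

class MacroAssemblerInterface {
 public:
  virtual ~MacroAssemblerInterface() {}
  virtual void FinalizeCode() = 0;
  virtual size_t CodeSize() const = 0;
};

class ConcreteAssembler {
 public:
  void FinalizeCode() { std::printf("finalize\n"); }
  size_t CodeSize() const { return 42; }
};

template <typename T>
class MacroAssemblerFwd : public MacroAssemblerInterface {
 public:
  // Each override simply delegates to the wrapped assembler.
  void FinalizeCode() override { asm_.FinalizeCode(); }
  size_t CodeSize() const override { return asm_.CodeSize(); }
 private:
  T asm_;
};

int main() {
  MacroAssemblerFwd<ConcreteAssembler> fwd;
  fwd.FinalizeCode();
  std::printf("%zu\n", fwd.CodeSize());
  return 0;
}
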
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index b70c18b..067a595 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -58,7 +58,7 @@
  protected:
   JNIMacroAssemblerTest() {}
 
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     allocator_.reset(new ArenaAllocator(&pool_));
     assembler_.reset(CreateAssembler(allocator_.get()));
     test_helper_.reset(
@@ -74,7 +74,7 @@
     SetUpHelpers();
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     test_helper_.reset();  // Clean up the helper.
     assembler_.reset();
     allocator_.reset();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index af3d7a0..8a1e1df 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -263,7 +263,7 @@
   DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
 };
 
-class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
+class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
  public:
   using JNIBase = JNIMacroAssembler<PointerSize::k32>;
 
@@ -285,8 +285,8 @@
     cfi().DelayEmittingAdvancePCs();
   }
 
-  size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
-  size_t CodePosition() OVERRIDE;
+  size_t CodeSize() const override { return Assembler::CodeSize(); }
+  size_t CodePosition() override;
   DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
 
   virtual ~MipsAssembler() {
@@ -1143,10 +1143,10 @@
     }
   }
 
-  void Bind(Label* label) OVERRIDE {
+  void Bind(Label* label) override {
     Bind(down_cast<MipsLabel*>(label));
   }
-  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
   }
 
@@ -1155,25 +1155,25 @@
   using JNIBase::Jump;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override {
     LOG(FATAL) << "Not implemented on MIPS32";
     UNREACHABLE();
   }
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS32";
     UNREACHABLE();
   }
   // Emit a conditional jump to the label by applying a unary condition test to the register.
   void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
             JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
-            ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+            ManagedRegister test ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS32";
     UNREACHABLE();
   }
 
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS32";
     UNREACHABLE();
   }
@@ -1232,108 +1232,108 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack.
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines.
-  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister mscratch) OVERRIDE;
+                                ManagedRegister mscratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest,
                      ManagedRegister msrc,
                      FrameOffset in_off,
-                     ManagedRegister mscratch) OVERRIDE;
+                     ManagedRegister mscratch) override;
 
   // Load routines.
-  void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
 
-  void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
+  void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
 
   void LoadRef(ManagedRegister mdest,
                ManagedRegister base,
                MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) override;
 
   // Copying routines.
-  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
+  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset32 thr_offs,
-                            ManagedRegister mscratch) OVERRIDE;
+                            ManagedRegister mscratch) override;
 
   void CopyRawPtrToThread(ThreadOffset32 thr_offs,
                           FrameOffset fr_offs,
-                          ManagedRegister mscratch) OVERRIDE;
+                          ManagedRegister mscratch) override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
 
   void Copy(FrameOffset dest,
             ManagedRegister src_base,
             Offset src_offset,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base,
             Offset dest_offset,
             FrameOffset src,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             FrameOffset src_base,
             Offset src_offset,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest,
             Offset dest_offset,
             ManagedRegister src,
             Offset src_offset,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             Offset dest_offset,
             FrameOffset src,
             Offset src_offset,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
-  void MemoryBarrier(ManagedRegister) OVERRIDE;
+  void MemoryBarrier(ManagedRegister) override;
 
   // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
 
   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
@@ -1342,34 +1342,34 @@
   void CreateHandleScopeEntry(ManagedRegister out_reg,
                               FrameOffset handlescope_offset,
                               ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off,
                               FrameOffset handlescope_offset,
                               ManagedRegister mscratch,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
+  void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
 
   // Emit slow paths queued during assembly and promote short branches to long if needed.
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Emit branches and finalize all instructions.
   void FinalizeInstructions(const MemoryRegion& region);
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index 0f85892..f9919f5 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -61,15 +61,15 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "mips";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     return " --no-warn -32 -march=mips32r5 -mmsa";
   }
 
-  void Pad(std::vector<uint8_t>& data) OVERRIDE {
+  void Pad(std::vector<uint8_t>& data) override {
     // The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
     // of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
     // pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -78,15 +78,15 @@
     data.insert(data.end(), pad_size, 0);
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mmips:isa32r5";
   }
 
-  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
     return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (registers_.size() == 0) {
       registers_.push_back(new mips::Register(mips::ZERO));
       registers_.push_back(new mips::Register(mips::AT));
@@ -222,7 +222,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -234,23 +234,23 @@
     UNREACHABLE();
   }
 
-  std::vector<mips::Register*> GetRegisters() OVERRIDE {
+  std::vector<mips::Register*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<mips::FRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  std::vector<mips::VectorRegister*> GetVectorRegisters() OVERRIDE {
+  std::vector<mips::VectorRegister*> GetVectorRegisters() override {
     return vec_registers_;
   }
 
-  uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+  uint32_t CreateImmediate(int64_t imm_value) override {
     return imm_value;
   }
 
-  std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const mips::Register& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
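
The Pad override in the MIPS test fixtures above compensates for the GNU linker padding the code segment with NOPs to a multiple of 16 before outputs are compared. A small, self-contained sketch of that padding step, where RoundUp stands in for ART's helper of the same name:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for ART's RoundUp helper: round value up to the next multiple of alignment.
static size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

// Pad the assembled bytes with zeros so their length matches the linker's 16-byte rounding.
static void Pad(std::vector<uint8_t>& data) {
  size_t pad_size = RoundUp(data.size(), 16u) - data.size();
  data.insert(data.end(), pad_size, 0);
}

int main() {
  std::vector<uint8_t> code(21, 0xEA);  // 21 bytes of fake instructions.
  Pad(code);
  std::printf("padded size: %zu\n", code.size());  // Prints 32.
  return 0;
}
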
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 3d876ca..1ec7a6a 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -61,16 +61,16 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "mips";
   }
 
-  std::string GetAssemblerCmdName() OVERRIDE {
+  std::string GetAssemblerCmdName() override {
     // We assemble and link for MIPS32R6. See GetAssemblerParameters() for details.
     return "gcc";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     // We assemble and link for MIPS32R6. The reason is that object files produced for MIPS32R6
     // (and MIPS64R6) with the GNU assembler don't have correct final offsets in PC-relative
     // branches in the .text section and so they require a relocation pass (there's a relocation
@@ -82,7 +82,7 @@
         " -Wl,-Ttext=0x1000000 -Wl,-e0x1000000 -nostdlib";
   }
 
-  void Pad(std::vector<uint8_t>& data) OVERRIDE {
+  void Pad(std::vector<uint8_t>& data) override {
     // The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
     // of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
     // pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -91,15 +91,15 @@
     data.insert(data.end(), pad_size, 0);
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mmips:isa32r6";
   }
 
-  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
     return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (registers_.size() == 0) {
       registers_.push_back(new mips::Register(mips::ZERO));
       registers_.push_back(new mips::Register(mips::AT));
@@ -235,7 +235,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -247,23 +247,23 @@
     UNREACHABLE();
   }
 
-  std::vector<mips::Register*> GetRegisters() OVERRIDE {
+  std::vector<mips::Register*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<mips::FRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  std::vector<mips::VectorRegister*> GetVectorRegisters() OVERRIDE {
+  std::vector<mips::VectorRegister*> GetVectorRegisters() override {
     return vec_registers_;
   }
 
-  uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+  uint32_t CreateImmediate(int64_t imm_value) override {
     return imm_value;
   }
 
-  std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const mips::Register& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index f94d074..9527fa6 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -55,19 +55,19 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "mips";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     return " --no-warn -32 -march=mips32r2";
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mmips:isa32r2";
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (registers_.size() == 0) {
       registers_.push_back(new mips::Register(mips::ZERO));
       registers_.push_back(new mips::Register(mips::AT));
@@ -170,7 +170,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -181,19 +181,19 @@
     UNREACHABLE();
   }
 
-  std::vector<mips::Register*> GetRegisters() OVERRIDE {
+  std::vector<mips::Register*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<mips::FRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+  uint32_t CreateImmediate(int64_t imm_value) override {
     return imm_value;
   }
 
-  std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const mips::Register& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 19f23b7..ce447db 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -414,7 +414,7 @@
   DISALLOW_COPY_AND_ASSIGN(Mips64ExceptionSlowPath);
 };
 
-class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
+class Mips64Assembler final : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
  public:
   using JNIBase = JNIMacroAssembler<PointerSize::k64>;
 
@@ -439,7 +439,7 @@
     }
   }
 
-  size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
+  size_t CodeSize() const override { return Assembler::CodeSize(); }
   DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
 
   // Emit Machine Instructions.
@@ -920,10 +920,10 @@
     }
   }
 
-  void Bind(Label* label) OVERRIDE {
+  void Bind(Label* label) override {
     Bind(down_cast<Mips64Label*>(label));
   }
-  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS64";
   }
 
@@ -934,25 +934,25 @@
   using JNIBase::Jump;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override {
     LOG(FATAL) << "Not implemented on MIPS64";
     UNREACHABLE();
   }
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS64";
     UNREACHABLE();
   }
   // Emit a conditional jump to the label by applying a unary condition test to the register.
   void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
             JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
-            ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+            ManagedRegister test ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS64";
     UNREACHABLE();
   }
 
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS64";
     UNREACHABLE();
   }
@@ -1322,119 +1322,119 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack.
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines.
-  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister mscratch) OVERRIDE;
+                                ManagedRegister mscratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
-                     ManagedRegister mscratch) OVERRIDE;
+                     ManagedRegister mscratch) override;
 
   // Load routines.
-  void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
 
-  void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
+  void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
 
   void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) override;
 
   // Copying routines.
-  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
+  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset64 thr_offs,
-                            ManagedRegister mscratch) OVERRIDE;
+                            ManagedRegister mscratch) override;
 
   void CopyRawPtrToThread(ThreadOffset64 thr_offs,
                           FrameOffset fr_offs,
-                          ManagedRegister mscratch) OVERRIDE;
+                          ManagedRegister mscratch) override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
 
   void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
-            ManagedRegister mscratch, size_t size) OVERRIDE;
+            ManagedRegister mscratch, size_t size) override;
 
   void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
-            ManagedRegister mscratch, size_t size) OVERRIDE;
+            ManagedRegister mscratch, size_t size) override;
 
   void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-            ManagedRegister mscratch, size_t size) OVERRIDE;
+            ManagedRegister mscratch, size_t size) override;
 
-  void MemoryBarrier(ManagedRegister) OVERRIDE;
+  void MemoryBarrier(ManagedRegister) override;
 
   // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
 
   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
   // null.
   void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
-                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+                              ManagedRegister in_reg, bool null_allowed) override;
 
   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
-                              mscratch, bool null_allowed) OVERRIDE;
+                              mscratch, bool null_allowed) override;
 
   // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
+  void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
 
   // Emit slow paths queued during assembly and promote short branches to long if needed.
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Emit branches and finalize all instructions.
   void FinalizeInstructions(const MemoryRegion& region);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index a53ff7c..4ceb356 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -63,16 +63,16 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "mips64";
   }
 
-  std::string GetAssemblerCmdName() OVERRIDE {
+  std::string GetAssemblerCmdName() override {
     // We assemble and link for MIPS64R6. See GetAssemblerParameters() for details.
     return "gcc";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     // We assemble and link for MIPS64R6. The reason is that object files produced for MIPS64R6
     // (and MIPS32R6) with the GNU assembler don't have correct final offsets in PC-relative
     // branches in the .text section and so they require a relocation pass (there's a relocation
@@ -80,7 +80,7 @@
     return " -march=mips64r6 -mmsa -Wa,--no-warn -Wl,-Ttext=0 -Wl,-e0 -nostdlib";
   }
 
-  void Pad(std::vector<uint8_t>& data) OVERRIDE {
+  void Pad(std::vector<uint8_t>& data) override {
     // The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
     // of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
     // pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -89,15 +89,15 @@
     data.insert(data.end(), pad_size, 0);
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mmips:isa64r6";
   }
 
-  mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+  mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) override {
     return new (allocator) mips64::Mips64Assembler(allocator, instruction_set_features_.get());
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (registers_.size() == 0) {
       registers_.push_back(new mips64::GpuRegister(mips64::ZERO));
       registers_.push_back(new mips64::GpuRegister(mips64::AT));
@@ -233,7 +233,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -245,23 +245,23 @@
     UNREACHABLE();
   }
 
-  std::vector<mips64::GpuRegister*> GetRegisters() OVERRIDE {
+  std::vector<mips64::GpuRegister*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<mips64::FpuRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<mips64::FpuRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  std::vector<mips64::VectorRegister*> GetVectorRegisters() OVERRIDE {
+  std::vector<mips64::VectorRegister*> GetVectorRegisters() override {
     return vec_registers_;
   }
 
-  uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+  uint32_t CreateImmediate(int64_t imm_value) override {
     return imm_value;
   }
 
-  std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index e42c4c9..5ac9236 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -306,7 +306,7 @@
   ArenaVector<int32_t> buffer_;
 };
 
-class X86Assembler FINAL : public Assembler {
+class X86Assembler final : public Assembler {
  public:
   explicit X86Assembler(ArenaAllocator* allocator)
       : Assembler(allocator), constant_area_(allocator) {}
@@ -758,8 +758,8 @@
   //
   int PreferredLoopAlignment() { return 16; }
   void Align(int alignment, int offset);
-  void Bind(Label* label) OVERRIDE;
-  void Jump(Label* label) OVERRIDE {
+  void Bind(Label* label) override;
+  void Jump(Label* label) override {
     jmp(label);
   }
   void Bind(NearLabel* label);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index cd007b3..b03c40a 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -51,19 +51,19 @@
                         x86::Immediate> Base;
 
  protected:
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "x86";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     return " --32";
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mi386 --no-show-raw-insn";
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (addresses_singleton_.size() == 0) {
       // One addressing mode to test the repeat drivers.
       addresses_singleton_.push_back(x86::Address(x86::EAX, x86::EBX, x86::TIMES_1, 2));
@@ -118,25 +118,25 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
   }
 
-  std::vector<x86::Address> GetAddresses() OVERRIDE {
+  std::vector<x86::Address> GetAddresses() override {
     return addresses_;
   }
 
-  std::vector<x86::Register*> GetRegisters() OVERRIDE {
+  std::vector<x86::Register*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<x86::XmmRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<x86::XmmRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  x86::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+  x86::Immediate CreateImmediate(int64_t imm_value) override {
     return x86::Immediate(imm_value);
   }
 
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index dd99f03..540d72b 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -25,10 +25,10 @@
 namespace x86 {
 
 // Slowpath entered when Thread::Current()->_exception is non-null
-class X86ExceptionSlowPath FINAL : public SlowPath {
+class X86ExceptionSlowPath final : public SlowPath {
  public:
   explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
-  virtual void Emit(Assembler *sp_asm) OVERRIDE;
+  void Emit(Assembler *sp_asm) override;
  private:
   const size_t stack_adjust_;
 };
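
X86ExceptionSlowPath above is the typical slow-path shape: a small `final` class that captures its parameters in the constructor and overrides a single Emit hook. A hedged sketch with stand-in SlowPath and Assembler types, not ART's actual classes:

#include <cstddef>
#include <cstdio>

class Assembler;  // Forward declaration is enough for the sketch.

class SlowPath {
 public:
  virtual ~SlowPath() {}
  virtual void Emit(Assembler* sp_asm) = 0;
};

class ExceptionSlowPath final : public SlowPath {
 public:
  explicit ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
  void Emit(Assembler* sp_asm) override {
    (void)sp_asm;  // A real implementation would emit code through the assembler.
    std::printf("emit exception slow path, stack adjust = %zu\n", stack_adjust_);
  }
 private:
  const size_t stack_adjust_;
};

int main() {
  ExceptionSlowPath path(16);
  path.Emit(nullptr);
  return 0;
}
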
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 99219d8..a701080 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -32,7 +32,7 @@
 
 class X86JNIMacroLabel;
 
-class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
+class X86JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
  public:
   explicit X86JNIMacroAssembler(ArenaAllocator* allocator) : JNIMacroAssemblerFwd(allocator) {}
   virtual ~X86JNIMacroAssembler() {}
@@ -45,130 +45,130 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister src) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
+                                ManagedRegister scratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
+                     ManagedRegister scratch) override;
 
   // Load routines
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
 
-  void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+  void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
 
   void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override;
 
   // Copying routines
-  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset32 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
+                            ManagedRegister scratch) override;
 
   void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      OVERRIDE;
+      override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
 
   void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
+            ManagedRegister scratch, size_t size) override;
 
   void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
+            ManagedRegister scratch, size_t size) override;
 
-  void MemoryBarrier(ManagedRegister) OVERRIDE;
+  void MemoryBarrier(ManagedRegister) override;
 
   // Sign extension
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current()
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
 
   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
   // null.
   void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
-                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+                              ManagedRegister in_reg, bool null_allowed) override;
 
   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
-                              ManagedRegister scratch, bool null_allowed) OVERRIDE;
+                              ManagedRegister scratch, bool null_allowed) override;
 
   // src holds a handle scope entry (Object**) load this into dst
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset]
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+  void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override;
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label) OVERRIDE;
+  void Jump(JNIMacroLabel* label) override;
   // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label) OVERRIDE;
+  void Bind(JNIMacroLabel* label) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler);
 };
 
-class X86JNIMacroLabel FINAL
+class X86JNIMacroLabel final
     : public JNIMacroLabelCommon<X86JNIMacroLabel,
                                  art::Label,
                                  InstructionSet::kX86> {
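
The changes in these assembler and JNI macro assembler headers are mechanical: ART's OVERRIDE and FINAL helper macros (which expand to the C++11 keywords) are replaced by the keywords themselves, and classes previously marked FINAL, such as X86JNIMacroLabel, now spell it `final`. A reduced sketch of the before/after pattern, using made-up Base/Derived names rather than the real ART classes:

    class Base {
     public:
      virtual ~Base() {}
      virtual void Store(unsigned offset, int value) = 0;
    };

    // FINAL -> final: no further subclassing of Derived is allowed.
    class Derived final : public Base {
     public:
      // OVERRIDE -> override: the compiler checks this really overrides a
      // virtual member of Base, so signature drift becomes a build error.
      void Store(unsigned offset, int value) override {}
    };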
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index e4d72a7..e696635 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -351,7 +351,7 @@
 };
 
 
-class X86_64Assembler FINAL : public Assembler {
+class X86_64Assembler final : public Assembler {
  public:
   explicit X86_64Assembler(ArenaAllocator* allocator)
       : Assembler(allocator), constant_area_(allocator) {}
@@ -844,8 +844,8 @@
   //
   int PreferredLoopAlignment() { return 16; }
   void Align(int alignment, int offset);
-  void Bind(Label* label) OVERRIDE;
-  void Jump(Label* label) OVERRIDE {
+  void Bind(Label* label) override;
+  void Jump(Label* label) override {
     jmp(label);
   }
   void Bind(NearLabel* label);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 0589df5..e1de1f1 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -145,15 +145,15 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "x86_64";
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (addresses_singleton_.size() == 0) {
       // One addressing mode to test the repeat drivers.
       addresses_singleton_.push_back(
@@ -291,7 +291,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -301,29 +301,29 @@
     return addresses_;
   }
 
-  std::vector<x86_64::CpuRegister*> GetRegisters() OVERRIDE {
+  std::vector<x86_64::CpuRegister*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<x86_64::XmmRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<x86_64::XmmRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  x86_64::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+  x86_64::Immediate CreateImmediate(int64_t imm_value) override {
     return x86_64::Immediate(imm_value);
   }
 
-  std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
 
-  std::string GetTertiaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+  std::string GetTertiaryRegisterName(const x86_64::CpuRegister& reg) override {
     CHECK(tertiary_register_names_.find(reg) != tertiary_register_names_.end());
     return tertiary_register_names_[reg];
   }
 
-  std::string GetQuaternaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+  std::string GetQuaternaryRegisterName(const x86_64::CpuRegister& reg) override {
     CHECK(quaternary_register_names_.find(reg) != quaternary_register_names_.end());
     return quaternary_register_names_[reg];
   }
@@ -2002,11 +2002,11 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "x86_64";
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
   }
 
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index f6b2f9d..5924a8b 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -574,10 +574,10 @@
 }
 
 // Slowpath entered when Thread::Current()->_exception is non-null
-class X86_64ExceptionSlowPath FINAL : public SlowPath {
+class X86_64ExceptionSlowPath final : public SlowPath {
  public:
   explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
-  virtual void Emit(Assembler *sp_asm) OVERRIDE;
+  void Emit(Assembler *sp_asm) override;
  private:
   const size_t stack_adjust_;
 };
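
Where a declaration previously carried both `virtual` and OVERRIDE, as with X86_64ExceptionSlowPath::Emit above, the conversion also drops the now redundant `virtual`: a function marked `override` is necessarily virtual, so repeating the keyword adds nothing. A minimal self-contained sketch of the same cleanup (names are illustrative, not the ART types):

    class SlowPathSketch {
     public:
      virtual ~SlowPathSketch() {}
      virtual void Emit() = 0;
    };

    // Before:  virtual void Emit() OVERRIDE;   // "virtual" restates what override implies
    // After:   void Emit() override;           // the single keyword carries the intent
    class ExceptionSlowPathSketch final : public SlowPathSketch {
     public:
      explicit ExceptionSlowPathSketch(unsigned stack_adjust) : stack_adjust_(stack_adjust) {}
      void Emit() override {}
     private:
      const unsigned stack_adjust_;
    };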
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index d766ad4..465ebbe 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -31,7 +31,7 @@
 namespace art {
 namespace x86_64 {
 
-class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
+class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assembler,
                                                                   PointerSize::k64> {
  public:
   explicit X86_64JNIMacroAssembler(ArenaAllocator* allocator)
@@ -46,107 +46,107 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister src) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
+                                ManagedRegister scratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest,
                      ManagedRegister src,
                      FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
+                     ManagedRegister scratch) override;
 
   // Load routines
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
 
-  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset  src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset  src) override;
 
   void LoadRef(ManagedRegister dest,
                ManagedRegister base,
                MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
 
   // Copying routines
   void Move(ManagedRegister dest, ManagedRegister src, size_t size);
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset64 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
+                            ManagedRegister scratch) override;
 
   void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      OVERRIDE;
+      override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
 
   void Copy(FrameOffset dest,
             ManagedRegister src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base,
             Offset dest_offset,
             FrameOffset src,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             FrameOffset src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest,
             Offset dest_offset,
             ManagedRegister src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             Offset dest_offset,
             FrameOffset src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
-  void MemoryBarrier(ManagedRegister) OVERRIDE;
+  void MemoryBarrier(ManagedRegister) override;
 
   // Sign extension
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current()
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
 
   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
@@ -155,46 +155,46 @@
   void CreateHandleScopeEntry(ManagedRegister out_reg,
                               FrameOffset handlescope_offset,
                               ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off,
                               FrameOffset handlescope_offset,
                               ManagedRegister scratch,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // src holds a handle scope entry (Object**) load this into dst
-  virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset]
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override;
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label) OVERRIDE;
+  void Jump(JNIMacroLabel* label) override;
   // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label) OVERRIDE;
+  void Bind(JNIMacroLabel* label) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
 };
 
-class X86_64JNIMacroLabel FINAL
+class X86_64JNIMacroLabel final
     : public JNIMacroLabelCommon<X86_64JNIMacroLabel,
                                  art::Label,
                                  InstructionSet::kX86_64> {
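
The CreateHandleScopeEntry comments in these JNI macro assembler headers describe the contract the generated code follows: the output is either the address of a handle scope slot or null, and the possibly stale in_reg value is only consulted to decide which, so the slot itself need not be reloaded. A rough C++ model of that contract as stated in the comments (function and parameter names are invented for illustration; the real implementation emits x86 instructions rather than running this logic directly):

    #include <cstdint>

    uintptr_t CreateHandleScopeEntrySketch(uintptr_t* handle_scope_slot,
                                           uintptr_t in_value,
                                           bool null_allowed) {
      if (null_allowed && in_value == 0) {
        return 0;  // pass null instead of a pointer to the slot
      }
      return reinterpret_cast<uintptr_t>(handle_scope_slot);
    }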
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 5d76329..81932a9 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -47,11 +47,11 @@
       : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp),
         deps_(nullptr) {}
 
-  void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {}
-  void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
-  bool IsRelocationPossible() OVERRIDE { return false; }
+  void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {}
+  void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
+  bool IsRelocationPossible() override { return false; }
 
-  verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return deps_; }
+  verifier::VerifierDeps* GetVerifierDeps() const override { return deps_; }
   void SetVerifierDeps(verifier::VerifierDeps* deps) { deps_ = deps; }
 
  private:
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 29df067..9406c62 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -613,7 +613,7 @@
   bool shutting_down_;
 };
 
-class Dex2Oat FINAL {
+class Dex2Oat final {
  public:
   explicit Dex2Oat(TimingLogger* timings) :
       compiler_kind_(Compiler::kOptimizing),
@@ -1348,12 +1348,12 @@
         }
       }
     } else {
-      std::unique_ptr<File> oat_file(new File(oat_fd_, oat_location_, /* check_usage */ true));
-      if (oat_file == nullptr) {
+      std::unique_ptr<File> oat_file(
+          new File(DupCloexec(oat_fd_), oat_location_, /* check_usage */ true));
+      if (!oat_file->IsOpened()) {
         PLOG(ERROR) << "Failed to create oat file: " << oat_location_;
         return false;
       }
-      oat_file->DisableAutoClose();
       if (oat_file->SetLength(0) != 0) {
         PLOG(WARNING) << "Truncating oat file " << oat_location_ << " failed.";
         oat_file->Erase();
@@ -1385,12 +1385,12 @@
 
       DCHECK_NE(output_vdex_fd_, -1);
       std::string vdex_location = ReplaceFileExtension(oat_location_, "vdex");
-      std::unique_ptr<File> vdex_file(new File(output_vdex_fd_, vdex_location, /* check_usage */ true));
-      if (vdex_file == nullptr) {
+      std::unique_ptr<File> vdex_file(new File(
+          DupCloexec(output_vdex_fd_), vdex_location, /* check_usage */ true));
+      if (!vdex_file->IsOpened()) {
         PLOG(ERROR) << "Failed to create vdex file: " << vdex_location;
         return false;
       }
-      vdex_file->DisableAutoClose();
       if (input_vdex_file_ != nullptr && output_vdex_fd_ == input_vdex_fd_) {
         update_input_vdex_ = true;
       } else {
@@ -1472,10 +1472,7 @@
         PLOG(ERROR) << "Failed to create swap file: " << swap_file_name_;
         return false;
       }
-      swap_fd_ = swap_file->Fd();
-      swap_file->MarkUnchecked();     // We don't we to track this, it will be unlinked immediately.
-      swap_file->DisableAutoClose();  // We'll handle it ourselves, the File object will be
-                                      // released immediately.
+      swap_fd_ = swap_file->Release();
       unlink(swap_file_name_.c_str());
     }
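
The dex2oat.cc hunks above rework how borrowed descriptors are wrapped: the oat and vdex File objects are now constructed over a DupCloexec() duplicate of the caller's fd, success is checked with IsOpened() (the old `oat_file == nullptr` test could never fire for a freshly new-ed object), and the DisableAutoClose() calls go away because closing the wrapper only closes the duplicate. The swap file similarly hands its descriptor over with Release() instead of MarkUnchecked()/DisableAutoClose(). A small POSIX-level sketch of the duplicate-and-wrap idea (plain fcntl/open calls standing in for ART's DupCloexec and File helpers; the path is made up):

    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    // Stand-in for DupCloexec(): duplicate a caller-owned descriptor with
    // close-on-exec set, so the wrapper can close its copy independently.
    static int DupCloexecSketch(int fd) {
      return fcntl(fd, F_DUPFD_CLOEXEC, 0);
    }

    int main() {
      int caller_fd = open("/tmp/dex2oat_fd_sketch.tmp", O_RDWR | O_CREAT | O_CLOEXEC, 0600);
      if (caller_fd < 0) {
        perror("open");
        return 1;
      }
      int wrapped_fd = DupCloexecSketch(caller_fd);  // what the File object would own
      if (wrapped_fd < 0) {                          // analogous to !oat_file->IsOpened()
        perror("dup");
      } else {
        close(wrapped_fd);  // closing the wrapper's copy leaves caller_fd usable
      }
      close(caller_fd);
      return 0;
    }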
 
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 4247e17..e047b4f 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -52,7 +52,7 @@
 
 class Dex2oatImageTest : public CommonRuntimeTest {
  public:
-  virtual void TearDown() OVERRIDE {}
+  void TearDown() override {}
 
  protected:
   // Visitors take method and type references
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 2b96684..a1fed5f 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -55,7 +55,7 @@
 
 class Dex2oatTest : public Dex2oatEnvironmentTest {
  public:
-  virtual void TearDown() OVERRIDE {
+  void TearDown() override {
     Dex2oatEnvironmentTest::TearDown();
 
     output_ = "";
@@ -139,11 +139,11 @@
       std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                        odex_location.c_str(),
                                                        odex_location.c_str(),
-                                                       nullptr,
-                                                       nullptr,
-                                                       false,
-                                                       /*low_4gb*/false,
+                                                       /* requested_base */ nullptr,
+                                                       /* executable */ false,
+                                                       /* low_4gb */ false,
                                                        dex_location.c_str(),
+                                                       /* reservation */ nullptr,
                                                        &error_msg));
       ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
 
@@ -159,11 +159,11 @@
         std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                          odex_location.c_str(),
                                                          odex_location.c_str(),
-                                                         nullptr,
-                                                         nullptr,
-                                                         false,
-                                                         /*low_4gb*/false,
+                                                         /* requested_base */ nullptr,
+                                                         /* executable */ false,
+                                                         /* low_4gb */ false,
                                                          dex_location.c_str(),
+                                                         /* reservation */ nullptr,
                                                          &error_msg));
         ASSERT_TRUE(odex_file.get() == nullptr);
       }
@@ -349,7 +349,7 @@
 
 class Dex2oatSwapUseTest : public Dex2oatSwapTest {
  protected:
-  void CheckHostResult(bool expect_use) OVERRIDE {
+  void CheckHostResult(bool expect_use) override {
     if (!kIsTargetBuild) {
       if (expect_use) {
         EXPECT_NE(output_.find("Large app, accepted running with swap."), std::string::npos)
@@ -361,7 +361,7 @@
     }
   }
 
-  std::string GetTestDexFileName() OVERRIDE {
+  std::string GetTestDexFileName() override {
     // Use Statics as it has a handful of functions.
     return CommonRuntimeTest::GetTestDexFileName("Statics");
   }
@@ -474,7 +474,7 @@
 class Dex2oatVeryLargeTest : public Dex2oatTest {
  protected:
   void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
-                   CompilerFilter::Filter result ATTRIBUTE_UNUSED) OVERRIDE {
+                   CompilerFilter::Filter result ATTRIBUTE_UNUSED) override {
     // Ignore, we'll do our own checks.
   }
 
@@ -516,11 +516,11 @@
     std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                      odex_location.c_str(),
                                                      odex_location.c_str(),
-                                                     nullptr,
-                                                     nullptr,
-                                                     false,
-                                                     /*low_4gb*/false,
+                                                     /* requested_base */ nullptr,
+                                                     /* executable */ false,
+                                                     /* low_4gb */ false,
                                                      dex_location.c_str(),
+                                                     /* reservation */ nullptr,
                                                      &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
     EXPECT_GT(app_image_file.length(), 0u);
@@ -627,7 +627,7 @@
 class Dex2oatLayoutTest : public Dex2oatTest {
  protected:
   void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
-                   CompilerFilter::Filter result ATTRIBUTE_UNUSED) OVERRIDE {
+                   CompilerFilter::Filter result ATTRIBUTE_UNUSED) override {
     // Ignore, we'll do our own checks.
   }
 
@@ -787,11 +787,11 @@
     std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                      odex_location.c_str(),
                                                      odex_location.c_str(),
-                                                     nullptr,
-                                                     nullptr,
-                                                     false,
-                                                     /*low_4gb*/false,
+                                                     /* requested_base */ nullptr,
+                                                     /* executable */ false,
+                                                     /* low_4gb */ false,
                                                      dex_location.c_str(),
+                                                     /* reservation */ nullptr,
                                                      &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
 
@@ -949,11 +949,11 @@
     std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                      odex_location.c_str(),
                                                      odex_location.c_str(),
-                                                     nullptr,
-                                                     nullptr,
-                                                     false,
-                                                     /*low_4gb*/false,
+                                                     /* requested_base */ nullptr,
+                                                     /* executable */ false,
+                                                     /* low_4gb */ false,
                                                      dex_location.c_str(),
+                                                     /* reservation */ nullptr,
                                                      &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
     ASSERT_GE(odex_file->GetOatDexFiles().size(), 1u);
@@ -1329,11 +1329,11 @@
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                    oat_filename.c_str(),
                                                    oat_filename.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/false,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    dex->GetLocation().c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1439,11 +1439,11 @@
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                    oat_filename.c_str(),
                                                    oat_filename.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/false,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    dex_location.c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1684,11 +1684,11 @@
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                    oat_filename.c_str(),
                                                    oat_filename.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/false,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    temp_dex.GetFilename().c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1705,7 +1705,7 @@
   // Create a multidex file with only one dex that gets rejected for cdex conversion.
   ScratchFile apk_file;
   {
-    FILE* file = fdopen(apk_file.GetFd(), "w+b");
+    FILE* file = fdopen(dup(apk_file.GetFd()), "w+b");
     ZipWriter writer(file);
     // Add vdex to zip.
     writer.StartEntry("classes.dex", ZipWriter::kCompress);
@@ -1762,11 +1762,11 @@
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                    odex_location.c_str(),
                                                    odex_location.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/false,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    dex_location.c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   ASSERT_STREQ("install", odex_file->GetCompilationReason());
@@ -1788,11 +1788,11 @@
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                    odex_location.c_str(),
                                                    odex_location.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/false,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    dex_location.c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   ASSERT_EQ(nullptr, odex_file->GetCompilationReason());
@@ -1826,11 +1826,11 @@
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                    odex_location.c_str(),
                                                    odex_location.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/ false,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    dex_location.c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr) << dex_location;
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1847,7 +1847,7 @@
     std::unique_ptr<File> vdex_file(OS::OpenFileForReading(vdex_location.c_str()));
     ASSERT_TRUE(vdex_file != nullptr);
     ASSERT_GT(vdex_file->GetLength(), 0u);
-    FILE* file = fdopen(dm_file.GetFd(), "w+b");
+    FILE* file = fdopen(dup(dm_file.GetFd()), "w+b");
     ZipWriter writer(file);
     auto write_all_bytes = [&](File* file) {
       std::unique_ptr<uint8_t[]> bytes(new uint8_t[file->GetLength()]);
@@ -1973,7 +1973,7 @@
 TEST_F(Dex2oatTest, CompactDexInvalidSource) {
   ScratchFile invalid_dex;
   {
-    FILE* file = fdopen(invalid_dex.GetFd(), "w+b");
+    FILE* file = fdopen(dup(invalid_dex.GetFd()), "w+b");
     ZipWriter writer(file);
     writer.StartEntry("classes.dex", ZipWriter::kAlign32);
     DexFile::Header header = {};
@@ -2015,7 +2015,7 @@
   // Create a zip containing the invalid dex.
   ScratchFile invalid_dex_zip;
   {
-    FILE* file = fdopen(invalid_dex_zip.GetFd(), "w+b");
+    FILE* file = fdopen(dup(invalid_dex_zip.GetFd()), "w+b");
     ZipWriter writer(file);
     writer.StartEntry("classes.dex", ZipWriter::kCompress);
     ASSERT_GE(writer.WriteBytes(&header, sizeof(header)), 0);
@@ -2065,11 +2065,11 @@
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                    odex_location.c_str(),
                                                    odex_location.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/false,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    odex_location.c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   ImageHeader header = {};
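
Two recurring adjustments run through dex2oat_test.cc. The OatFile::Open call sites now label the previously bare arguments with /* requested_base */, /* executable */ and /* low_4gb */ comments and pass the additional trailing /* reservation */ nullptr argument. Separately, the tests that build zip files pass fdopen() a dup() of the ScratchFile descriptor, so the later fclose() only closes the duplicate and the fixture keeps a usable fd. A self-contained sketch of that dup()-before-fdopen() pattern (the path is made up; ScratchFile and ZipWriter are not used here):

    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    int main() {
      int scratch_fd = open("/tmp/fdopen_dup_sketch.zip", O_RDWR | O_CREAT, 0600);
      if (scratch_fd < 0) {
        perror("open");
        return 1;
      }
      // fclose() closes the descriptor backing the FILE*, so hand fdopen() a
      // duplicate rather than the descriptor the test fixture still owns.
      FILE* file = fdopen(dup(scratch_fd), "w+b");
      if (file != nullptr) {
        fputs("zip payload would be written here", file);
        fclose(file);  // closes only the duplicate
      }
      // scratch_fd remains valid, e.g. for handing to the code under test.
      close(scratch_fd);
      return 0;
    }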
diff --git a/dex2oat/linker/arm/relative_patcher_arm_base.h b/dex2oat/linker/arm/relative_patcher_arm_base.h
index f5a1395..0eb4417 100644
--- a/dex2oat/linker/arm/relative_patcher_arm_base.h
+++ b/dex2oat/linker/arm/relative_patcher_arm_base.h
@@ -31,10 +31,10 @@
  public:
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
+  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
 
  protected:
   ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider,
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2.h b/dex2oat/linker/arm/relative_patcher_thumb2.h
index 3a42928..dbf64a1 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2.h
+++ b/dex2oat/linker/arm/relative_patcher_thumb2.h
@@ -29,7 +29,7 @@
 
 namespace linker {
 
-class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
+class Thumb2RelativePatcher final : public ArmBaseRelativePatcher {
  public:
   explicit Thumb2RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
                                  RelativePatcherTargetProvider* target_provider);
@@ -37,18 +37,18 @@
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
+                 uint32_t target_offset) override;
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
 
  protected:
-  uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
-  uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
+  uint32_t MaxPositiveDisplacement(const ThunkKey& key) override;
+  uint32_t MaxNegativeDisplacement(const ThunkKey& key) override;
 
  private:
   void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.h b/dex2oat/linker/arm64/relative_patcher_arm64.h
index f7f673c..e95d0fe 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.h
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.h
@@ -28,7 +28,7 @@
 
 namespace linker {
 
-class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
+class Arm64RelativePatcher final : public ArmBaseRelativePatcher {
  public:
   Arm64RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
                        RelativePatcherTargetProvider* target_provider,
@@ -36,24 +36,24 @@
 
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
+                 uint32_t target_offset) override;
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
 
  protected:
-  uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
-  uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
+  uint32_t MaxPositiveDisplacement(const ThunkKey& key) override;
+  uint32_t MaxNegativeDisplacement(const ThunkKey& key) override;
 
  private:
   static uint32_t PatchAdrp(uint32_t adrp, uint32_t disp);
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 4e7d636..852293b 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -94,35 +94,35 @@
 };
 
 template <typename ElfTypes>
-class ElfWriterQuick FINAL : public ElfWriter {
+class ElfWriterQuick final : public ElfWriter {
  public:
   ElfWriterQuick(const CompilerOptions& compiler_options,
                  File* elf_file);
   ~ElfWriterQuick();
 
-  void Start() OVERRIDE;
+  void Start() override;
   void PrepareDynamicSection(size_t rodata_size,
                              size_t text_size,
                              size_t data_bimg_rel_ro_size,
                              size_t bss_size,
                              size_t bss_methods_offset,
                              size_t bss_roots_offset,
-                             size_t dex_section_size) OVERRIDE;
-  void PrepareDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
-  OutputStream* StartRoData() OVERRIDE;
-  void EndRoData(OutputStream* rodata) OVERRIDE;
-  OutputStream* StartText() OVERRIDE;
-  void EndText(OutputStream* text) OVERRIDE;
-  OutputStream* StartDataBimgRelRo() OVERRIDE;
-  void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) OVERRIDE;
-  void WriteDynamicSection() OVERRIDE;
-  void WriteDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
-  bool StripDebugInfo() OVERRIDE;
-  bool End() OVERRIDE;
+                             size_t dex_section_size) override;
+  void PrepareDebugInfo(const debug::DebugInfo& debug_info) override;
+  OutputStream* StartRoData() override;
+  void EndRoData(OutputStream* rodata) override;
+  OutputStream* StartText() override;
+  void EndText(OutputStream* text) override;
+  OutputStream* StartDataBimgRelRo() override;
+  void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) override;
+  void WriteDynamicSection() override;
+  void WriteDebugInfo(const debug::DebugInfo& debug_info) override;
+  bool StripDebugInfo() override;
+  bool End() override;
 
-  virtual OutputStream* GetStream() OVERRIDE;
+  OutputStream* GetStream() override;
 
-  size_t GetLoadedSize() OVERRIDE;
+  size_t GetLoadedSize() override;
 
   static void EncodeOatPatches(const std::vector<uintptr_t>& locations,
                                std::vector<uint8_t>* buffer);
diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc
index b2be003..40495f3 100644
--- a/dex2oat/linker/elf_writer_test.cc
+++ b/dex2oat/linker/elf_writer_test.cc
@@ -14,9 +14,12 @@
  * limitations under the License.
  */
 
+#include <sys/mman.h>  // For the PROT_NONE constant.
+
 #include "elf_file.h"
 
 #include "base/file_utils.h"
+#include "base/mem_map.h"
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
 #include "common_compiler_test.h"
@@ -65,8 +68,8 @@
   {
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
-                                              false,
-                                              false,
+                                              /* writable */ false,
+                                              /* program_header_only */ false,
                                               /*low_4gb*/false,
                                               &error_msg));
     CHECK(ef.get() != nullptr) << error_msg;
@@ -77,9 +80,9 @@
   {
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
-                                              false,
-                                              false,
-                                              /*low_4gb*/false,
+                                              /* writable */ false,
+                                              /* program_header_only */ false,
+                                              /* low_4gb */ false,
                                               &error_msg));
     CHECK(ef.get() != nullptr) << error_msg;
     EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true);
@@ -87,16 +90,28 @@
     EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", true);
   }
   {
-    uint8_t* base = reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS);
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
-                                              false,
-                                              true,
-                                              /*low_4gb*/false,
-                                              &error_msg,
-                                              base));
+                                              /* writable */ false,
+                                              /* program_header_only */ true,
+                                              /* low_4gb */ false,
+                                              &error_msg));
     CHECK(ef.get() != nullptr) << error_msg;
-    CHECK(ef->Load(file.get(), false, /*low_4gb*/false, &error_msg)) << error_msg;
+    size_t size;
+    bool success = ef->GetLoadedSize(&size, &error_msg);
+    CHECK(success) << error_msg;
+    MemMap reservation = MemMap::MapAnonymous("ElfWriterTest#dlsym reservation",
+                                              /* addr */ nullptr,
+                                              RoundUp(size, kPageSize),
+                                              PROT_NONE,
+                                              /* low_4gb */ true,
+                                              &error_msg);
+    CHECK(reservation.IsValid()) << error_msg;
+    uint8_t* base = reservation.Begin();
+    success =
+        ef->Load(file.get(), /* executable */ false, /* low_4gb */ false, &reservation, &error_msg);
+    CHECK(success) << error_msg;
+    CHECK(!reservation.IsValid());
     EXPECT_EQ(reinterpret_cast<uintptr_t>(dl_oatdata) + reinterpret_cast<uintptr_t>(base),
         reinterpret_cast<uintptr_t>(ef->FindDynamicSymbolAddress("oatdata")));
     EXPECT_EQ(reinterpret_cast<uintptr_t>(dl_oatexec) + reinterpret_cast<uintptr_t>(base),
@@ -116,9 +131,9 @@
   {
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
-                                              false,
-                                              false,
-                                              /*low_4gb*/false,
+                                              /* writable */ false,
+                                              /* program_header_only */ false,
+                                              /* low_4gb */ false,
                                               &error_msg));
     CHECK(ef.get() != nullptr) << error_msg;
     EXPECT_TRUE(ef->HasSection(".note.gnu.build-id"));
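
The elf_writer_test.cc change stops passing a fixed ART_BASE_ADDRESS base into ElfFile::Open/Load. Instead the test queries GetLoadedSize(), reserves that much anonymous PROT_NONE address space with MemMap::MapAnonymous (hence the new <sys/mman.h> and base/mem_map.h includes), and passes the reservation into Load(); the CHECK(!reservation.IsValid()) afterwards expects Load() to have consumed it. At the mmap level such a reservation looks roughly like this (the size and printed message are illustrative, not ART's API):

    #include <cstddef>
    #include <cstdio>
    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
      const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      const size_t loaded_size = 64 * page_size;  // stand-in for ElfFile::GetLoadedSize()

      // PROT_NONE reserves the address range without making it accessible; a
      // loader would later map the ELF segments over it at fixed addresses.
      void* reservation = mmap(nullptr, loaded_size, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (reservation == MAP_FAILED) {
        perror("mmap");
        return 1;
      }
      std::printf("reserved %zu bytes at %p\n", loaded_size, reservation);
      munmap(reservation, loaded_size);  // Load() would instead take over the range
      return 0;
    }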
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 440b3a4..d575420 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -83,7 +83,7 @@
                const std::string& extra_dex = "",
                const std::initializer_list<std::string>& image_classes = {});
 
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     CommonCompilerTest::SetUpRuntimeOptions(options);
     QuickCompilerCallbacks* new_callbacks =
         new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileBootImage);
@@ -92,7 +92,7 @@
     options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
   }
 
-  std::unique_ptr<HashSet<std::string>> GetImageClasses() OVERRIDE {
+  std::unique_ptr<HashSet<std::string>> GetImageClasses() override {
     return std::make_unique<HashSet<std::string>>(image_classes_);
   }
 
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 67ded32..6a13454 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -755,7 +755,7 @@
 
 class ImageWriter::ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
  public:
-  bool operator()(ObjPtr<Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(Thread::Current());
     mirror::Class::ComputeName(hs.NewHandle(c));
     return true;
@@ -987,7 +987,7 @@
         classes_to_prune_(),
         defined_class_count_(0u) { }
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!image_writer_->KeepClass(klass.Ptr())) {
       classes_to_prune_.insert(klass.Ptr());
       if (klass->GetClassLoader() == class_loader_) {
@@ -1022,7 +1022,7 @@
   explicit PruneClassLoaderClassesVisitor(ImageWriter* image_writer)
       : image_writer_(image_writer), removed_class_count_(0) {}
 
-  virtual void Visit(ObjPtr<mirror::ClassLoader> class_loader) OVERRIDE
+  void Visit(ObjPtr<mirror::ClassLoader> class_loader) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     PruneClassesVisitor classes_visitor(image_writer_, class_loader);
     ClassTable* class_table =
@@ -1677,7 +1677,7 @@
 
   void VisitRoots(mirror::Object*** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       roots_->push_back(*roots[i]);
@@ -1686,7 +1686,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       roots_->push_back(roots[i]->AsMirrorPtr());
@@ -2104,14 +2104,14 @@
   void VisitRoots(mirror::Object*** roots ATTRIBUTE_UNUSED,
                   size_t count ATTRIBUTE_UNUSED,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     LOG(FATAL) << "Unsupported";
   }
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       // Copy the reference. Since we do not have the address for recording the relocation,
       // it needs to be recorded explicitly by the user of FixupRootVisitor.
@@ -2401,7 +2401,7 @@
   size_t oat_index_;
 };
 
-class ImageWriter::FixupClassVisitor FINAL : public FixupVisitor {
+class ImageWriter::FixupClassVisitor final : public FixupVisitor {
  public:
   FixupClassVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
       : FixupVisitor(image_writer, copy, oat_index) {}
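
The image_writer.cc visitors show where the keyword sits when other annotation macros are present: `override` takes exactly the slot OVERRIDE occupied, after the parameter list and before REQUIRES_SHARED(...), with ATTRIBUTE_UNUSED staying attached to its parameter. A compilable sketch of that ordering with empty stand-in macros (in ART these expand to Clang thread-safety and unused attributes; Locks::mutator_lock_ does not need to exist here because the sketch macro discards its argument):

    #define REQUIRES_SHARED_SKETCH(x)
    #define ATTRIBUTE_UNUSED_SKETCH __attribute__((unused))

    class RootVisitorSketch {
     public:
      virtual ~RootVisitorSketch() {}
      virtual void VisitRoots(int count) REQUIRES_SHARED_SKETCH(Locks::mutator_lock_) = 0;
    };

    class CollectingVisitorSketch : public RootVisitorSketch {
     public:
      // Parameter list, then "override" where OVERRIDE used to sit, then the
      // annotation macro, then the body.
      void VisitRoots(int count ATTRIBUTE_UNUSED_SKETCH) override
          REQUIRES_SHARED_SKETCH(Locks::mutator_lock_) {}
    };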
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 7cf555b..e45023e 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -73,7 +73,7 @@
 namespace linker {
 
 // Write a Space built during compilation for use during execution.
-class ImageWriter FINAL {
+class ImageWriter final {
  public:
   ImageWriter(const CompilerOptions& compiler_options,
               uintptr_t image_begin,
diff --git a/dex2oat/linker/mips/relative_patcher_mips.h b/dex2oat/linker/mips/relative_patcher_mips.h
index d3a4c5a..4c385a3 100644
--- a/dex2oat/linker/mips/relative_patcher_mips.h
+++ b/dex2oat/linker/mips/relative_patcher_mips.h
@@ -23,28 +23,28 @@
 namespace art {
 namespace linker {
 
-class MipsRelativePatcher FINAL : public RelativePatcher {
+class MipsRelativePatcher final : public RelativePatcher {
  public:
   explicit MipsRelativePatcher(const MipsInstructionSetFeatures* features)
       : is_r6(features->IsR6()) {}
 
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
+                 uint32_t target_offset) override;
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
+  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
 
  private:
   bool is_r6;
diff --git a/dex2oat/linker/mips64/relative_patcher_mips64.h b/dex2oat/linker/mips64/relative_patcher_mips64.h
index 9f5a125..7b7c2cc 100644
--- a/dex2oat/linker/mips64/relative_patcher_mips64.h
+++ b/dex2oat/linker/mips64/relative_patcher_mips64.h
@@ -22,27 +22,27 @@
 namespace art {
 namespace linker {
 
-class Mips64RelativePatcher FINAL : public RelativePatcher {
+class Mips64RelativePatcher final : public RelativePatcher {
  public:
   Mips64RelativePatcher() {}
 
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
+                 uint32_t target_offset) override;
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
+  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Mips64RelativePatcher);
diff --git a/dex2oat/linker/multi_oat_relative_patcher.h b/dex2oat/linker/multi_oat_relative_patcher.h
index 60fcfe8..9b47a0d 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.h
+++ b/dex2oat/linker/multi_oat_relative_patcher.h
@@ -35,7 +35,7 @@
 // any number of oat files. It provides storage for method code offsets
 // and wraps RelativePatcher calls, adjusting relative offsets according
 // to the value set by SetAdjustment().
-class MultiOatRelativePatcher FINAL {
+class MultiOatRelativePatcher final {
  public:
   using const_iterator = SafeMap<MethodReference, uint32_t>::const_iterator;
 
@@ -139,7 +139,7 @@
 
     void GetThunkCode(const LinkerPatch& patch,
                       /*out*/ ArrayRef<const uint8_t>* code,
-                      /*out*/ std::string* debug_name) OVERRIDE;
+                      /*out*/ std::string* debug_name) override;
 
    private:
     CompiledMethodStorage* storage_;
@@ -149,7 +149,7 @@
   // Wrap the map in a class implementing RelativePatcherTargetProvider.
   class MethodOffsetMap : public RelativePatcherTargetProvider {
    public:
-    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE;
+    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) override;
     SafeMap<MethodReference, uint32_t> map;
   };
 
diff --git a/dex2oat/linker/multi_oat_relative_patcher_test.cc b/dex2oat/linker/multi_oat_relative_patcher_test.cc
index 05fe36a..a5831b6 100644
--- a/dex2oat/linker/multi_oat_relative_patcher_test.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher_test.cc
@@ -35,7 +35,7 @@
 
     uint32_t ReserveSpace(uint32_t offset,
                           const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
-                          MethodReference method_ref) OVERRIDE {
+                          MethodReference method_ref) override {
       last_reserve_offset_ = offset;
       last_reserve_method_ = method_ref;
       offset += next_reserve_adjustment_;
@@ -43,7 +43,7 @@
       return offset;
     }
 
-    uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE {
+    uint32_t ReserveSpaceEnd(uint32_t offset) override {
       last_reserve_offset_ = offset;
       last_reserve_method_ = kNullMethodRef;
       offset += next_reserve_adjustment_;
@@ -51,7 +51,7 @@
       return offset;
     }
 
-    uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+    uint32_t WriteThunks(OutputStream* out, uint32_t offset) override {
       last_write_offset_ = offset;
       if (next_write_alignment_ != 0u) {
         offset += next_write_alignment_;
@@ -79,7 +79,7 @@
     void PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                    uint32_t literal_offset,
                    uint32_t patch_offset,
-                   uint32_t target_offset) OVERRIDE {
+                   uint32_t target_offset) override {
       last_literal_offset_ = literal_offset;
       last_patch_offset_ = patch_offset;
       last_target_offset_ = target_offset;
@@ -88,7 +88,7 @@
     void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                   const LinkerPatch& patch,
                                   uint32_t patch_offset,
-                                  uint32_t target_offset) OVERRIDE {
+                                  uint32_t target_offset) override {
       last_literal_offset_ = patch.LiteralOffset();
       last_patch_offset_ = patch_offset;
       last_target_offset_ = target_offset;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 9045c43..e8f57f5 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -103,16 +103,16 @@
   ChecksumUpdatingOutputStream(OutputStream* out, OatHeader* oat_header)
       : OutputStream(out->GetLocation()), out_(out), oat_header_(oat_header) { }
 
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+  bool WriteFully(const void* buffer, size_t byte_count) override {
     oat_header_->UpdateChecksum(buffer, byte_count);
     return out_->WriteFully(buffer, byte_count);
   }
 
-  off_t Seek(off_t offset, Whence whence) OVERRIDE {
+  off_t Seek(off_t offset, Whence whence) override {
     return out_->Seek(offset, whence);
   }
 
-  bool Flush() OVERRIDE {
+  bool Flush() override {
     return out_->Flush();
   }
 
@@ -826,7 +826,7 @@
         oat_class_index_(0u),
         method_offsets_index_(0u) {}
 
-  bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
+  bool StartClass(const DexFile* dex_file, size_t class_def_index) override {
     DexMethodVisitor::StartClass(dex_file, class_def_index);
     if (kIsDebugBuild && writer_->MayHaveCompiledMethods()) {
       // There are no oat classes if there aren't any compiled methods.
@@ -836,7 +836,7 @@
     return true;
   }
 
-  bool EndClass() OVERRIDE {
+  bool EndClass() override {
     ++oat_class_index_;
     return DexMethodVisitor::EndClass();
   }
@@ -862,7 +862,7 @@
       : DexMethodVisitor(writer, /* offset */ 0u) {}
 
   bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
-                   const ClassAccessor::Method& method) OVERRIDE {
+                   const ClassAccessor::Method& method) override {
     // Look for patches with .bss references and prepare maps with placeholders for their offsets.
     CompiledMethod* compiled_method = writer_->compiler_driver_->GetCompiledMethod(
         MethodReference(dex_file_, method.GetIndex()));
@@ -936,7 +936,7 @@
     DCHECK(num_classes == 0u || IsAligned<4u>(offset));
   }
 
-  bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
+  bool StartClass(const DexFile* dex_file, size_t class_def_index) override {
     DexMethodVisitor::StartClass(dex_file, class_def_index);
     compiled_methods_.clear();
     compiled_methods_with_code_ = 0u;
@@ -944,7 +944,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
-                   const ClassAccessor::Method& method) OVERRIDE {
+                   const ClassAccessor::Method& method) override {
     // Fill in the compiled_methods_ array for methods that have a
     // CompiledMethod. We track the number of non-null entries in
     // compiled_methods_with_code_ since we only want to allocate
@@ -959,7 +959,7 @@
     return true;
   }
 
-  bool EndClass() OVERRIDE {
+  bool EndClass() override {
     ClassReference class_ref(dex_file_, class_def_index_);
     ClassStatus status;
     bool found = writer_->compiler_driver_->GetCompiledClass(class_ref, &status);
@@ -1145,14 +1145,14 @@
       : OatDexMethodVisitor(writer, offset) {
   }
 
-  bool EndClass() OVERRIDE {
+  bool EndClass() override {
     OatDexMethodVisitor::EndClass();
     return true;
   }
 
   bool VisitMethod(size_t class_def_method_index,
                    const ClassAccessor::Method& method)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_)  {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
 
@@ -1248,7 +1248,7 @@
                                              std::move(ordered_methods)) {
   }
 
-  virtual bool VisitComplete() OVERRIDE {
+  bool VisitComplete() override {
     offset_ = writer_->relative_patcher_->ReserveSpaceEnd(offset_);
     if (generate_debug_info_) {
       std::vector<debug::MethodDebugInfo> thunk_infos =
@@ -1260,8 +1260,7 @@
     return true;
   }
 
-  virtual bool VisitMethod(const OrderedMethodData& method_data)
-      OVERRIDE
+  bool VisitMethod(const OrderedMethodData& method_data) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     OatClass* oat_class = method_data.oat_class;
     CompiledMethod* compiled_method = method_data.compiled_method;
@@ -1445,7 +1444,7 @@
 
   bool VisitMethod(size_t class_def_method_index,
                    const ClassAccessor::Method& method ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
     CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -1495,7 +1494,7 @@
   // in the same oat file. If the origin and the copied methods are
   // in different oat files, don't touch the copied method.
   // References to other oat files are not supported yet.
-  bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE
+  bool StartClass(const DexFile* dex_file, size_t class_def_index) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     OatDexMethodVisitor::StartClass(dex_file, class_def_index);
     // Skip classes that are not in the image.
@@ -1533,7 +1532,7 @@
     return true;
   }
 
-  bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) OVERRIDE
+  bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Skip methods that are not in the image.
     if (!IsImageClass()) {
@@ -1652,7 +1651,7 @@
     }
   }
 
-  virtual bool VisitStart() OVERRIDE {
+  bool VisitStart() override {
     return true;
   }
 
@@ -1681,7 +1680,7 @@
     return true;
   }
 
-  virtual bool VisitMethod(const OrderedMethodData& method_data) OVERRIDE
+  bool VisitMethod(const OrderedMethodData& method_data) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     const MethodReference& method_ref = method_data.method_reference;
     UpdateDexFileAndDexCache(method_ref.dex_file);
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 0264b09..bd09f23 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -406,11 +406,11 @@
   std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
                                                   tmp_oat.GetFilename(),
                                                   tmp_oat.GetFilename(),
-                                                  nullptr,
-                                                  nullptr,
-                                                  false,
-                                                  /*low_4gb*/true,
-                                                  nullptr,
+                                                  /* requested_base */ nullptr,
+                                                  /* executable */ false,
+                                                  /* low_4gb */ true,
+                                                  /* abs_dex_location */ nullptr,
+                                                  /* reservation */ nullptr,
                                                   &error_msg));
   ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
   const OatHeader& oat_header = oat_file->GetOatHeader();
@@ -529,11 +529,11 @@
   std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
                                                   tmp_oat.GetFilename(),
                                                   tmp_oat.GetFilename(),
-                                                  nullptr,
-                                                  nullptr,
-                                                  false,
-                                                  /*low_4gb*/false,
-                                                  nullptr,
+                                                  /* requested_base */ nullptr,
+                                                  /* executable */ false,
+                                                  /* low_4gb */ false,
+                                                  /* abs_dex_location */ nullptr,
+                                                  /* reservation */ nullptr,
                                                   &error_msg));
   ASSERT_TRUE(oat_file != nullptr);
   EXPECT_LT(static_cast<size_t>(oat_file->Size()),
@@ -607,11 +607,11 @@
   std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
                                                          tmp_oat.GetFilename(),
                                                          tmp_oat.GetFilename(),
-                                                         nullptr,
-                                                         nullptr,
-                                                         false,
+                                                         /* requested_base */ nullptr,
+                                                         /* executable */ false,
                                                          low_4gb,
-                                                         nullptr,
+                                                         /* abs_dex_location */ nullptr,
+                                                         /* reservation */ nullptr,
                                                          &error_msg));
   ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
   if (low_4gb) {
@@ -738,11 +738,11 @@
       std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
                                                              tmp_oat.GetFilename(),
                                                              tmp_oat.GetFilename(),
-                                                             nullptr,
-                                                             nullptr,
-                                                             false,
-                                                             /*low_4gb*/false,
-                                                             nullptr,
+                                                             /* requested_base */ nullptr,
+                                                             /* executable */ false,
+                                                             /* low_4gb */ false,
+                                                             /* abs_dex_location */ nullptr,
+                                                             /* reservation */ nullptr,
                                                              &error_msg));
       ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
       ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
@@ -788,11 +788,11 @@
       std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
                                                              tmp_oat.GetFilename(),
                                                              tmp_oat.GetFilename(),
-                                                             nullptr,
-                                                             nullptr,
-                                                             false,
-                                                             /*low_4gb*/false,
-                                                             nullptr,
+                                                             /* requested_base */ nullptr,
+                                                             /* executable */ false,
+                                                             /* low_4gb */ false,
+                                                             /* abs_dex_location */ nullptr,
+                                                             /* reservation */ nullptr,
                                                              &error_msg));
       ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
       ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
diff --git a/dex2oat/linker/relative_patcher.cc b/dex2oat/linker/relative_patcher.cc
index b6135c9..564cf30 100644
--- a/dex2oat/linker/relative_patcher.cc
+++ b/dex2oat/linker/relative_patcher.cc
@@ -45,35 +45,35 @@
     const InstructionSetFeatures* features,
     RelativePatcherThunkProvider* thunk_provider,
     RelativePatcherTargetProvider* target_provider) {
-  class RelativePatcherNone FINAL : public RelativePatcher {
+  class RelativePatcherNone final : public RelativePatcher {
    public:
     RelativePatcherNone() { }
 
     uint32_t ReserveSpace(uint32_t offset,
                           const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
-                          MethodReference method_ref ATTRIBUTE_UNUSED) OVERRIDE {
+                          MethodReference method_ref ATTRIBUTE_UNUSED) override {
       return offset;  // No space reserved; no patches expected.
     }
 
-    uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE {
+    uint32_t ReserveSpaceEnd(uint32_t offset) override {
       return offset;  // No space reserved; no patches expected.
     }
 
-    uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
+    uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) override {
       return offset;  // No thunks added; no patches expected.
     }
 
     void PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                    uint32_t literal_offset ATTRIBUTE_UNUSED,
                    uint32_t patch_offset ATTRIBUTE_UNUSED,
-                   uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
+                   uint32_t target_offset ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "Unexpected relative call patch.";
     }
 
     void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                   const LinkerPatch& patch ATTRIBUTE_UNUSED,
                                   uint32_t patch_offset ATTRIBUTE_UNUSED,
-                                  uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
+                                  uint32_t target_offset ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "Unexpected relative dex cache array patch.";
     }
 
@@ -84,7 +84,7 @@
     }
 
     std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(
-        uint32_t executable_offset ATTRIBUTE_UNUSED) OVERRIDE {
+        uint32_t executable_offset ATTRIBUTE_UNUSED) override {
       return std::vector<debug::MethodDebugInfo>();  // No thunks added.
     }
 
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index 9556c5f..9725570 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -57,7 +57,7 @@
     patched_code_.reserve(16 * KB);
   }
 
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     OverrideInstructionSetFeatures(instruction_set_, variant_);
     CommonCompilerTest::SetUp();
 
@@ -67,7 +67,7 @@
                                        &method_offset_map_);
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     compiled_methods_.clear();
     patcher_.reset();
     CommonCompilerTest::TearDown();
@@ -260,7 +260,7 @@
 
     void GetThunkCode(const LinkerPatch& patch,
                       /*out*/ ArrayRef<const uint8_t>* code,
-                      /*out*/ std::string* debug_name) OVERRIDE {
+                      /*out*/ std::string* debug_name) override {
       auto it = thunk_map_.find(ThunkKey(patch));
       CHECK(it != thunk_map_.end());
       const ThunkValue& value = it->second;
@@ -316,9 +316,9 @@
 
   // Map method reference to assigned offset.
   // Wrap the map in a class implementing RelativePatcherTargetProvider.
-  class MethodOffsetMap FINAL : public RelativePatcherTargetProvider {
+  class MethodOffsetMap final : public RelativePatcherTargetProvider {
    public:
-    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE {
+    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) override {
       auto it = map.find(ref);
       if (it == map.end()) {
         return std::pair<bool, uint32_t>(false, 0u);
diff --git a/dex2oat/linker/x86/relative_patcher_x86.h b/dex2oat/linker/x86/relative_patcher_x86.h
index e723580..3da62fb 100644
--- a/dex2oat/linker/x86/relative_patcher_x86.h
+++ b/dex2oat/linker/x86/relative_patcher_x86.h
@@ -22,17 +22,17 @@
 namespace art {
 namespace linker {
 
-class X86RelativePatcher FINAL : public X86BaseRelativePatcher {
+class X86RelativePatcher final : public X86BaseRelativePatcher {
  public:
   X86RelativePatcher() { }
 
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
 };
 
 }  // namespace linker
diff --git a/dex2oat/linker/x86/relative_patcher_x86_base.h b/dex2oat/linker/x86/relative_patcher_x86_base.h
index 4cc7b07..a1925e0 100644
--- a/dex2oat/linker/x86/relative_patcher_x86_base.h
+++ b/dex2oat/linker/x86/relative_patcher_x86_base.h
@@ -26,14 +26,14 @@
  public:
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+                 uint32_t target_offset) override;
+  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
 
  protected:
   X86BaseRelativePatcher() { }
diff --git a/dex2oat/linker/x86_64/relative_patcher_x86_64.h b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
index a31e1eb..a82fef3 100644
--- a/dex2oat/linker/x86_64/relative_patcher_x86_64.h
+++ b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
@@ -22,17 +22,17 @@
 namespace art {
 namespace linker {
 
-class X86_64RelativePatcher FINAL : public X86BaseRelativePatcher {
+class X86_64RelativePatcher final : public X86BaseRelativePatcher {
  public:
   X86_64RelativePatcher() { }
 
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
 };
 
 }  // namespace linker
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index f8274e2..e9b6402 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -123,8 +123,7 @@
 /*
  * Converts a type descriptor to human-readable "dotted" form.  For
  * example, "Ljava/lang/String;" becomes "java.lang.String", and
- * "[I" becomes "int[]".  Also converts '$' to '.', which means this
- * form can't be converted back to a descriptor.
+ * "[I" becomes "int[]".
  */
 static std::unique_ptr<char[]> descriptorToDot(const char* str) {
   int targetLen = strlen(str);
@@ -157,7 +156,7 @@
   int i = 0;
   for (; i < targetLen; i++) {
     const char ch = str[offset + i];
-    newStr[i] = (ch == '/' || ch == '$') ? '.' : ch;
+    newStr[i] = (ch == '/') ? '.' : ch;
   }  // for
 
   // Add the appropriate number of brackets for arrays.
@@ -171,10 +170,9 @@
 }
 
 /*
- * Converts the class name portion of a type descriptor to human-readable
- * "dotted" form. For example, "Ljava/lang/String;" becomes "String".
+ * Retrieves the class name portion of a type descriptor.
  */
-static std::unique_ptr<char[]> descriptorClassToDot(const char* str) {
+static std::unique_ptr<char[]> descriptorClassToName(const char* str) {
   // Reduce to just the class name prefix.
   const char* lastSlash = strrchr(str, '/');
   if (lastSlash == nullptr) {
@@ -187,8 +185,7 @@
   const int targetLen = strlen(lastSlash);
   std::unique_ptr<char[]> newStr(new char[targetLen]);
   for (int i = 0; i < targetLen - 1; i++) {
-    const char ch = lastSlash[i];
-    newStr[i] = ch == '$' ? '.' : ch;
+    newStr[i] = lastSlash[i];
   }  // for
   newStr[targetLen - 1] = '\0';
   return newStr;
@@ -1250,7 +1247,7 @@
 
     // Method name and prototype.
     if (constructor) {
-      std::unique_ptr<char[]> dot(descriptorClassToDot(backDescriptor));
+      std::unique_ptr<char[]> dot(descriptorClassToName(backDescriptor));
       fprintf(gOutFile, "<constructor name=\"%s\"\n", dot.get());
       dot = descriptorToDot(backDescriptor);
       fprintf(gOutFile, " type=\"%s\"\n", dot.get());
@@ -1469,7 +1466,7 @@
     }
     fprintf(gOutFile, "  Interfaces        -\n");
   } else {
-    std::unique_ptr<char[]> dot(descriptorClassToDot(classDescriptor));
+    std::unique_ptr<char[]> dot(descriptorClassToName(classDescriptor));
     fprintf(gOutFile, "<class name=\"%s\"\n", dot.get());
     if (superclassDescriptor != nullptr) {
       dot = descriptorToDot(superclassDescriptor);
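
With the '$'-to-'.' substitution removed, the two dexdump helpers above behave as sketched here (illustrative expectations, not part of the patch):

    // descriptorToDot: full human-readable form; '$' in nested-class names is kept,
    // so the dotted form still maps back to a unique descriptor.
    //   descriptorToDot("Ljava/lang/String;")          -> "java.lang.String"
    //   descriptorToDot("[I")                           -> "int[]"
    //   descriptorToDot("Ljava/util/Map$Entry;")        -> "java.util.Map$Entry"
    // descriptorClassToName: class-name portion only, trailing ';' dropped.
    //   descriptorClassToName("Ljava/util/Map$Entry;")  -> "Map$Entry"
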
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index e7d5ed9..c81d0c7 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -112,15 +112,15 @@
  public:
   class Container : public DexContainer {
    public:
-    Section* GetMainSection() OVERRIDE {
+    Section* GetMainSection() override {
       return &main_section_;
     }
 
-    Section* GetDataSection() OVERRIDE {
+    Section* GetDataSection() override {
       return &data_section_;
     }
 
-    bool IsCompactDexContainer() const OVERRIDE {
+    bool IsCompactDexContainer() const override {
       return true;
     }
 
@@ -139,21 +139,21 @@
   // Return true if we can generate compact dex for the IR.
   bool CanGenerateCompactDex(std::string* error_msg);
 
-  bool Write(DexContainer* output, std::string* error_msg) OVERRIDE;
+  bool Write(DexContainer* output, std::string* error_msg) override;
 
-  std::unique_ptr<DexContainer> CreateDexContainer() const OVERRIDE;
+  std::unique_ptr<DexContainer> CreateDexContainer() const override;
 
-  void WriteHeader(Stream* stream) OVERRIDE;
+  void WriteHeader(Stream* stream) override;
 
-  size_t GetHeaderSize() const OVERRIDE;
+  size_t GetHeaderSize() const override;
 
   uint32_t WriteDebugInfoOffsetTable(Stream* stream);
 
-  void WriteCodeItem(Stream* stream, dex_ir::CodeItem* code_item, bool reserve_only) OVERRIDE;
+  void WriteCodeItem(Stream* stream, dex_ir::CodeItem* code_item, bool reserve_only) override;
 
-  void WriteStringData(Stream* stream, dex_ir::StringData* string_data) OVERRIDE;
+  void WriteStringData(Stream* stream, dex_ir::StringData* string_data) override;
 
-  void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) OVERRIDE;
+  void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) override;
 
   void SortDebugInfosByMethodIndex();
 
diff --git a/dexlayout/dex_container.h b/dexlayout/dex_container.h
index 2b9a5f9..2d742b0 100644
--- a/dexlayout/dex_container.h
+++ b/dexlayout/dex_container.h
@@ -57,19 +57,19 @@
    public:
     virtual ~VectorSection() {}
 
-    uint8_t* Begin() OVERRIDE {
+    uint8_t* Begin() override {
       return &data_[0];
     }
 
-    size_t Size() const OVERRIDE {
+    size_t Size() const override {
       return data_.size();
     }
 
-    void Resize(size_t size) OVERRIDE {
+    void Resize(size_t size) override {
       data_.resize(size, 0u);
     }
 
-    void Clear() OVERRIDE {
+    void Clear() override {
       data_.clear();
     }
 
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 8f853ea..178a4d4 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -233,7 +233,7 @@
     // Preallocate so that assignment does not invalidate pointers into the vector.
     collection_.reserve(size);
   }
-  virtual ~CollectionVector() OVERRIDE { }
+  ~CollectionVector() override { }
 
   template<class... Args>
   T* CreateAndAddItem(Args&&... args) {
@@ -242,7 +242,7 @@
     return object;
   }
 
-  virtual uint32_t Size() const OVERRIDE { return collection_.size(); }
+  uint32_t Size() const override { return collection_.size(); }
 
   Iterator<ElementType> begin() const { return Iterator<ElementType>(collection_, 0U, Size()); }
   Iterator<ElementType> end() const { return Iterator<ElementType>(collection_, Size(), Size()); }
@@ -406,7 +406,7 @@
                       data_size,
                       data_offset);
   }
-  ~Header() OVERRIDE { }
+  ~Header() override { }
 
   static size_t ItemSize() { return kHeaderItemSize; }
 
@@ -590,7 +590,7 @@
   explicit StringId(StringData* string_data) : string_data_(string_data) {
     size_ = kStringIdItemSize;
   }
-  ~StringId() OVERRIDE { }
+  ~StringId() override { }
 
   static size_t ItemSize() { return kStringIdItemSize; }
 
@@ -608,7 +608,7 @@
 class TypeId : public IndexedItem {
  public:
   explicit TypeId(StringId* string_id) : string_id_(string_id) { size_ = kTypeIdItemSize; }
-  ~TypeId() OVERRIDE { }
+  ~TypeId() override { }
 
   static size_t ItemSize() { return kTypeIdItemSize; }
 
@@ -629,7 +629,7 @@
   explicit TypeList(TypeIdVector* type_list) : type_list_(type_list) {
     size_ = sizeof(uint32_t) + (type_list->size() * sizeof(uint16_t));
   }
-  ~TypeList() OVERRIDE { }
+  ~TypeList() override { }
 
   const TypeIdVector* GetTypeList() const { return type_list_.get(); }
 
@@ -644,7 +644,7 @@
   ProtoId(const StringId* shorty, const TypeId* return_type, TypeList* parameters)
       : shorty_(shorty), return_type_(return_type), parameters_(parameters)
       { size_ = kProtoIdItemSize; }
-  ~ProtoId() OVERRIDE { }
+  ~ProtoId() override { }
 
   static size_t ItemSize() { return kProtoIdItemSize; }
 
@@ -666,7 +666,7 @@
  public:
   FieldId(const TypeId* klass, const TypeId* type, const StringId* name)
       : class_(klass), type_(type), name_(name) { size_ = kFieldIdItemSize; }
-  ~FieldId() OVERRIDE { }
+  ~FieldId() override { }
 
   static size_t ItemSize() { return kFieldIdItemSize; }
 
@@ -688,7 +688,7 @@
  public:
   MethodId(const TypeId* klass, const ProtoId* proto, const StringId* name)
       : class_(klass), proto_(proto), name_(name) { size_ = kMethodIdItemSize; }
-  ~MethodId() OVERRIDE { }
+  ~MethodId() override { }
 
   static size_t ItemSize() { return kMethodIdItemSize; }
 
@@ -710,7 +710,7 @@
  public:
   FieldItem(uint32_t access_flags, const FieldId* field_id)
       : access_flags_(access_flags), field_id_(field_id) { }
-  ~FieldItem() OVERRIDE { }
+  ~FieldItem() override { }
 
   FieldItem(FieldItem&&) = default;
 
@@ -732,7 +732,7 @@
  public:
   MethodItem(uint32_t access_flags, const MethodId* method_id, CodeItem* code)
       : access_flags_(access_flags), method_id_(method_id), code_(code) { }
-  ~MethodItem() OVERRIDE { }
+  ~MethodItem() override { }
 
   MethodItem(MethodItem&&) = default;
 
@@ -876,7 +876,7 @@
         direct_methods_(direct_methods),
         virtual_methods_(virtual_methods) { }
 
-  ~ClassData() OVERRIDE = default;
+  ~ClassData() override = default;
   FieldItemVector* StaticFields() { return static_fields_.get(); }
   FieldItemVector* InstanceFields() { return instance_fields_.get(); }
   MethodItemVector* DirectMethods() { return direct_methods_.get(); }
@@ -912,7 +912,7 @@
         class_data_(class_data),
         static_values_(static_values) { size_ = kClassDefItemSize; }
 
-  ~ClassDef() OVERRIDE { }
+  ~ClassDef() override { }
 
   static size_t ItemSize() { return kClassDefItemSize; }
 
@@ -980,7 +980,7 @@
  public:
   TryItem(uint32_t start_addr, uint16_t insn_count, const CatchHandler* handlers)
       : start_addr_(start_addr), insn_count_(insn_count), handlers_(handlers) { }
-  ~TryItem() OVERRIDE { }
+  ~TryItem() override { }
 
   uint32_t StartAddr() const { return start_addr_; }
   uint16_t InsnCount() const { return insn_count_; }
@@ -1042,7 +1042,7 @@
         tries_(tries),
         handlers_(handlers) { }
 
-  ~CodeItem() OVERRIDE { }
+  ~CodeItem() override { }
 
   uint16_t RegistersSize() const { return registers_size_; }
   uint16_t InsSize() const { return ins_size_; }
@@ -1115,7 +1115,7 @@
   explicit AnnotationSetItem(std::vector<AnnotationItem*>* items) : items_(items) {
     size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
   }
-  ~AnnotationSetItem() OVERRIDE { }
+  ~AnnotationSetItem() override { }
 
   std::vector<AnnotationItem*>* GetItems() { return items_.get(); }
 
@@ -1132,7 +1132,7 @@
   explicit AnnotationSetRefList(std::vector<AnnotationSetItem*>* items) : items_(items) {
     size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
   }
-  ~AnnotationSetRefList() OVERRIDE { }
+  ~AnnotationSetRefList() override { }
 
   std::vector<AnnotationSetItem*>* GetItems() { return items_.get(); }
 
@@ -1227,7 +1227,7 @@
   explicit CallSiteId(EncodedArrayItem* call_site_item) : call_site_item_(call_site_item) {
     size_ = kCallSiteIdItemSize;
   }
-  ~CallSiteId() OVERRIDE { }
+  ~CallSiteId() override { }
 
   static size_t ItemSize() { return kCallSiteIdItemSize; }
 
@@ -1248,7 +1248,7 @@
         field_or_method_id_(field_or_method_id) {
     size_ = kMethodHandleItemSize;
   }
-  ~MethodHandleItem() OVERRIDE { }
+  ~MethodHandleItem() override { }
 
   static size_t ItemSize() { return kMethodHandleItemSize; }
 
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index a83a46b..ca6ff9e 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -92,7 +92,7 @@
 template<class T> class CollectionMap : public CollectionBase {
  public:
   CollectionMap() = default;
-  virtual ~CollectionMap() OVERRIDE { }
+  ~CollectionMap() override { }
 
   template <class... Args>
   T* CreateAndAddItem(CollectionVector<T>& vector,
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index db1898b..dd2ebad 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -192,15 +192,15 @@
 
   class Container : public DexContainer {
    public:
-    Section* GetMainSection() OVERRIDE {
+    Section* GetMainSection() override {
       return &main_section_;
     }
 
-    Section* GetDataSection() OVERRIDE {
+    Section* GetDataSection() override {
       return &data_section_;
     }
 
-    bool IsCompactDexContainer() const OVERRIDE {
+    bool IsCompactDexContainer() const override {
       return false;
     }
 
diff --git a/dexlayout/dexdiag_test.cc b/dexlayout/dexdiag_test.cc
index 53145c2..60dd7e4 100644
--- a/dexlayout/dexdiag_test.cc
+++ b/dexlayout/dexdiag_test.cc
@@ -71,11 +71,11 @@
     std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd */ -1,
                                                oat_location.c_str(),
                                                oat_location.c_str(),
-                                               nullptr,
-                                               nullptr,
-                                               false,
-                                               /*low_4gb*/false,
-                                               nullptr,
+                                               /* requested_base */ nullptr,
+                                               /* executable */ false,
+                                               /* low_4gb */ false,
+                                               /* abs_dex_location */ nullptr,
+                                               /* reservation */ nullptr,
                                                &error_msg));
     EXPECT_TRUE(oat != nullptr) << error_msg;
     return oat;
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index d6dd9d1..52d355b 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -71,26 +71,10 @@
 }
 
 /*
- * Converts a type descriptor to human-readable "dotted" form.  For
- * example, "Ljava/lang/String;" becomes "java.lang.String", and
- * "[I" becomes "int[]".  Also converts '$' to '.', which means this
- * form can't be converted back to a descriptor.
- */
-static std::string DescriptorToDotWrapper(const char* descriptor) {
-  std::string result = DescriptorToDot(descriptor);
-  size_t found = result.find('$');
-  while (found != std::string::npos) {
-    result[found] = '.';
-    found = result.find('$', found);
-  }
-  return result;
-}
-
-/*
  * Converts the class name portion of a type descriptor to human-readable
  * "dotted" form. For example, "Ljava/lang/String;" becomes "String".
  */
-static std::string DescriptorClassToDot(const char* str) {
+static std::string DescriptorClassToName(const char* str) {
   std::string descriptor(str);
   // Reduce to just the class name prefix.
   size_t last_slash = descriptor.rfind('/');
@@ -104,13 +88,6 @@
   size_t size = descriptor.size() - 1 - last_slash;
   std::string result(descriptor.substr(last_slash, size));
 
-  // Replace '$' with '.'.
-  size_t dollar_sign = result.find('$');
-  while (dollar_sign != std::string::npos) {
-    result[dollar_sign] = '.';
-    dollar_sign = result.find('$', dollar_sign);
-  }
-
   return result;
 }
 
@@ -786,7 +763,7 @@
   if (options_.output_format_ == kOutputPlain) {
     fprintf(out_file_, "    #%d              : '%s'\n", i, interface_name);
   } else {
-    std::string dot(DescriptorToDotWrapper(interface_name));
+    std::string dot(DescriptorToDot(interface_name));
     fprintf(out_file_, "<implements name=\"%s\">\n</implements>\n", dot.c_str());
   }
 }
@@ -1044,7 +1021,7 @@
   const char* back_descriptor = method_id->Class()->GetStringId()->Data();
 
   // Generate header.
-  std::string dot(DescriptorToDotWrapper(back_descriptor));
+  std::string dot(DescriptorToDot(back_descriptor));
   fprintf(out_file_, "%06x:                                        |[%06x] %s.%s:%s\n",
           code_offset, code_offset, dot.c_str(), name, type_descriptor.c_str());
 
@@ -1212,9 +1189,9 @@
 
     // Method name and prototype.
     if (constructor) {
-      std::string dot(DescriptorClassToDot(back_descriptor));
+      std::string dot(DescriptorClassToName(back_descriptor));
       fprintf(out_file_, "<constructor name=\"%s\"\n", dot.c_str());
-      dot = DescriptorToDotWrapper(back_descriptor);
+      dot = DescriptorToDot(back_descriptor);
       fprintf(out_file_, " type=\"%s\"\n", dot.c_str());
     } else {
       fprintf(out_file_, "<method name=\"%s\"\n", name);
@@ -1223,7 +1200,7 @@
         LOG(ERROR) << "bad method type descriptor '" << type_descriptor << "'";
         goto bail;
       }
-      std::string dot(DescriptorToDotWrapper(return_type + 1));
+      std::string dot(DescriptorToDot(return_type + 1));
       fprintf(out_file_, " return=\"%s\"\n", dot.c_str());
       fprintf(out_file_, " abstract=%s\n", QuotedBool((flags & kAccAbstract) != 0));
       fprintf(out_file_, " native=%s\n", QuotedBool((flags & kAccNative) != 0));
@@ -1265,7 +1242,7 @@
       }
       // Null terminate and display.
       *cp++ = '\0';
-      std::string dot(DescriptorToDotWrapper(tmp_buf));
+      std::string dot(DescriptorToDot(tmp_buf));
       fprintf(out_file_, "<parameter name=\"arg%d\" type=\"%s\">\n"
                         "</parameter>\n", arg_num++, dot.c_str());
     }  // while
@@ -1309,7 +1286,7 @@
     }
   } else if (options_.output_format_ == kOutputXml) {
     fprintf(out_file_, "<field name=\"%s\"\n", name);
-    std::string dot(DescriptorToDotWrapper(type_descriptor));
+    std::string dot(DescriptorToDot(type_descriptor));
     fprintf(out_file_, " type=\"%s\"\n", dot.c_str());
     fprintf(out_file_, " transient=%s\n", QuotedBool((flags & kAccTransient) != 0));
     fprintf(out_file_, " volatile=%s\n", QuotedBool((flags & kAccVolatile) != 0));
@@ -1415,10 +1392,10 @@
     }
     fprintf(out_file_, "  Interfaces        -\n");
   } else {
-    std::string dot(DescriptorClassToDot(class_descriptor));
+    std::string dot(DescriptorClassToName(class_descriptor));
     fprintf(out_file_, "<class name=\"%s\"\n", dot.c_str());
     if (superclass_descriptor != nullptr) {
-      dot = DescriptorToDotWrapper(superclass_descriptor);
+      dot = DescriptorToDot(superclass_descriptor);
       fprintf(out_file_, " extends=\"%s\"\n", dot.c_str());
     }
     fprintf(out_file_, " interface=%s\n",
@@ -1817,14 +1794,20 @@
   // If options_.output_dex_directory_ is non-null, we are outputting to a file.
   if (options_.output_dex_directory_ != nullptr) {
     std::string output_location(options_.output_dex_directory_);
-    size_t last_slash = dex_file_location.rfind('/');
+    const size_t last_slash = dex_file_location.rfind('/');
     std::string dex_file_directory = dex_file_location.substr(0, last_slash + 1);
     if (output_location == dex_file_directory) {
       output_location = dex_file_location + ".new";
-    } else if (last_slash != std::string::npos) {
-      output_location += dex_file_location.substr(last_slash);
     } else {
-      output_location += "/" + dex_file_location + ".new";
+      if (!output_location.empty() && output_location.back() != '/') {
+        output_location += "/";
+      }
+      const size_t separator = dex_file_location.rfind('!');
+      if (separator != std::string::npos) {
+        output_location += dex_file_location.substr(separator + 1);
+      } else {
+        output_location += "classes.dex";
+      }
     }
     new_file.reset(OS::CreateEmptyFile(output_location.c_str()));
     if (new_file == nullptr) {
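
A minimal sketch of how the new output-location branch above picks a file name (standalone illustration; the helper is hypothetical):

    #include <string>

    // Mirrors the logic above: a dex location inside a container names its entry
    // after a '!' separator (e.g. "/system/framework/foo.jar!classes2.dex"), and
    // that trailing component becomes the output name; with no '!' present the
    // name falls back to "classes.dex".
    static std::string ResolveOutputName(const std::string& dex_file_location) {  // hypothetical helper
      const std::string::size_type separator = dex_file_location.rfind('!');
      return (separator != std::string::npos)
          ? dex_file_location.substr(separator + 1)
          : "classes.dex";
    }
    // ResolveOutputName("/system/framework/foo.jar!classes2.dex") -> "classes2.dex"
    // ResolveOutputName("/data/local/tmp/core-oj-hostdex.jar")    -> "classes.dex"
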
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 71e56d19..9f73347 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -60,6 +60,7 @@
   LOG(ERROR) << " -p : profile file name (defaults to no profile)";
   LOG(ERROR) << " -s : visualize reference pattern";
   LOG(ERROR) << " -t : display file section sizes";
+  LOG(ERROR) << " -u : update dex checksums";
   LOG(ERROR) << " -v : verify output file is canonical to input (IR level comparison)";
   LOG(ERROR) << " -w : output dex directory";
   LOG(ERROR) << " -x : compact dex generation level, either 'none' or 'fast'";
@@ -85,7 +86,7 @@
 
   // Parse all arguments.
   while (1) {
-    const int ic = getopt(argc, argv, "abcdefghil:o:p:stvw:x:");
+    const int ic = getopt(argc, argv, "abcdefghil:o:p:stuvw:x:");
     if (ic < 0) {
       break;  // done
     }
@@ -138,6 +139,9 @@
         options.show_section_statistics_ = true;
         options.verbose_ = false;
         break;
+      case 'u':  // update checksum
+        options.update_checksum_ = true;
+        break;
       case 'v':  // verify output
         options.verify_output_ = true;
         break;
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index a20930b..187c687 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -267,7 +267,7 @@
     ScratchFile dexlayout_output;
     const std::string& dexlayout_filename = dexlayout_output.GetFilename();
 
-    for (const std::string &dex_file : GetLibCoreDexFileNames()) {
+    for (const std::string& dex_file : GetLibCoreDexFileNames()) {
       std::vector<std::string> dexdump_exec_argv =
           { dexdump, "-d", "-f", "-h", "-l", "plain", "-o", dexdump_filename, dex_file };
       std::vector<std::string> dexlayout_args =
@@ -293,31 +293,33 @@
     const std::string& tmp_name = tmp_file.GetFilename();
     size_t tmp_last_slash = tmp_name.rfind('/');
     std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1);
+    std::string unzip_dir = tmp_dir + "unzip/";
 
-    for (const std::string &dex_file : GetLibCoreDexFileNames()) {
+    for (const std::string& dex_file : GetLibCoreDexFileNames()) {
       std::vector<std::string> dexlayout_args =
           { "-w", tmp_dir, "-o", tmp_name, dex_file };
       if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
         return false;
       }
-      size_t dex_file_last_slash = dex_file.rfind('/');
-      std::string dex_file_name = dex_file.substr(dex_file_last_slash + 1);
+      std::string dex_file_name = "classes.dex";
       std::vector<std::string> unzip_exec_argv =
-          { "/usr/bin/unzip", dex_file, "classes.dex", "-d", tmp_dir};
+          { "/usr/bin/unzip", dex_file, "classes.dex", "-d", unzip_dir};
       if (!::art::Exec(unzip_exec_argv, error_msg)) {
         return false;
       }
       std::vector<std::string> diff_exec_argv =
-          { "/usr/bin/diff", tmp_dir + "classes.dex" , tmp_dir + dex_file_name };
+          { "/usr/bin/diff", tmp_dir + "classes.dex" , unzip_dir + dex_file_name };
       if (!::art::Exec(diff_exec_argv, error_msg)) {
         return false;
       }
-      if (!UnlinkFile(tmp_dir + "classes.dex")) {
+      if (!UnlinkFile(unzip_dir + "classes.dex")) {
         return false;
       }
       if (!UnlinkFile(tmp_dir + dex_file_name)) {
         return false;
       }
+      // Remove the unzip temp directory so that unlinking android_data doesn't fail.
+      EXPECT_EQ(rmdir(unzip_dir.c_str()), 0);
     }
     return true;
   }
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 871cd08..00b8ef2 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -132,7 +132,7 @@
   exit(kErrorInvalidArguments);
 }
 
-class DexoptAnalyzer FINAL {
+class DexoptAnalyzer final {
  public:
   DexoptAnalyzer() :
       assume_profile_changed_(false),
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 49f9249..c1a6f59 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -39,15 +39,15 @@
 
 static const vixl::aarch32::Register tr(TR);
 
-class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
-  class CustomDisassemblerStream FINAL : public DisassemblerStream {
+class DisassemblerArm::CustomDisassembler final : public PrintDisassembler {
+  class CustomDisassemblerStream final : public DisassemblerStream {
    public:
     CustomDisassemblerStream(std::ostream& os,
                              const CustomDisassembler* disasm,
                              const DisassemblerOptions* options)
         : DisassemblerStream(os), disasm_(disasm), options_(options) {}
 
-    DisassemblerStream& operator<<(const PrintLabel& label) OVERRIDE {
+    DisassemblerStream& operator<<(const PrintLabel& label) override {
       const LocationType type = label.GetLocationType();
 
       switch (type) {
@@ -73,7 +73,7 @@
       }
     }
 
-    DisassemblerStream& operator<<(vixl::aarch32::Register reg) OVERRIDE {
+    DisassemblerStream& operator<<(vixl::aarch32::Register reg) override {
       if (reg.Is(tr)) {
         os() << "tr";
         return *this;
@@ -82,7 +82,7 @@
       }
     }
 
-    DisassemblerStream& operator<<(const MemOperand& operand) OVERRIDE {
+    DisassemblerStream& operator<<(const MemOperand& operand) override {
       // VIXL must use a PrintLabel object whenever the base register is PC;
       // the following check verifies this invariant, and guards against bugs.
       DCHECK(!operand.GetBaseRegister().Is(pc));
@@ -96,7 +96,7 @@
       return *this;
     }
 
-    DisassemblerStream& operator<<(const vixl::aarch32::AlignedMemOperand& operand) OVERRIDE {
+    DisassemblerStream& operator<<(const vixl::aarch32::AlignedMemOperand& operand) override {
       // VIXL must use a PrintLabel object whenever the base register is PC;
       // the following check verifies this invariant, and guards against bugs.
       DCHECK(!operand.GetBaseRegister().Is(pc));
@@ -116,7 +116,7 @@
         disassembler_stream_(os, this, options),
         is_t32_(true) {}
 
-  void PrintCodeAddress(uint32_t prog_ctr) OVERRIDE {
+  void PrintCodeAddress(uint32_t prog_ctr) override {
     os() << "0x" << std::hex << std::setw(8) << std::setfill('0') << prog_ctr << ": ";
   }
 
diff --git a/disassembler/disassembler_arm.h b/disassembler/disassembler_arm.h
index 237b577..dd6621d 100644
--- a/disassembler/disassembler_arm.h
+++ b/disassembler/disassembler_arm.h
@@ -26,14 +26,14 @@
 namespace art {
 namespace arm {
 
-class DisassemblerArm FINAL : public Disassembler {
+class DisassemblerArm final : public Disassembler {
   class CustomDisassembler;
 
  public:
   explicit DisassemblerArm(DisassemblerOptions* options);
 
-  size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
-  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+  size_t Dump(std::ostream& os, const uint8_t* begin) override;
+  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
 
  private:
   uintptr_t GetPc(uintptr_t instr_ptr) const {
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 19e4dfb..89beaa9 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -29,7 +29,7 @@
 namespace art {
 namespace arm64 {
 
-class CustomDisassembler FINAL : public vixl::aarch64::Disassembler {
+class CustomDisassembler final : public vixl::aarch64::Disassembler {
  public:
   explicit CustomDisassembler(DisassemblerOptions* options)
       : vixl::aarch64::Disassembler(),
@@ -45,13 +45,13 @@
 
   // Use register aliases in the disassembly.
   void AppendRegisterNameToOutput(const vixl::aarch64::Instruction* instr,
-                                  const vixl::aarch64::CPURegister& reg) OVERRIDE;
+                                  const vixl::aarch64::CPURegister& reg) override;
 
   // Improve the disassembly of literal load instructions.
-  void VisitLoadLiteral(const vixl::aarch64::Instruction* instr) OVERRIDE;
+  void VisitLoadLiteral(const vixl::aarch64::Instruction* instr) override;
 
   // Improve the disassembly of thread offset.
-  void VisitLoadStoreUnsignedOffset(const vixl::aarch64::Instruction* instr) OVERRIDE;
+  void VisitLoadStoreUnsignedOffset(const vixl::aarch64::Instruction* instr) override;
 
  private:
   // Indicate if the disassembler should read data loaded from literal pools.
@@ -69,15 +69,15 @@
   DisassemblerOptions* options_;
 };
 
-class DisassemblerArm64 FINAL : public Disassembler {
+class DisassemblerArm64 final : public Disassembler {
  public:
   explicit DisassemblerArm64(DisassemblerOptions* options) :
       Disassembler(options), disasm(options) {
     decoder.AppendVisitor(&disasm);
   }
 
-  size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
-  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+  size_t Dump(std::ostream& os, const uint8_t* begin) override;
+  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
 
  private:
   vixl::aarch64::Decoder decoder;
diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h
index afa6af3..bc74b43 100644
--- a/disassembler/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -24,7 +24,7 @@
 namespace art {
 namespace mips {
 
-class DisassemblerMips FINAL : public Disassembler {
+class DisassemblerMips final : public Disassembler {
  public:
   explicit DisassemblerMips(DisassemblerOptions* options, bool is_o32_abi)
       : Disassembler(options),
@@ -33,8 +33,8 @@
         is_o32_abi_(is_o32_abi) {}
 
   const char* RegName(uint32_t reg);
-  size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
-  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+  size_t Dump(std::ostream& os, const uint8_t* begin) override;
+  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
 
  private:
   // Address and encoding of the last disassembled instruction.
diff --git a/disassembler/disassembler_x86.h b/disassembler/disassembler_x86.h
index 31b62bc..a329280 100644
--- a/disassembler/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -24,13 +24,13 @@
 
 enum RegFile { GPR, MMX, SSE };
 
-class DisassemblerX86 FINAL : public Disassembler {
+class DisassemblerX86 final : public Disassembler {
  public:
   DisassemblerX86(DisassemblerOptions* options, bool supports_rex)
       : Disassembler(options), supports_rex_(supports_rex) {}
 
-  size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
-  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+  size_t Dump(std::ostream& os, const uint8_t* begin) override;
+  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
 
  private:
   size_t DumpNops(std::ostream& os, const uint8_t* instr);
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index f54c551..ebc18fc 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -347,9 +347,9 @@
     begin_image_ptr_(begin_image_ptr),
     dirty_pages_(dirty_pages) { }
 
-  virtual ~ImgObjectVisitor() OVERRIDE { }
+  ~ImgObjectVisitor() override { }
 
-  virtual void Visit(mirror::Object* object) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Visit(mirror::Object* object) override REQUIRES_SHARED(Locks::mutator_lock_) {
     // Sanity check that we are reading a real mirror::Object
     CHECK(object->GetClass() != nullptr) << "Image object at address "
                                          << object
@@ -658,8 +658,8 @@
     dirty_func_(std::move(dirty_func)),
     begin_image_ptr_(begin_image_ptr),
     dirty_pages_(dirty_pages) { }
-  virtual ~ImgArtMethodVisitor() OVERRIDE { }
-  virtual void Visit(ArtMethod* method) OVERRIDE {
+  ~ImgArtMethodVisitor() override { }
+  void Visit(ArtMethod* method) override {
     dirty_func_(method, begin_image_ptr_, dirty_pages_);
   }
 
@@ -1671,8 +1671,7 @@
  protected:
   using Base = CmdlineArgs;
 
-  virtual ParseStatus ParseCustom(const StringPiece& option,
-                                  std::string* error_msg) OVERRIDE {
+  ParseStatus ParseCustom(const StringPiece& option, std::string* error_msg) override {
     {
       ParseStatus base_parse = Base::ParseCustom(option, error_msg);
       if (base_parse != kParseUnknownArgument) {
@@ -1703,7 +1702,7 @@
     return kParseOk;
   }
 
-  virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+  ParseStatus ParseChecks(std::string* error_msg) override {
     // Perform the parent checks.
     ParseStatus parent_checks = Base::ParseChecks(error_msg);
     if (parent_checks != kParseOk) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 52096f0..cb40c7d 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -57,7 +57,7 @@
     boot_image_location_ = image_spaces[0]->GetImageLocation();
   }
 
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     // Needs to live until CommonRuntimeTest::SetUp finishes, since we pass it a cstring.
     runtime_args_image_ = android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str());
     options->push_back(std::make_pair(runtime_args_image_, nullptr));
diff --git a/libartbase/base/allocator.cc b/libartbase/base/allocator.cc
index c4ac180..1bcfe87 100644
--- a/libartbase/base/allocator.cc
+++ b/libartbase/base/allocator.cc
@@ -25,7 +25,7 @@
 
 namespace art {
 
-class MallocAllocator FINAL : public Allocator {
+class MallocAllocator final : public Allocator {
  public:
   MallocAllocator() {}
   ~MallocAllocator() {}
@@ -44,7 +44,7 @@
 
 MallocAllocator g_malloc_allocator;
 
-class NoopAllocator FINAL : public Allocator {
+class NoopAllocator final : public Allocator {
  public:
   NoopAllocator() {}
   ~NoopAllocator() {}
diff --git a/libartbase/base/arena_bit_vector.cc b/libartbase/base/arena_bit_vector.cc
index 01f9013..c6d8993 100644
--- a/libartbase/base/arena_bit_vector.cc
+++ b/libartbase/base/arena_bit_vector.cc
@@ -50,7 +50,7 @@
     ArenaBitVectorAllocatorKindImpl<kArenaAllocatorCountAllocations>;
 
 template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator, private ArenaBitVectorAllocatorKind {
+class ArenaBitVectorAllocator final : public Allocator, private ArenaBitVectorAllocatorKind {
  public:
   static ArenaBitVectorAllocator* Create(ArenaAlloc* allocator, ArenaAllocKind kind) {
     void* storage = allocator->template Alloc<ArenaBitVectorAllocator>(kind);
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index 5668b6c..76f57da 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -26,7 +26,7 @@
 
 // Bit memory region is a bit-offset subregion of a normal memory region. This is useful for
 // abstracting away the bit start offset to avoid needing to pass it as an argument everywhere.
-class BitMemoryRegion FINAL : public ValueObject {
+class BitMemoryRegion final : public ValueObject {
  public:
   struct Less {
     bool operator()(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) const {
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index e24b073..6dd2381 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -318,7 +318,8 @@
 
 std::vector<std::string> CommonArtTestImpl::GetLibCoreDexFileNames() {
   return std::vector<std::string>({GetDexFileName("core-oj", IsHost()),
-                                   GetDexFileName("core-libart", IsHost())});
+                                   GetDexFileName("core-libart", IsHost()),
+                                   GetDexFileName("core-simple", IsHost())});
 }
 
 std::string CommonArtTestImpl::GetTestAndroidRoot() {
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 62834c7..d645fa1 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -209,11 +209,11 @@
   virtual ~CommonArtTestBase() {}
 
  protected:
-  virtual void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonArtTestImpl::SetUp();
   }
 
-  virtual void TearDown() OVERRIDE {
+  void TearDown() override {
     CommonArtTestImpl::TearDown();
   }
 };
diff --git a/libartbase/base/dumpable.h b/libartbase/base/dumpable.h
index 0c00505..bd8622f 100644
--- a/libartbase/base/dumpable.h
+++ b/libartbase/base/dumpable.h
@@ -29,7 +29,7 @@
 //   os << Dumpable<MyType>(my_type_instance);
 //
 template<typename T>
-class Dumpable FINAL {
+class Dumpable final {
  public:
   explicit Dumpable(const T& value) : value_(value) {
   }
diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc
index a63f326..1d106b2 100644
--- a/libartbase/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -279,4 +279,12 @@
   return android::base::StartsWith(full_path, framework_path);
 }
 
+int DupCloexec(int fd) {
+#if defined(__linux__)
+  return fcntl(fd, F_DUPFD_CLOEXEC, 0);
+#else
+  return dup(fd);
+#endif
+}
+
 }  // namespace art
diff --git a/libartbase/base/file_utils.h b/libartbase/base/file_utils.h
index 063393b..c249bcc 100644
--- a/libartbase/base/file_utils.h
+++ b/libartbase/base/file_utils.h
@@ -78,6 +78,9 @@
 // Return whether the location is on system/framework (i.e. android_root/framework).
 bool LocationIsOnSystemFramework(const char* location);
 
+// dup(2), except setting the O_CLOEXEC flag atomically, when possible.
+int DupCloexec(int fd);
+
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_FILE_UTILS_H_
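
For illustration, a minimal usage sketch of the new DupCloexec() helper; the caller and file path below are hypothetical. On Linux the duplicate is created with O_CLOEXEC set atomically via fcntl(F_DUPFD_CLOEXEC), so the descriptor cannot leak into a child between dup and a separate fcntl; on other platforms it falls back to plain dup(2).

  #include <fcntl.h>
  #include <unistd.h>

  #include "base/file_utils.h"

  // Hypothetical caller: duplicate a descriptor before handing it to code
  // that may fork()/exec(), without leaking the duplicate into the child.
  bool PassFdSafely(int fd) {
    int dup_fd = art::DupCloexec(fd);
    if (dup_fd < 0) {
      return false;
    }
    // ... hand dup_fd to the fork()/exec() path ...
    close(dup_fd);
    return true;
  }
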
diff --git a/libartbase/base/fuchsia_compat.h b/libartbase/base/fuchsia_compat.h
deleted file mode 100644
index 018bac0..0000000
--- a/libartbase/base/fuchsia_compat.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
-#define ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
-
-// stubs for features lacking in Fuchsia
-
-struct rlimit {
-  int rlim_cur;
-};
-
-#define RLIMIT_FSIZE (1)
-#define RLIM_INFINITY (-1)
-static int getrlimit(int resource, struct rlimit *rlim) {
-  LOG(FATAL) << "getrlimit not available for Fuchsia";
-}
-
-static int ashmem_create_region(const char *name, size_t size) {
-  LOG(FATAL) << "ashmem_create_region not available for Fuchsia";
-}
-
-#endif  // ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
diff --git a/libartbase/base/indenter.h b/libartbase/base/indenter.h
index a479b7d..81d55fc 100644
--- a/libartbase/base/indenter.h
+++ b/libartbase/base/indenter.h
@@ -37,7 +37,7 @@
         count_(count) {}
 
  private:
-  std::streamsize xsputn(const char* s, std::streamsize n) OVERRIDE {
+  std::streamsize xsputn(const char* s, std::streamsize n) override {
     std::streamsize result = n;  // Aborts on failure.
     const char* eol = static_cast<const char*>(memchr(s, '\n', n));
     while (eol != nullptr) {
@@ -54,7 +54,7 @@
     return result;
   }
 
-  int_type overflow(int_type c) OVERRIDE {
+  int_type overflow(int_type c) override {
     if (UNLIKELY(c == std::char_traits<char>::eof())) {
       out_sbuf_->pubsync();
       return c;
diff --git a/libartbase/base/leb128.h b/libartbase/base/leb128.h
index d5847fd..b866d37 100644
--- a/libartbase/base/leb128.h
+++ b/libartbase/base/leb128.h
@@ -357,7 +357,7 @@
 
 // An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
 template <typename Vector = std::vector<uint8_t>>
-class Leb128EncodingVector FINAL : private Vector,
+class Leb128EncodingVector final : private Vector,
                                    public Leb128Encoder<Vector> {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
 
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index f26cf07..33866bb 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -23,9 +23,6 @@
 #include "android-base/macros.h"
 #include "android-base/thread_annotations.h"
 
-#define OVERRIDE override
-#define FINAL final
-
 // Declare a friend relationship in a class with a test. Used rather that FRIEND_TEST to avoid
 // globally importing gtest/gtest.h into the main ART header files.
 #define ART_FRIEND_TEST(test_set_name, individual_test)\
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 15a5d71..02e29f1 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -28,7 +28,7 @@
 
 namespace art {
 
-class MallocArena FINAL : public Arena {
+class MallocArena final : public Arena {
  public:
   explicit MallocArena(size_t size = arena_allocator::kArenaDefaultSize);
   virtual ~MallocArena();
diff --git a/libartbase/base/malloc_arena_pool.h b/libartbase/base/malloc_arena_pool.h
index c48be59..9216c03 100644
--- a/libartbase/base/malloc_arena_pool.h
+++ b/libartbase/base/malloc_arena_pool.h
@@ -23,17 +23,17 @@
 
 namespace art {
 
-class MallocArenaPool FINAL : public ArenaPool {
+class MallocArenaPool final : public ArenaPool {
  public:
   MallocArenaPool();
   ~MallocArenaPool();
-  Arena* AllocArena(size_t size) OVERRIDE;
-  void FreeArenaChain(Arena* first) OVERRIDE;
-  size_t GetBytesAllocated() const OVERRIDE;
-  void ReclaimMemory() OVERRIDE;
-  void LockReclaimMemory() OVERRIDE;
+  Arena* AllocArena(size_t size) override;
+  void FreeArenaChain(Arena* first) override;
+  size_t GetBytesAllocated() const override;
+  void ReclaimMemory() override;
+  void LockReclaimMemory() override;
   // Is a nop for malloc pools.
-  void TrimMaps() OVERRIDE;
+  void TrimMaps() override;
 
  private:
   Arena* free_arenas_;
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index d9760bd..1bf553d 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -23,6 +23,10 @@
 #include <sys/resource.h>
 #endif
 
+#if defined(__linux__)
+#include <sys/prctl.h>
+#endif
+
 #include <map>
 #include <memory>
 #include <sstream>
@@ -30,12 +34,6 @@
 #include "android-base/stringprintf.h"
 #include "android-base/unique_fd.h"
 
-#if !defined(__Fuchsia__)
-#include "cutils/ashmem.h"
-#else
-#include "fuchsia_compat.h"
-#endif
-
 #include "allocator.h"
 #include "bit_utils.h"
 #include "globals.h"
@@ -61,6 +59,9 @@
 // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
 static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
 
+// A map containing unique strings used for identifying anonymous mappings.
+static std::map<std::string, int> debugStrMap GUARDED_BY(MemMap::GetMemMapsLock());
+
 // Retrieve iterator to a `gMaps` entry that is known to exist.
 Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
   DCHECK(map.IsValid());
@@ -226,6 +227,33 @@
   return false;
 }
 
+bool MemMap::CheckReservation(uint8_t* expected_ptr,
+                              size_t byte_count,
+                              const char* name,
+                              const MemMap& reservation,
+                              /*out*/std::string* error_msg) {
+  if (!reservation.IsValid()) {
+    *error_msg = StringPrintf("Invalid reservation for %s", name);
+    return false;
+  }
+  DCHECK_ALIGNED(reservation.Begin(), kPageSize);
+  if (reservation.Begin() != expected_ptr) {
+    *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
+                              name,
+                              reservation.Begin(),
+                              expected_ptr);
+    return false;
+  }
+  if (byte_count > reservation.Size()) {
+    *error_msg = StringPrintf("Insufficient reservation, required %zu, available %zu",
+                              byte_count,
+                              reservation.Size());
+    return false;
+  }
+  return true;
+}
+
+
 #if USE_ART_LOW_4G_ALLOCATOR
 void* MemMap::TryMemMapLow4GB(void* ptr,
                                     size_t page_aligned_byte_count,
@@ -246,18 +274,45 @@
 }
 #endif
 
+void MemMap::SetDebugName(void* map_ptr, const char* name, size_t size) {
+  // Debug naming is only used for Android target builds. For Linux targets,
+  // we'll still call prctl but it won't do anything until we upstream the prctl.
+  if (kIsTargetFuchsia || !kIsTargetBuild) {
+    return;
+  }
+
+  // Lock as std::map is not thread-safe.
+  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+
+  std::string debug_friendly_name("dalvik-");
+  debug_friendly_name += name;
+  auto it = debugStrMap.find(debug_friendly_name);
+
+  if (it == debugStrMap.end()) {
+    it = debugStrMap.insert(std::make_pair(std::move(debug_friendly_name), 1)).first;
+  }
+
+  DCHECK(it != debugStrMap.end());
+#if defined(PR_SET_VMA) && defined(__linux__)
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, size, it->first.c_str());
+#else
+  // Prevent variable unused compiler errors.
+  UNUSED(map_ptr, size);
+#endif
+}
+
 MemMap MemMap::MapAnonymous(const char* name,
                             uint8_t* addr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
-                            std::string* error_msg,
-                            bool use_ashmem) {
+                            /*inout*/MemMap* reservation,
+                            /*out*/std::string* error_msg,
+                            bool use_debug_name) {
 #ifndef __LP64__
   UNUSED(low_4gb);
 #endif
-  use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
   if (byte_count == 0) {
     *error_msg = "Empty MemMap requested.";
     return Invalid();
@@ -269,46 +324,20 @@
     // reuse means it is okay that it overlaps an existing page mapping.
     // Only use this if you actually made the page reservation yourself.
     CHECK(addr != nullptr);
+    DCHECK(reservation == nullptr);
 
     DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
     flags |= MAP_FIXED;
-  }
-
-  if (use_ashmem) {
-    if (!kIsTargetBuild) {
-      // When not on Android (either host or assuming a linux target) ashmem is faked using
-      // files in /tmp. Ensure that such files won't fail due to ulimit restrictions. If they
-      // will then use a regular mmap.
-      struct rlimit rlimit_fsize;
-      CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
-      use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
-        (page_aligned_byte_count < rlimit_fsize.rlim_cur);
+  } else if (reservation != nullptr) {
+    CHECK(addr != nullptr);
+    if (!CheckReservation(addr, byte_count, name, *reservation, error_msg)) {
+      return MemMap::Invalid();
     }
+    flags |= MAP_FIXED;
   }
 
   unique_fd fd;
 
-
-  if (use_ashmem) {
-    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
-    // prefixed "dalvik-".
-    std::string debug_friendly_name("dalvik-");
-    debug_friendly_name += name;
-    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
-
-    if (fd.get() == -1) {
-      // We failed to create the ashmem region. Print a warning, but continue
-      // anyway by creating a true anonymous mmap with an fd of -1. It is
-      // better to use an unlabelled anonymous map than to fail to create a
-      // map at all.
-      PLOG(WARNING) << "ashmem_create_region failed for '" << name << "'";
-    } else {
-      // We succeeded in creating the ashmem region. Use the created ashmem
-      // region as backing for the mmap.
-      flags &= ~MAP_ANONYMOUS;
-    }
-  }
-
   // We need to store and potentially set an error number for pretty printing of errors
   int saved_errno = 0;
 
@@ -341,6 +370,16 @@
   if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
     return Invalid();
   }
+
+  if (use_debug_name) {
+    SetDebugName(actual, name, page_aligned_byte_count);
+  }
+
+  if (reservation != nullptr) {
+    // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
+    DCHECK_EQ(actual, reservation->Begin());
+    reservation->ReleaseReservedMemory(byte_count);
+  }
   return MemMap(name,
                 reinterpret_cast<uint8_t*>(actual),
                 byte_count,
@@ -446,22 +485,30 @@
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
-                                bool reuse,
                                 const char* filename,
-                                std::string* error_msg) {
+                                bool reuse,
+                                /*inout*/MemMap* reservation,
+                                /*out*/std::string* error_msg) {
   CHECK_NE(0, prot);
   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
 
-  // Note that we do not allow MAP_FIXED unless reuse == true, i.e we
-  // expect his mapping to be contained within an existing map.
+  // Note that we do not allow MAP_FIXED unless reuse == true or we have an existing
+  // reservation, i.e we expect this mapping to be contained within an existing map.
   if (reuse) {
     // reuse means it is okay that it overlaps an existing page mapping.
     // Only use this if you actually made the page reservation yourself.
     CHECK(expected_ptr != nullptr);
+    DCHECK(reservation == nullptr);
     DCHECK(error_msg != nullptr);
     DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
         << ((error_msg != nullptr) ? *error_msg : std::string());
     flags |= MAP_FIXED;
+  } else if (reservation != nullptr) {
+    DCHECK(error_msg != nullptr);
+    if (!CheckReservation(expected_ptr, byte_count, filename, *reservation, error_msg)) {
+      return Invalid();
+    }
+    flags |= MAP_FIXED;
   } else {
     CHECK_EQ(0, flags & MAP_FIXED);
     // Don't bother checking for an overlapping region here. We'll
@@ -523,6 +570,11 @@
     page_aligned_byte_count -= redzone_size;
   }
 
+  if (reservation != nullptr) {
+    // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
+    DCHECK_EQ(actual, reservation->Begin());
+    reservation->ReleaseReservedMemory(byte_count);
+  }
   return MemMap(filename,
                 actual + page_offset,
                 byte_count,
@@ -639,8 +691,7 @@
                           const char* tail_name,
                           int tail_prot,
                           std::string* error_msg,
-                          bool use_ashmem) {
-  use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
+                          bool use_debug_name) {
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -666,19 +717,6 @@
 
   unique_fd fd;
   int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
-  if (use_ashmem) {
-    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
-    // prefixed "dalvik-".
-    std::string debug_friendly_name("dalvik-");
-    debug_friendly_name += tail_name;
-    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
-    flags = MAP_PRIVATE | MAP_FIXED;
-    if (fd.get() == -1) {
-      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
-                                tail_name, strerror(errno));
-      return Invalid();
-    }
-  }
 
   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
   // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
@@ -703,12 +741,56 @@
     auto it = GetGMapsEntry(*this);
     gMaps->erase(it);
   }
+
+  if (use_debug_name) {
+    SetDebugName(actual, tail_name, tail_base_size);
+  }
+
   size_ = new_size;
   base_size_ = new_base_size;
   // Return the new mapping.
   return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
 }
 
+MemMap MemMap::TakeReservedMemory(size_t byte_count) {
+  uint8_t* begin = Begin();
+  ReleaseReservedMemory(byte_count);  // Performs necessary DCHECK()s on this reservation.
+  size_t base_size = RoundUp(byte_count, kPageSize);
+  return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse */ false);
+}
+
+void MemMap::ReleaseReservedMemory(size_t byte_count) {
+  // Check the reservation mapping.
+  DCHECK(IsValid());
+  DCHECK(!reuse_);
+  DCHECK(!already_unmapped_);
+  DCHECK_EQ(redzone_size_, 0u);
+  DCHECK_EQ(begin_, base_begin_);
+  DCHECK_EQ(size_, base_size_);
+  DCHECK_ALIGNED(begin_, kPageSize);
+  DCHECK_ALIGNED(size_, kPageSize);
+
+  // Check and round up the `byte_count`.
+  DCHECK_NE(byte_count, 0u);
+  DCHECK_LE(byte_count, size_);
+  byte_count = RoundUp(byte_count, kPageSize);
+
+  if (byte_count == size_) {
+    Invalidate();
+  } else {
+    // Shrink the reservation MemMap and update its `gMaps` entry.
+    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+    auto it = GetGMapsEntry(*this);
+    // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
+    gMaps->erase(it);
+    begin_ += byte_count;
+    size_ -= byte_count;
+    base_begin_ = begin_;
+    base_size_ = size_;
+    gMaps->emplace(base_begin_, this);
+  }
+}
+
 void MemMap::MadviseDontNeedAndZero() {
   if (base_begin_ != nullptr || base_size_ != 0) {
     if (!kMadviseZeroes) {
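
The SetDebugName() path above replaces ashmem-based naming with the PR_SET_VMA_ANON_NAME prctl. A standalone sketch of the underlying mechanism follows (mapping and name are illustrative); ART keeps the name strings alive in the static debugStrMap because some kernels record the user-space pointer rather than copying the string.

  #include <sys/mman.h>
  #include <sys/prctl.h>

  void* region = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  #if defined(PR_SET_VMA) && defined(__linux__)
  // On kernels with anonymous VMA naming, the region now shows up as
  // "[anon:dalvik-example]" in /proc/self/maps; elsewhere the call fails
  // harmlessly and is ignored.
  static const char kName[] = "dalvik-example";  // Must outlive the mapping.
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, region, 4096, kName);
  #endif
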
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index cd7d502..20eda32 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -114,9 +114,15 @@
   // of the source mmap is returned to the caller.
   bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
 
+  // Set a debug friendly name for a map. It will be prefixed with "dalvik-".
+  static void SetDebugName(void* map_ptr, const char* name, size_t size);
+
   // Request an anonymous region of length 'byte_count' and a requested base address.
   // Use null as the requested base address if you don't care.
-  // "reuse" allows re-mapping an address range from an existing mapping.
+  //
+  // `reuse` allows re-mapping an address range from an existing mapping which retains the
+  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
+  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
   //
   // The word "anonymous" in this context means "not backed by a file". The supplied
   // 'name' will be used -- on systems that support it -- to give the mapping
@@ -129,15 +135,23 @@
                              int prot,
                              bool low_4gb,
                              bool reuse,
-                             std::string* error_msg,
-                             bool use_ashmem = true);
+                             /*inout*/MemMap* reservation,
+                             /*out*/std::string* error_msg,
+                             bool use_debug_name = true);
   static MemMap MapAnonymous(const char* name,
                              uint8_t* addr,
                              size_t byte_count,
                              int prot,
                              bool low_4gb,
-                             std::string* error_msg) {
-    return MapAnonymous(name, addr, byte_count, prot, low_4gb, /* reuse */ false, error_msg);
+                             /*out*/std::string* error_msg) {
+    return MapAnonymous(name,
+                        addr,
+                        byte_count,
+                        prot,
+                        low_4gb,
+                        /* reuse */ false,
+                        /* reservation */ nullptr,
+                        error_msg);
   }
 
   // Create placeholder for a region allocated by direct call to mmap.
@@ -164,18 +178,23 @@
                             flags,
                             fd,
                             start,
-                            /*low_4gb*/low_4gb,
-                            /*reuse*/false,
+                            /* low_4gb */ low_4gb,
                             filename,
+                            /* reuse */ false,
+                            /* reservation */ nullptr,
                             error_msg);
   }
 
   // Map part of a file, taking care of non-page aligned offsets.  The "start" offset is absolute,
   // not relative. This version allows requesting a specific address for the base of the mapping.
-  // "reuse" allows us to create a view into an existing mapping where we do not take ownership of
-  // the memory. If error_msg is null then we do not print /proc/maps to the log if
-  // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
-  // printing /proc/maps takes several milliseconds in the worst case.
+  //
+  // `reuse` allows re-mapping an address range from an existing mapping which retains the
+  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
+  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
+  //
+  // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
+  // This helps improve performance of the fail case since reading and printing /proc/maps takes
+  // several milliseconds in the worst case.
   //
   // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
   static MemMap MapFileAtAddress(uint8_t* addr,
@@ -185,9 +204,10 @@
                                  int fd,
                                  off_t start,
                                  bool low_4gb,
-                                 bool reuse,
                                  const char* filename,
-                                 std::string* error_msg);
+                                 bool reuse,
+                                 /*inout*/MemMap* reservation,
+                                 /*out*/std::string* error_msg);
 
   const std::string& GetName() const {
     return name_;
@@ -239,7 +259,15 @@
                     const char* tail_name,
                     int tail_prot,
                     std::string* error_msg,
-                    bool use_ashmem = true);
+                    bool use_debug_name = true);
+
+  // Take ownership of pages at the beginning of the mapping. The mapping must be an
+  // anonymous reservation mapping, owning entire pages. The `byte_count` must not
+  // exceed the size of this reservation.
+  //
+  // Returns a mapping owning `byte_count` bytes rounded up to entire pages
+  // with size set to the passed `byte_count`.
+  MemMap TakeReservedMemory(size_t byte_count);
 
   static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
       REQUIRES(!MemMap::mem_maps_lock_);
@@ -304,12 +332,21 @@
                                              off_t offset)
       REQUIRES(!MemMap::mem_maps_lock_);
 
+  // Release memory owned by a reservation mapping.
+  void ReleaseReservedMemory(size_t byte_count);
+
   // member function to access real_munmap
   static bool CheckMapRequest(uint8_t* expected_ptr,
                               void* actual_ptr,
                               size_t byte_count,
                               std::string* error_msg);
 
+  static bool CheckReservation(uint8_t* expected_ptr,
+                               size_t byte_count,
+                               const char* name,
+                               const MemMap& reservation,
+                               /*out*/std::string* error_msg);
+
   std::string name_;
   uint8_t* begin_ = nullptr;    // Start of data. May be changed by AlignBy.
   size_t size_ = 0u;            // Length of data.
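
A condensed sketch of the intended reservation flow with the new MapAnonymous() signature (names and sizes here are illustrative; the MemMapTest.Reservation test added below exercises the file-backed variant as well):

  std::string error_msg;
  // Reserve an address range up front with PROT_NONE.
  MemMap reservation = MemMap::MapAnonymous("example-reservation",
                                            /* addr */ nullptr,
                                            4 * kPageSize,
                                            PROT_NONE,
                                            /* low_4gb */ false,
                                            &error_msg);
  // Carve pages off the front of the reservation. Ownership of those pages
  // transfers to `chunk`, and `reservation` shrinks to the remainder.
  MemMap chunk = MemMap::MapAnonymous("example-chunk",
                                      reservation.Begin(),
                                      2 * kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
                                      /* reuse */ false,
                                      &reservation,
                                      &error_msg);
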
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index 396f12b..ab3d18f 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -540,6 +540,7 @@
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
                                     /* reuse */ false,
+                                    /* reservation */ nullptr,
                                     &error_msg);
   ASSERT_TRUE(map.IsValid());
   ASSERT_TRUE(error_msg.empty());
@@ -549,6 +550,7 @@
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
                                      /* reuse */ true,
+                                     /* reservation */ nullptr,
                                      &error_msg);
   ASSERT_TRUE(map2.IsValid());
   ASSERT_TRUE(error_msg.empty());
@@ -720,4 +722,108 @@
   }
 }
 
+TEST_F(MemMapTest, Reservation) {
+  CommonInit();
+  std::string error_msg;
+  ScratchFile scratch_file;
+  constexpr size_t kMapSize = 5 * kPageSize;
+  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
+  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
+
+  MemMap reservation = MemMap::MapAnonymous("Test reservation",
+                                            /* addr */ nullptr,
+                                            kMapSize,
+                                            PROT_NONE,
+                                            /* low_4gb */ false,
+                                            &error_msg);
+  ASSERT_TRUE(reservation.IsValid());
+  ASSERT_TRUE(error_msg.empty());
+
+  // Map first part of the reservation.
+  constexpr size_t kChunk1Size = kPageSize - 1u;
+  static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
+  uint8_t* addr1 = reservation.Begin();
+  MemMap map1 = MemMap::MapFileAtAddress(addr1,
+                                         /* byte_count */ kChunk1Size,
+                                         PROT_READ,
+                                         MAP_PRIVATE,
+                                         scratch_file.GetFd(),
+                                         /* start */ 0,
+                                         /* low_4gb */ false,
+                                         scratch_file.GetFilename().c_str(),
+                                         /* reuse */ false,
+                                         &reservation,
+                                         &error_msg);
+  ASSERT_TRUE(map1.IsValid()) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map1.Size(), kChunk1Size);
+  ASSERT_EQ(addr1, map1.Begin());
+  ASSERT_TRUE(reservation.IsValid());
+  // Entire pages are taken from the `reservation`.
+  ASSERT_LT(map1.End(), map1.BaseEnd());
+  ASSERT_EQ(map1.BaseEnd(), reservation.Begin());
+
+  // Map second part as an anonymous mapping.
+  constexpr size_t kChunk2Size = 2 * kPageSize;
+  DCHECK_LT(kChunk2Size, reservation.Size());  // We want to split the reservation.
+  uint8_t* addr2 = reservation.Begin();
+  MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
+                                     addr2,
+                                     /* byte_count */ kChunk2Size,
+                                     PROT_READ,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &reservation,
+                                     &error_msg);
+  ASSERT_TRUE(map2.IsValid()) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map2.Size(), kChunk2Size);
+  ASSERT_EQ(addr2, map2.Begin());
+  ASSERT_EQ(map2.End(), map2.BaseEnd());  // kChunk2Size is page aligned.
+  ASSERT_EQ(map2.BaseEnd(), reservation.Begin());
+
+  // Map the rest of the reservation except the last byte.
+  const size_t kChunk3Size = reservation.Size() - 1u;
+  uint8_t* addr3 = reservation.Begin();
+  MemMap map3 = MemMap::MapFileAtAddress(addr3,
+                                         /* byte_count */ kChunk3Size,
+                                         PROT_READ,
+                                         MAP_PRIVATE,
+                                         scratch_file.GetFd(),
+                                         /* start */ dchecked_integral_cast<size_t>(addr3 - addr1),
+                                         /* low_4gb */ false,
+                                         scratch_file.GetFilename().c_str(),
+                                         /* reuse */ false,
+                                         &reservation,
+                                         &error_msg);
+  ASSERT_TRUE(map3.IsValid()) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map3.Size(), kChunk3Size);
+  ASSERT_EQ(addr3, map3.Begin());
+  // Entire pages are taken from the `reservation`, so it's now exhausted.
+  ASSERT_FALSE(reservation.IsValid());
+
+  // Now split the MiddleReservation.
+  constexpr size_t kChunk2ASize = kPageSize - 1u;
+  DCHECK_LT(kChunk2ASize, map2.Size());  // We want to split the reservation.
+  MemMap map2a = map2.TakeReservedMemory(kChunk2ASize);
+  ASSERT_TRUE(map2a.IsValid()) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map2a.Size(), kChunk2ASize);
+  ASSERT_EQ(addr2, map2a.Begin());
+  ASSERT_TRUE(map2.IsValid());
+  ASSERT_LT(map2a.End(), map2a.BaseEnd());
+  ASSERT_EQ(map2a.BaseEnd(), map2.Begin());
+
+  // And take the rest of the middle reservation.
+  const size_t kChunk2BSize = map2.Size() - 1u;
+  uint8_t* addr2b = map2.Begin();
+  MemMap map2b = map2.TakeReservedMemory(kChunk2BSize);
+  ASSERT_TRUE(map2b.IsValid()) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  ASSERT_EQ(map2b.Size(), kChunk2BSize);
+  ASSERT_EQ(addr2b, map2b.Begin());
+  ASSERT_FALSE(map2.IsValid());
+}
+
 }  // namespace art
diff --git a/libartbase/base/memory_region.h b/libartbase/base/memory_region.h
index 2060329..9c9ff92 100644
--- a/libartbase/base/memory_region.h
+++ b/libartbase/base/memory_region.h
@@ -34,7 +34,7 @@
 // Memory regions are useful for accessing memory with bounds check in
 // debug mode. They can be safely passed by value and do not assume ownership
 // of the region.
-class MemoryRegion FINAL : public ValueObject {
+class MemoryRegion final : public ValueObject {
  public:
   struct ContentEquals {
     constexpr bool operator()(const MemoryRegion& lhs, const MemoryRegion& rhs) const {
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index c5313e9..d715670 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -21,6 +21,10 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#if defined(__BIONIC__)
+#include <android/fdsan.h>
+#endif
+
 #include <limits>
 
 #include <android-base/logging.h>
@@ -36,26 +40,34 @@
 
 namespace unix_file {
 
-FdFile::FdFile()
-    : guard_state_(GuardState::kClosed), fd_(-1), auto_close_(true), read_only_mode_(false) {
+#if defined(__BIONIC__)
+static uint64_t GetFdFileOwnerTag(FdFile* fd_file) {
+  return android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_ART_FDFILE,
+                                        reinterpret_cast<uint64_t>(fd_file));
 }
+#endif
 
 FdFile::FdFile(int fd, bool check_usage)
-    : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
-      fd_(fd), auto_close_(true), read_only_mode_(false) {
-}
+    : FdFile(fd, std::string(), check_usage) {}
 
 FdFile::FdFile(int fd, const std::string& path, bool check_usage)
-    : FdFile(fd, path, check_usage, false) {
-}
+    : FdFile(fd, path, check_usage, false) {}
 
-FdFile::FdFile(int fd, const std::string& path, bool check_usage, bool read_only_mode)
+FdFile::FdFile(int fd, const std::string& path, bool check_usage,
+               bool read_only_mode)
     : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
-      fd_(fd), file_path_(path), auto_close_(true), read_only_mode_(read_only_mode) {
+      fd_(fd),
+      file_path_(path),
+      read_only_mode_(read_only_mode) {
+#if defined(__BIONIC__)
+  if (fd >= 0) {
+    android_fdsan_exchange_owner_tag(fd, 0, GetFdFileOwnerTag(this));
+  }
+#endif
 }
 
-FdFile::FdFile(const std::string& path, int flags, mode_t mode, bool check_usage)
-    : fd_(-1), auto_close_(true) {
+FdFile::FdFile(const std::string& path, int flags, mode_t mode,
+               bool check_usage) {
   Open(path, flags, mode);
   if (!check_usage || !IsOpened()) {
     guard_state_ = GuardState::kNoCheck;
@@ -72,13 +84,27 @@
     }
     DCHECK_GE(guard_state_, GuardState::kClosed);
   }
-  if (auto_close_ && fd_ != -1) {
+  if (fd_ != -1) {
     if (Close() != 0) {
       PLOG(WARNING) << "Failed to close file with fd=" << fd_ << " path=" << file_path_;
     }
   }
 }
 
+FdFile::FdFile(FdFile&& other)
+    : guard_state_(other.guard_state_),
+      fd_(other.fd_),
+      file_path_(std::move(other.file_path_)),
+      read_only_mode_(other.read_only_mode_) {
+#if defined(__BIONIC__)
+  if (fd_ >= 0) {
+    android_fdsan_exchange_owner_tag(fd_, GetFdFileOwnerTag(&other), GetFdFileOwnerTag(this));
+  }
+#endif
+  other.guard_state_ = GuardState::kClosed;
+  other.fd_ = -1;
+}
+
 FdFile& FdFile::operator=(FdFile&& other) {
   if (this == &other) {
     return *this;
@@ -91,10 +117,15 @@
   guard_state_ = other.guard_state_;
   fd_ = other.fd_;
   file_path_ = std::move(other.file_path_);
-  auto_close_ = other.auto_close_;
   read_only_mode_ = other.read_only_mode_;
-  other.Release();  // Release other.
 
+#if defined(__BIONIC__)
+  if (fd_ >= 0) {
+    android_fdsan_exchange_owner_tag(fd_, GetFdFileOwnerTag(&other), GetFdFileOwnerTag(this));
+  }
+#endif
+  other.guard_state_ = GuardState::kClosed;
+  other.fd_ = -1;
   return *this;
 }
 
@@ -102,6 +133,39 @@
   Destroy();
 }
 
+int FdFile::Release() {
+  int tmp_fd = fd_;
+  fd_ = -1;
+  guard_state_ = GuardState::kNoCheck;
+#if defined(__BIONIC__)
+  if (tmp_fd >= 0) {
+    android_fdsan_exchange_owner_tag(tmp_fd, GetFdFileOwnerTag(this), 0);
+  }
+#endif
+  return tmp_fd;
+}
+
+void FdFile::Reset(int fd, bool check_usage) {
+  CHECK_NE(fd, fd_);
+
+  if (fd_ != -1) {
+    Destroy();
+  }
+  fd_ = fd;
+
+#if defined(__BIONIC__)
+  if (fd_ >= 0) {
+    android_fdsan_exchange_owner_tag(fd_, 0, GetFdFileOwnerTag(this));
+  }
+#endif
+
+  if (check_usage) {
+    guard_state_ = fd == -1 ? GuardState::kNoCheck : GuardState::kBase;
+  } else {
+    guard_state_ = GuardState::kNoCheck;
+  }
+}
+
 void FdFile::moveTo(GuardState target, GuardState warn_threshold, const char* warning) {
   if (kCheckSafeUsage) {
     if (guard_state_ < GuardState::kNoCheck) {
@@ -125,10 +189,6 @@
   }
 }
 
-void FdFile::DisableAutoClose() {
-  auto_close_ = false;
-}
-
 bool FdFile::Open(const std::string& path, int flags) {
   return Open(path, flags, 0640);
 }
@@ -141,6 +201,11 @@
   if (fd_ == -1) {
     return false;
   }
+
+#if defined(__BIONIC__)
+  android_fdsan_exchange_owner_tag(fd_, 0, GetFdFileOwnerTag(this));
+#endif
+
   file_path_ = path;
   if (kCheckSafeUsage && (flags & (O_RDWR | O_CREAT | O_WRONLY)) != 0) {
     // Start in the base state (not flushed, not closed).
@@ -154,7 +219,11 @@
 }
 
 int FdFile::Close() {
+#if defined(__BIONIC__)
+  int result = android_fdsan_close_with_tag(fd_, GetFdFileOwnerTag(this));
+#else
   int result = close(fd_);
+#endif
 
   // Test here, so the file is closed and not leaked.
   if (kCheckSafeUsage) {
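
For readers unfamiliar with Bionic's fdsan, the calls added above follow its owner-tag handshake: a descriptor is tagged with its owner, ownership moves by exchanging tags, and only the holder of the current tag may close it. A minimal sketch using the generic owner type (the code above uses the ART-specific ANDROID_FDSAN_OWNER_TYPE_ART_FDFILE):

  #if defined(__BIONIC__)
  #include <android/fdsan.h>
  #include <fcntl.h>
  #include <unistd.h>

  void FdsanSketch() {
    int fd = open("/dev/null", O_RDONLY);
    // Tag the descriptor with an owner; an untagged close(fd) elsewhere will
    // now trip fdsan (abort or log, depending on the configured error level).
    uint64_t tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_GENERIC_00,
                                                  /* unique id */ 1u);
    android_fdsan_exchange_owner_tag(fd, /* expected_tag */ 0, tag);
    // Closing requires presenting the same tag.
    android_fdsan_close_with_tag(fd, tag);
  }
  #endif
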
diff --git a/libartbase/base/unix_file/fd_file.h b/libartbase/base/unix_file/fd_file.h
index d61dab6..e362ed1 100644
--- a/libartbase/base/unix_file/fd_file.h
+++ b/libartbase/base/unix_file/fd_file.h
@@ -34,9 +34,9 @@
 // Not thread safe.
 class FdFile : public RandomAccessFile {
  public:
-  FdFile();
-  // Creates an FdFile using the given file descriptor. Takes ownership of the
-  // file descriptor. (Use DisableAutoClose to retain ownership.)
+  FdFile() = default;
+  // Creates an FdFile using the given file descriptor.
+  // Takes ownership of the file descriptor.
   FdFile(int fd, bool checkUsage);
   FdFile(int fd, const std::string& path, bool checkUsage);
   FdFile(int fd, const std::string& path, bool checkUsage, bool read_only_mode);
@@ -46,40 +46,16 @@
   FdFile(const std::string& path, int flags, mode_t mode, bool checkUsage);
 
   // Move constructor.
-  FdFile(FdFile&& other)
-      : guard_state_(other.guard_state_),
-        fd_(other.fd_),
-        file_path_(std::move(other.file_path_)),
-        auto_close_(other.auto_close_),
-        read_only_mode_(other.read_only_mode_) {
-    other.Release();  // Release the src.
-  }
+  FdFile(FdFile&& other);
 
   // Move assignment operator.
   FdFile& operator=(FdFile&& other);
 
   // Release the file descriptor. This will make further accesses to this FdFile invalid. Disables
   // all further state checking.
-  int Release() {
-    int tmp_fd = fd_;
-    fd_ = -1;
-    guard_state_ = GuardState::kNoCheck;
-    auto_close_ = false;
-    return tmp_fd;
-  }
+  int Release();
 
-  void Reset(int fd, bool check_usage) {
-    if (fd_ != -1 && fd_ != fd) {
-      Destroy();
-    }
-    fd_ = fd;
-    if (check_usage) {
-      guard_state_ = fd == -1 ? GuardState::kNoCheck : GuardState::kBase;
-    } else {
-      guard_state_ = GuardState::kNoCheck;
-    }
-    // Keep the auto_close_ state.
-  }
+  void Reset(int fd, bool check_usage);
 
   // Destroys an FdFile, closing the file descriptor if Close hasn't already
   // been called. (If you care about the return value of Close, call it
@@ -89,13 +65,13 @@
   virtual ~FdFile();
 
   // RandomAccessFile API.
-  int Close() OVERRIDE WARN_UNUSED;
-  int64_t Read(char* buf, int64_t byte_count, int64_t offset) const OVERRIDE WARN_UNUSED;
-  int SetLength(int64_t new_length) OVERRIDE WARN_UNUSED;
-  int64_t GetLength() const OVERRIDE;
-  int64_t Write(const char* buf, int64_t byte_count, int64_t offset) OVERRIDE WARN_UNUSED;
+  int Close() override WARN_UNUSED;
+  int64_t Read(char* buf, int64_t byte_count, int64_t offset) const override WARN_UNUSED;
+  int SetLength(int64_t new_length) override WARN_UNUSED;
+  int64_t GetLength() const override;
+  int64_t Write(const char* buf, int64_t byte_count, int64_t offset) override WARN_UNUSED;
 
-  int Flush() OVERRIDE WARN_UNUSED;
+  int Flush() override WARN_UNUSED;
 
   // Short for SetLength(0); Flush(); Close();
   // If the file was opened with a path name and unlink = true, also calls Unlink() on the path.
@@ -121,7 +97,6 @@
   const std::string& GetPath() const {
     return file_path_;
   }
-  void DisableAutoClose();
   bool ReadFully(void* buffer, size_t byte_count) WARN_UNUSED;
   bool PreadFully(void* buffer, size_t byte_count, size_t offset) WARN_UNUSED;
   bool WriteFully(const void* buffer, size_t byte_count) WARN_UNUSED;
@@ -168,7 +143,7 @@
     }
   }
 
-  GuardState guard_state_;
+  GuardState guard_state_ = GuardState::kClosed;
 
   // Opens file 'file_path' using 'flags' and 'mode'.
   bool Open(const std::string& file_path, int flags);
@@ -180,10 +155,9 @@
 
   void Destroy();  // For ~FdFile and operator=(&&).
 
-  int fd_;
+  int fd_ = -1;
   std::string file_path_;
-  bool auto_close_;
-  bool read_only_mode_;
+  bool read_only_mode_ = false;
 
   DISALLOW_COPY_AND_ASSIGN(FdFile);
 };
diff --git a/libartbase/base/unix_file/fd_file_test.cc b/libartbase/base/unix_file/fd_file_test.cc
index 1f731a7..298b2d7 100644
--- a/libartbase/base/unix_file/fd_file_test.cc
+++ b/libartbase/base/unix_file/fd_file_test.cc
@@ -24,7 +24,10 @@
 class FdFileTest : public RandomAccessFileTest {
  protected:
   virtual RandomAccessFile* MakeTestFile() {
-    return new FdFile(fileno(tmpfile()), false);
+    FILE* tmp = tmpfile();
+    int fd = dup(fileno(tmp));
+    fclose(tmp);
+    return new FdFile(fd, false);
   }
 };
 
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index a841bae..174d227 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -133,16 +133,14 @@
   }
 
   MemMap map =
-      MemMap::MapFileAtAddress(nullptr,  // Expected pointer address
-                               GetUncompressedLength(),  // Byte count
-                               PROT_READ | PROT_WRITE,
-                               MAP_PRIVATE,
-                               zip_fd,
-                               offset,
-                               false,  // Don't restrict allocation to lower4GB
-                               false,  // Doesn't overlap existing map (reuse=false)
-                               name.c_str(),
-                               /*out*/error_msg);
+      MemMap::MapFile(GetUncompressedLength(),  // Byte count
+                      PROT_READ | PROT_WRITE,
+                      MAP_PRIVATE,
+                      zip_fd,
+                      offset,
+                      /* low_4gb */ false,
+                      name.c_str(),
+                      error_msg);
 
   if (!map.IsValid()) {
     DCHECK(!error_msg->empty());
diff --git a/libartbase/base/zip_archive_test.cc b/libartbase/base/zip_archive_test.cc
index b99a471..b923881 100644
--- a/libartbase/base/zip_archive_test.cc
+++ b/libartbase/base/zip_archive_test.cc
@@ -41,7 +41,7 @@
 
   ScratchFile tmp;
   ASSERT_NE(-1, tmp.GetFd());
-  std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename(), false));
+  std::unique_ptr<File> file(new File(dup(tmp.GetFd()), tmp.GetFilename(), false));
   ASSERT_TRUE(file.get() != nullptr);
   bool success = zip_entry->ExtractToFile(*file, &error_msg);
   ASSERT_TRUE(success) << error_msg;
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index 1846a13..eb7d3d3 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -40,9 +40,9 @@
 class MemMapContainer : public DexFileContainer {
  public:
   explicit MemMapContainer(MemMap&& mem_map) : mem_map_(std::move(mem_map)) { }
-  virtual ~MemMapContainer() OVERRIDE { }
+  ~MemMapContainer() override { }
 
-  int GetPermissions() OVERRIDE {
+  int GetPermissions() override {
     if (!mem_map_.IsValid()) {
       return 0;
     } else {
@@ -50,11 +50,11 @@
     }
   }
 
-  bool IsReadOnly() OVERRIDE {
+  bool IsReadOnly() override {
     return GetPermissions() == PROT_READ;
   }
 
-  bool EnableWrite() OVERRIDE {
+  bool EnableWrite() override {
     CHECK(IsReadOnly());
     if (!mem_map_.IsValid()) {
       return false;
@@ -63,7 +63,7 @@
     }
   }
 
-  bool DisableWrite() OVERRIDE {
+  bool DisableWrite() override {
     CHECK(!IsReadOnly());
     if (!mem_map_.IsValid()) {
       return false;
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index 420b347..40d4673 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -51,7 +51,7 @@
                             std::vector<uint32_t>* checksums,
                             std::string* error_msg,
                             int zip_fd = -1,
-                            bool* only_contains_uncompressed_dex = nullptr) const OVERRIDE;
+                            bool* only_contains_uncompressed_dex = nullptr) const override;
 
   // Opens .dex file, backed by existing memory
   std::unique_ptr<const DexFile> Open(const uint8_t* base,
@@ -61,7 +61,7 @@
                                       const OatDexFile* oat_dex_file,
                                       bool verify,
                                       bool verify_checksum,
-                                      std::string* error_msg) const OVERRIDE;
+                                      std::string* error_msg) const override;
 
   // Opens .dex file that has been memory-mapped by the caller.
   std::unique_ptr<const DexFile> Open(const std::string& location,
diff --git a/libdexfile/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
index 3f311b7..a7d0363 100644
--- a/libdexfile/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -44,7 +44,7 @@
 }
 
 class ArtDexFileLoaderTest : public CommonArtTest {
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonArtTest::SetUp();
     // Open a jar file from the boot classpath for use in basic tests of dex accessors.
     std::vector<std::string> lib_core_dex_file_names = GetLibCoreDexFileNames();
diff --git a/libdexfile/dex/compact_dex_file.h b/libdexfile/dex/compact_dex_file.h
index affc9a2..8eade6d 100644
--- a/libdexfile/dex/compact_dex_file.h
+++ b/libdexfile/dex/compact_dex_file.h
@@ -253,15 +253,15 @@
 
   // Returns true if the byte string points to the magic value.
   static bool IsMagicValid(const uint8_t* magic);
-  virtual bool IsMagicValid() const OVERRIDE;
+  bool IsMagicValid() const override;
 
   // Returns true if the byte string after the magic is the correct value.
   static bool IsVersionValid(const uint8_t* magic);
-  virtual bool IsVersionValid() const OVERRIDE;
+  bool IsVersionValid() const override;
 
   // TODO This is completely a guess. We really need to do better. b/72402467
   // We ask for 64 megabytes which should be big enough for any realistic dex file.
-  virtual size_t GetDequickenedSize() const OVERRIDE {
+  size_t GetDequickenedSize() const override {
     return 64 * MB;
   }
 
@@ -269,9 +269,9 @@
     return down_cast<const Header&>(DexFile::GetHeader());
   }
 
-  virtual bool SupportsDefaultMethods() const OVERRIDE;
+  bool SupportsDefaultMethods() const override;
 
-  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
 
   uint32_t GetDebugInfoOffset(uint32_t dex_method_index) const {
     return debug_info_offsets_.GetOffset(dex_method_index);
@@ -281,7 +281,7 @@
                                     size_t base_size,
                                     const uint8_t* data_begin,
                                     size_t data_size);
-  virtual uint32_t CalculateChecksum() const OVERRIDE;
+  uint32_t CalculateChecksum() const override;
 
  private:
   CompactDexFile(const uint8_t* base,
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 6d9ca4a..400c32b 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -36,21 +36,21 @@
 class VectorContainer : public DexFileContainer {
  public:
   explicit VectorContainer(std::vector<uint8_t>&& vector) : vector_(std::move(vector)) { }
-  virtual ~VectorContainer() OVERRIDE { }
+  ~VectorContainer() override { }
 
-  int GetPermissions() OVERRIDE {
+  int GetPermissions() override {
     return 0;
   }
 
-  bool IsReadOnly() OVERRIDE {
+  bool IsReadOnly() override {
     return true;
   }
 
-  bool EnableWrite() OVERRIDE {
+  bool EnableWrite() override {
     return false;
   }
 
-  bool DisableWrite() OVERRIDE {
+  bool DisableWrite() override {
     return false;
   }
 
diff --git a/libdexfile/dex/dex_instruction.h b/libdexfile/dex/dex_instruction.h
index 6807025..ad8a184 100644
--- a/libdexfile/dex/dex_instruction.h
+++ b/libdexfile/dex/dex_instruction.h
@@ -708,12 +708,12 @@
 
 // Class for accessing operands for instructions with a range format
 // (e.g. 3rc and 4rcc).
-class RangeInstructionOperands FINAL : public InstructionOperands {
+class RangeInstructionOperands final : public InstructionOperands {
  public:
   RangeInstructionOperands(uint32_t first_operand, size_t num_operands)
       : InstructionOperands(num_operands), first_operand_(first_operand) {}
   ~RangeInstructionOperands() {}
-  uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+  uint32_t GetOperand(size_t operand_index) const override;
 
  private:
   const uint32_t first_operand_;
@@ -723,13 +723,13 @@
 
 // Class for accessing operands for instructions with a variable
 // number of arguments format (e.g. 35c and 45cc).
-class VarArgsInstructionOperands FINAL : public InstructionOperands {
+class VarArgsInstructionOperands final : public InstructionOperands {
  public:
   VarArgsInstructionOperands(const uint32_t (&operands)[Instruction::kMaxVarArgRegs],
                              size_t num_operands)
       : InstructionOperands(num_operands), operands_(operands) {}
   ~VarArgsInstructionOperands() {}
-  uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+  uint32_t GetOperand(size_t operand_index) const override;
 
  private:
   const uint32_t (&operands_)[Instruction::kMaxVarArgRegs];
@@ -739,12 +739,12 @@
 
 // Class for accessing operands without the receiver by wrapping an
 // existing InstructionOperands instance.
-class NoReceiverInstructionOperands FINAL : public InstructionOperands {
+class NoReceiverInstructionOperands final : public InstructionOperands {
  public:
   explicit NoReceiverInstructionOperands(const InstructionOperands* const inner)
       : InstructionOperands(inner->GetNumberOfOperands() - 1), inner_(inner) {}
   ~NoReceiverInstructionOperands() {}
-  uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+  uint32_t GetOperand(size_t operand_index) const override;
 
  private:
   const InstructionOperands* const inner_;
diff --git a/libdexfile/dex/standard_dex_file.h b/libdexfile/dex/standard_dex_file.h
index 999e5b9..fd7e78f 100644
--- a/libdexfile/dex/standard_dex_file.h
+++ b/libdexfile/dex/standard_dex_file.h
@@ -73,17 +73,17 @@
 
   // Returns true if the byte string points to the magic value.
   static bool IsMagicValid(const uint8_t* magic);
-  virtual bool IsMagicValid() const OVERRIDE;
+  bool IsMagicValid() const override;
 
   // Returns true if the byte string after the magic is the correct value.
   static bool IsVersionValid(const uint8_t* magic);
-  virtual bool IsVersionValid() const OVERRIDE;
+  bool IsVersionValid() const override;
 
-  virtual bool SupportsDefaultMethods() const OVERRIDE;
+  bool SupportsDefaultMethods() const override;
 
-  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
 
-  virtual size_t GetDequickenedSize() const OVERRIDE {
+  size_t GetDequickenedSize() const override {
     return Size();
   }
 
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index 42c3320..417abaa 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -35,7 +35,7 @@
 
 class ProfileCompilationInfoTest : public CommonArtTest {
  public:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonArtTest::SetUp();
     allocator_.reset(new ArenaAllocator(&pool_));
   }
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index b6bfec6..d9be750 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -129,7 +129,7 @@
 }
 
 template <typename ElfTypes>
-class OatSymbolizer FINAL {
+class OatSymbolizer final {
  public:
   OatSymbolizer(const OatFile* oat_file, const std::string& output_name, bool no_bits) :
       oat_file_(oat_file),
@@ -1825,11 +1825,11 @@
       oat_file = OatFile::Open(/* zip_fd */ -1,
                                oat_location,
                                oat_location,
-                               nullptr,
-                               nullptr,
-                               false,
-                               /*low_4gb*/false,
-                               nullptr,
+                               /* requested_base */ nullptr,
+                               /* executable */ false,
+                               /* low_4gb */ false,
+                               /* abs_dex_location */ nullptr,
+                               /* reservation */ nullptr,
                                &error_msg);
     }
     if (oat_file == nullptr) {
@@ -1980,7 +1980,7 @@
    public:
     explicit DumpArtMethodVisitor(ImageDumper* image_dumper) : image_dumper_(image_dumper) {}
 
-    virtual void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::ostream& indent_os = image_dumper_->vios_.Stream();
       indent_os << method << " " << " ArtMethod: " << ArtMethod::PrettyMethod(method) << "\n";
       image_dumper_->DumpMethod(method, indent_os);
@@ -2723,11 +2723,11 @@
     std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
                                                     options->app_oat_,
                                                     options->app_oat_,
-                                                    nullptr,
-                                                    nullptr,
-                                                    false,
-                                                    /*low_4gb*/true,
-                                                    nullptr,
+                                                    /* requested_base */ nullptr,
+                                                    /* executable */ false,
+                                                    /* low_4gb */ true,
+                                                    /* abs_dex_location */ nullptr,
+                                                    /* reservation */ nullptr,
                                                     &error_msg));
     if (oat_file == nullptr) {
       LOG(ERROR) << "Failed to open oat file " << options->app_oat_ << " with error " << error_msg;
@@ -2847,11 +2847,11 @@
   std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
                                                   oat_filename,
                                                   oat_filename,
-                                                  nullptr,
-                                                  nullptr,
-                                                  false,
-                                                  /*low_4gb*/false,
+                                                  /* requested_base */ nullptr,
+                                                  /* executable */ false,
+                                                  /* low_4gb */ false,
                                                   dex_filename,
+                                                  /* reservation */ nullptr,
                                                   &error_msg));
   if (oat_file == nullptr) {
     LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2873,11 +2873,11 @@
   std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
                                                   oat_filename,
                                                   oat_filename,
-                                                  nullptr,
-                                                  nullptr,
-                                                  false,
-                                                  /*low_4gb*/false,
+                                                  /* requested_base */ nullptr,
+                                                  /* executable */ false,
+                                                  /* low_4gb */ false,
                                                   dex_filename,
+                                                  /* reservation */ nullptr,
                                                   &error_msg));
   if (oat_file == nullptr) {
     LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2921,11 +2921,11 @@
       std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
                                                       oat_filename,
                                                       oat_filename,
-                                                      nullptr,
-                                                      nullptr,
-                                                      false,
+                                                      /* requested_base */ nullptr,
+                                                      /* executable */ false,
                                                       /*low_4gb*/false,
                                                       dex_filename,
+                                                      /* reservation */ nullptr,
                                                       &error_msg));
       if (oat_file == nullptr) {
         LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -3350,8 +3350,7 @@
  protected:
   using Base = CmdlineArgs;
 
-  virtual ParseStatus ParseCustom(const StringPiece& option,
-                                  std::string* error_msg) OVERRIDE {
+  ParseStatus ParseCustom(const StringPiece& option, std::string* error_msg) override {
     {
       ParseStatus base_parse = Base::ParseCustom(option, error_msg);
       if (base_parse != kParseUnknownArgument) {
@@ -3408,7 +3407,7 @@
     return kParseOk;
   }
 
-  virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+  ParseStatus ParseChecks(std::string* error_msg) override {
     // Infer boot image location from the image location if possible.
     if (boot_image_location_ == nullptr) {
       boot_image_location_ = image_location_;
@@ -3536,7 +3535,7 @@
 };
 
 struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
-  virtual bool NeedsRuntime() OVERRIDE {
+  bool NeedsRuntime() override {
     CHECK(args_ != nullptr);
 
     // If we are only doing the oat file, disable absolute_addresses. Keep them for image dumping.
@@ -3563,7 +3562,7 @@
           !args_->symbolize_;
   }
 
-  virtual bool ExecuteWithoutRuntime() OVERRIDE {
+  bool ExecuteWithoutRuntime() override {
     CHECK(args_ != nullptr);
     CHECK(args_->oat_filename_ != nullptr);
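
For context on the /* parameter */ comments introduced at the OatFile::Open call sites above: with several adjacent nullptr/bool literals, labelling each argument at the call site is the readability gain being bought. A minimal stand-alone sketch of the convention; Open and its parameters here are hypothetical stand-ins, not the real OatFile::Open signature:

    // Hypothetical helper mirroring the shape of the calls above.
    void* Open(const char* name,
               void* requested_base,
               bool executable,
               bool low_4gb,
               void* reservation);

    void Example() {
      // Hard to review: which literal binds to which parameter?
      Open("core.oat", nullptr, false, false, nullptr);

      // Each literal is labelled with the parameter it binds to, so a reviewer
      // can check it against the declaration without leaving the call site.
      Open("core.oat",
           /* requested_base */ nullptr,
           /* executable */ false,
           /* low_4gb */ false,
           /* reservation */ nullptr);
    }
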
 
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 59f61e2..3213bbe 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -506,13 +506,15 @@
 
   static jvmtiError IterateOverInstancesOfClass(
       jvmtiEnv* env,
-      jclass klass ATTRIBUTE_UNUSED,
-      jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
-      jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
-      const void* user_data ATTRIBUTE_UNUSED) {
+      jclass klass,
+      jvmtiHeapObjectFilter object_filter,
+      jvmtiHeapObjectCallback heap_object_callback,
+      const void* user_data) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_tag_objects);
-    return ERR(NOT_IMPLEMENTED);
+    HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
+    return heap_util.IterateOverInstancesOfClass(
+        env, klass, object_filter, heap_object_callback, user_data);
   }
 
   static jvmtiError GetLocalObject(jvmtiEnv* env,
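
The IterateOverInstancesOfClass entry point above now delegates to HeapUtil instead of returning NOT_IMPLEMENTED. For reference, a minimal agent-side sketch of driving it through the standard JVMTI 1.0 heap-iteration API; the function and variable names (CountInstance, CountInstancesOf) are illustrative only:

    #include <cstddef>
    #include <jvmti.h>

    // Heap-object callback, invoked once per reported object.
    static jvmtiIterationControl JNICALL CountInstance(jlong /* class_tag */,
                                                       jlong /* size */,
                                                       jlong* /* tag_ptr */,
                                                       void* user_data) {
      ++*static_cast<size_t*>(user_data);
      return JVMTI_ITERATION_CONTINUE;  // JVMTI_ITERATION_ABORT stops further reports.
    }

    // Counts live instances of klass. Requires the can_tag_objects capability,
    // matching the ENSURE_HAS_CAP(env, can_tag_objects) check above.
    static size_t CountInstancesOf(jvmtiEnv* jvmti, jclass klass) {
      size_t count = 0;
      jvmtiError err = jvmti->IterateOverInstancesOfClass(
          klass, JVMTI_HEAP_OBJECT_EITHER, CountInstance, &count);
      return err == JVMTI_ERROR_NONE ? count : 0;
    }
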
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index 6e991de..d9f34a5 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -58,13 +58,13 @@
   explicit JvmtiMethodInspectionCallback(DeoptManager* manager) : manager_(manager) {}
 
   bool IsMethodBeingInspected(art::ArtMethod* method)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+      override REQUIRES_SHARED(art::Locks::mutator_lock_);
 
   bool IsMethodSafeToJit(art::ArtMethod* method)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+      override REQUIRES_SHARED(art::Locks::mutator_lock_);
 
   bool MethodNeedsDebugVersion(art::ArtMethod* method)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+      override REQUIRES_SHARED(art::Locks::mutator_lock_);
 
  private:
   DeoptManager* manager_;
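
The OVERRIDE and FINAL tokens being dropped throughout this change are ART's pre-C++11 compatibility macros for the override and final specifiers. A small stand-alone sketch of what the plain keywords enforce; the class names here are illustrative:

    class Listener {
     public:
      virtual ~Listener() {}
      virtual void OnEvent(int value) = 0;
    };

    // 'final' forbids further derivation from LoggingListener.
    class LoggingListener final : public Listener {
     public:
      // 'override' makes the compiler reject this method if it no longer matches
      // a virtual in the base class, e.g. after a signature change to OnEvent.
      void OnEvent(int value) override { (void)value; }
    };
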
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index 6a8ba48..e98517f 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -50,7 +50,7 @@
 // pending exceptions since they can cause new ones to be thrown. In accordance with the JVMTI
 // specification we allow exceptions originating from events to overwrite the current exception,
 // including exceptions originating from earlier events.
-class ScopedEventDispatchEnvironment FINAL : public art::ValueObject {
+class ScopedEventDispatchEnvironment final : public art::ValueObject {
  public:
   ScopedEventDispatchEnvironment() : env_(nullptr), throw_(nullptr, nullptr) {
     DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative);
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index f71a5dc..43d0b10 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -265,7 +265,7 @@
   explicit JvmtiDdmChunkListener(EventHandler* handler) : handler_(handler) {}
 
   void DdmPublishChunk(uint32_t type, const art::ArrayRef<const uint8_t>& data)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kDdmPublishChunk)) {
       art::Thread* self = art::Thread::Current();
       handler_->DispatchEvent<ArtJvmtiEvent::kDdmPublishChunk>(
@@ -288,7 +288,7 @@
   explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
 
   void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     DCHECK_EQ(self, art::Thread::Current());
 
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kVmObjectAlloc)) {
@@ -337,7 +337,7 @@
   explicit JvmtiMonitorListener(EventHandler* handler) : handler_(handler) {}
 
   void MonitorContendedLocking(art::Monitor* m)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEnter)) {
       art::Thread* self = art::Thread::Current();
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -351,7 +351,7 @@
   }
 
   void MonitorContendedLocked(art::Monitor* m)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEntered)) {
       art::Thread* self = art::Thread::Current();
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -365,7 +365,7 @@
   }
 
   void ObjectWaitStart(art::Handle<art::mirror::Object> obj, int64_t timeout)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWait)) {
       art::Thread* self = art::Thread::Current();
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -392,7 +392,7 @@
   //
   // See b/65558434 for more discussion.
   void MonitorWaitFinished(art::Monitor* m, bool timeout)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWaited)) {
       art::Thread* self = art::Thread::Current();
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -429,11 +429,11 @@
         start_enabled_(false),
         finish_enabled_(false) {}
 
-  void StartPause() OVERRIDE {
+  void StartPause() override {
     handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(art::Thread::Current());
   }
 
-  void EndPause() OVERRIDE {
+  void EndPause() override {
     handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(art::Thread::Current());
   }
 
@@ -475,7 +475,7 @@
   }
 }
 
-class JvmtiMethodTraceListener FINAL : public art::instrumentation::InstrumentationListener {
+class JvmtiMethodTraceListener final : public art::instrumentation::InstrumentationListener {
  public:
   explicit JvmtiMethodTraceListener(EventHandler* handler) : event_handler_(handler) {}
 
@@ -484,7 +484,7 @@
                      art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                      art::ArtMethod* method,
                      uint32_t dex_pc ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (!method->IsRuntimeMethod() &&
         event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodEntry)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -501,7 +501,7 @@
                     art::ArtMethod* method,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> return_value)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (!method->IsRuntimeMethod() &&
         event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
       DCHECK_EQ(
@@ -528,7 +528,7 @@
                     art::ArtMethod* method,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     const art::JValue& return_value)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (!method->IsRuntimeMethod() &&
         event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
       DCHECK_NE(
@@ -556,7 +556,7 @@
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method,
                     uint32_t dex_pc ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (!method->IsRuntimeMethod() &&
         event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
       jvalue val;
@@ -586,7 +586,7 @@
                   art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                   art::ArtMethod* method,
                   uint32_t new_dex_pc)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     DCHECK(!method->IsRuntimeMethod());
     // Default methods might be copied to multiple classes. We need to get the canonical version of
     // this method so that we can check for breakpoints correctly.
@@ -613,7 +613,7 @@
                  art::ArtMethod* method,
                  uint32_t dex_pc,
                  art::ArtField* field)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldAccess)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
       // DCHECK(!self->IsExceptionPending());
@@ -638,7 +638,7 @@
                     uint32_t dex_pc,
                     art::ArtField* field,
                     art::Handle<art::mirror::Object> new_val)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
       // DCHECK(!self->IsExceptionPending());
@@ -670,7 +670,7 @@
                     uint32_t dex_pc,
                     art::ArtField* field,
                     const art::JValue& field_value)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
       DCHECK(!self->IsExceptionPending());
@@ -700,7 +700,7 @@
   }
 
   void WatchedFramePop(art::Thread* self, const art::ShadowFrame& frame)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
     jboolean is_exception_pending = self->IsExceptionPending();
     RunEventCallback<ArtJvmtiEvent::kFramePop>(
@@ -720,7 +720,7 @@
     // Finds the location where this exception will most likely be caught. We ignore intervening
     // native frames (which could catch the exception) and return the closest java frame with a
     // compatible catch statement.
-    class CatchLocationFinder FINAL : public art::StackVisitor {
+    class CatchLocationFinder final : public art::StackVisitor {
      public:
       CatchLocationFinder(art::Thread* target,
                           art::Handle<art::mirror::Class> exception_class,
@@ -733,7 +733,7 @@
           catch_method_ptr_(out_catch_method),
           catch_dex_pc_ptr_(out_catch_pc) {}
 
-      bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
         art::ArtMethod* method = GetMethod();
         DCHECK(method != nullptr);
         if (method->IsRuntimeMethod()) {
@@ -782,7 +782,7 @@
 
   // Call-back when an exception is thrown.
   void ExceptionThrown(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     DCHECK(self->IsExceptionThrownByCurrentMethod(exception_object.Get()));
     // The instrumentation events get rid of this for us.
     DCHECK(!self->IsExceptionPending());
@@ -812,7 +812,7 @@
 
   // Call-back when an exception is handled.
   void ExceptionHandled(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     // Since the exception has already been handled there shouldn't be one pending.
     DCHECK(!self->IsExceptionPending());
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kExceptionCatch)) {
@@ -839,7 +839,7 @@
               art::ArtMethod* method ATTRIBUTE_UNUSED,
               uint32_t dex_pc ATTRIBUTE_UNUSED,
               int32_t dex_pc_offset ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     return;
   }
 
@@ -849,7 +849,7 @@
                                 art::ArtMethod* caller ATTRIBUTE_UNUSED,
                                 uint32_t dex_pc ATTRIBUTE_UNUSED,
                                 art::ArtMethod* callee ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     return;
   }
 
@@ -959,7 +959,7 @@
         : runtime_(runtime) {}
 
     bool operator()(art::ObjPtr<art::mirror::Class> klass)
-        OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+        override REQUIRES(art::Locks::mutator_lock_) {
       if (!klass->IsLoaded()) {
         // Skip classes that aren't loaded since they might not have fully allocated and initialized
         // their methods. Furthermore since the jvmti-plugin must have been loaded by this point
diff --git a/openjdkjvmti/object_tagging.h b/openjdkjvmti/object_tagging.h
index 1b8366a..4181302 100644
--- a/openjdkjvmti/object_tagging.h
+++ b/openjdkjvmti/object_tagging.h
@@ -45,15 +45,15 @@
 struct ArtJvmTiEnv;
 class EventHandler;
 
-class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
+class ObjectTagTable final : public JvmtiWeakTable<jlong> {
  public:
   ObjectTagTable(EventHandler* event_handler, ArtJvmTiEnv* env)
       : event_handler_(event_handler), jvmti_env_(env) {}
 
-  bool Set(art::mirror::Object* obj, jlong tag) OVERRIDE
+  bool Set(art::mirror::Object* obj, jlong tag) override
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_);
-  bool SetLocked(art::mirror::Object* obj, jlong tag) OVERRIDE
+  bool SetLocked(art::mirror::Object* obj, jlong tag) override
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(allow_disallow_lock_);
 
@@ -73,8 +73,8 @@
   }
 
  protected:
-  bool DoesHandleNullOnSweep() OVERRIDE;
-  void HandleNullSweep(jlong tag) OVERRIDE;
+  bool DoesHandleNullOnSweep() override;
+  void HandleNullSweep(jlong tag) override;
 
  private:
   EventHandler* event_handler_;
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 209add3..f1d6fb0 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -163,7 +163,7 @@
                       const art::DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
                       /*out*/art::DexFile const** final_dex_file,
                       /*out*/art::DexFile::ClassDef const** final_class_def)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     bool is_enabled =
         event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookRetransformable) ||
         event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookNonRetransformable);
@@ -381,7 +381,7 @@
     void VisitRoots(art::mirror::Object*** roots,
                     size_t count,
                     const art::RootInfo& info ATTRIBUTE_UNUSED)
-        OVERRIDE {
+        override {
       for (size_t i = 0; i != count; ++i) {
         if (*roots[i] == input_) {
           *roots[i] = output_;
@@ -392,7 +392,7 @@
     void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
                     size_t count,
                     const art::RootInfo& info ATTRIBUTE_UNUSED)
-        OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+        override REQUIRES_SHARED(art::Locks::mutator_lock_) {
       for (size_t i = 0; i != count; ++i) {
         if (roots[i]->AsMirrorPtr() == input_) {
           roots[i]->Assign(output_);
@@ -418,7 +418,7 @@
       WeakGlobalUpdate(art::mirror::Class* root_input, art::mirror::Class* root_output)
           : input_(root_input), output_(root_output) {}
 
-      art::mirror::Object* IsMarked(art::mirror::Object* obj) OVERRIDE {
+      art::mirror::Object* IsMarked(art::mirror::Object* obj) override {
         if (obj == input_) {
           return output_;
         }
diff --git a/openjdkjvmti/ti_dump.cc b/openjdkjvmti/ti_dump.cc
index 253580e..c9abb71 100644
--- a/openjdkjvmti/ti_dump.cc
+++ b/openjdkjvmti/ti_dump.cc
@@ -44,7 +44,7 @@
 namespace openjdkjvmti {
 
 struct DumpCallback : public art::RuntimeSigQuitCallback {
-  void SigQuit() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void SigQuit() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::Thread* thread = art::Thread::Current();
     art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
     event_handler->DispatchEvent<ArtJvmtiEvent::kDataDumpRequest>(art::Thread::Current());
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index d23370b..6c79a60 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -653,6 +653,70 @@
   art::Runtime::Current()->RemoveSystemWeakHolder(&gIndexCachingTable);
 }
 
+jvmtiError HeapUtil::IterateOverInstancesOfClass(jvmtiEnv* env,
+                                                 jclass klass,
+                                                 jvmtiHeapObjectFilter filter,
+                                                 jvmtiHeapObjectCallback cb,
+                                                 const void* user_data) {
+  if (cb == nullptr || klass == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+
+  art::Thread* self = art::Thread::Current();
+  art::ScopedObjectAccess soa(self);      // Now we know we have the shared lock.
+  art::StackHandleScope<1> hs(self);
+
+  art::ObjPtr<art::mirror::Object> klass_ptr(soa.Decode<art::mirror::Class>(klass));
+  if (!klass_ptr->IsClass()) {
+    return ERR(INVALID_CLASS);
+  }
+  art::Handle<art::mirror::Class> filter_klass(hs.NewHandle(klass_ptr->AsClass()));
+  if (filter_klass->IsInterface()) {
+    // Nothing is an 'instance' of an interface, so just return without walking anything.
+    return OK;
+  }
+
+  ObjectTagTable* tag_table = ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get();
+  bool stop_reports = false;
+  auto visitor = [&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    // Early return, as we can't really stop visiting.
+    if (stop_reports) {
+      return;
+    }
+
+    art::ScopedAssertNoThreadSuspension no_suspension("IterateOverInstancesOfClass");
+
+    art::ObjPtr<art::mirror::Class> klass = obj->GetClass();
+
+    if (filter_klass != nullptr && !filter_klass->IsAssignableFrom(klass)) {
+      return;
+    }
+
+    jlong tag = 0;
+    tag_table->GetTag(obj, &tag);
+    if ((filter != JVMTI_HEAP_OBJECT_EITHER) &&
+        ((tag == 0 && filter == JVMTI_HEAP_OBJECT_TAGGED) ||
+         (tag != 0 && filter == JVMTI_HEAP_OBJECT_UNTAGGED))) {
+      return;
+    }
+
+    jlong class_tag = 0;
+    tag_table->GetTag(klass.Ptr(), &class_tag);
+
+    jlong saved_tag = tag;
+    jint ret = cb(class_tag, obj->SizeOf(), &tag, const_cast<void*>(user_data));
+
+    stop_reports = (ret == JVMTI_ITERATION_ABORT);
+
+    if (tag != saved_tag) {
+      tag_table->Set(obj, tag);
+    }
+  };
+  art::Runtime::Current()->GetHeap()->VisitObjects(visitor);
+
+  return OK;
+}
+
 template <typename T>
 static jvmtiError DoIterateThroughHeap(T fn,
                                        jvmtiEnv* env,
@@ -760,7 +824,7 @@
                               user_data);
 }
 
-class FollowReferencesHelper FINAL {
+class FollowReferencesHelper final {
  public:
   FollowReferencesHelper(HeapUtil* h,
                          jvmtiEnv* jvmti_env,
@@ -828,7 +892,7 @@
   }
 
  private:
-  class CollectAndReportRootsVisitor FINAL : public art::RootVisitor {
+  class CollectAndReportRootsVisitor final : public art::RootVisitor {
    public:
     CollectAndReportRootsVisitor(FollowReferencesHelper* helper,
                                  ObjectTagTable* tag_table,
@@ -841,7 +905,7 @@
           stop_reports_(false) {}
 
     void VisitRoots(art::mirror::Object*** roots, size_t count, const art::RootInfo& info)
-        OVERRIDE
+        override
         REQUIRES_SHARED(art::Locks::mutator_lock_)
         REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
       for (size_t i = 0; i != count; ++i) {
@@ -852,7 +916,7 @@
     void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
                     size_t count,
                     const art::RootInfo& info)
-        OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_)
+        override REQUIRES_SHARED(art::Locks::mutator_lock_)
         REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
       for (size_t i = 0; i != count; ++i) {
         AddRoot(roots[i]->AsMirrorPtr(), info);
@@ -1347,7 +1411,7 @@
     explicit ReportClassVisitor(art::Thread* self) : self_(self) {}
 
     bool operator()(art::ObjPtr<art::mirror::Class> klass)
-        OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+        override REQUIRES_SHARED(art::Locks::mutator_lock_) {
       if (klass->IsLoaded() || klass->IsErroneous()) {
         classes_.push_back(self_->GetJniEnv()->AddLocalReference<jclass>(klass));
       }
diff --git a/openjdkjvmti/ti_heap.h b/openjdkjvmti/ti_heap.h
index 62761b5..382d80f 100644
--- a/openjdkjvmti/ti_heap.h
+++ b/openjdkjvmti/ti_heap.h
@@ -30,6 +30,12 @@
 
   jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr);
 
+  jvmtiError IterateOverInstancesOfClass(jvmtiEnv* env,
+                                         jclass klass,
+                                         jvmtiHeapObjectFilter filter,
+                                         jvmtiHeapObjectCallback cb,
+                                         const void* user_data);
+
   jvmtiError IterateThroughHeap(jvmtiEnv* env,
                                 jint heap_filter,
                                 jclass klass,
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 87d832c..1588df4 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -66,7 +66,7 @@
   void RegisterNativeMethod(art::ArtMethod* method,
                             const void* cur_method,
                             /*out*/void** new_method)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kNativeMethodBind)) {
       art::Thread* thread = art::Thread::Current();
       art::JNIEnvExt* jnienv = thread->GetJniEnv();
@@ -550,7 +550,7 @@
   CommonLocalVariableClosure(jint depth, jint slot)
       : result_(ERR(INTERNAL)), depth_(depth), slot_(slot) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     art::ScopedAssertNoThreadSuspension sants("CommonLocalVariableClosure::Run");
     std::unique_ptr<art::Context> context(art::Context::Create());
@@ -702,7 +702,7 @@
   jvmtiError GetTypeError(art::ArtMethod* method ATTRIBUTE_UNUSED,
                           art::Primitive::Type slot_type,
                           const std::string& descriptor ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (slot_type) {
       case art::Primitive::kPrimByte:
       case art::Primitive::kPrimChar:
@@ -722,7 +722,7 @@
   }
 
   jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (type_) {
       case art::Primitive::kPrimNot: {
         uint32_t ptr_val;
@@ -816,7 +816,7 @@
   jvmtiError GetTypeError(art::ArtMethod* method,
                           art::Primitive::Type slot_type,
                           const std::string& descriptor)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (slot_type) {
       case art::Primitive::kPrimNot: {
         if (type_ != art::Primitive::kPrimNot) {
@@ -852,7 +852,7 @@
   }
 
   jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (type_) {
       case art::Primitive::kPrimNot: {
         uint32_t ptr_val;
@@ -941,7 +941,7 @@
         depth_(depth),
         val_(nullptr) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES(art::Locks::mutator_lock_) {
     art::ScopedAssertNoThreadSuspension sants("GetLocalInstanceClosure::Run");
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     std::unique_ptr<art::Context> context(art::Context::Create());
diff --git a/openjdkjvmti/ti_phase.cc b/openjdkjvmti/ti_phase.cc
index 7157974..4fa97f1 100644
--- a/openjdkjvmti/ti_phase.cc
+++ b/openjdkjvmti/ti_phase.cc
@@ -56,7 +56,7 @@
     return soa.AddLocalReference<jthread>(soa.Self()->GetPeer());
   }
 
-  void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+  void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     art::Thread* self = art::Thread::Current();
     switch (phase) {
       case RuntimePhase::kInitialAgents:
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index f8c1172..db2b143 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -158,7 +158,7 @@
           obsoleted_methods_(obsoleted_methods),
           obsolete_maps_(obsolete_maps) { }
 
-  ~ObsoleteMethodStackVisitor() OVERRIDE {}
+  ~ObsoleteMethodStackVisitor() override {}
 
  public:
   // Returns true if we successfully installed obsolete methods on this thread, filling
@@ -177,7 +177,7 @@
     visitor.WalkStack();
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES(art::Locks::mutator_lock_) {
     art::ScopedAssertNoThreadSuspension snts("Fixing up the stack for obsolete methods.");
     art::ArtMethod* old_method = GetMethod();
     if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index bcbab14..1189b1d 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -186,7 +186,7 @@
 }
 
 struct SearchCallback : public art::RuntimePhaseCallback {
-  void NextRuntimePhase(RuntimePhase phase) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void NextRuntimePhase(RuntimePhase phase) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (phase == RuntimePhase::kStart) {
       // It's time to update the system properties.
       Update();
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 318d98d..b6969af 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -128,7 +128,7 @@
         start_result(0),
         stop_result(0) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     auto frames_fn = [&](jvmtiFrameInfo info) {
       frames.push_back(info);
     };
@@ -195,7 +195,7 @@
     DCHECK_GE(start_input, 0u);
   }
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     auto frames_fn = [&](jvmtiFrameInfo info) {
       frame_buffer[index] = info;
       ++index;
@@ -287,7 +287,7 @@
   GetAllStackTracesVectorClosure(size_t stop, Data* data_)
       : barrier(0), stop_input(stop), data(data_) {}
 
-  void Run(art::Thread* thread) OVERRIDE
+  void Run(art::Thread* thread) override
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!data->mutex) {
     art::Thread* self = art::Thread::Current();
@@ -678,7 +678,7 @@
  public:
   GetFrameCountClosure() : count(0) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     GetFrameCountVisitor visitor(self);
     visitor.WalkStack(false);
 
@@ -759,7 +759,7 @@
  public:
   explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     GetLocationVisitor visitor(self, n);
     visitor.WalkStack(false);
 
@@ -842,7 +842,7 @@
     delete context_;
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     if (!GetMethod()->IsRuntimeMethod()) {
       art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
@@ -867,7 +867,7 @@
   }
 
   void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     for (const art::Handle<art::mirror::Object>& m : monitors) {
       if (m.Get() == obj) {
         return;
@@ -889,7 +889,7 @@
   explicit MonitorInfoClosure(Fn handle_results)
       : err_(OK), handle_results_(handle_results) {}
 
-  void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     // Find the monitors on the stack.
     MonitorVisitor visitor(target);
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 949b566..e533094 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -82,7 +82,7 @@
                                          thread.get());
   }
 
-  void ThreadStart(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void ThreadStart(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (!started) {
       // Runtime isn't started. We only expect at most the signal handler or JIT threads to be
       // started here.
@@ -101,7 +101,7 @@
     Post<ArtJvmtiEvent::kThreadStart>(self);
   }
 
-  void ThreadDeath(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void ThreadDeath(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     Post<ArtJvmtiEvent::kThreadEnd>(self);
   }
 
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index 8797553..d87ca56 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -68,7 +68,7 @@
 namespace openjdkjvmti {
 
 // A FaultHandler that will deal with initializing ClassDefinitions when they are actually needed.
-class TransformationFaultHandler FINAL : public art::FaultHandler {
+class TransformationFaultHandler final : public art::FaultHandler {
  public:
   explicit TransformationFaultHandler(art::FaultManager* manager)
       : art::FaultHandler(manager),
@@ -84,7 +84,7 @@
     uninitialized_class_definitions_.clear();
   }
 
-  bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+  bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) override {
     DCHECK_EQ(sig, SIGSEGV);
     art::Thread* self = art::Thread::Current();
     if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 8169979..02fc925 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -815,7 +815,7 @@
  public:
   explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
 
-  void Visit(ArtField* field) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
     dest->SetDeclaringClass(
         patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass().Ptr()));
@@ -834,7 +834,7 @@
  public:
   explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
 
-  void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method);
     patch_oat_->FixupMethod(method, dest);
   }
@@ -877,7 +877,7 @@
   }
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
     }
@@ -885,7 +885,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
     }
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 370f59d..286b686 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -40,7 +40,7 @@
 
 class ProfileAssistantTest : public CommonRuntimeTest {
  public:
-  void PostRuntimeCreate() OVERRIDE {
+  void PostRuntimeCreate() override {
     allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
   }
 
diff --git a/profman/profman.cc b/profman/profman.cc
index 9b47097..cecd3c2 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -185,7 +185,7 @@
 
 // TODO(calin): This class has grown too much from its initial design. Split the functionality
 // into smaller, more contained pieces.
-class ProfMan FINAL {
+class ProfMan final {
  public:
   ProfMan() :
       reference_profile_file_fd_(kInvalidFd),
diff --git a/runtime/aot_class_linker.h b/runtime/aot_class_linker.h
index 927b533..6a8133e 100644
--- a/runtime/aot_class_linker.h
+++ b/runtime/aot_class_linker.h
@@ -34,14 +34,14 @@
                                                  Handle<mirror::Class> klass,
                                                  verifier::HardFailLogMode log_level,
                                                  std::string* error_msg)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool InitializeClass(Thread *self,
                        Handle<mirror::Class> klass,
                        bool can_run_clinit,
                        bool can_init_parents)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 };
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index d4ceede..d4dbbf9 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -46,7 +46,7 @@
 
 class ArchTest : public CommonRuntimeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     // Use 64-bit ISA for runtime setup to make method size potentially larger
     // than necessary (rather than smaller) during CreateCalleeSaveMethod
     options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -55,7 +55,7 @@
   // Do not do any of the finalization. We don't want to run any code, we don't need the heap
   // prepared, it actually will be a problem with setting the instruction set to x86_64 in
   // SetUpRuntimeOptions.
-  void FinalizeSetup() OVERRIDE {
+  void FinalizeSetup() override {
     ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
   }
 };
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index b980296..845cdaa 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -26,7 +26,7 @@
 namespace art {
 namespace arm {
 
-class ArmContext FINAL : public Context {
+class ArmContext final : public Context {
  public:
   ArmContext() {
     Reset();
@@ -34,55 +34,55 @@
 
   virtual ~ArmContext() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(SP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     SetGPR(PC, new_pc);
   }
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(R0, new_arg0_value);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
  private:
   // Pointers to register locations, initialized to null or the specific registers below.
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index f82534b..d964148 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -25,7 +25,7 @@
 using ArmFeaturesUniquePtr = std::unique_ptr<const ArmInstructionSetFeatures>;
 
 // Instruction set features relevant to the ARM architecture.
-class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class ArmInstructionSetFeatures final : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
   static ArmFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,18 +47,18 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static ArmFeaturesUniquePtr FromAssembly();
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+  bool HasAtLeast(const InstructionSetFeatures* other) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kArm;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
   // Return a string of the form "div,lpae" or "none".
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   // Is the divide instruction feature enabled?
   bool HasDivideInstruction() const {
@@ -82,7 +82,7 @@
   // Parse a vector of the form "div", "lpae" adding these to a new ArmInstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE;
+                                 std::string* error_msg) const override;
 
  private:
   ArmInstructionSetFeatures(bool has_div,
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 0f0814a..16f4792 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -23,6 +23,12 @@
 #include "quick/quick_method_frame_info.h"
 #include "thread-current-inl.h"
 
+#if __has_feature(hwaddress_sanitizer)
+#include <sanitizer/hwasan_interface.h>
+#else
+#define __hwasan_handle_longjmp(sp)
+#endif
+
 namespace art {
 namespace arm64 {
 
@@ -139,6 +145,8 @@
   }
   // Ensure the Thread Register contains the address of the current thread.
   DCHECK_EQ(reinterpret_cast<uintptr_t>(Thread::Current()), gprs[TR]);
+  // Tell HWASan about the new stack top.
+  __hwasan_handle_longjmp(reinterpret_cast<void*>(gprs[SP]));
   // The Marking Register will be updated by art_quick_do_long_jump.
   art_quick_do_long_jump(gprs, fprs);
 }
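
The context_arm64.cc hunk above conditionally pulls in the HWASan interface header so that builds without hwaddress_sanitizer still compile, and tells HWASan about the new stack top before the long jump. A self-contained sketch of the same guard-and-stub pattern; AbandonFramesBelow is a hypothetical stand-in for DoLongJump, and __has_feature is a Clang extension:

    #if __has_feature(hwaddress_sanitizer)
    #include <sanitizer/hwasan_interface.h>
    #else
    // Compiles to nothing when HWASan is disabled, so call sites need no #ifdefs.
    #define __hwasan_handle_longjmp(sp)
    #endif

    // Before abandoning the frames below new_sp (a longjmp-style control
    // transfer), tell HWASan the new stack top so stale stack tags below it do
    // not trigger false reports.
    void AbandonFramesBelow(void* new_sp) {
      __hwasan_handle_longjmp(new_sp);
      // ... restore registers and transfer control ...
    }
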
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index e64cfb8..95dac90 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -26,7 +26,7 @@
 namespace art {
 namespace arm64 {
 
-class Arm64Context FINAL : public Context {
+class Arm64Context final : public Context {
  public:
   Arm64Context() {
     Reset();
@@ -34,56 +34,56 @@
 
   ~Arm64Context() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(SP, new_sp);
   }
 
-  void SetPC(uintptr_t new_lr) OVERRIDE {
+  void SetPC(uintptr_t new_lr) override {
     SetGPR(kPC, new_lr);
   }
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(X0, new_arg0_value);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, arraysize(gprs_));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, arraysize(gprs_));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     // Note: PC isn't an available GPR (outside of internals), so don't allow retrieving the value.
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfXRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
   static constexpr size_t kPC = kNumberOfXRegisters;
 
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index af2d4c7..163a2d8 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -25,7 +25,7 @@
 using Arm64FeaturesUniquePtr = std::unique_ptr<const Arm64InstructionSetFeatures>;
 
 // Instruction set features relevant to the ARM64 architecture.
-class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Arm64InstructionSetFeatures final : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
   static Arm64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,16 +47,16 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static Arm64FeaturesUniquePtr FromAssembly();
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kArm64;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
   // Return a string of the form "a53" or "none".
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   // Generate code addressing Cortex-A53 erratum 835769?
   bool NeedFixCortexA53_835769() const {
@@ -74,7 +74,7 @@
   // Parse a vector of the form "a53" adding these to a new ArmInstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE;
+                                 std::string* error_msg) const override;
 
  private:
   Arm64InstructionSetFeatures(bool needs_a53_835769_fix, bool needs_a53_843419_fix)
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 2622d40..96ceecf 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -83,12 +83,10 @@
      * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
      */
 .macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    // art::Runtime** xIP0 = &art::Runtime::instance_
-    adrp xIP0, :got:_ZN3art7Runtime9instance_E
-    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
-
+    // art::Runtime* xIP0 = art::Runtime::instance_;
     // Our registers aren't intermixed - just spill in order.
-    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;
+    adrp xIP0, _ZN3art7Runtime9instance_E
+    ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
 
     // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveAllCalleeSaves];
     ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
@@ -127,12 +125,10 @@
      * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly).
      */
 .macro SETUP_SAVE_REFS_ONLY_FRAME
-    // art::Runtime** xIP0 = &art::Runtime::instance_
-    adrp xIP0, :got:_ZN3art7Runtime9instance_E
-    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
-
+    // art::Runtime* xIP0 = art::Runtime::instance_;
     // Our registers aren't intermixed - just spill in order.
-    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;
+    adrp xIP0, _ZN3art7Runtime9instance_E
+    ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
 
     // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefOnly];
     ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
@@ -227,12 +223,10 @@
      * TODO This is probably too conservative - saving FP & LR.
      */
 .macro SETUP_SAVE_REFS_AND_ARGS_FRAME
-    // art::Runtime** xIP0 = &art::Runtime::instance_
-    adrp xIP0, :got:_ZN3art7Runtime9instance_E
-    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
-
+    // art::Runtime* xIP0 = art::Runtime::instance_;
     // Our registers aren't intermixed - just spill in order.
-    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;
+    adrp xIP0, _ZN3art7Runtime9instance_E
+    ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
 
     // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefAndArgs];
     ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
@@ -331,11 +325,9 @@
     SAVE_TWO_REGS x25, x26, 464
     SAVE_TWO_REGS x27, x28, 480
 
-    // art::Runtime** xIP0 = &art::Runtime::instance_
-    adrp xIP0, :got:_ZN3art7Runtime9instance_E
-    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
-
-    ldr xIP0, [xIP0]  // art::Runtime* xIP0 = art::Runtime::instance_;
+    // art::Runtime* xIP0 = art::Runtime::instance_;
+    adrp xIP0, _ZN3art7Runtime9instance_E
+    ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
 
     // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveEverything];
     ldr xIP0, [xIP0, \runtime_method_offset]
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index 7e073b2..960aea1 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -33,53 +33,53 @@
   }
   virtual ~MipsContext() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(SP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     SetGPR(T9, new_pc);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(A0, new_arg0_value);
   }
 
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 76bc639..ab5bb3c 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -28,7 +28,7 @@
 using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
 
 // Instruction set features relevant to the MIPS architecture.
-class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class MipsInstructionSetFeatures final : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
   static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -50,15 +50,15 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static MipsFeaturesUniquePtr FromAssembly();
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kMips;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   // Is this an ISA revision greater than 2 opening up new opcodes.
   bool IsMipsIsaRevGreaterThanEqual2() const {
@@ -87,7 +87,7 @@
   // Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE;
+                                 std::string* error_msg) const override;
 
  private:
   MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6, bool msa)
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index b2a6138..857abfd 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -33,53 +33,53 @@
   }
   virtual ~Mips64Context() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(SP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     SetGPR(T9, new_pc);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(A0, new_arg0_value);
   }
 
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index 27e544e..e204d9d 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -25,7 +25,7 @@
 using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
 
 // Instruction set features relevant to the MIPS64 architecture.
-class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Mips64InstructionSetFeatures final : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
   static Mips64FeaturesUniquePtr FromVariant(const std::string& variant,
@@ -48,15 +48,15 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static Mips64FeaturesUniquePtr FromAssembly();
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kMips64;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   // Does it have MSA (MIPS SIMD Architecture) support.
   bool HasMsa() const {
@@ -69,7 +69,7 @@
   // Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE;
+                                 std::string* error_msg) const override;
 
  private:
   explicit Mips64InstructionSetFeatures(bool msa) : InstructionSetFeatures(), msa_(msa) {
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index b0c0e43..e8df90e 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -37,7 +37,7 @@
 class StubTest : public CommonRuntimeTest {
  protected:
   // We need callee-save methods set up in the Runtime for exceptions.
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     // Do the normal setup.
     CommonRuntimeTest::SetUp();
 
@@ -54,7 +54,7 @@
     }
   }
 
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     // Use a smaller heap
     for (std::pair<std::string, const void*>& pair : *options) {
       if (pair.first.find("-Xmx") == 0) {
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index 0ebb22b..5b438c3 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -26,62 +26,62 @@
 namespace art {
 namespace x86 {
 
-class X86Context FINAL : public Context {
+class X86Context final : public Context {
  public:
   X86Context() {
     Reset();
   }
   virtual ~X86Context() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(ESP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     eip_ = new_pc;
   }
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(EAX, new_arg0_value);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
  private:
   // Pretend XMM registers are made of uint32_t pieces, because they are manipulated
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 57cf4b2..6bd6263 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -49,17 +49,17 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static X86FeaturesUniquePtr FromAssembly(bool x86_64 = false);
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+  bool HasAtLeast(const InstructionSetFeatures* other) const override;
 
-  virtual InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kX86;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   virtual ~X86InstructionSetFeatures() {}
 
@@ -69,9 +69,9 @@
 
  protected:
   // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
-  virtual std::unique_ptr<const InstructionSetFeatures>
+  std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE {
+                                 std::string* error_msg) const override {
     return AddFeaturesFromSplitString(features, false, error_msg);
   }
 
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index d242693..ab38614 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -26,62 +26,62 @@
 namespace art {
 namespace x86_64 {
 
-class X86_64Context FINAL : public Context {
+class X86_64Context final : public Context {
  public:
   X86_64Context() {
     Reset();
   }
   virtual ~X86_64Context() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(RSP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     rip_ = new_pc;
   }
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(RDI, new_arg0_value);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
  private:
   // Pointers to register locations. Values are initialized to null or the special registers below.
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index e76490b..76258fa 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -25,7 +25,7 @@
 using X86_64FeaturesUniquePtr = std::unique_ptr<const X86_64InstructionSetFeatures>;
 
 // Instruction set features relevant to the X86_64 architecture.
-class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
+class X86_64InstructionSetFeatures final : public X86InstructionSetFeatures {
  public:
   // Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
   static X86_64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg) {
@@ -59,7 +59,7 @@
     return Convert(X86InstructionSetFeatures::FromAssembly(true));
   }
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kX86_64;
   }
 
@@ -69,7 +69,7 @@
   // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE {
+                                 std::string* error_msg) const override {
     return X86InstructionSetFeatures::AddFeaturesFromSplitString(features, true, error_msg);
   }
 
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 123595c..5afd000 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -40,7 +40,7 @@
 class String;
 }  // namespace mirror
 
-class ArtField FINAL {
+class ArtField final {
  public:
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ce08cb0..48ddc69 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -66,7 +66,7 @@
 using MethodDexCacheType = std::atomic<MethodDexCachePair>;
 }  // namespace mirror
 
-class ArtMethod FINAL {
+class ArtMethod final {
  public:
   // Should the class state be checked on sensitive operations?
   DECLARE_RUNTIME_DEBUG_FLAG(kCheckDeclaringClassState);
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index a9fbafe..851c23f 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -31,11 +31,11 @@
 
 namespace art {
 
-class MemMapArena FINAL : public Arena {
+class MemMapArena final : public Arena {
  public:
   MemMapArena(size_t size, bool low_4gb, const char* name);
   virtual ~MemMapArena();
-  void Release() OVERRIDE;
+  void Release() override;
 
  private:
   static MemMap Allocate(size_t size, bool low_4gb, const char* name);
diff --git a/runtime/base/mem_map_arena_pool.h b/runtime/base/mem_map_arena_pool.h
index 24e150e..e98ef07 100644
--- a/runtime/base/mem_map_arena_pool.h
+++ b/runtime/base/mem_map_arena_pool.h
@@ -21,17 +21,17 @@
 
 namespace art {
 
-class MemMapArenaPool FINAL : public ArenaPool {
+class MemMapArenaPool final : public ArenaPool {
  public:
   explicit MemMapArenaPool(bool low_4gb = false, const char* name = "LinearAlloc");
   virtual ~MemMapArenaPool();
-  Arena* AllocArena(size_t size) OVERRIDE;
-  void FreeArenaChain(Arena* first) OVERRIDE;
-  size_t GetBytesAllocated() const OVERRIDE;
-  void ReclaimMemory() OVERRIDE;
-  void LockReclaimMemory() OVERRIDE;
+  Arena* AllocArena(size_t size) override;
+  void FreeArenaChain(Arena* first) override;
+  size_t GetBytesAllocated() const override;
+  void ReclaimMemory() override;
+  void LockReclaimMemory() override;
   // Trim the maps in arenas by madvising, used by JIT to reduce memory usage.
-  void TrimMaps() OVERRIDE;
+  void TrimMaps() override;
 
  private:
   const bool low_4gb_;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 044c4c2..28b2912 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -125,7 +125,7 @@
   }
 }
 
-class ScopedAllMutexesLock FINAL {
+class ScopedAllMutexesLock final {
  public:
   explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
     for (uint32_t i = 0;
@@ -144,7 +144,7 @@
   const BaseMutex* const mutex_;
 };
 
-class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
  public:
   explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
     for (uint32_t i = 0;
@@ -166,7 +166,7 @@
 };
 
 // Scoped class that generates events at the beginning and end of lock contention.
-class ScopedContentionRecorder FINAL : public ValueObject {
+class ScopedContentionRecorder final : public ValueObject {
  public:
   ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
       : mutex_(kLogLockContentions ? mutex : nullptr),
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index fba209a..d127d0f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -297,7 +297,7 @@
   // For negative capabilities in clang annotations.
   const Mutex& operator!() const { return *this; }
 
-  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+  void WakeupToRespondToEmptyCheckpoint() override;
 
  private:
 #if ART_USE_FUTEXES
@@ -418,7 +418,7 @@
   // For negative capabilities in clang annotations.
   const ReaderWriterMutex& operator!() const { return *this; }
 
-  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+  void WakeupToRespondToEmptyCheckpoint() override;
 
  private:
 #if ART_USE_FUTEXES
diff --git a/runtime/cha.cc b/runtime/cha.cc
index ce84e8c..3ea920d 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -181,7 +181,7 @@
 // headers, sets the should_deoptimize flag on stack to 1.
 // TODO: also set the register value to 1 when should_deoptimize is allocated in
 // a register.
-class CHAStackVisitor FINAL  : public StackVisitor {
+class CHAStackVisitor final : public StackVisitor {
  public:
   CHAStackVisitor(Thread* thread_in,
                   Context* context,
@@ -190,7 +190,7 @@
         method_headers_(method_headers) {
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     // Avoid types of methods that do not have an oat quick method header.
     if (method == nullptr ||
@@ -245,13 +245,13 @@
   DISALLOW_COPY_AND_ASSIGN(CHAStackVisitor);
 };
 
-class CHACheckpoint FINAL : public Closure {
+class CHACheckpoint final : public Closure {
  public:
   explicit CHACheckpoint(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
       : barrier_(0),
         method_headers_(method_headers) {}
 
-  void Run(Thread* thread) OVERRIDE {
+  void Run(Thread* thread) override {
     // Note thread and self may not be equal if thread was already suspended at
     // the point of the request.
     Thread* self = Thread::Current();
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index bc28a66..ccb42c4 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -884,7 +884,7 @@
   explicit SetInterpreterEntrypointArtMethodVisitor(PointerSize image_pointer_size)
     : image_pointer_size_(image_pointer_size) {}
 
-  void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kIsDebugBuild && !method->IsRuntimeMethod()) {
       CHECK(method->GetDeclaringClass() != nullptr);
     }
@@ -1390,7 +1390,7 @@
 
 // Helper class for ArtMethod checks when adding an image. Keeps all required functionality
 // together and caches some intermediate results.
-class ImageSanityChecks FINAL {
+class ImageSanityChecks final {
  public:
   static void CheckObjects(gc::Heap* heap, ClassLinker* class_linker)
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1951,7 +1951,7 @@
         done_(false) {}
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     ClassTable* const class_table = class_loader->GetClassTable();
     if (!done_ && class_table != nullptr) {
       DefiningClassLoaderFilterVisitor visitor(class_loader, visitor_);
@@ -1972,7 +1972,7 @@
                                      ClassVisitor* visitor)
         : defining_class_loader_(defining_class_loader), visitor_(visitor) { }
 
-    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       if (klass->GetClassLoader() != defining_class_loader_) {
         return true;
       }
@@ -2009,7 +2009,7 @@
 
 class GetClassesInToVector : public ClassVisitor {
  public:
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE {
+  bool operator()(ObjPtr<mirror::Class> klass) override {
     classes_.push_back(klass);
     return true;
   }
@@ -2021,7 +2021,7 @@
   explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
       : arr_(arr), index_(0) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     ++index_;
     if (index_ <= arr_->GetLength()) {
       arr_->Set(index_ - 1, klass);
@@ -3845,7 +3845,7 @@
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
       REQUIRES(Locks::classlinker_classes_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::mutator_lock_) override {
     ClassTable* const class_table = class_loader->GetClassTable();
     if (class_table != nullptr) {
       class_table->FreezeSnapshot();
@@ -3871,7 +3871,7 @@
        result_(result) {}
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     ClassTable* const class_table = class_loader->GetClassTable();
     ObjPtr<mirror::Class> klass = class_table->Lookup(descriptor_, hash_);
     // Add `klass` only if `class_loader` is its defining (not just initiating) class loader.
@@ -5564,7 +5564,7 @@
 // Comparator for name and signature of a method, used in finding overriding methods. Implementation
 // avoids the use of handles, if it didn't then rather than compare dex files we could compare dex
 // caches in the implementation below.
-class MethodNameAndSignatureComparator FINAL : public ValueObject {
+class MethodNameAndSignatureComparator final : public ValueObject {
  public:
   explicit MethodNameAndSignatureComparator(ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_) :
@@ -8556,7 +8556,7 @@
   CountClassesVisitor() : num_zygote_classes(0), num_non_zygote_classes(0) {}
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     ClassTable* const class_table = class_loader->GetClassTable();
     if (class_table != nullptr) {
       num_zygote_classes += class_table->NumZygoteClasses(class_loader);
@@ -8826,7 +8826,7 @@
         extra_stats_(),
         last_extra_stats_(extra_stats_.end()) { }
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!klass->IsProxyClass() &&
         !klass->IsArrayClass() &&
         klass->IsResolved() &&
@@ -8914,7 +8914,7 @@
       : method_(method),
         pointer_size_(pointer_size) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+  bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) override {
     if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
       holder_ = klass;
     }
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e40f1db..ab7182a 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -442,7 +442,7 @@
 
   class TestRootVisitor : public SingleRootVisitor {
    public:
-    void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE {
+    void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) override {
       EXPECT_TRUE(root != nullptr);
     }
   };
@@ -450,7 +450,7 @@
 
 class ClassLinkerMethodHandlesTest : public ClassLinkerTest {
  protected:
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     CommonRuntimeTest::SetUpRuntimeOptions(options);
   }
 };
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 234b66a..a5157df 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -157,11 +157,11 @@
   virtual ~CommonRuntimeTestBase() {}
 
  protected:
-  virtual void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonRuntimeTestImpl::SetUp();
   }
 
-  virtual void TearDown() OVERRIDE {
+  void TearDown() override {
     CommonRuntimeTestImpl::TearDown();
   }
 };
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 60975b0..012ebcb 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -25,7 +25,7 @@
 
 namespace art {
 
-class CompilerFilter FINAL {
+class CompilerFilter final {
  public:
   // Note: Order here matters. Later filter choices are considered "as good
   // as" earlier filter choices.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e607b31..366b5ec 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -138,7 +138,7 @@
   return os;
 }
 
-class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class DebugInstrumentationListener final : public instrumentation::InstrumentationListener {
  public:
   DebugInstrumentationListener() {}
   virtual ~DebugInstrumentationListener() {}
@@ -147,7 +147,7 @@
                      Handle<mirror::Object> this_object,
                      ArtMethod* method,
                      uint32_t dex_pc)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (method->IsNative()) {
       // TODO: post location events is a suspension point and native method entry stubs aren't.
       return;
@@ -176,7 +176,7 @@
                     ArtMethod* method,
                     uint32_t dex_pc,
                     const JValue& return_value)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (method->IsNative()) {
       // TODO: post location events is a suspension point and native method entry stubs aren't.
       return;
@@ -195,7 +195,7 @@
                     Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                     ArtMethod* method,
                     uint32_t dex_pc)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     // We're not recorded to listen to this kind of event, so complain.
     LOG(ERROR) << "Unexpected method unwind event in debugger " << ArtMethod::PrettyMethod(method)
                << " " << dex_pc;
@@ -205,7 +205,7 @@
                   Handle<mirror::Object> this_object,
                   ArtMethod* method,
                   uint32_t new_dex_pc)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
       // We also listen to kMethodExited instrumentation event and the current instruction is a
       // RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -229,7 +229,7 @@
                  ArtMethod* method,
                  uint32_t dex_pc,
                  ArtField* field)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     Dbg::PostFieldAccessEvent(method, dex_pc, this_object.Get(), field);
   }
 
@@ -239,19 +239,19 @@
                     uint32_t dex_pc,
                     ArtField* field,
                     const JValue& field_value)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     Dbg::PostFieldModificationEvent(method, dex_pc, this_object.Get(), field, &field_value);
   }
 
   void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
                        Handle<mirror::Throwable> exception_object)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     Dbg::PostException(exception_object.Get());
   }
 
   // We only care about branches in the Jit.
   void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     LOG(ERROR) << "Unexpected branch event in debugger " << ArtMethod::PrettyMethod(method)
                << " " << dex_pc << ", " << dex_pc_offset;
   }
@@ -262,20 +262,20 @@
                                 ArtMethod* method,
                                 uint32_t dex_pc,
                                 ArtMethod* target ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
                << " " << dex_pc;
   }
 
   // TODO Might be worth it to post ExceptionCatch event.
   void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
-                        Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) OVERRIDE {
+                        Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) override {
     LOG(ERROR) << "Unexpected exception handled event in debugger";
   }
 
   // TODO Might be worth it to implement this.
   void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
-                       const ShadowFrame& frame ATTRIBUTE_UNUSED) OVERRIDE {
+                       const ShadowFrame& frame ATTRIBUTE_UNUSED) override {
     LOG(ERROR) << "Unexpected WatchedFramePop event in debugger";
   }
 
@@ -1087,7 +1087,7 @@
  public:
   explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
 
-  bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!c->IsPrimitive()) {
       classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
     }
@@ -2450,7 +2450,7 @@
       expandBufAdd4BE(buf_, frame_count_);
     }
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       if (GetMethod()->IsRuntimeMethod()) {
         return true;  // The debugger can't do anything useful with a frame that has no Method*.
       }
@@ -2608,7 +2608,7 @@
 }
 
 // Walks the stack until we find the frame with the given FrameId.
-class FindFrameVisitor FINAL : public StackVisitor {
+class FindFrameVisitor final : public StackVisitor {
  public:
   FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -3040,7 +3040,7 @@
       throw_dex_pc_(dex::kDexNoIndex) {
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     DCHECK(method != nullptr);
     if (method->IsRuntimeMethod()) {
@@ -3693,7 +3693,7 @@
     : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
       needs_deoptimization_(false) {}
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     // The visitor is meant to be used when handling exception from compiled code only.
     CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: "
                             << ArtMethod::PrettyMethod(GetMethod());
diff --git a/runtime/debugger.h b/runtime/debugger.h
index e1de991..33444f8 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -54,20 +54,20 @@
 class Thread;
 
 struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
-  bool IsMethodBeingInspected(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsMethodSafeToJit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool MethodNeedsDebugVersion(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsMethodBeingInspected(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsMethodSafeToJit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool MethodNeedsDebugVersion(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 struct DebuggerDdmCallback : public DdmCallback {
   void DdmPublishChunk(uint32_t type, const ArrayRef<const uint8_t>& data)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+      override REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 struct InternalDebuggerControlCallback : public DebuggerControlCallback {
-  void StartDebugger() OVERRIDE;
-  void StopDebugger() OVERRIDE;
-  bool IsDebuggerConfigured() OVERRIDE;
+  void StartDebugger() override;
+  void StopDebugger() override;
+  bool IsDebuggerConfigured() override;
 };
 
 /*
@@ -831,15 +831,15 @@
 
   class DbgThreadLifecycleCallback : public ThreadLifecycleCallback {
    public:
-    void ThreadStart(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-    void ThreadDeath(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+    void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
+    void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
   };
 
   class DbgClassLoadCallback : public ClassLoadCallback {
    public:
-    void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+    void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
     void ClassPrepare(Handle<mirror::Class> temp_klass,
-                      Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+                      Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
   };
 
   static DbgThreadLifecycleCallback thread_lifecycle_callback_;
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 00a95cc..2cbf557 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -42,7 +42,7 @@
 // Test class that provides some helpers to set a test up for compilation using dex2oat.
 class Dex2oatEnvironmentTest : public CommonRuntimeTest {
  public:
-  virtual void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
     const ArtDexFileLoader dex_file_loader;
 
@@ -106,7 +106,7 @@
     ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
   }
 
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     // options->push_back(std::make_pair("-verbose:oat", nullptr));
 
     // Set up the image location.
@@ -117,7 +117,7 @@
     callbacks_.reset();
   }
 
-  virtual void TearDown() OVERRIDE {
+  void TearDown() override {
     ClearDirectory(odex_dir_.c_str());
     ASSERT_EQ(0, rmdir(odex_dir_.c_str()));
 
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 9e3159d..93af77f 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -20,6 +20,8 @@
 #include <backtrace/BacktraceMap.h>
 #include <gtest/gtest.h>
 
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
 #include "base/file_utils.h"
 #include "base/mem_map.h"
 #include "common_runtime_test.h"
@@ -27,6 +29,7 @@
 #include "dex2oat_environment_test.h"
 #include "dexopt_test.h"
 #include "gc/space/image_space.h"
+#include "hidden_api.h"
 
 namespace art {
 void DexoptTest::SetUp() {
@@ -45,6 +48,46 @@
   ReserveImageSpace();
 }
 
+static std::string ImageLocation() {
+  Runtime* runtime = Runtime::Current();
+  const std::vector<gc::space::ImageSpace*>& image_spaces =
+      runtime->GetHeap()->GetBootImageSpaces();
+  if (image_spaces.empty()) {
+    return "";
+  }
+  return image_spaces[0]->GetImageLocation();
+}
+
+bool DexoptTest::Dex2Oat(const std::vector<std::string>& args, std::string* error_msg) {
+  Runtime* runtime = Runtime::Current();
+
+  std::vector<std::string> argv;
+  argv.push_back(runtime->GetCompilerExecutable());
+  if (runtime->IsJavaDebuggable()) {
+    argv.push_back("--debuggable");
+  }
+  runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
+
+  if (runtime->GetHiddenApiEnforcementPolicy() != hiddenapi::EnforcementPolicy::kNoChecks) {
+    argv.push_back("--runtime-arg");
+    argv.push_back("-Xhidden-api-checks");
+  }
+
+  if (!kIsTargetBuild) {
+    argv.push_back("--host");
+  }
+
+  argv.push_back("--boot-image=" + ImageLocation());
+
+  std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
+  argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
+
+  argv.insert(argv.end(), args.begin(), args.end());
+
+  std::string command_line(android::base::Join(argv, ' '));
+  return Exec(argv, error_msg);
+}
+
 void DexoptTest::GenerateOatForTest(const std::string& dex_location,
                                     const std::string& oat_location_in,
                                     CompilerFilter::Filter filter,
@@ -96,7 +139,7 @@
   }
 
   std::string error_msg;
-  ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+  ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
 
   if (!relocate) {
     // Restore the dalvik cache if needed.
@@ -108,11 +151,11 @@
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                    oat_location.c_str(),
                                                    oat_location.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/false,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    dex_location.c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
   EXPECT_EQ(pic, odex_file->IsPic());
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 3203ee5..5dff379 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -26,11 +26,11 @@
 
 class DexoptTest : public Dex2oatEnvironmentTest {
  public:
-  virtual void SetUp() OVERRIDE;
+  void SetUp() override;
 
   virtual void PreRuntimeCreate();
 
-  virtual void PostRuntimeCreate() OVERRIDE;
+  void PostRuntimeCreate() override;
 
   // Generate an oat file for the purposes of test.
   // The oat file will be generated for dex_location in the given oat_location
@@ -71,6 +71,8 @@
   // Generate a standard oat file in the oat location.
   void GenerateOatForTest(const char* dex_location, CompilerFilter::Filter filter);
 
+  static bool Dex2Oat(const std::vector<std::string>& args, std::string* error_msg);
+
  private:
   // Pre-Relocate the image to a known non-zero offset so we don't have to
   // deal with the runtime randomly relocating the image by 0 and messing up
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index d45a689..e7715c4 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -37,9 +37,7 @@
 using android::base::StringPrintf;
 
 template <typename ElfTypes>
-ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable,
-                                   bool program_header_only,
-                                   uint8_t* requested_base)
+ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable, bool program_header_only)
   : writable_(writable),
     program_header_only_(program_header_only),
     header_(nullptr),
@@ -54,8 +52,7 @@
     dynstr_section_start_(nullptr),
     hash_section_start_(nullptr),
     symtab_symbol_table_(nullptr),
-    dynsym_symbol_table_(nullptr),
-    requested_base_(requested_base) {
+    dynsym_symbol_table_(nullptr) {
   CHECK(file != nullptr);
 }
 
@@ -64,10 +61,9 @@
                                                    bool writable,
                                                    bool program_header_only,
                                                    bool low_4gb,
-                                                   std::string* error_msg,
-                                                   uint8_t* requested_base) {
-  std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
-      (file, writable, program_header_only, requested_base));
+                                                   std::string* error_msg) {
+  std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(
+      new ElfFileImpl<ElfTypes>(file, writable, program_header_only));
   int prot;
   int flags;
   if (writable) {
@@ -89,9 +85,8 @@
                                                    int flags,
                                                    bool low_4gb,
                                                    std::string* error_msg) {
-  std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
-      (file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
-      /*requested_base*/nullptr));
+  std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(
+      new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only */ false));
   if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
     return nullptr;
   }
@@ -684,9 +679,7 @@
 typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::GetProgramHeader(Elf_Word i) const {
   CHECK_LT(i, GetProgramHeaderNum()) << file_path_;  // Sanity check for caller.
   uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
-  if (program_header >= End()) {
-    return nullptr;  // Failure condition.
-  }
+  CHECK_LT(program_header, End());
   return reinterpret_cast<Elf_Phdr*>(program_header);
 }
 
@@ -1027,9 +1020,17 @@
   return *(GetRelaSectionStart(section_header) + i);
 }
 
-// Based on bionic phdr_table_get_load_size
 template <typename ElfTypes>
 bool ElfFileImpl<ElfTypes>::GetLoadedSize(size_t* size, std::string* error_msg) const {
+  uint8_t* vaddr_begin;
+  return GetLoadedAddressRange(&vaddr_begin, size, error_msg);
+}
+
+// Based on bionic phdr_table_get_load_size
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::GetLoadedAddressRange(/*out*/uint8_t** vaddr_begin,
+                                                  /*out*/size_t* vaddr_size,
+                                                  /*out*/std::string* error_msg) const {
   Elf_Addr min_vaddr = static_cast<Elf_Addr>(-1);
   Elf_Addr max_vaddr = 0u;
   for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
@@ -1048,7 +1049,8 @@
           << program_header->p_vaddr << "+0x" << program_header->p_memsz << "=0x" << end_vaddr
           << " in ELF file \"" << file_path_ << "\"";
       *error_msg = oss.str();
-      *size = static_cast<size_t>(-1);
+      *vaddr_begin = nullptr;
+      *vaddr_size = static_cast<size_t>(-1);
       return false;
     }
     if (end_vaddr > max_vaddr) {
@@ -1058,17 +1060,19 @@
   min_vaddr = RoundDown(min_vaddr, kPageSize);
   max_vaddr = RoundUp(max_vaddr, kPageSize);
   CHECK_LT(min_vaddr, max_vaddr) << file_path_;
-  Elf_Addr loaded_size = max_vaddr - min_vaddr;
-  // Check that the loaded_size fits in size_t.
-  if (UNLIKELY(loaded_size > std::numeric_limits<size_t>::max())) {
+  // Check that the range fits into the runtime address space.
+  if (UNLIKELY(max_vaddr - 1u > std::numeric_limits<size_t>::max())) {
     std::ostringstream oss;
-    oss << "Loaded size is 0x" << std::hex << loaded_size << " but maximum size_t is 0x"
-        << std::numeric_limits<size_t>::max() << " for ELF file \"" << file_path_ << "\"";
+    oss << "Loaded range is 0x" << std::hex << min_vaddr << "-0x" << max_vaddr
+        << " but maximum size_t is 0x" << std::numeric_limits<size_t>::max()
+        << " for ELF file \"" << file_path_ << "\"";
     *error_msg = oss.str();
-    *size = static_cast<size_t>(-1);
+    *vaddr_begin = nullptr;
+    *vaddr_size = static_cast<size_t>(-1);
     return false;
   }
-  *size = loaded_size;
+  *vaddr_begin = reinterpret_cast<uint8_t*>(min_vaddr);
+  *vaddr_size = dchecked_integral_cast<size_t>(max_vaddr - min_vaddr);
   return true;
 }
 
@@ -1099,7 +1103,8 @@
 bool ElfFileImpl<ElfTypes>::Load(File* file,
                                  bool executable,
                                  bool low_4gb,
-                                 std::string* error_msg) {
+                                 /*inout*/MemMap* reservation,
+                                 /*out*/std::string* error_msg) {
   CHECK(program_header_only_) << file->GetPath();
 
   if (executable) {
@@ -1115,11 +1120,6 @@
   bool reserved = false;
   for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
     Elf_Phdr* program_header = GetProgramHeader(i);
-    if (program_header == nullptr) {
-      *error_msg = StringPrintf("No program header for entry %d in ELF file %s.",
-                                i, file->GetPath().c_str());
-      return false;
-    }
 
     // Record .dynamic header information for later use
     if (program_header->p_type == PT_DYNAMIC) {
@@ -1150,41 +1150,39 @@
     }
     size_t file_length = static_cast<size_t>(temp_file_length);
     if (!reserved) {
-      uint8_t* reserve_base = reinterpret_cast<uint8_t*>(program_header->p_vaddr);
-      uint8_t* reserve_base_override = reserve_base;
-      // Override the base (e.g. when compiling with --compile-pic)
-      if (requested_base_ != nullptr) {
-        reserve_base_override = requested_base_;
-      }
-      std::string reservation_name("ElfFile reservation for ");
-      reservation_name += file->GetPath();
-      size_t loaded_size;
-      if (!GetLoadedSize(&loaded_size, error_msg)) {
+      uint8_t* vaddr_begin;
+      size_t vaddr_size;
+      if (!GetLoadedAddressRange(&vaddr_begin, &vaddr_size, error_msg)) {
         DCHECK(!error_msg->empty());
         return false;
       }
-      MemMap reserve = MemMap::MapAnonymous(reservation_name.c_str(),
-                                            reserve_base_override,
-                                            loaded_size,
-                                            PROT_NONE,
-                                            low_4gb,
-                                            error_msg);
-      if (!reserve.IsValid()) {
+      std::string reservation_name = "ElfFile reservation for " + file->GetPath();
+      MemMap local_reservation = MemMap::MapAnonymous(
+          reservation_name.c_str(),
+          (reservation != nullptr) ? reservation->Begin() : nullptr,
+          vaddr_size,
+          PROT_NONE,
+          low_4gb,
+          /* reuse */ false,
+          reservation,
+          error_msg);
+      if (!local_reservation.IsValid()) {
         *error_msg = StringPrintf("Failed to allocate %s: %s",
-                                  reservation_name.c_str(), error_msg->c_str());
+                                  reservation_name.c_str(),
+                                  error_msg->c_str());
         return false;
       }
       reserved = true;
 
-      // Base address is the difference of actual mapped location and the p_vaddr
-      base_address_ = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(reserve.Begin())
-                       - reinterpret_cast<uintptr_t>(reserve_base));
+      // Base address is the difference of actual mapped location and the vaddr_begin.
+      base_address_ = reinterpret_cast<uint8_t*>(
+          static_cast<uintptr_t>(local_reservation.Begin() - vaddr_begin));
       // By adding the p_vaddr of a section/symbol to base_address_ we will always get the
       // dynamic memory address of where that object is actually mapped
       //
       // TODO: base_address_ needs to be calculated in ::Open, otherwise
       // FindDynamicSymbolAddress returns the wrong values until Load is called.
-      segments_.push_back(std::move(reserve));
+      segments_.push_back(std::move(local_reservation));
     }
     // empty segment, nothing to map
     if (program_header->p_memsz == 0) {
@@ -1239,9 +1237,10 @@
                                    flags,
                                    file->Fd(),
                                    program_header->p_offset,
-                                   /*low4_gb*/false,
-                                   /*reuse*/true,  // implies MAP_FIXED
+                                   /* low4_gb */ false,
                                    file->GetPath().c_str(),
+                                   /* reuse */ true,  // implies MAP_FIXED
+                                   /* reservation */ nullptr,
                                    error_msg);
       if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
@@ -1265,6 +1264,7 @@
                                             prot,
                                             /* low_4gb */ false,
                                             /* reuse */ true,
+                                            /* reservation */ nullptr,
                                             error_msg);
       if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
@@ -1704,8 +1704,7 @@
                        bool writable,
                        bool program_header_only,
                        bool low_4gb,
-                       std::string* error_msg,
-                       uint8_t* requested_base) {
+                       /*out*/std::string* error_msg) {
   if (file->GetLength() < EI_NIDENT) {
     *error_msg = StringPrintf("File %s is too short to be a valid ELF file",
                               file->GetPath().c_str());
@@ -1728,8 +1727,7 @@
                                                        writable,
                                                        program_header_only,
                                                        low_4gb,
-                                                       error_msg,
-                                                       requested_base);
+                                                       error_msg);
     if (elf_file_impl == nullptr) {
       return nullptr;
     }
@@ -1739,8 +1737,7 @@
                                                        writable,
                                                        program_header_only,
                                                        low_4gb,
-                                                       error_msg,
-                                                       requested_base);
+                                                       error_msg);
     if (elf_file_impl == nullptr) {
       return nullptr;
     }
@@ -1754,7 +1751,7 @@
   }
 }
 
-ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, std::string* error_msg) {
+ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, /*out*/std::string* error_msg) {
   // low_4gb support not required for this path.
   constexpr bool low_4gb = false;
   if (file->GetLength() < EI_NIDENT) {
@@ -1811,8 +1808,12 @@
     return elf32_->func(__VA_ARGS__); \
   }
 
-bool ElfFile::Load(File* file, bool executable, bool low_4gb, std::string* error_msg) {
-  DELEGATE_TO_IMPL(Load, file, executable, low_4gb, error_msg);
+bool ElfFile::Load(File* file,
+                   bool executable,
+                   bool low_4gb,
+                   /*inout*/MemMap* reservation,
+                   /*out*/std::string* error_msg) {
+  DELEGATE_TO_IMPL(Load, file, executable, low_4gb, reservation, error_msg);
 }
 
 const uint8_t* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index ab9e6fa..8da7e1a 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -26,6 +26,9 @@
 #include "./elf.h"
 
 namespace art {
+
+class MemMap;
+
 template <typename ElfTypes>
 class ElfFileImpl;
 
@@ -42,18 +45,21 @@
                        bool writable,
                        bool program_header_only,
                        bool low_4gb,
-                       std::string* error_msg,
-                       uint8_t* requested_base = nullptr);  // TODO: move arg to before error_msg.
+                       /*out*/std::string* error_msg);
   // Open with specific mmap flags, Always maps in the whole file, not just the
   // program header sections.
   static ElfFile* Open(File* file,
                        int mmap_prot,
                        int mmap_flags,
-                       std::string* error_msg);
+                       /*out*/std::string* error_msg);
   ~ElfFile();
 
   // Load segments into memory based on PT_LOAD program headers
-  bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg);
+  bool Load(File* file,
+            bool executable,
+            bool low_4gb,
+            /*inout*/MemMap* reservation,
+            /*out*/std::string* error_msg);
 
   const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const;
 
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 58c38a4..b55b60f 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -48,13 +48,12 @@
                            bool writable,
                            bool program_header_only,
                            bool low_4gb,
-                           std::string* error_msg,
-                           uint8_t* requested_base = nullptr);
+                           /*out*/std::string* error_msg);
   static ElfFileImpl* Open(File* file,
                            int mmap_prot,
                            int mmap_flags,
                            bool low_4gb,
-                           std::string* error_msg);
+                           /*out*/std::string* error_msg);
   ~ElfFileImpl();
 
   const std::string& GetFilePath() const {
@@ -115,7 +114,11 @@
 
   // Load segments into memory based on PT_LOAD program headers.
   // executable is true at run time, false at compile time.
-  bool Load(File* file, bool executable, bool low_4gb, std::string* error_msg);
+  bool Load(File* file,
+            bool executable,
+            bool low_4gb,
+            /*inout*/MemMap* reservation,
+            /*out*/std::string* error_msg);
 
   bool Fixup(Elf_Addr base_address);
   bool FixupDynamic(Elf_Addr base_address);
@@ -131,7 +134,11 @@
   bool Strip(File* file, std::string* error_msg);
 
  private:
-  ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base);
+  ElfFileImpl(File* file, bool writable, bool program_header_only);
+
+  bool GetLoadedAddressRange(/*out*/uint8_t** vaddr_begin,
+                             /*out*/size_t* vaddr_size,
+                             /*out*/std::string* error_msg) const;
 
   bool Setup(File* file, int prot, int flags, bool low_4gb, std::string* error_msg);
 
@@ -217,9 +224,6 @@
   SymbolTable* symtab_symbol_table_;
   SymbolTable* dynsym_symbol_table_;
 
-  // Override the 'base' p_vaddr in the first LOAD segment with this value (if non-null).
-  uint8_t* requested_base_;
-
   DISALLOW_COPY_AND_ASSIGN(ElfFileImpl);
 };
 
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index aca169b..fccfce4 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -615,13 +615,13 @@
 }
 
 // Visits arguments on the stack placing them into the shadow frame.
-class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor {
  public:
   BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                                uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
 
  private:
   ShadowFrame* const sf_;
@@ -707,7 +707,7 @@
       explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
           : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-      bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
         // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
         // logic. Just always say we want to continue.
         return true;
@@ -824,13 +824,13 @@
 
 // Visits arguments on the stack placing them into the args vector, Object* arguments are converted
 // to jobjects.
-class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickArgumentVisitor final : public QuickArgumentVisitor {
  public:
   BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
                             ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
 
  private:
   ScopedObjectAccessUnchecked* const soa_;
@@ -959,7 +959,7 @@
 
 // Visitor returning a reference argument at a given position in a Quick stack frame.
 // NOTE: Only used for testing purposes.
-class GetQuickReferenceArgumentAtVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
  public:
   GetQuickReferenceArgumentAtVisitor(ArtMethod** sp,
                                      const char* shorty,
@@ -972,7 +972,7 @@
           CHECK_LT(arg_pos, shorty_len) << "Argument position greater than the number of arguments";
         }
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
     if (cur_pos_ == arg_pos_) {
       Primitive::Type type = GetParamPrimitiveType();
       CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
@@ -1014,7 +1014,7 @@
 }
 
 // Visitor returning all the reference arguments in a Quick stack frame.
-class GetQuickReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor {
  public:
   GetQuickReferenceArgumentsVisitor(ArtMethod** sp,
                                     bool is_static,
@@ -1022,7 +1022,7 @@
                                     uint32_t shorty_len)
       : QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {}
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
     Primitive::Type type = GetParamPrimitiveType();
     if (type == Primitive::kPrimNot) {
       StackReference<mirror::Object>* ref_arg =
@@ -1059,13 +1059,13 @@
 
 // Read object references held in arguments from quick frames and place them in JNI local references,
 // so they don't get garbage collected.
-class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
+class RememberForGcArgumentVisitor final : public QuickArgumentVisitor {
  public:
   RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                                uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
 
   void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1957,7 +1957,7 @@
   uint32_t num_stack_entries_;
 };
 
-class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
+class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
  public:
   explicit ComputeGenericJniFrameSize(bool critical_native)
     : num_handle_scope_references_(0), critical_native_(critical_native) {}
@@ -2038,10 +2038,10 @@
     return sp8;
   }
 
-  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
+  uintptr_t PushHandle(mirror::Object* /* ptr */) override;
 
   // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
-  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
+  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
@@ -2117,7 +2117,7 @@
 
 // Visits arguments on the stack placing them into a region lower down the stack for the benefit
 // of transitioning into native code.
-class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
  public:
   BuildGenericJniFrameVisitor(Thread* self,
                               bool is_static,
@@ -2150,7 +2150,7 @@
     }
   }
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
 
   void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -2168,7 +2168,7 @@
 
  private:
   // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
-  class FillJniCall FINAL : public FillNativeCall {
+  class FillJniCall final : public FillNativeCall {
    public:
     FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
                 HandleScope* handle_scope, bool critical_native)
@@ -2177,7 +2177,7 @@
         cur_entry_(0),
         critical_native_(critical_native) {}
 
-    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+    uintptr_t PushHandle(mirror::Object* ref) override REQUIRES_SHARED(Locks::mutator_lock_);
 
     void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
       FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 89694e3..0f0fb69 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -26,7 +26,7 @@
 
 class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     // Use 64-bit ISA for runtime setup to make method size potentially larger
     // than necessary (rather than smaller) during CreateCalleeSaveMethod
     options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -35,7 +35,7 @@
   // Do not do any of the finalization. We don't want to run any code and we don't need the heap
   // prepared; it would actually be a problem with the instruction set being set to x86_64 in
   // SetUpRuntimeOptions.
-  void FinalizeSetup() OVERRIDE {
+  void FinalizeSetup() override {
     ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
   }
 
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 3e2664c..02eeefe 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -90,11 +90,11 @@
   DISALLOW_COPY_AND_ASSIGN(FaultHandler);
 };
 
-class NullPointerHandler FINAL : public FaultHandler {
+class NullPointerHandler final : public FaultHandler {
  public:
   explicit NullPointerHandler(FaultManager* manager);
 
-  bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+  bool Action(int sig, siginfo_t* siginfo, void* context) override;
 
   static bool IsValidImplicitCheck(siginfo_t* siginfo) {
     // Our implicit NPE checks always limit the range to a page.
@@ -108,31 +108,31 @@
   DISALLOW_COPY_AND_ASSIGN(NullPointerHandler);
 };
 
-class SuspensionHandler FINAL : public FaultHandler {
+class SuspensionHandler final : public FaultHandler {
  public:
   explicit SuspensionHandler(FaultManager* manager);
 
-  bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+  bool Action(int sig, siginfo_t* siginfo, void* context) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(SuspensionHandler);
 };
 
-class StackOverflowHandler FINAL : public FaultHandler {
+class StackOverflowHandler final : public FaultHandler {
  public:
   explicit StackOverflowHandler(FaultManager* manager);
 
-  bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+  bool Action(int sig, siginfo_t* siginfo, void* context) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(StackOverflowHandler);
 };
 
-class JavaStackTraceHandler FINAL : public FaultHandler {
+class JavaStackTraceHandler final : public FaultHandler {
  public:
   explicit JavaStackTraceHandler(FaultManager* manager);
 
-  bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+  bool Action(int sig, siginfo_t* siginfo, void* context) override NO_THREAD_SAFETY_ANALYSIS;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(JavaStackTraceHandler);
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index 3a09634..f0a82e0 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -33,7 +33,7 @@
                                            space::ContinuousSpace* space)
       : ModUnionTableReferenceCache(name, heap, space) {}
 
-  bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE ALWAYS_INLINE {
+  bool ShouldAddReference(const mirror::Object* ref) const override ALWAYS_INLINE {
     return !space_->HasAddress(ref);
   }
 };
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 0dd05cd..40dc6e1 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -329,8 +329,8 @@
 
 class EmptyMarkObjectVisitor : public MarkObjectVisitor {
  public:
-  mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {return obj;}
-  void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {}
+  mirror::Object* MarkObject(mirror::Object* obj) override {return obj;}
+  void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {}
 };
 
 void ModUnionTable::FilterCards() {
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 7a3c06a..8c471bc 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -125,33 +125,33 @@
   virtual ~ModUnionTableReferenceCache() {}
 
   // Clear and store cards for a space.
-  void ProcessCards() OVERRIDE;
+  void ProcessCards() override;
 
   // Update table based on cleared cards and mark all references to the other spaces.
-  void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+  void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_);
 
-  virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+  void VisitObjects(ObjectCallback callback, void* arg) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
   // VisitMarkedRange can't know if the callback will modify the bitmap or not.
-  void Verify() OVERRIDE
+  void Verify() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_);
 
   // Function that tells whether or not to add a reference to the table.
   virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
 
-  virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+  bool ContainsCardFor(uintptr_t addr) override;
 
-  virtual void Dump(std::ostream& os) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  void Dump(std::ostream& os) override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void SetCards() OVERRIDE;
+  void SetCards() override;
 
-  virtual void ClearTable() OVERRIDE;
+  void ClearTable() override;
 
  protected:
   // Cleared card array, used to update the mod-union table.
@@ -172,27 +172,27 @@
   virtual ~ModUnionTableCardCache() {}
 
   // Clear and store cards for a space.
-  virtual void ProcessCards() OVERRIDE;
+  void ProcessCards() override;
 
   // Mark all references to the alloc space(s).
-  virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+  void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+  void VisitObjects(ObjectCallback callback, void* arg) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Nothing to verify.
-  virtual void Verify() OVERRIDE {}
+  void Verify() override {}
 
-  virtual void Dump(std::ostream& os) OVERRIDE;
+  void Dump(std::ostream& os) override;
 
-  virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+  bool ContainsCardFor(uintptr_t addr) override;
 
-  virtual void SetCards() OVERRIDE;
+  void SetCards() override;
 
-  virtual void ClearTable() OVERRIDE;
+  void ClearTable() override;
 
  protected:
   // Cleared card bitmap, used to update the mod-union table.
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index d59ff71..2a382d7 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -97,13 +97,13 @@
 class CollectVisitedVisitor : public MarkObjectVisitor {
  public:
   explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                 bool do_atomic_update ATTRIBUTE_UNUSED) OVERRIDE
+  void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                         bool do_atomic_update ATTRIBUTE_UNUSED) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(ref != nullptr);
     MarkObject(ref->AsMirrorPtr());
   }
-  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+  mirror::Object* MarkObject(mirror::Object* obj) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
     out_->insert(obj);
@@ -122,7 +122,7 @@
       space::ContinuousSpace* target_space)
       : ModUnionTableReferenceCache(name, heap, space), target_space_(target_space) {}
 
-  bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE {
+  bool ShouldAddReference(const mirror::Object* ref) const override {
     return target_space_->HasAddress(ref);
   }
 
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index a1d1986..b9c1dc6 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -196,7 +196,7 @@
 
   // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
   // annotalysis.
-  bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
     if (trace_->GetDepth() >= max_depth_) {
       return false;
     }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c7a5f79..b4453d9 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -229,7 +229,7 @@
   explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
       : concurrent_copying_(concurrent_copying) {}
 
-  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
     DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -250,7 +250,7 @@
   explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
       : concurrent_copying_(concurrent_copying) {}
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
     // to avoid a race with ThreadList::Register().
     CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
@@ -393,7 +393,7 @@
       : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
   }
 
-  virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -467,7 +467,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
     ConcurrentCopying* cc = concurrent_copying_;
     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
     // Note: self is not necessarily equal to thread since thread may be suspended.
@@ -890,6 +890,21 @@
       }
       // Scan all of the objects on dirty cards in unevac from space, and non moving space. These
       // are from previous GCs and may reference things in the from space.
+      //
+      // Note that we do not need to process the large-object space (the only discontinuous space)
+      // as it contains only large string objects and large primitive array objects, which hold no
+      // references to other objects except their class. There is no need to scan these large
+      // objects, as the String class and the primitive array classes are expected to never move
+      // during a minor (young-generation) collection:
+      // - In the case where we run with a boot image, these classes are part of the image space,
+      //   which is an immune space.
+      // - In the case where we run without a boot image, these classes are allocated in the region
+      //   space (main space), but they are not expected to move during a minor collection (this
+      //   would only happen if those classes were allocated between a major and a minor
+      //   collection, which is unlikely -- we don't expect any GC to happen before these
+      //   fundamental classes are initialized). Note that these classes could move during a major
+      //   collection, but this is fine: in that case, the whole heap is traced and the card
+      //   table logic below is not used.
       Runtime::Current()->GetHeap()->GetCardTable()->Scan<false>(
           space->GetMarkBitmap(),
           space->Begin(),
@@ -1072,7 +1087,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
     DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1096,7 +1111,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
     // to avoid a race with ThreadList::Register().
     CHECK(concurrent_copying_->is_marking_);
@@ -1291,7 +1306,7 @@
   }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(root != nullptr);
     operator()(root);
   }
@@ -1457,7 +1472,7 @@
         disable_weak_ref_access_(disable_weak_ref_access) {
   }
 
-  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1727,7 +1742,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
     // to avoid a deadlock b/31500969.
     CHECK(concurrent_copying_->weak_ref_access_enabled_);
@@ -2333,10 +2348,13 @@
       // If `ref` is on the allocation stack, then it may not be
       // marked live, but considered marked/alive (but not
       // necessarily on the live stack).
-      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack."
-                                 << " obj=" << obj
-                                 << " ref=" << ref
-                                 << " is_los=" << std::boolalpha << is_los << std::noboolalpha;
+      CHECK(IsOnAllocStack(ref))
+          << "Unmarked ref that's not on the allocation stack."
+          << " obj=" << obj
+          << " ref=" << ref
+          << " is_los=" << std::boolalpha << is_los << std::noboolalpha
+          << " is_marking=" << std::boolalpha << is_marking_ << std::noboolalpha
+          << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha;
     }
   }
 }
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 0ebe6f0..1a7464a 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -71,7 +71,7 @@
                              bool measure_read_barrier_slow_path = false);
   ~ConcurrentCopying();
 
-  virtual void RunPhases() OVERRIDE
+  void RunPhases() override
       REQUIRES(!immune_gray_stack_lock_,
                !mark_stack_lock_,
                !rb_slow_path_histogram_lock_,
@@ -87,15 +87,15 @@
 
   void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::heap_bitmap_lock_);
-  virtual GcType GetGcType() const OVERRIDE {
+  GcType GetGcType() const override {
     return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
         ? kGcTypeSticky
         : kGcTypePartial;
   }
-  virtual CollectorType GetCollectorType() const OVERRIDE {
+  CollectorType GetCollectorType() const override {
     return kCollectorTypeCC;
   }
-  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
+  void RevokeAllThreadLocalBuffers() override;
   void SetRegionSpace(space::RegionSpace* region_space) {
     DCHECK(region_space != nullptr);
     region_space_ = region_space;
@@ -144,7 +144,7 @@
   void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
 
-  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
+  mirror::Object* IsMarked(mirror::Object* from_ref) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
@@ -166,21 +166,22 @@
   void Process(mirror::Object* obj, MemberOffset offset)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
-  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   template<bool kGrayImmuneObject>
   void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
-  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                          const RootInfo& info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                  size_t count,
+                  const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
   accounting::ObjectStack* GetAllocationStack();
   accounting::ObjectStack* GetLiveStack();
-  virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+  void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
   bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -204,21 +205,21 @@
   void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
   void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
-  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
-                                      ObjPtr<mirror::Reference> reference) OVERRIDE
+  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+                              ObjPtr<mirror::Reference> reference) override
       REQUIRES_SHARED(Locks::mutator_lock_);
   void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
-  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
+  mirror::Object* MarkObject(mirror::Object* from_ref) override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
-                                 bool do_atomic_update) OVERRIDE
+  void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
+                         bool do_atomic_update) override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
-                                           bool do_atomic_update) OVERRIDE
+  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+                                   bool do_atomic_update) override
       REQUIRES_SHARED(Locks::mutator_lock_);
   void SweepSystemWeaks(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
@@ -293,7 +294,7 @@
                                                       mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
-  void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
+  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
   // Set the read barrier mark entrypoints to non-null.
   void ActivateReadBarrierEntrypoints();
 
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 145bd02..7bd87bd 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -41,13 +41,13 @@
 class DummyImageSpace : public space::ImageSpace {
  public:
   DummyImageSpace(MemMap&& map,
-                  accounting::ContinuousSpaceBitmap* live_bitmap,
+                  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
                   std::unique_ptr<DummyOatFile>&& oat_file,
                   MemMap&& oat_map)
       : ImageSpace("DummyImageSpace",
                    /*image_location*/"",
                    std::move(map),
-                   live_bitmap,
+                   std::move(live_bitmap),
                    map.End()),
         oat_map_(std::move(oat_map)) {
     oat_file_ = std::move(oat_file);
@@ -130,7 +130,7 @@
         ImageHeader::kStorageModeUncompressed,
         /*storage_size*/0u);
     return new DummyImageSpace(std::move(map),
-                               live_bitmap.release(),
+                               std::move(live_bitmap),
                                std::move(oat_file),
                                std::move(oat_map));
   }
@@ -167,19 +167,19 @@
                         end,
                         /*limit*/end) {}
 
-  space::SpaceType GetType() const OVERRIDE {
+  space::SpaceType GetType() const override {
     return space::kSpaceTypeMallocSpace;
   }
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return false;
   }
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return nullptr;
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     return nullptr;
   }
 };
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 58a75ee..23b2719 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -578,7 +578,7 @@
  public:
   explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
 
-  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+  void VisitRoot(mirror::Object* root, const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
   }
@@ -607,7 +607,7 @@
  public:
   explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}
 
-  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+  void VisitRoot(mirror::Object* root, const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // See if the root is on any space bitmap.
     auto* heap = Runtime::Current()->GetHeap();
@@ -1109,8 +1109,7 @@
  public:
   explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
 
-  virtual mirror::Object* IsMarked(mirror::Object* obj)
-      OVERRIDE
+  mirror::Object* IsMarked(mirror::Object* obj) override
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     mark_sweep_->VerifyIsLive(obj);
     return obj;
@@ -1144,7 +1143,7 @@
   }
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+      override REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
@@ -1154,14 +1153,14 @@
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+      override REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
     }
   }
 
-  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     ScopedTrace trace("Marking thread roots");
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* const self = Thread::Current();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index af2bb97..ff9597c 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -57,7 +57,7 @@
 
   ~MarkSweep() {}
 
-  virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
+  void RunPhases() override REQUIRES(!mark_stack_lock_);
   void InitializePhase();
   void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
   void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
@@ -72,11 +72,11 @@
     return is_concurrent_;
   }
 
-  virtual GcType GetGcType() const OVERRIDE {
+  GcType GetGcType() const override {
     return kGcTypeFull;
   }
 
-  virtual CollectorType GetCollectorType() const OVERRIDE {
+  CollectorType GetCollectorType() const override {
     return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
   }
 
@@ -187,25 +187,25 @@
   void VerifyIsLive(const mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
-  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                           bool do_atomic_update) OVERRIDE
+  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                                   bool do_atomic_update) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
-                          size_t count,
-                          const RootInfo& info) OVERRIDE
+  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                  size_t count,
+                  const RootInfo& info) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Marks an object.
-  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+  mirror::Object* MarkObject(mirror::Object* obj) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -215,8 +215,8 @@
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                 bool do_atomic_update) OVERRIDE
+  void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                         bool do_atomic_update) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -231,7 +231,7 @@
 
  protected:
   // Returns object if the object is marked in the heap bitmap, otherwise null.
-  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+  mirror::Object* IsMarked(mirror::Object* object) override
       REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   void MarkObjectNonNull(mirror::Object* obj,
@@ -278,8 +278,7 @@
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void ProcessMarkStack()
-      OVERRIDE
+  void ProcessMarkStack() override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 8b0d3dd..76c44a3 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -26,7 +26,7 @@
 class PartialMarkSweep : public MarkSweep {
  public:
   // Virtual as overridden by StickyMarkSweep.
-  virtual GcType GetGcType() const OVERRIDE {
+  GcType GetGcType() const override {
     return kGcTypePartial;
   }
 
@@ -37,7 +37,7 @@
   // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
   // collections, i.e. the Zygote space. Also mark this space as immune. Virtual as overridden by
   // StickyMarkSweep.
-  virtual void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index d1d45c8..bb42be6 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -63,7 +63,7 @@
 
   ~SemiSpace() {}
 
-  virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+  void RunPhases() override NO_THREAD_SAFETY_ANALYSIS;
   virtual void InitializePhase();
   virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_)
       REQUIRES(!Locks::heap_bitmap_lock_);
@@ -72,10 +72,10 @@
   virtual void FinishPhase() REQUIRES(Locks::mutator_lock_);
   void MarkReachableObjects()
       REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-  virtual GcType GetGcType() const OVERRIDE {
+  GcType GetGcType() const override {
     return kGcTypePartial;
   }
-  virtual CollectorType GetCollectorType() const OVERRIDE {
+  CollectorType GetCollectorType() const override {
     return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
   }
 
@@ -106,11 +106,11 @@
   void MarkObjectIfNotInToSpace(CompressedReferenceType* obj_ptr)
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
+  mirror::Object* MarkObject(mirror::Object* root) override
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
-                                 bool do_atomic_update) OVERRIDE
+  void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
+                         bool do_atomic_update) override
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   void ScanObject(mirror::Object* obj)
@@ -145,11 +145,12 @@
   void SweepSystemWeaks()
       REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
       REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
-  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                          const RootInfo& info) OVERRIDE
+  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                  size_t count,
+                  const RootInfo& info) override
       REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
@@ -162,12 +163,12 @@
  protected:
   // Returns null if the object is not marked, otherwise returns the forwarding address (same as
   // object for non movable things).
-  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+  mirror::Object* IsMarked(mirror::Object* object) override
       REQUIRES(Locks::mutator_lock_)
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
-  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
-                                           bool do_atomic_update) OVERRIDE
+  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+                                   bool do_atomic_update) override
       REQUIRES(Locks::mutator_lock_)
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 45f912f..f65413d 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -24,17 +24,16 @@
 namespace gc {
 namespace collector {
 
-class StickyMarkSweep FINAL : public PartialMarkSweep {
+class StickyMarkSweep final : public PartialMarkSweep {
  public:
-  GcType GetGcType() const OVERRIDE {
+  GcType GetGcType() const override {
     return kGcTypeSticky;
   }
 
   StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
   ~StickyMarkSweep() {}
 
-  virtual void MarkConcurrentRoots(VisitRootFlags flags)
-      OVERRIDE
+  void MarkConcurrentRoots(VisitRootFlags flags) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -42,15 +41,15 @@
  protected:
   // Bind the live bits to the mark bits of bitmaps for all spaces; all spaces other than the
   // alloc space will be marked as immune.
-  void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
 
   void MarkReachableObjects()
-      OVERRIDE
+      override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void Sweep(bool swap_bitmaps)
-      OVERRIDE
+      override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf06cf9..589e9a4 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -312,39 +312,7 @@
   ChangeCollector(desired_collector_type_);
   live_bitmap_.reset(new accounting::HeapBitmap(this));
   mark_bitmap_.reset(new accounting::HeapBitmap(this));
-  // Requested begin for the alloc space, to follow the mapped image and oat files
-  uint8_t* requested_alloc_space_begin = nullptr;
-  if (foreground_collector_type_ == kCollectorTypeCC) {
-    // Need to use a low address so that we can allocate a contiguous 2 * Xmx space when there's no
-    // image (dex2oat for target).
-    requested_alloc_space_begin = kPreferredAllocSpaceBegin;
-  }
 
-  // Load image space(s).
-  std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
-  if (space::ImageSpace::LoadBootImage(image_file_name,
-                                       image_instruction_set,
-                                       &boot_image_spaces,
-                                       &requested_alloc_space_begin)) {
-    for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
-      boot_image_spaces_.push_back(space.get());
-      AddSpace(space.release());
-    }
-  }
-
-  /*
-  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
-                                     +-  nonmoving space (non_moving_space_capacity)+-
-                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
-                                     +-????????????????????????????????????????????+-
-                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
-                                     +-main alloc space / bump space 1 (capacity_) +-
-                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
-                                     +-????????????????????????????????????????????+-
-                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
-                                     +-main alloc space2 / bump space 2 (capacity_)+-
-                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
-  */
   // We don't have hspace compaction enabled with GSS or CC.
   if (foreground_collector_type_ == kCollectorTypeGSS ||
       foreground_collector_type_ == kCollectorTypeCC) {
@@ -363,21 +331,63 @@
   if (foreground_collector_type_ == kCollectorTypeGSS) {
     separate_non_moving_space = false;
   }
+
+  // Requested begin for the alloc space, to follow the mapped image and oat files
+  uint8_t* request_begin = nullptr;
+  // Calculate the extra space required after the boot image, see allocations below.
+  size_t heap_reservation_size = separate_non_moving_space
+      ? non_moving_space_capacity
+      : ((is_zygote && foreground_collector_type_ != kCollectorTypeCC) ? capacity_ : 0u);
+  heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
+  // Load image space(s).
+  std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
+  MemMap heap_reservation;
+  if (space::ImageSpace::LoadBootImage(image_file_name,
+                                       image_instruction_set,
+                                       heap_reservation_size,
+                                       &boot_image_spaces,
+                                       &heap_reservation)) {
+    DCHECK_EQ(heap_reservation_size, heap_reservation.IsValid() ? heap_reservation.Size() : 0u);
+    DCHECK(!boot_image_spaces.empty());
+    request_begin = boot_image_spaces.back()->GetImageHeader().GetOatFileEnd();
+    DCHECK(!heap_reservation.IsValid() || request_begin == heap_reservation.Begin())
+        << "request_begin=" << static_cast<const void*>(request_begin)
+        << " heap_reservation.Begin()=" << static_cast<const void*>(heap_reservation.Begin());
+    for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
+      boot_image_spaces_.push_back(space.get());
+      AddSpace(space.release());
+    }
+  } else {
+    if (foreground_collector_type_ == kCollectorTypeCC) {
+      // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
+      // when there's no image (dex2oat for target).
+      request_begin = kPreferredAllocSpaceBegin;
+    }
+    // Gross hack to make dex2oat deterministic.
+    if (foreground_collector_type_ == kCollectorTypeMS && Runtime::Current()->IsAotCompiler()) {
+      // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
+      // b/26849108
+      request_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
+    }
+  }
+
+  /*
+  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-  nonmoving space (non_moving_space_capacity)+-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-????????????????????????????????????????????+-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-main alloc space / bump space 1 (capacity_) +-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-????????????????????????????????????????????+-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-main alloc space2 / bump space 2 (capacity_)+-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+  */
+
   MemMap main_mem_map_1;
   MemMap main_mem_map_2;
 
-  // Gross hack to make dex2oat deterministic.
-  if (foreground_collector_type_ == kCollectorTypeMS &&
-      requested_alloc_space_begin == nullptr &&
-      Runtime::Current()->IsAotCompiler()) {
-    // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
-    // b/26849108
-    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
-  }
-  uint8_t* request_begin = requested_alloc_space_begin;
-  if (request_begin != nullptr && separate_non_moving_space) {
-    request_begin += non_moving_space_capacity;
-  }
   std::string error_str;
   MemMap non_moving_space_mem_map;
   if (separate_non_moving_space) {
@@ -388,9 +398,16 @@
     const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
     // Reserve the non moving mem map before the other two since it needs to be at a specific
     // address.
-    non_moving_space_mem_map = MapAnonymousPreferredAddress(
-        space_name, requested_alloc_space_begin, non_moving_space_capacity, &error_str);
+    DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
+    if (heap_reservation.IsValid()) {
+      non_moving_space_mem_map = heap_reservation.RemapAtEnd(
+          heap_reservation.Begin(), space_name, PROT_READ | PROT_WRITE, &error_str);
+    } else {
+      non_moving_space_mem_map = MapAnonymousPreferredAddress(
+          space_name, request_begin, non_moving_space_capacity, &error_str);
+    }
     CHECK(non_moving_space_mem_map.IsValid()) << error_str;
+    DCHECK(!heap_reservation.IsValid());
     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
     request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
   }
@@ -404,14 +421,19 @@
       // If no separate non-moving space and we are the zygote, the main space must come right
       // after the image space to avoid a gap. This is required since we want the zygote space to
       // be adjacent to the image space.
-      main_mem_map_1 = MemMap::MapAnonymous(kMemMapSpaceName[0],
-                                            request_begin,
-                                            capacity_,
-                                            PROT_READ | PROT_WRITE,
-                                            /* low_4gb */ true,
-                                            &error_str);
+      DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
+      main_mem_map_1 = MemMap::MapAnonymous(
+          kMemMapSpaceName[0],
+          request_begin,
+          capacity_,
+          PROT_READ | PROT_WRITE,
+          /* low_4gb */ true,
+          /* reuse */ false,
+          heap_reservation.IsValid() ? &heap_reservation : nullptr,
+          &error_str);
     }
     CHECK(main_mem_map_1.IsValid()) << error_str;
+    DCHECK(!heap_reservation.IsValid());
   }
   if (support_homogeneous_space_compaction ||
       background_collector_type_ == kCollectorTypeSS ||
@@ -437,7 +459,7 @@
                                                                /* can_move_objects */ false);
     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
-        << requested_alloc_space_begin;
+        << non_moving_space_mem_map.Begin();
     AddSpace(non_moving_space_);
   }
   // Create other spaces based on whether or not we have a moving GC.
@@ -1327,7 +1349,7 @@
  public:
   explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
   }
-  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     thread->GetJniEnv()->TrimLocals();
     // If thread is a running mutator, then act on behalf of the trim thread.
     // See the code in ThreadList::RunCheckpoint.
@@ -2213,7 +2235,7 @@
 }
 
 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
-class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
+class ZygoteCompactingCollector final : public collector::SemiSpace {
  public:
   ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
       : SemiSpace(heap, false, "zygote collector"),
@@ -2769,7 +2791,7 @@
   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (root == obj_) {
       LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
     }
@@ -2826,7 +2848,7 @@
         root->AsMirrorPtr(), RootInfo(kRootVMInternal));
   }
 
-  virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
+  void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (root == nullptr) {
       LOG(ERROR) << "Root is null with info " << root_info.GetType();
@@ -3259,10 +3281,10 @@
 }
 
 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
-  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
+  mirror::Object* MarkObject(mirror::Object* obj) override {
     return obj;
   }
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
+  void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
   }
 };
 
@@ -3633,7 +3655,7 @@
  public:
   ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
       : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
-  virtual void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     heap->ConcurrentGC(self, cause_, force_full_);
     heap->ClearConcurrentGCRequest();
@@ -3691,7 +3713,7 @@
  public:
   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
 
-  virtual void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     heap->DoPendingCollectorTransition();
     heap->ClearPendingCollectorTransition(self);
@@ -3733,7 +3755,7 @@
 class Heap::HeapTrimTask : public HeapTask {
  public:
   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
-  virtual void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     heap->Trim(self);
     heap->ClearPendingTrim(self);
@@ -4176,7 +4198,7 @@
 class Heap::TriggerPostForkCCGcTask : public HeapTask {
  public:
   explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     // Trigger a GC, if not already done. The first GC after fork, whenever
     // takes place, will adjust the thresholds to normal levels.
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 8720a3e..7cbad3b 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -29,7 +29,7 @@
 
 class HeapTest : public CommonRuntimeTest {
  public:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     MemMap::Init();
     std::string error_msg;
     // Reserve the preferred address to force the heap to use another one for testing.
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 9b31558..02e84b5 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -35,11 +35,11 @@
 
 // A bump pointer space allocates by incrementing a pointer; it doesn't provide a free
 // implementation as it's intended to be evacuated.
-class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
+class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
  public:
   typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
 
-  SpaceType GetType() const OVERRIDE {
+  SpaceType GetType() const override {
     return kSpaceTypeBumpPointerSpace;
   }
 
@@ -51,27 +51,27 @@
 
   // Allocate num_bytes, returns null if the space is full.
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
   // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(Locks::mutator_lock_);
+      override REQUIRES(Locks::mutator_lock_);
 
   mirror::Object* AllocNonvirtual(size_t num_bytes);
   mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
 
   // Return the storage space required by obj.
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
 
   // NOPS unless we support free lists.
-  size_t Free(Thread*, mirror::Object*) OVERRIDE {
+  size_t Free(Thread*, mirror::Object*) override {
     return 0;
   }
 
-  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+  size_t FreeList(Thread*, size_t, mirror::Object**) override {
     return 0;
   }
 
@@ -94,16 +94,16 @@
     return GetMemMap()->Size();
   }
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return nullptr;
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     return nullptr;
   }
 
   // Reset the space to empty.
-  void Clear() OVERRIDE REQUIRES(!block_lock_);
+  void Clear() override REQUIRES(!block_lock_);
 
   void Dump(std::ostream& os) const;
 
@@ -122,7 +122,7 @@
     return Begin() == End();
   }
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return true;
   }
 
@@ -141,7 +141,7 @@
   // Allocate a new TLAB, returns false if the allocation failed.
   bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
 
-  BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
+  BumpPointerSpace* AsBumpPointerSpace() override {
     return this;
   }
 
@@ -151,7 +151,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!block_lock_);
 
-  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
+  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
 
   // Record objects / bytes freed.
   void RecordFree(int32_t objects, int32_t bytes) {
@@ -159,7 +159,7 @@
     bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst);
   }
 
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Object alignment within the space.
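
The class comment above describes BumpPointerSpace as allocating by bumping a pointer with no per-object free, which is also why Free() and FreeList() simply return 0. A simplified, lock-free sketch of that allocation scheme for a single region (ART's version additionally handles TLABs, object alignment, and accounting):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    class BumpRegion {
     public:
      BumpRegion(uint8_t* begin, uint8_t* end) : pos_(begin), end_(end) {}

      // Returns nullptr when the region is exhausted; otherwise advances the
      // cursor atomically and hands out the previous value.
      void* Alloc(size_t num_bytes) {
        uint8_t* old_pos = pos_.load(std::memory_order_relaxed);
        uint8_t* new_pos;
        do {
          new_pos = old_pos + num_bytes;
          if (new_pos > end_) {
            return nullptr;  // Full: caller falls back to a GC or a new region.
          }
        } while (!pos_.compare_exchange_weak(old_pos, new_pos,
                                             std::memory_order_relaxed));
        return old_pos;
      }

      // Freeing individual objects is a no-op; the whole region is reclaimed
      // at once after evacuation.
      void Free(void*) {}

     private:
      std::atomic<uint8_t*> pos_;
      uint8_t* const end_;
    };
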
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 66537d5..c63ff71 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -50,39 +50,42 @@
                                size_t capacity, uint8_t* requested_begin, bool can_move_objects);
 
   // Virtual to allow MemoryToolMallocSpace to intercept.
-  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                                          size_t* usable_size,
-                                          size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!lock_);
+  mirror::Object* AllocWithGrowth(Thread* self,
+                                  size_t num_bytes,
+                                  size_t* bytes_allocated,
+                                  size_t* usable_size,
+                                  size_t* bytes_tl_bulk_allocated) override REQUIRES(!lock_);
   // Virtual to allow MemoryToolMallocSpace to intercept.
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                                size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!lock_) {
+  mirror::Object* Alloc(Thread* self,
+                        size_t num_bytes,
+                        size_t* bytes_allocated,
+                        size_t* usable_size,
+                        size_t* bytes_tl_bulk_allocated) override REQUIRES(!lock_) {
     return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                            bytes_tl_bulk_allocated);
   }
   // Virtual to allow MemoryToolMallocSpace to intercept.
-  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
   // Virtual to allow MemoryToolMallocSpace to intercept.
-  virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+  size_t Free(Thread* self, mirror::Object* ptr) override
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
   // Virtual to allow MemoryToolMallocSpace to intercept.
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
     return num_bytes;
   }
 
   // DlMallocSpaces don't have thread local state.
-  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+  size_t RevokeThreadLocalBuffers(art::Thread*) override {
     return 0U;
   }
-  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+  size_t RevokeAllThreadLocalBuffers() override {
     return 0U;
   }
 
@@ -103,23 +106,23 @@
     return mspace_;
   }
 
-  size_t Trim() OVERRIDE;
+  size_t Trim() override;
 
   // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
   // in use, indicated by num_bytes equaling zero.
-  void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+  void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
 
   // Returns the number of bytes that the space has currently obtained from the system. This is
   // greater or equal to the amount of live data in the space.
-  size_t GetFootprint() OVERRIDE;
+  size_t GetFootprint() override;
 
   // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
-  size_t GetFootprintLimit() OVERRIDE;
+  size_t GetFootprintLimit() override;
 
   // Set the maximum number of bytes that the heap is allowed to obtain from the system via
   // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
   // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
-  void SetFootprintLimit(size_t limit) OVERRIDE;
+  void SetFootprintLimit(size_t limit) override;
 
   MallocSpace* CreateInstance(MemMap&& mem_map,
                               const std::string& name,
@@ -128,22 +131,22 @@
                               uint8_t* end,
                               uint8_t* limit,
                               size_t growth_limit,
-                              bool can_move_objects) OVERRIDE;
+                              bool can_move_objects) override;
 
-  uint64_t GetBytesAllocated() OVERRIDE;
-  uint64_t GetObjectsAllocated() OVERRIDE;
+  uint64_t GetBytesAllocated() override;
+  uint64_t GetObjectsAllocated() override;
 
-  virtual void Clear() OVERRIDE;
+  void Clear() override;
 
-  bool IsDlMallocSpace() const OVERRIDE {
+  bool IsDlMallocSpace() const override {
     return true;
   }
 
-  DlMallocSpace* AsDlMallocSpace() OVERRIDE {
+  DlMallocSpace* AsDlMallocSpace() override {
     return this;
   }
 
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
@@ -165,7 +168,7 @@
       REQUIRES(lock_);
 
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
-                        size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
+                        size_t /*maximum_size*/, bool /*low_memory_mode*/) override {
     return CreateMspace(base, morecore_start, initial_size);
   }
   static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
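
Besides the macro swap, these dlmalloc_space.h hunks drop the leading virtual wherever override is present: override is only legal on a function that overrides a virtual, so the extra keyword was redundant, while the "Virtual to allow MemoryToolMallocSpace to intercept" comments keep documenting the intent. A small before/after sketch with invented types:

    #include <cstddef>

    class MallocSpaceBase {
     public:
      virtual ~MallocSpaceBase() {}
      virtual size_t AllocationSize(const void* obj) = 0;
    };

    class DlMallocLikeSpace : public MallocSpaceBase {
     public:
      // Before: virtual size_t AllocationSize(const void* obj) OVERRIDE { ... }
      // After:  override already implies virtual, so the keyword is dropped but
      //         the function can still be intercepted by a further subclass.
      size_t AllocationSize(const void* obj) override {
        return obj != nullptr ? 16u : 0u;  // placeholder accounting
      }
    };
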
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 7178627..f308f63 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -63,7 +63,7 @@
 ImageSpace::ImageSpace(const std::string& image_filename,
                        const char* image_location,
                        MemMap&& mem_map,
-                       accounting::ContinuousSpaceBitmap* live_bitmap,
+                       std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
                        uint8_t* end)
     : MemMapSpace(image_filename,
                   std::move(mem_map),
@@ -71,10 +71,10 @@
                   end,
                   end,
                   kGcRetentionPolicyNeverCollect),
+      live_bitmap_(std::move(live_bitmap)),
       oat_file_non_owned_(nullptr),
       image_location_(image_location) {
-  DCHECK(live_bitmap != nullptr);
-  live_bitmap_.reset(live_bitmap);
+  DCHECK(live_bitmap_ != nullptr);
 }
 
 static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
@@ -480,52 +480,16 @@
 }
 
 // Helper class encapsulating loading, so we can access private ImageSpace members (this is a
-// friend class), but not declare functions in the header.
+// nested class), but not declare functions in the header.
 class ImageSpace::Loader {
  public:
-  static std::unique_ptr<ImageSpace> Load(const std::string& image_location,
-                                          const std::string& image_filename,
-                                          bool is_zygote,
-                                          bool is_global_cache,
-                                          bool validate_oat_file,
-                                          std::string* error_msg)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Should this be a RDWR lock? This is only a defensive measure, as at
-    // this point the image should exist.
-    // However, only the zygote can write into the global dalvik-cache, so
-    // restrict to zygote processes, or any process that isn't using
-    // /data/dalvik-cache (which we assume to be allowed to write there).
-    const bool rw_lock = is_zygote || !is_global_cache;
-
-    // Note that we must not use the file descriptor associated with
-    // ScopedFlock::GetFile to Init the image file. We want the file
-    // descriptor (and the associated exclusive lock) to be released when
-    // we leave Create.
-    ScopedFlock image = LockedFile::Open(image_filename.c_str(),
-                                         rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
-                                         true /* block */,
-                                         error_msg);
-
-    VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
-                  << image_location;
-    // If we are in /system we can assume the image is good. We can also
-    // assume this if we are using a relocated image (i.e. image checksum
-    // matches) since this is only different by the offset. We need this to
-    // make sure that host tests continue to work.
-    // Since we are the boot image, pass null since we load the oat file from the boot image oat
-    // file name.
-    return Init(image_filename.c_str(),
-                image_location.c_str(),
-                validate_oat_file,
-                /* oat_file */nullptr,
-                error_msg);
-  }
-
   static std::unique_ptr<ImageSpace> Init(const char* image_filename,
                                           const char* image_location,
                                           bool validate_oat_file,
                                           const OatFile* oat_file,
-                                          std::string* error_msg)
+                                          /*inout*/MemMap* image_reservation,
+                                          /*inout*/MemMap* oat_reservation,
+                                          /*out*/std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     CHECK(image_filename != nullptr);
     CHECK(image_location != nullptr);
@@ -616,22 +580,27 @@
     // image at the image begin, the amount of fixup work required is minimized.
     // If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
     // avoid reading proc maps for a mapping failure and slowing everything down.
-    map = LoadImageFile(image_filename,
-                        image_location,
-                        *image_header,
-                        image_header->GetImageBegin(),
-                        file->Fd(),
-                        logger,
-                        image_header->IsPic() ? nullptr : error_msg);
+    // For the boot image, we have already reserved the memory and we load the image
+    // into the `image_reservation`.
+    map = LoadImageFile(
+        image_filename,
+        image_location,
+        *image_header,
+        image_header->GetImageBegin(),
+        file->Fd(),
+        logger,
+        image_reservation,
+        (image_reservation == nullptr && image_header->IsPic()) ? nullptr : error_msg);
     // If the header specifies PIC mode, we can also map at a random low_4gb address since we can
     // relocate in-place.
-    if (!map.IsValid() && image_header->IsPic()) {
+    if (!map.IsValid() && image_reservation == nullptr && image_header->IsPic()) {
       map = LoadImageFile(image_filename,
                           image_location,
                           *image_header,
                           /* address */ nullptr,
                           file->Fd(),
                           logger,
+                          /* image_reservation */ nullptr,
                           error_msg);
     }
     // Were we able to load something and continue?
@@ -641,15 +610,13 @@
     }
     DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));
 
-    MemMap image_bitmap_map = MemMap::MapFileAtAddress(nullptr,
-                                                       bitmap_section.Size(),
-                                                       PROT_READ, MAP_PRIVATE,
-                                                       file->Fd(),
-                                                       image_bitmap_offset,
-                                                       /*low_4gb*/false,
-                                                       /*reuse*/false,
-                                                       image_filename,
-                                                       error_msg);
+    MemMap image_bitmap_map = MemMap::MapFile(bitmap_section.Size(),
+                                              PROT_READ, MAP_PRIVATE,
+                                              file->Fd(),
+                                              image_bitmap_offset,
+                                              /* low_4gb */ false,
+                                              image_filename,
+                                              error_msg);
     if (!image_bitmap_map.IsValid()) {
       *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
       return nullptr;
@@ -694,7 +661,7 @@
     std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
                                                      image_location,
                                                      std::move(map),
-                                                     bitmap.release(),
+                                                     std::move(bitmap),
                                                      image_end));
 
     // VerifyImageAllocations() will be called later in Runtime::Init()
@@ -704,7 +671,7 @@
     // set yet at this point.
     if (oat_file == nullptr) {
       TimingLogger::ScopedTiming timing("OpenOatFile", &logger);
-      space->oat_file_ = OpenOatFile(*space, image_filename, error_msg);
+      space->oat_file_ = OpenOatFile(*space, image_filename, oat_reservation, error_msg);
       if (space->oat_file_ == nullptr) {
         DCHECK(!error_msg->empty());
         return nullptr;
@@ -787,7 +754,8 @@
                               uint8_t* address,
                               int fd,
                               TimingLogger& logger,
-                              std::string* error_msg) {
+                              /*inout*/MemMap* image_reservation,
+                              /*out*/std::string* error_msg) {
     TimingLogger::ScopedTiming timing("MapImageFile", &logger);
     const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
     if (storage_mode == ImageHeader::kStorageModeUncompressed) {
@@ -796,10 +764,11 @@
                                       PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE,
                                       fd,
-                                      0,
-                                      /*low_4gb*/true,
-                                      /*reuse*/false,
+                                      /* start */ 0,
+                                      /* low_4gb */ true,
                                       image_filename,
+                                      /* reuse */ false,
+                                      image_reservation,
                                       error_msg);
     }
 
@@ -817,7 +786,9 @@
                                       address,
                                       image_header.GetImageSize(),
                                       PROT_READ | PROT_WRITE,
-                                      /*low_4gb*/ true,
+                                      /* low_4gb */ true,
+                                      /* reuse */ false,
+                                      image_reservation,
                                       error_msg);
     if (map.IsValid()) {
       const size_t stored_size = image_header.GetDataSize();
@@ -826,8 +797,8 @@
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         fd,
-                                        /*offset*/0,
-                                        /*low_4gb*/false,
+                                        /* offset */ 0,
+                                        /* low_4gb */ false,
                                         image_filename,
                                         error_msg);
       if (!temp_map.IsValid()) {
@@ -1396,6 +1367,7 @@
 
   static std::unique_ptr<OatFile> OpenOatFile(const ImageSpace& image,
                                               const char* image_path,
+                                              /*inout*/MemMap* oat_reservation,
                                               std::string* error_msg) {
     const ImageHeader& image_header = image.GetImageHeader();
     std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_path);
@@ -1406,10 +1378,10 @@
                                                     oat_filename,
                                                     oat_filename,
                                                     image_header.GetOatDataBegin(),
-                                                    image_header.GetOatFileBegin(),
                                                     !Runtime::Current()->IsAotCompiler(),
-                                                    /*low_4gb*/false,
-                                                    nullptr,
+                                                    /* low_4gb */ false,
+                                                    /* abs_dex_location */ nullptr,
+                                                    oat_reservation,
                                                     error_msg));
     if (oat_file == nullptr) {
       *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
@@ -1489,29 +1461,61 @@
     return cache_filename_;
   }
 
-  bool LoadFromSystem(/*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
-                      /*out*/ uint8_t** oat_file_end,
-                      /*out*/ std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool LoadFromSystem(size_t extra_reservation_size,
+                      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+                      /*out*/MemMap* extra_reservation,
+                      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
     std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
     std::vector<std::string> locations;
     if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
       return false;
     }
+    uint32_t image_start;
+    uint32_t image_end;
+    uint32_t oat_end;
+    if (!GetBootImageAddressRange(filename, &image_start, &image_end, &oat_end, error_msg)) {
+      return false;
+    }
+    if (locations.size() > 1u) {
+      std::string last_filename = GetSystemImageFilename(locations.back().c_str(), image_isa_);
+      uint32_t dummy;
+      if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, &oat_end, error_msg)) {
+        return false;
+      }
+    }
+    MemMap image_reservation;
+    MemMap oat_reservation;
+    MemMap local_extra_reservation;
+    if (!ReserveBootImageMemory(image_start,
+                                image_end,
+                                oat_end,
+                                extra_reservation_size,
+                                &image_reservation,
+                                &oat_reservation,
+                                &local_extra_reservation,
+                                error_msg)) {
+      return false;
+    }
+
     std::vector<std::unique_ptr<ImageSpace>> spaces;
     spaces.reserve(locations.size());
     for (const std::string& location : locations) {
       filename = GetSystemImageFilename(location.c_str(), image_isa_);
-      spaces.push_back(Loader::Load(location,
-                                    filename,
-                                    is_zygote_,
-                                    is_global_cache_,
-                                    /* validate_oat_file */ false,
-                                    error_msg));
+      spaces.push_back(Load(location,
+                            filename,
+                            /* validate_oat_file */ false,
+                            &image_reservation,
+                            &oat_reservation,
+                            error_msg));
       if (spaces.back() == nullptr) {
         return false;
       }
     }
-    *oat_file_end = GetOatFileEnd(spaces);
+    if (!CheckReservationsExhausted(image_reservation, oat_reservation, error_msg)) {
+      return false;
+    }
+
+    *extra_reservation = std::move(local_extra_reservation);
     boot_image_spaces->swap(spaces);
     return true;
   }
@@ -1519,14 +1523,48 @@
   bool LoadFromDalvikCache(
       bool validate_system_checksums,
       bool validate_oat_file,
-      /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
-      /*out*/ uint8_t** oat_file_end,
-      /*out*/ std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+      size_t extra_reservation_size,
+      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+      /*out*/MemMap* extra_reservation,
+      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(DalvikCacheExists());
     std::vector<std::string> locations;
     if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
       return false;
     }
+    uint32_t image_start;
+    uint32_t image_end;
+    uint32_t oat_end;
+    if (!GetBootImageAddressRange(cache_filename_, &image_start, &image_end, &oat_end, error_msg)) {
+      return false;
+    }
+    if (locations.size() > 1u) {
+      std::string last_filename;
+      if (!GetDalvikCacheFilename(locations.back().c_str(),
+                                  dalvik_cache_.c_str(),
+                                  &last_filename,
+                                  error_msg)) {
+        return false;
+      }
+      uint32_t dummy;
+      if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, &oat_end, error_msg)) {
+        return false;
+      }
+    }
+    MemMap image_reservation;
+    MemMap oat_reservation;
+    MemMap local_extra_reservation;
+    if (!ReserveBootImageMemory(image_start,
+                                image_end,
+                                oat_end,
+                                extra_reservation_size,
+                                &image_reservation,
+                                &oat_reservation,
+                                &local_extra_reservation,
+                                error_msg)) {
+      return false;
+    }
+
     std::vector<std::unique_ptr<ImageSpace>> spaces;
     spaces.reserve(locations.size());
     for (const std::string& location : locations) {
@@ -1534,12 +1572,12 @@
       if (!GetDalvikCacheFilename(location.c_str(), dalvik_cache_.c_str(), &filename, error_msg)) {
         return false;
       }
-      spaces.push_back(Loader::Load(location,
-                                    filename,
-                                    is_zygote_,
-                                    is_global_cache_,
-                                    validate_oat_file,
-                                    error_msg));
+      spaces.push_back(Load(location,
+                            filename,
+                            validate_oat_file,
+                            &image_reservation,
+                            &oat_reservation,
+                            error_msg));
       if (spaces.back() == nullptr) {
         return false;
       }
@@ -1560,12 +1598,56 @@
         }
       }
     }
-    *oat_file_end = GetOatFileEnd(spaces);
+    if (!CheckReservationsExhausted(image_reservation, oat_reservation, error_msg)) {
+      return false;
+    }
+
+    *extra_reservation = std::move(local_extra_reservation);
     boot_image_spaces->swap(spaces);
     return true;
   }
 
  private:
+  std::unique_ptr<ImageSpace> Load(const std::string& image_location,
+                                   const std::string& image_filename,
+                                   bool validate_oat_file,
+                                   /*inout*/MemMap* image_reservation,
+                                   /*inout*/MemMap* oat_reservation,
+                                   /*out*/std::string* error_msg)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Should this be a RDWR lock? This is only a defensive measure, as at
+    // this point the image should exist.
+    // However, only the zygote can write into the global dalvik-cache, so
+    // restrict to zygote processes, or any process that isn't using
+    // /data/dalvik-cache (which we assume to be allowed to write there).
+    const bool rw_lock = is_zygote_ || !is_global_cache_;
+
+    // Note that we must not use the file descriptor associated with
+    // ScopedFlock::GetFile to Init the image file. We want the file
+    // descriptor (and the associated exclusive lock) to be released when
+    // we leave Create.
+    ScopedFlock image = LockedFile::Open(image_filename.c_str(),
+                                         rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
+                                         true /* block */,
+                                         error_msg);
+
+    VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
+                  << image_location;
+    // If we are in /system we can assume the image is good. We can also
+    // assume this if we are using a relocated image (i.e. image checksum
+    // matches) since this is only different by the offset. We need this to
+    // make sure that host tests continue to work.
+    // Since we are the boot image, pass null since we load the oat file from the boot image oat
+    // file name.
+    return Loader::Init(image_filename.c_str(),
+                        image_location.c_str(),
+                        validate_oat_file,
+                        /* oat_file */ nullptr,
+                        image_reservation,
+                        oat_reservation,
+                        error_msg);
+  }
+
   // Extract boot class path from oat file associated with `image_filename`
   // and list all associated image locations.
   static bool GetBootClassPathImageLocations(const std::string& image_location,
@@ -1577,10 +1659,10 @@
                                                     oat_filename,
                                                     oat_filename,
                                                     /* requested_base */ nullptr,
-                                                    /* oat_file_begin */ nullptr,
                                                     /* executable */ false,
                                                     /* low_4gb */ false,
                                                     /* abs_dex_location */ nullptr,
+                                                    /* reservation */ nullptr,
                                                     error_msg));
     if (oat_file == nullptr) {
       *error_msg = StringPrintf("Failed to open oat file '%s' for image file %s: %s",
@@ -1598,14 +1680,87 @@
     return true;
   }
 
-  uint8_t* GetOatFileEnd(const std::vector<std::unique_ptr<ImageSpace>>& spaces) {
-    DCHECK(std::is_sorted(
-        spaces.begin(),
-        spaces.end(),
-        [](const std::unique_ptr<ImageSpace>& lhs, const std::unique_ptr<ImageSpace>& rhs) {
-          return lhs->GetOatFileEnd() < rhs->GetOatFileEnd();
-        }));
-    return AlignUp(spaces.back()->GetOatFileEnd(), kPageSize);
+  bool GetBootImageAddressRange(const std::string& filename,
+                                /*out*/uint32_t* start,
+                                /*out*/uint32_t* end,
+                                /*out*/uint32_t* oat_end,
+                                /*out*/std::string* error_msg) {
+    ImageHeader system_hdr;
+    if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
+      *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
+      return false;
+    }
+    *start = reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin());
+    CHECK_ALIGNED(*start, kPageSize);
+    *end = RoundUp(*start + system_hdr.GetImageSize(), kPageSize);
+    *oat_end = RoundUp(reinterpret_cast32<uint32_t>(system_hdr.GetOatFileEnd()), kPageSize);
+    return true;
+  }
+
+  bool ReserveBootImageMemory(uint32_t image_start,
+                              uint32_t image_end,
+                              uint32_t oat_end,
+                              size_t extra_reservation_size,
+                              /*out*/MemMap* image_reservation,
+                              /*out*/MemMap* oat_reservation,
+                              /*out*/MemMap* extra_reservation,
+                              /*out*/std::string* error_msg) {
+    DCHECK(!image_reservation->IsValid());
+    size_t total_size =
+        dchecked_integral_cast<size_t>(oat_end - image_start) + extra_reservation_size;
+    *image_reservation =
+        MemMap::MapAnonymous("Boot image reservation",
+                             reinterpret_cast32<uint8_t*>(image_start),
+                             total_size,
+                             PROT_NONE,
+                             /* low_4gb */ true,
+                             /* reuse */ false,
+                             /* reservation */ nullptr,
+                             error_msg);
+    if (!image_reservation->IsValid()) {
+      return false;
+    }
+    DCHECK(!extra_reservation->IsValid());
+    if (extra_reservation_size != 0u) {
+      DCHECK_ALIGNED(extra_reservation_size, kPageSize);
+      DCHECK_LT(extra_reservation_size, image_reservation->Size());
+      uint8_t* split = image_reservation->End() - extra_reservation_size;
+      *extra_reservation = image_reservation->RemapAtEnd(split,
+                                                         "Boot image extra reservation",
+                                                         PROT_NONE,
+                                                         error_msg);
+      if (!extra_reservation->IsValid()) {
+        return false;
+      }
+    }
+    DCHECK(!oat_reservation->IsValid());
+    *oat_reservation = image_reservation->RemapAtEnd(reinterpret_cast32<uint8_t*>(image_end),
+                                                     "Boot image oat reservation",
+                                                     PROT_NONE,
+                                                     error_msg);
+    if (!oat_reservation->IsValid()) {
+      return false;
+    }
+
+    return true;
+  }
+
+  bool CheckReservationsExhausted(const MemMap& image_reservation,
+                                  const MemMap& oat_reservation,
+                                  /*out*/std::string* error_msg) {
+    if (image_reservation.IsValid()) {
+      *error_msg = StringPrintf("Excessive image reservation after loading boot image: %p-%p",
+                                image_reservation.Begin(),
+                                image_reservation.End());
+      return false;
+    }
+    if (oat_reservation.IsValid()) {
+      *error_msg = StringPrintf("Excessive oat reservation after loading boot image: %p-%p",
+                                oat_reservation.Begin(),
+                                oat_reservation.End());
+      return false;
+    }
+    return true;
   }
 
   const std::string& image_location_;
@@ -1657,13 +1812,15 @@
 bool ImageSpace::LoadBootImage(
     const std::string& image_location,
     const InstructionSet image_isa,
-    /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
-    /*out*/ uint8_t** oat_file_end) {
+    size_t extra_reservation_size,
+    /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+    /*out*/MemMap* extra_reservation) {
   ScopedTrace trace(__FUNCTION__);
 
   DCHECK(boot_image_spaces != nullptr);
   DCHECK(boot_image_spaces->empty());
-  DCHECK(oat_file_end != nullptr);
+  DCHECK_ALIGNED(extra_reservation_size, kPageSize);
+  DCHECK(extra_reservation != nullptr);
   DCHECK_NE(image_isa, InstructionSet::kNone);
 
   if (image_location.empty()) {
@@ -1720,8 +1877,9 @@
     // If we have system image, validate system image checksums, otherwise validate the oat file.
     if (loader.LoadFromDalvikCache(/* validate_system_checksums */ loader.HasSystem(),
                                    /* validate_oat_file */ !loader.HasSystem(),
+                                   extra_reservation_size,
                                    boot_image_spaces,
-                                   oat_file_end,
+                                   extra_reservation,
                                    &local_error_msg)) {
       return true;
     }
@@ -1735,7 +1893,10 @@
 
   if (loader.HasSystem() && !relocate) {
     std::string local_error_msg;
-    if (loader.LoadFromSystem(boot_image_spaces, oat_file_end, &local_error_msg)) {
+    if (loader.LoadFromSystem(extra_reservation_size,
+                              boot_image_spaces,
+                              extra_reservation,
+                              &local_error_msg)) {
       return true;
     }
     error_msgs.push_back(local_error_msg);
@@ -1752,8 +1913,9 @@
       if (patch_success) {
         if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
                                        /* validate_oat_file */ false,
+                                       extra_reservation_size,
                                        boot_image_spaces,
-                                       oat_file_end,
+                                       extra_reservation,
                                        &local_error_msg)) {
           return true;
         }
@@ -1777,8 +1939,9 @@
       if (compilation_success) {
         if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
                                        /* validate_oat_file */ false,
+                                       extra_reservation_size,
                                        boot_image_spaces,
-                                       oat_file_end,
+                                       extra_reservation,
                                        &local_error_msg)) {
           return true;
         }
@@ -1834,7 +1997,13 @@
 std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
                                                            const OatFile* oat_file,
                                                            std::string* error_msg) {
-  return Loader::Init(image, image, /*validate_oat_file*/false, oat_file, /*out*/error_msg);
+  return Loader::Init(image,
+                      image,
+                      /* validate_oat_file */ false,
+                      oat_file,
+                      /* image_reservation */ nullptr,
+                      /* oat_reservation */ nullptr,
+                      error_msg);
 }
 
 const OatFile* ImageSpace::GetOatFile() const {
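
Taken together, the image_space.cc changes replace the old "return the end of the last oat file" output with an up-front reservation: read the first and last image headers to get [image_start, oat_end), reserve that whole range plus any requested extra as one PROT_NONE mapping, split off the oat and extra pieces, thread the pieces through Init(), LoadImageFile() and OpenOatFile() as inout reservations, and finally fail if anything is left unconsumed. A rough sketch of the carve-up arithmetic using plain mmap, with invented addresses (ART does this through MemMap::MapAnonymous and RemapAtEnd):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Values that would normally come from the image headers, page aligned.
      const uintptr_t image_start = 0x70000000;
      const uintptr_t image_end   = 0x70400000;       // end of all image data
      const uintptr_t oat_end     = 0x70800000;       // end of all oat data
      const size_t extra          = 2 * 1024 * 1024;  // extra_reservation_size

      const size_t total = (oat_end - image_start) + extra;

      // One inaccessible reservation covering images + oat files + extra space.
      void* res = mmap(reinterpret_cast<void*>(image_start), total, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res == MAP_FAILED) { std::perror("mmap"); return 1; }
      uint8_t* base = static_cast<uint8_t*>(res);

      // Split points; in ART each split becomes its own MemMap via RemapAtEnd.
      uint8_t* image_part = base;
      uint8_t* oat_part   = base + (image_end - image_start);
      uint8_t* extra_part = base + (oat_end - image_start);

      std::printf("image %p  oat %p  extra %p\n",
                  static_cast<void*>(image_part), static_cast<void*>(oat_part),
                  static_cast<void*>(extra_part));

      // Loading then maps the image/oat file contents over their sub-ranges;
      // any reservation left over afterwards is reported as an error, which is
      // what CheckReservationsExhausted() does.
      munmap(base, total);
      return 0;
    }
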
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 20bce66..a2490ac 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -44,8 +44,9 @@
   static bool LoadBootImage(
       const std::string& image_location,
       const InstructionSet image_isa,
-      /*out*/ std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
-      /*out*/ uint8_t** oat_file_end) REQUIRES_SHARED(Locks::mutator_lock_);
+      size_t extra_reservation_size,
+      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+      /*out*/MemMap* extra_reservation) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to open an existing app image space.
   static std::unique_ptr<ImageSpace> CreateFromAppImage(const char* image,
@@ -86,11 +87,11 @@
     return image_location_;
   }
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return live_bitmap_.get();
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
     // special cases to test against.
     return live_bitmap_.get();
@@ -102,7 +103,7 @@
   void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) {
   }
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return false;
   }
 
@@ -183,7 +184,7 @@
   ImageSpace(const std::string& name,
              const char* image_location,
              MemMap&& mem_map,
-             accounting::ContinuousSpaceBitmap* live_bitmap,
+             std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
              uint8_t* end);
 
   // The OatFile associated with the image during early startup to
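
The constructor declared above now takes the live bitmap as a std::unique_ptr by value instead of a raw pointer, which makes the ownership transfer explicit at the call site and removes the release()/reset() pair seen in image_space.cc. A minimal sketch of the pattern outside ART:

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <utility>
    #include <vector>

    class Bitmap {
     public:
      explicit Bitmap(size_t bits) : bits_(bits / 8 + 1) {}
     private:
      std::vector<uint8_t> bits_;
    };

    class Space {
     public:
      // Taking unique_ptr by value forces the caller to std::move (or pass a
      // temporary), so the transfer of ownership is visible where it happens.
      explicit Space(std::unique_ptr<Bitmap> live_bitmap)
          : live_bitmap_(std::move(live_bitmap)) {}

      Bitmap* GetLiveBitmap() const { return live_bitmap_.get(); }

     private:
      std::unique_ptr<Bitmap> live_bitmap_;
    };

    int main() {
      auto bitmap = std::make_unique<Bitmap>(4096);
      Space space(std::move(bitmap));        // was: Space(bitmap.release())
      return space.GetLiveBitmap() != nullptr ? 0 : 1;
    }
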
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index a1ffa06..299a413 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -41,16 +41,16 @@
   args.push_back("--dex-file=" + multidex1);
   args.push_back("--dex-file=" + dex2);
   args.push_back("--oat-file=" + oat_location);
-  ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+  ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
 
   std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd */ -1,
                                              oat_location.c_str(),
                                              oat_location.c_str(),
-                                             nullptr,
-                                             nullptr,
-                                             false,
-                                             /*low_4gb*/false,
-                                             nullptr,
+                                             /* requested_base */ nullptr,
+                                             /* executable */ false,
+                                             /* low_4gb */ false,
+                                             /* abs_dex_location */ nullptr,
+                                             /* reservation */ nullptr,
                                              &error_msg));
   ASSERT_TRUE(oat != nullptr) << error_msg;
 
@@ -113,7 +113,7 @@
 template <bool kImage, bool kRelocate, bool kPatchoat, bool kImageDex2oat>
 class ImageSpaceLoadingTest : public CommonRuntimeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     if (kImage) {
       options->emplace_back(android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str()),
                             nullptr);
@@ -152,7 +152,7 @@
 
 class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false, true> {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     const char* android_data = getenv("ANDROID_DATA");
     CHECK(android_data != nullptr);
     old_android_data_ = android_data;
@@ -172,7 +172,7 @@
     ImageSpaceLoadingTest<false, true, false, true>::SetUpRuntimeOptions(options);
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     int result = unlink(bad_dalvik_cache_.c_str());
     CHECK_EQ(result, 0) << strerror(errno);
     result = rmdir(bad_android_data_.c_str());
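
The test above now also spells literal arguments in the "/* name */ value" style used throughout the change, so a reader does not have to look up which bool or nullptr means what. A short illustration with a made-up function (clang-tidy's bugprone-argument-comment check can enforce a close variant of this convention):

    #include <string>

    static int OpenOatLikeFile(const std::string& path, bool executable,
                               bool low_4gb, const char* abs_dex_location) {
      (void)path; (void)executable; (void)low_4gb; (void)abs_dex_location;
      return 0;
    }

    int main() {
      // Hard to read: what do the two bools and the nullptr mean?
      OpenOatLikeFile("core.oat", false, false, nullptr);

      // Self-describing at the call site:
      return OpenOatLikeFile("core.oat",
                             /* executable */ false,
                             /* low_4gb */ false,
                             /* abs_dex_location */ nullptr);
    }
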
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 76ea9fd..09d0251 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -39,12 +39,12 @@
 namespace gc {
 namespace space {
 
-class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
  public:
   explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
   }
 
-  ~MemoryToolLargeObjectMapSpace() OVERRIDE {
+  ~MemoryToolLargeObjectMapSpace() override {
     // Historical note: We were deleting large objects to keep Valgrind happy if there were
     // any large objects such as Dex cache arrays which aren't freed since they are held live
     // by the class linker.
@@ -52,7 +52,7 @@
 
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE {
+      override {
     mirror::Object* obj =
         LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                    usable_size, bytes_tl_bulk_allocated);
@@ -68,21 +68,21 @@
     return object_without_rdz;
   }
 
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
     return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
   }
 
-  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override {
     return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
   }
 
-  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+  size_t Free(Thread* self, mirror::Object* obj) override {
     mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
     MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
     return LargeObjectMapSpace::Free(self, object_with_rdz);
   }
 
-  bool Contains(const mirror::Object* obj) const OVERRIDE {
+  bool Contains(const mirror::Object* obj) const override {
     return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
   }
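
MemoryToolLargeObjectMapSpace above over-allocates every request by two red zones and returns a pointer past the leading zone, so out-of-bounds accesses land in memory the tool can flag; Free() undoes the offset via ObjectWithRedzone(). A simplified sketch of just the pointer arithmetic (the actual poisoning sits behind ART's MEMORY_TOOL_MAKE_* macros and is omitted here):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    constexpr size_t kRedZoneBytes = 64;  // illustrative; ART derives its own value

    // Allocate num_bytes with a red zone on each side and return the pointer
    // to the usable middle section.
    void* AllocWithRedZones(size_t num_bytes) {
      uint8_t* raw =
          static_cast<uint8_t*>(std::malloc(num_bytes + 2 * kRedZoneBytes));
      if (raw == nullptr) return nullptr;
      // A sanitizer build would poison [raw, raw + kRedZoneBytes) and the
      // trailing kRedZoneBytes here.
      return raw + kRedZoneBytes;
    }

    // Undo the offset before freeing, mirroring ObjectWithRedzone() above.
    void FreeWithRedZones(void* obj) {
      if (obj == nullptr) return;
      std::free(static_cast<uint8_t*>(obj) - kRedZoneBytes);
    }

    int main() {
      void* p = AllocWithRedZones(128);
      FreeWithRedZones(p);
      return 0;
    }
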
 
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index b69bd91..26c6463 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -41,7 +41,7 @@
 // Abstraction implemented by all large object spaces.
 class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
  public:
-  SpaceType GetType() const OVERRIDE {
+  SpaceType GetType() const override {
     return kSpaceTypeLargeObjectSpace;
   }
   void SwapBitmaps();
@@ -49,10 +49,10 @@
   virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
   virtual ~LargeObjectSpace() {}
 
-  uint64_t GetBytesAllocated() OVERRIDE {
+  uint64_t GetBytesAllocated() override {
     return num_bytes_allocated_;
   }
-  uint64_t GetObjectsAllocated() OVERRIDE {
+  uint64_t GetObjectsAllocated() override {
     return num_objects_allocated_;
   }
   uint64_t GetTotalBytesAllocated() const {
@@ -61,22 +61,22 @@
   uint64_t GetTotalObjectsAllocated() const {
     return total_objects_allocated_;
   }
-  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
   // LargeObjectSpaces don't have thread local state.
-  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+  size_t RevokeThreadLocalBuffers(art::Thread*) override {
     return 0U;
   }
-  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+  size_t RevokeAllThreadLocalBuffers() override {
     return 0U;
   }
-  bool IsAllocSpace() const OVERRIDE {
+  bool IsAllocSpace() const override {
     return true;
   }
-  AllocSpace* AsAllocSpace() OVERRIDE {
+  AllocSpace* AsAllocSpace() override {
     return this;
   }
   collector::ObjectBytePair Sweep(bool swap_bitmaps);
-  virtual bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return false;
   }
   // Current address at which the space begins, which may vary as the space is filled.
@@ -96,7 +96,7 @@
     const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
     return Begin() <= byte_obj && byte_obj < End();
   }
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return true if the large object is a zygote large object. Potentially slow.
@@ -140,11 +140,11 @@
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
       REQUIRES(!lock_);
   size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
-  void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
+  void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
-  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
 
  protected:
   struct LargeObject {
@@ -154,8 +154,8 @@
   explicit LargeObjectMapSpace(const std::string& name);
   virtual ~LargeObjectMapSpace() {}
 
-  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
-  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
 
   // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -164,22 +164,22 @@
 };
 
 // A continuous large object space with a free-list to handle holes.
-class FreeListSpace FINAL : public LargeObjectSpace {
+class FreeListSpace final : public LargeObjectSpace {
  public:
   static constexpr size_t kAlignment = kPageSize;
 
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
       REQUIRES(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!lock_);
-  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
-  void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+      override REQUIRES(!lock_);
+  size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
+  void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
   void Dump(std::ostream& os) const REQUIRES(!lock_);
 
-  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
 
  protected:
   FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
@@ -198,8 +198,8 @@
   }
   // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
   void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
-  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
-  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
 
   class SortByPrevFree {
    public:
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index e4a6f15..6bf2d71 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -133,7 +133,7 @@
   // Returns the class of a recently freed object.
   mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return can_move_objects_;
   }
 
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index 32bd204..33bddfa 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -29,28 +29,28 @@
           size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
+class MemoryToolMallocSpace final : public BaseMallocSpaceType {
  public:
   mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE;
+      override;
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(Locks::mutator_lock_);
+      override REQUIRES(Locks::mutator_lock_);
 
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
 
-  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+  size_t Free(Thread* self, mirror::Object* ptr) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
+  void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) override {}
 
-  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
+  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override;
 
   template <typename... Params>
   MemoryToolMallocSpace(MemMap&& mem_map, size_t initial_size, Params... params);
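
MemoryToolMallocSpace is a class template whose base class is itself a template parameter, so one wrapper can instrument DlMallocSpace and RosAllocSpace alike; marking it final matches the FINAL-to-final change above. A stripped-down sketch of the pattern with invented interfaces:

    #include <cstddef>
    #include <cstdio>

    class AllocSpace {
     public:
      virtual ~AllocSpace() {}
      virtual void* Alloc(size_t num_bytes) = 0;
    };

    class SimpleSpace : public AllocSpace {
     public:
      void* Alloc(size_t num_bytes) override { return new char[num_bytes]; }
    };

    // The base class is a template parameter, so the same wrapper instruments
    // any concrete space type; "final" closes the hierarchy the same way
    // MemoryToolMallocSpace does above.
    template <typename BaseSpaceType, size_t kRedZoneBytes>
    class InstrumentedSpace final : public BaseSpaceType {
     public:
      void* Alloc(size_t num_bytes) override {
        std::printf("alloc %zu (+%zu red zone)\n", num_bytes, 2 * kRedZoneBytes);
        return BaseSpaceType::Alloc(num_bytes + 2 * kRedZoneBytes);
      }
    };

    int main() {
      InstrumentedSpace<SimpleSpace, 32> space;
      void* p = space.Alloc(100);
      delete[] static_cast<char*>(p);
      return 0;
    }
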
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8ad26ba..0bf4f38 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -39,7 +39,7 @@
 static constexpr bool kCyclicRegionAllocation = true;
 
 // A space that consists of equal-sized regions.
-class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
+class RegionSpace final : public ContinuousMemMapAllocSpace {
  public:
   typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
 
@@ -49,7 +49,7 @@
     kEvacModeForceAll,
   };
 
-  SpaceType GetType() const OVERRIDE {
+  SpaceType GetType() const override {
     return kSpaceTypeRegionSpace;
   }
 
@@ -65,14 +65,14 @@
                         /* out */ size_t* bytes_allocated,
                         /* out */ size_t* usable_size,
                         /* out */ size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!region_lock_);
+      override REQUIRES(!region_lock_);
   // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
   mirror::Object* AllocThreadUnsafe(Thread* self,
                                     size_t num_bytes,
                                     /* out */ size_t* bytes_allocated,
                                     /* out */ size_t* usable_size,
                                     /* out */ size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+      override REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
   // The main allocation routine.
   template<bool kForEvac>
   ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
@@ -90,29 +90,29 @@
   void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
 
   // Return the storage space required by obj.
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
 
-  size_t Free(Thread*, mirror::Object*) OVERRIDE {
+  size_t Free(Thread*, mirror::Object*) override {
     UNIMPLEMENTED(FATAL);
     return 0;
   }
-  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+  size_t FreeList(Thread*, size_t, mirror::Object**) override {
     UNIMPLEMENTED(FATAL);
     return 0;
   }
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return mark_bitmap_.get();
   }
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     return mark_bitmap_.get();
   }
 
-  void Clear() OVERRIDE REQUIRES(!region_lock_);
+  void Clear() override REQUIRES(!region_lock_);
 
   // Remove read and write memory protection from the whole region space,
   // i.e. make memory pages backing the region area not readable and not
@@ -188,7 +188,7 @@
     return num_regions_;
   }
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return true;
   }
 
@@ -197,7 +197,7 @@
     return byte_obj >= Begin() && byte_obj < Limit();
   }
 
-  RegionSpace* AsRegionSpace() OVERRIDE {
+  RegionSpace* AsRegionSpace() override {
     return this;
   }
 
@@ -212,10 +212,10 @@
     WalkInternal<true /* kToSpaceOnly */>(visitor);
   }
 
-  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
+  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
     return nullptr;
   }
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
 
   // Object alignment within the space.
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index c630826..5162a06 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -52,24 +52,24 @@
 
   mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!lock_);
+      override REQUIRES(!lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override {
     return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                            bytes_tl_bulk_allocated);
   }
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(Locks::mutator_lock_) {
+      override REQUIRES(Locks::mutator_lock_) {
     return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
                                        bytes_tl_bulk_allocated);
   }
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
     return AllocationSizeNonvirtual<true>(obj, usable_size);
   }
-  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+  size_t Free(Thread* self, mirror::Object* ptr) override
       REQUIRES_SHARED(Locks::mutator_lock_);
-  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -93,7 +93,7 @@
   // run without allocating a new run.
   ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
                                                  size_t* bytes_allocated);
-  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
     return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
   }
   ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
@@ -107,13 +107,13 @@
     return rosalloc_;
   }
 
-  size_t Trim() OVERRIDE;
-  void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
-  size_t GetFootprint() OVERRIDE;
-  size_t GetFootprintLimit() OVERRIDE;
-  void SetFootprintLimit(size_t limit) OVERRIDE;
+  size_t Trim() override;
+  void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
+  size_t GetFootprint() override;
+  size_t GetFootprintLimit() override;
+  void SetFootprintLimit(size_t limit) override;
 
-  void Clear() OVERRIDE;
+  void Clear() override;
 
   MallocSpace* CreateInstance(MemMap&& mem_map,
                               const std::string& name,
@@ -122,10 +122,10 @@
                               uint8_t* end,
                               uint8_t* limit,
                               size_t growth_limit,
-                              bool can_move_objects) OVERRIDE;
+                              bool can_move_objects) override;
 
-  uint64_t GetBytesAllocated() OVERRIDE;
-  uint64_t GetObjectsAllocated() OVERRIDE;
+  uint64_t GetBytesAllocated() override;
+  uint64_t GetObjectsAllocated() override;
 
   size_t RevokeThreadLocalBuffers(Thread* thread);
   size_t RevokeAllThreadLocalBuffers();
@@ -135,11 +135,11 @@
   // Returns the class of a recently freed object.
   mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
 
-  bool IsRosAllocSpace() const OVERRIDE {
+  bool IsRosAllocSpace() const override {
     return true;
   }
 
-  RosAllocSpace* AsRosAllocSpace() OVERRIDE {
+  RosAllocSpace* AsRosAllocSpace() override {
     return this;
   }
 
@@ -149,7 +149,7 @@
 
   virtual ~RosAllocSpace();
 
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE {
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override {
     rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
   }
 
@@ -174,7 +174,7 @@
                               size_t* usable_size, size_t* bytes_tl_bulk_allocated);
 
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
-                        size_t maximum_size, bool low_memory_mode) OVERRIDE {
+                        size_t maximum_size, bool low_memory_mode) override {
     return CreateRosAlloc(
         base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
   }
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 4e173a8..545e3d8 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -352,7 +352,7 @@
     return mark_bitmap_.get();
   }
 
-  virtual bool IsDiscontinuousSpace() const OVERRIDE {
+  bool IsDiscontinuousSpace() const override {
     return true;
   }
 
@@ -409,14 +409,14 @@
 // Used by the heap compaction interface to enable copying from one type of alloc space to another.
 class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
  public:
-  bool IsAllocSpace() const OVERRIDE {
+  bool IsAllocSpace() const override {
     return true;
   }
-  AllocSpace* AsAllocSpace() OVERRIDE {
+  AllocSpace* AsAllocSpace() override {
     return this;
   }
 
-  bool IsContinuousMemMapAllocSpace() const OVERRIDE {
+  bool IsContinuousMemMapAllocSpace() const override {
     return true;
   }
   ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
@@ -435,11 +435,11 @@
   // Clear the space back to an empty space.
   virtual void Clear() = 0;
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return live_bitmap_.get();
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     return mark_bitmap_.get();
   }
 
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 200c79f..1f73577 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -27,7 +27,7 @@
 namespace space {
 
 // A zygote space is a space which you cannot allocate into or free from.
-class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
+class ZygoteSpace final : public ContinuousMemMapAllocSpace {
  public:
   // Returns the remaining storage in the out_map field.
   static ZygoteSpace* Create(const std::string& name,
@@ -38,28 +38,28 @@
 
   void Dump(std::ostream& os) const;
 
-  SpaceType GetType() const OVERRIDE {
+  SpaceType GetType() const override {
     return kSpaceTypeZygoteSpace;
   }
 
-  ZygoteSpace* AsZygoteSpace() OVERRIDE {
+  ZygoteSpace* AsZygoteSpace() override {
     return this;
   }
 
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
 
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
 
-  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE;
+  size_t Free(Thread* self, mirror::Object* ptr) override;
 
-  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
 
   // ZygoteSpaces don't have thread local state.
-  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+  size_t RevokeThreadLocalBuffers(art::Thread*) override {
     return 0U;
   }
-  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+  size_t RevokeAllThreadLocalBuffers() override {
     return 0U;
   }
 
@@ -71,13 +71,13 @@
     return objects_allocated_.load(std::memory_order_seq_cst);
   }
 
-  void Clear() OVERRIDE;
+  void Clear() override;
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return false;
   }
 
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 60105f4..ef85b39 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -45,7 +45,7 @@
   }
   virtual ~SystemWeakHolder() {}
 
-  void Allow() OVERRIDE
+  void Allow() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     CHECK(!kUseReadBarrier);
@@ -54,7 +54,7 @@
     new_weak_condition_.Broadcast(Thread::Current());
   }
 
-  void Disallow() OVERRIDE
+  void Disallow() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     CHECK(!kUseReadBarrier);
@@ -62,7 +62,7 @@
     allow_new_system_weak_ = false;
   }
 
-  void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
+  void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) override
       REQUIRES(!allow_disallow_lock_) {
     MutexLock mu(Thread::Current(), allow_disallow_lock_);
     new_weak_condition_.Broadcast(Thread::Current());
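
For context on the Allow()/Disallow()/Broadcast() trio above: Disallow() closes access to system weak references while the GC processes them, Allow() reopens access and wakes blocked readers. A rough standalone equivalent using standard C++ primitives (illustrative only; ART uses its own Mutex/ConditionVariable plus read-barrier checks):

#include <condition_variable>
#include <mutex>

// Sketch of the allow/disallow gate, not the ART implementation.
class WeakAccessGate {
 public:
  void Disallow() {                      // GC starts processing weaks.
    std::lock_guard<std::mutex> lock(mu_);
    allowed_ = false;
  }
  void Allow() {                         // GC is done; wake all waiting readers.
    std::lock_guard<std::mutex> lock(mu_);
    allowed_ = true;
    cv_.notify_all();
  }
  void WaitUntilAllowed() {              // Readers block here while weaks are disallowed.
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return allowed_; });
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool allowed_ = true;
};
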
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 897ab01..07725b9 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -44,7 +44,7 @@
         disallow_count_(0),
         sweep_count_(0) {}
 
-  void Allow() OVERRIDE
+  void Allow() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     SystemWeakHolder::Allow();
@@ -52,7 +52,7 @@
     allow_count_++;
   }
 
-  void Disallow() OVERRIDE
+  void Disallow() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     SystemWeakHolder::Disallow();
@@ -60,7 +60,7 @@
     disallow_count_++;
   }
 
-  void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
+  void Broadcast(bool broadcast_for_checkpoint) override
       REQUIRES(!allow_disallow_lock_) {
     SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
 
@@ -70,7 +70,7 @@
     }
   }
 
-  void Sweep(IsMarkedVisitor* visitor) OVERRIDE
+  void Sweep(IsMarkedVisitor* visitor) override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     MutexLock mu(Thread::Current(), allow_disallow_lock_);
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 38581ce..7cb678b 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -33,7 +33,7 @@
      : HeapTask(NanoTime() + MsToNs(10)), task_processor_(task_processor), counter_(counter),
        max_recursion_(max_recursion) {
   }
-  virtual void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     if (max_recursion_ > 0) {
       task_processor_->AddTask(self,
                                new RecursiveTask(task_processor_, counter_, max_recursion_ - 1));
@@ -52,7 +52,7 @@
   WorkUntilDoneTask(TaskProcessor* task_processor, Atomic<bool>* done_running)
       : task_processor_(task_processor), done_running_(done_running) {
   }
-  virtual void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     task_processor_->RunAllTasks(self);
     done_running_->store(true, std::memory_order_seq_cst);
   }
@@ -105,7 +105,7 @@
   TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
      : HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
   }
-  virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
+  void Run(Thread* thread ATTRIBUTE_UNUSED) override {
     ASSERT_EQ(*counter_, expected_counter_);
     ++*counter_;
   }
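
The tests above exercise a task processor that runs HeapTasks ordered by their target time (NanoTime() plus a delay). A minimal sketch of that scheduling idea with standard containers (assumed structure, not the ART TaskProcessor API):

#include <cstdint>
#include <functional>
#include <queue>
#include <utility>
#include <vector>

// Sketch: run callbacks in order of their target time, earliest first.
class TimeOrderedQueue {
 public:
  using Task = std::function<void()>;

  void AddTask(uint64_t target_time_ns, Task task) {
    queue_.push({target_time_ns, std::move(task)});
  }

  // Runs every queued task whose target time is <= now_ns, earliest first.
  void RunDueTasks(uint64_t now_ns) {
    while (!queue_.empty() && queue_.top().first <= now_ns) {
      Task task = queue_.top().second;
      queue_.pop();
      task();
    }
  }

 private:
  using Entry = std::pair<uint64_t, Task>;
  struct Later {
    bool operator()(const Entry& a, const Entry& b) const { return a.first > b.first; }
  };
  std::priority_queue<Entry, std::vector<Entry>, Later> queue_;
};
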
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index d6a2fa0..5d234ea 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -198,7 +198,7 @@
   CollectRootVisitor(ObjectSet* visited, WorkQueue* work) : visited_(visited), work_(work) {}
 
   void VisitRoot(mirror::Object* obj, const RootInfo& info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (obj != nullptr && visited_->insert(obj).second) {
       std::ostringstream oss;
       oss << info.ToString() << " = " << obj << "(" << obj->PrettyTypeOf() << ")";
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 986e28e..0bd43f9 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -133,7 +133,7 @@
 // critical.
 class SingleRootVisitor : public RootVisitor {
  private:
-  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       VisitRoot(*roots[i], info);
@@ -141,7 +141,7 @@
   }
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                          const RootInfo& info) OVERRIDE
+                          const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       VisitRoot(roots[i]->AsMirrorPtr(), info);
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index a9230e0..464c2b7 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -80,9 +80,9 @@
 DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
 #define STRING_DEX_CACHE_ELEMENT_SIZE 8
 DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<int32_t>(sizeof(art::mirror::StringDexCachePair))))
-#define METHOD_DEX_CACHE_SIZE_MINUS_ONE 511
+#define METHOD_DEX_CACHE_SIZE_MINUS_ONE 1023
 DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheMethodCacheSize - 1)))
-#define METHOD_DEX_CACHE_HASH_BITS 9
+#define METHOD_DEX_CACHE_HASH_BITS 10
 DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))))
 #define CARD_TABLE_CARD_SHIFT 0xa
 DEFINE_CHECK_EQ(static_cast<size_t>(CARD_TABLE_CARD_SHIFT), (static_cast<size_t>(art::gc::accounting::CardTable::kCardShift)))
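
The regenerated constants above follow from doubling kDexCacheMethodCacheSize in the dex_cache.h hunk later in this change: the index mask is size - 1 and the hash width is log2(size). A quick check of that relationship (the MethodSlot helper is illustrative, not the ART lookup code):

#include <cstddef>

constexpr size_t kDexCacheMethodCacheSize = 1024;                // must be a power of two
constexpr size_t kSizeMinusOne = kDexCacheMethodCacheSize - 1;   // 1023, used as an index mask
constexpr size_t kHashBits = 10;                                 // log2(1024)

static_assert((kDexCacheMethodCacheSize & kSizeMinusOne) == 0, "not a power of two");
static_assert((size_t{1} << kHashBits) == kDexCacheMethodCacheSize, "hash bits mismatch");

// Slot selection: reduce the method index into the cache with a cheap mask.
constexpr size_t MethodSlot(size_t method_index) {
  return method_index & kSizeMinusOne;
}
static_assert(MethodSlot(1024) == 0 && MethodSlot(1025) == 1, "mask behaves like modulo");
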
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 28a2302..9eaf1ec 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -259,7 +259,7 @@
 
 // Scoped handle storage of a fixed size that is stack allocated.
 template<size_t kNumReferences>
-class PACKED(4) StackHandleScope FINAL : public FixedSizeHandleScope<kNumReferences> {
+class PACKED(4) StackHandleScope final : public FixedSizeHandleScope<kNumReferences> {
  public:
   explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
   ALWAYS_INLINE ~StackHandleScope();
diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc
index a41d284..4c7efe6 100644
--- a/runtime/hidden_api_test.cc
+++ b/runtime/hidden_api_test.cc
@@ -27,7 +27,7 @@
 
 class HiddenApiTest : public CommonRuntimeTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     // Do the normal setup.
     CommonRuntimeTest::SetUp();
     self_ = Thread::Current();
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 3f44928..e8a47d1 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -303,7 +303,7 @@
   }
   virtual ~EndianOutputBuffered() {}
 
-  void UpdateU4(size_t offset, uint32_t new_value) OVERRIDE {
+  void UpdateU4(size_t offset, uint32_t new_value) override {
     DCHECK_LE(offset, length_ - 4);
     buffer_[offset + 0] = static_cast<uint8_t>((new_value >> 24) & 0xFF);
     buffer_[offset + 1] = static_cast<uint8_t>((new_value >> 16) & 0xFF);
@@ -312,12 +312,12 @@
   }
 
  protected:
-  void HandleU1List(const uint8_t* values, size_t count) OVERRIDE {
+  void HandleU1List(const uint8_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     buffer_.insert(buffer_.end(), values, values + count);
   }
 
-  void HandleU1AsU2List(const uint8_t* values, size_t count) OVERRIDE {
+  void HandleU1AsU2List(const uint8_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     // All 8-bits are grouped in 2 to make 16-bit block like Java Char
     if (count & 1) {
@@ -330,7 +330,7 @@
     }
   }
 
-  void HandleU2List(const uint16_t* values, size_t count) OVERRIDE {
+  void HandleU2List(const uint16_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     for (size_t i = 0; i < count; ++i) {
       uint16_t value = *values;
@@ -340,7 +340,7 @@
     }
   }
 
-  void HandleU4List(const uint32_t* values, size_t count) OVERRIDE {
+  void HandleU4List(const uint32_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     for (size_t i = 0; i < count; ++i) {
       uint32_t value = *values;
@@ -352,7 +352,7 @@
     }
   }
 
-  void HandleU8List(const uint64_t* values, size_t count) OVERRIDE {
+  void HandleU8List(const uint64_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     for (size_t i = 0; i < count; ++i) {
       uint64_t value = *values;
@@ -368,7 +368,7 @@
     }
   }
 
-  void HandleEndRecord() OVERRIDE {
+  void HandleEndRecord() override {
     DCHECK_EQ(buffer_.size(), length_);
     if (kIsDebugBuild && started_) {
       uint32_t stored_length =
@@ -388,7 +388,7 @@
   std::vector<uint8_t> buffer_;
 };
 
-class FileEndianOutput FINAL : public EndianOutputBuffered {
+class FileEndianOutput final : public EndianOutputBuffered {
  public:
   FileEndianOutput(File* fp, size_t reserved_size)
       : EndianOutputBuffered(reserved_size), fp_(fp), errors_(false) {
@@ -402,7 +402,7 @@
   }
 
  protected:
-  void HandleFlush(const uint8_t* buffer, size_t length) OVERRIDE {
+  void HandleFlush(const uint8_t* buffer, size_t length) override {
     if (!errors_) {
       errors_ = !fp_->WriteFully(buffer, length);
     }
@@ -413,14 +413,14 @@
   bool errors_;
 };
 
-class VectorEndianOuputput FINAL : public EndianOutputBuffered {
+class VectorEndianOuputput final : public EndianOutputBuffered {
  public:
   VectorEndianOuputput(std::vector<uint8_t>& data, size_t reserved_size)
       : EndianOutputBuffered(reserved_size), full_data_(data) {}
   ~VectorEndianOuputput() {}
 
  protected:
-  void HandleFlush(const uint8_t* buf, size_t length) OVERRIDE {
+  void HandleFlush(const uint8_t* buf, size_t length) override {
     size_t old_size = full_data_.size();
     full_data_.resize(old_size + length);
     memcpy(full_data_.data() + old_size, buf, length);
@@ -604,7 +604,7 @@
   }
 
   void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+      override REQUIRES_SHARED(Locks::mutator_lock_);
   void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
                       uint32_t thread_serial);
 
diff --git a/runtime/image.cc b/runtime/image.cc
index b7a872c..028c515 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '4', '\0' };  // Half DexCache F&M arrays.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '3', '\0' };  // Image relocations.
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4196e19..b42433c 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -85,7 +85,7 @@
   explicit InstallStubsClassVisitor(Instrumentation* instrumentation)
       : instrumentation_(instrumentation) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
     instrumentation_->InstallStubsForClass(klass.Ptr());
     return true;  // we visit all classes.
   }
@@ -264,7 +264,7 @@
 // existing instrumentation frames.
 static void InstrumentationInstallStack(Thread* thread, void* arg)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  struct InstallStackVisitor FINAL : public StackVisitor {
+  struct InstallStackVisitor final : public StackVisitor {
     InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
         : StackVisitor(thread_in, context, kInstrumentationStackWalk),
           instrumentation_stack_(thread_in->GetInstrumentationStack()),
@@ -273,7 +273,7 @@
           last_return_pc_(0) {
     }
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       ArtMethod* m = GetMethod();
       if (m == nullptr) {
         if (kVerboseInstrumentation) {
@@ -429,7 +429,7 @@
     REQUIRES(Locks::mutator_lock_) {
   Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
 
-  struct RestoreStackVisitor FINAL : public StackVisitor {
+  struct RestoreStackVisitor final : public StackVisitor {
     RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
                         Instrumentation* instrumentation)
         : StackVisitor(thread_in, nullptr, kInstrumentationStackWalk),
@@ -439,7 +439,7 @@
           instrumentation_stack_(thread_in->GetInstrumentationStack()),
           frames_removed_(0) {}
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       if (instrumentation_stack_->size() == 0) {
         return false;  // Stop.
       }
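
The install/restore visitors above follow the usual StackVisitor protocol: the walker calls VisitFrame() once per frame, innermost first, and stops as soon as it returns false. A stripped-down sketch of that contract (illustrative; the real walker iterates managed frames, not a vector):

#include <vector>

struct Frame { /* method, dex pc, return pc, ... */ };

struct FrameVisitor {
  virtual ~FrameVisitor() {}
  virtual bool VisitFrame(const Frame& frame) = 0;  // false means "stop walking"
};

inline void WalkStack(const std::vector<Frame>& frames, FrameVisitor* visitor) {
  for (const Frame& frame : frames) {
    if (!visitor->VisitFrame(frame)) {
      break;
    }
  }
}
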
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 8ac26af..9146245 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -36,7 +36,7 @@
 namespace art {
 namespace instrumentation {
 
-class TestInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class TestInstrumentationListener final : public instrumentation::InstrumentationListener {
  public:
   TestInstrumentationListener()
     : received_method_enter_event(false),
@@ -59,7 +59,7 @@
                      Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                      ArtMethod* method ATTRIBUTE_UNUSED,
                      uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_enter_event = true;
   }
 
@@ -68,7 +68,7 @@
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     Handle<mirror::Object> return_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_exit_object_event = true;
   }
 
@@ -77,7 +77,7 @@
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     const JValue& return_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_exit_event = true;
   }
 
@@ -85,7 +85,7 @@
                     Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_unwind_event = true;
   }
 
@@ -93,7 +93,7 @@
                   Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                   ArtMethod* method ATTRIBUTE_UNUSED,
                   uint32_t new_dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_dex_pc_moved_event = true;
   }
 
@@ -102,7 +102,7 @@
                  ArtMethod* method ATTRIBUTE_UNUSED,
                  uint32_t dex_pc ATTRIBUTE_UNUSED,
                  ArtField* field ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_field_read_event = true;
   }
 
@@ -112,7 +112,7 @@
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     ArtField* field ATTRIBUTE_UNUSED,
                     Handle<mirror::Object> field_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_field_written_object_event = true;
   }
 
@@ -122,19 +122,19 @@
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     ArtField* field ATTRIBUTE_UNUSED,
                     const JValue& field_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_field_written_event = true;
   }
 
   void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
                        Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_exception_thrown_event = true;
   }
 
   void ExceptionHandled(Thread* self ATTRIBUTE_UNUSED,
                         Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_exception_handled_event = true;
   }
 
@@ -142,7 +142,7 @@
               ArtMethod* method ATTRIBUTE_UNUSED,
               uint32_t dex_pc ATTRIBUTE_UNUSED,
               int32_t dex_pc_offset ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_branch_event = true;
   }
 
@@ -151,12 +151,12 @@
                                 ArtMethod* caller ATTRIBUTE_UNUSED,
                                 uint32_t dex_pc ATTRIBUTE_UNUSED,
                                 ArtMethod* callee ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_invoke_virtual_or_interface_event = true;
   }
 
   void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED, const ShadowFrame& frame ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_watched_frame_pop  = true;
   }
 
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index b56c48d..8b4fe44 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -86,7 +86,7 @@
 
 class TestPredicate : public IsMarkedVisitor {
  public:
-  mirror::Object* IsMarked(mirror::Object* s) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  mirror::Object* IsMarked(mirror::Object* s) override REQUIRES_SHARED(Locks::mutator_lock_) {
     bool erased = false;
     for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
       if (*it == s) {
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 69dae31..17b3cd4 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -116,10 +116,10 @@
 UNARY_INTRINSIC(MterpLongNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVRegLong, SetJ);
 
 // java.lang.Long.rotateRight(JI)J
-BINARY_JJ_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ);
+BINARY_JI_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ);
 
 // java.lang.Long.rotateLeft(JI)J
-BINARY_JJ_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ);
+BINARY_JI_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ);
 
 // java.lang.Long.signum(J)I
 UNARY_INTRINSIC(MterpLongSignum, Signum, GetVRegLong, SetI);
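
The intrinsic fix above switches rotateLeft/rotateRight to the (JI) macro because the rotation distance is a single int vreg, not a long pair. The rotate itself is the usual shift-or-shift composition; a standalone sketch (not the ART Rot template):

#include <cstdint>

// Sketch: 64-bit rotate with an int distance, as in java.lang.Long.rotateLeft/rotateRight.
// Java defines the distance modulo 64, so only the low six bits matter.
inline uint64_t RotateLeft64(uint64_t value, int distance) {
  const unsigned shift = static_cast<unsigned>(distance) & 63u;
  return shift == 0 ? value : (value << shift) | (value >> (64u - shift));
}

inline uint64_t RotateRight64(uint64_t value, int distance) {
  const unsigned shift = static_cast<unsigned>(distance) & 63u;
  return shift == 0 ? value : (value >> shift) | (value << (64u - shift));
}
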
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index f76b86c..0e4cf27 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -159,14 +159,14 @@
   }
 
   int64_t GetVRegLong(size_t i) const {
-    DCHECK_LT(i, NumberOfVRegs());
+    DCHECK_LT(i + 1, NumberOfVRegs());
     const uint32_t* vreg = &vregs_[i];
     typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
     return *reinterpret_cast<unaligned_int64*>(vreg);
   }
 
   double GetVRegDouble(size_t i) const {
-    DCHECK_LT(i, NumberOfVRegs());
+    DCHECK_LT(i + 1, NumberOfVRegs());
     const uint32_t* vreg = &vregs_[i];
     typedef const double unaligned_double __attribute__ ((aligned (4)));
     return *reinterpret_cast<unaligned_double*>(vreg);
@@ -220,7 +220,7 @@
   }
 
   void SetVRegLong(size_t i, int64_t val) {
-    DCHECK_LT(i, NumberOfVRegs());
+    DCHECK_LT(i + 1, NumberOfVRegs());
     uint32_t* vreg = &vregs_[i];
     typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
     *reinterpret_cast<unaligned_int64*>(vreg) = val;
@@ -233,7 +233,7 @@
   }
 
   void SetVRegDouble(size_t i, double val) {
-    DCHECK_LT(i, NumberOfVRegs());
+    DCHECK_LT(i + 1, NumberOfVRegs());
     uint32_t* vreg = &vregs_[i];
     typedef double unaligned_double __attribute__ ((aligned (4)));
     *reinterpret_cast<unaligned_double*>(vreg) = val;
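
The tightened DCHECKs above reflect that a long or double occupies two consecutive 32-bit vregs, so an access at index i also touches i + 1. A self-contained sketch of the same bound check and pair layout (illustrative; ART reads the pair through the alignment-relaxed typedef shown above):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Sketch: a wide (64-bit) value stored across vregs[i] and vregs[i + 1].
class ToyFrame {
 public:
  explicit ToyFrame(size_t num_vregs) : vregs_(num_vregs, 0u) {}

  void SetVRegLong(size_t i, int64_t value) {
    assert(i + 1 < vregs_.size());                   // both halves must be in range
    std::memcpy(&vregs_[i], &value, sizeof(value));  // sidesteps alignment/aliasing issues
  }

  int64_t GetVRegLong(size_t i) const {
    assert(i + 1 < vregs_.size());
    int64_t value;
    std::memcpy(&value, &vregs_[i], sizeof(value));
    return value;
  }

 private:
  std::vector<uint32_t> vregs_;
};
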
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index 25ac6e2..452a76b 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -27,12 +27,12 @@
 
 class StackVisitor;
 
-class JavaFrameRootInfo FINAL : public RootInfo {
+class JavaFrameRootInfo final : public RootInfo {
  public:
   JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
      : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
   }
-  void Describe(std::ostream& os) const OVERRIDE
+  void Describe(std::ostream& os) const override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   size_t GetVReg() const {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ed449b5..a6bc029 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -399,7 +399,7 @@
 
 void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
   struct CollectClasses : public ClassVisitor {
-    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       classes_.push_back(klass.Ptr());
       return true;
     }
@@ -576,7 +576,7 @@
   memory_use_.AddValue(bytes);
 }
 
-class JitCompileTask FINAL : public Task {
+class JitCompileTask final : public Task {
  public:
   enum TaskKind {
     kAllocateProfile,
@@ -596,7 +596,7 @@
     soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
   }
 
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
     if (kind_ == kCompile) {
       Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
@@ -611,7 +611,7 @@
     ProfileSaver::NotifyJitActivity();
   }
 
-  void Finalize() OVERRIDE {
+  void Finalize() override {
     delete this;
   }
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index d9c7900..2b2898c 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -168,11 +168,6 @@
   ScopedTrace trace(__PRETTY_FUNCTION__);
   CHECK_GE(max_capacity, initial_capacity);
 
-  // Generating debug information is for using the Linux perf tool on
-  // host which does not work with ashmem.
-  // Also, targets linux and fuchsia do not support ashmem.
-  bool use_ashmem = !generate_debug_info && !kIsTargetLinux && !kIsTargetFuchsia;
-
   // With 'perf', we want a 1-1 mapping between an address and a method.
   // We aren't able to keep method pointers live during the instrumentation method entry trampoline
   // so we will just disable jit-gc if we are doing that.
@@ -212,8 +207,8 @@
       kProtData,
       /* low_4gb */ true,
       /* reuse */ false,
-      &error_str,
-      use_ashmem);
+      /* reservation */ nullptr,
+      &error_str);
   if (!data_map.IsValid()) {
     std::ostringstream oss;
     oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
@@ -233,7 +228,7 @@
   uint8_t* divider = data_map.Begin() + data_size;
 
   MemMap code_map = data_map.RemapAtEnd(
-      divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str, use_ashmem);
+      divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str);
   if (!code_map.IsValid()) {
     std::ostringstream oss;
     oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
@@ -1088,14 +1083,14 @@
   }
 }
 
-class MarkCodeVisitor FINAL : public StackVisitor {
+class MarkCodeVisitor final : public StackVisitor {
  public:
   MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
       : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
         code_cache_(code_cache_in),
         bitmap_(code_cache_->GetLiveBitmap()) {}
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
     if (method_header == nullptr) {
       return true;
@@ -1113,12 +1108,12 @@
   CodeCacheBitmap* const bitmap_;
 };
 
-class MarkCodeClosure FINAL : public Closure {
+class MarkCodeClosure final : public Closure {
  public:
   MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
       : code_cache_(code_cache), barrier_(barrier) {}
 
-  void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
     ScopedTrace trace(__PRETTY_FUNCTION__);
     DCHECK(thread == Thread::Current() || thread->IsSuspended());
     MarkCodeVisitor visitor(thread, code_cache_);
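
The MemMap hunks earlier in this file's diff drop the ashmem flag and thread a reservation through instead, but the overall shape stays the same: one mapping is created and its tail is remapped as the code cache with a different protection. A POSIX sketch of that idea only (not the ART MemMap API or its ashmem handling):

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Sketch: reserve one region, treat the front half as data and the back half as code,
// and flip the code half to read+execute before running anything from it.
struct ToyJitCache {
  uint8_t* data_begin = nullptr;
  uint8_t* code_begin = nullptr;
  size_t half_size = 0;

  bool Create(size_t capacity) {
    half_size = capacity / 2;
    void* base = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
      return false;
    }
    data_begin = static_cast<uint8_t*>(base);
    code_begin = data_begin + half_size;  // code half starts writable for emission
    return true;
  }

  bool MakeCodeExecutable() {
    return mprotect(code_begin, half_size, PROT_READ | PROT_EXEC) == 0;
  }
};
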
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 6ccda8b..d9ef922 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -255,7 +255,7 @@
         class_loaders_(class_loaders) {}
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     class_loaders_->push_back(hs_->NewHandle(class_loader));
   }
 
diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc
index 8424610..f695c8f 100644
--- a/runtime/jit/profiling_info_test.cc
+++ b/runtime/jit/profiling_info_test.cc
@@ -40,7 +40,7 @@
 
 class ProfileCompilationInfoTest : public CommonRuntimeTest {
  public:
-  void PostRuntimeCreate() OVERRIDE {
+  void PostRuntimeCreate() override {
     allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
   }
 
diff --git a/runtime/jni/java_vm_ext_test.cc b/runtime/jni/java_vm_ext_test.cc
index 74e4a30..4049c6e 100644
--- a/runtime/jni/java_vm_ext_test.cc
+++ b/runtime/jni/java_vm_ext_test.cc
@@ -34,7 +34,7 @@
   }
 
 
-  virtual void TearDown() OVERRIDE {
+  void TearDown() override {
     CommonRuntimeTest::TearDown();
   }
 
@@ -137,7 +137,7 @@
 
 class JavaVmExtStackTraceTest : public JavaVmExtTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     options->emplace_back("-XX:GlobalRefAllocStackTraceLimit=50000", nullptr);
   }
 };
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index a25049e..3040b90 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -84,7 +84,7 @@
     }
   }
 
-  virtual void TearDown() OVERRIDE {
+  void TearDown() override {
     CleanUpJniEnv();
     CommonCompilerTest::TearDown();
   }
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index c3e167c..811ee51 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -63,7 +63,7 @@
 using StringDexCacheType = std::atomic<StringDexCachePair>;
 
 // C++ mirror of java.lang.Class
-class MANAGED Class FINAL : public Object {
+class MANAGED Class final : public Object {
  public:
   // A magic value for reference_instance_offsets_. Ignore the bits and walk the super chain when
   // this is the value.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 87f4f0a..8401b66 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -141,7 +141,7 @@
 using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
 
 // C++ mirror of java.lang.DexCache.
-class MANAGED DexCache FINAL : public Object {
+class MANAGED DexCache final : public Object {
  public:
   // Size of java.lang.DexCache.class.
   static uint32_t ClassSize(PointerSize pointer_size);
@@ -157,12 +157,12 @@
                 "String dex cache size is not a power of 2.");
 
   // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
-  static constexpr size_t kDexCacheFieldCacheSize = 512;
+  static constexpr size_t kDexCacheFieldCacheSize = 1024;
   static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
                 "Field dex cache size is not a power of 2.");
 
   // Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
-  static constexpr size_t kDexCacheMethodCacheSize = 512;
+  static constexpr size_t kDexCacheMethodCacheSize = 1024;
   static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
                 "Method dex cache size is not a power of 2.");
 
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 7a70cae..e9e7ca8 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,7 +34,7 @@
 
 class DexCacheMethodHandlesTest : public DexCacheTest {
  protected:
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     CommonRuntimeTest::SetUpRuntimeOptions(options);
   }
 };
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index d72c786..9e3c9af 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -23,7 +23,7 @@
 namespace art {
 namespace mirror {
 
-class MANAGED IfTable FINAL : public ObjectArray<Object> {
+class MANAGED IfTable final : public ObjectArray<Object> {
  public:
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
diff --git a/runtime/mirror/proxy.h b/runtime/mirror/proxy.h
index db511d6..7775de3 100644
--- a/runtime/mirror/proxy.h
+++ b/runtime/mirror/proxy.h
@@ -26,7 +26,7 @@
 namespace mirror {
 
 // C++ mirror of java.lang.reflect.Proxy.
-class MANAGED Proxy FINAL : public Object {
+class MANAGED Proxy final : public Object {
  private:
   HeapReference<Object> h_;
 
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 55a2ef0..37ac575 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -27,7 +27,7 @@
 namespace mirror {
 
 // C++ mirror of java.lang.StackTraceElement
-class MANAGED StackTraceElement FINAL : public Object {
+class MANAGED StackTraceElement final : public Object {
  public:
   String* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 0e2fc90..d08717c 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -40,7 +40,7 @@
 };
 
 // C++ mirror of java.lang.String
-class MANAGED String FINAL : public Object {
+class MANAGED String final : public Object {
  public:
   // Size of java.lang.String.class.
   static uint32_t ClassSize(PointerSize pointer_size);
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 56c953b..864e1ea 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -353,7 +353,7 @@
 //
 
 template <typename T>
-class JValueByteSwapper FINAL {
+class JValueByteSwapper final {
  public:
   static void ByteSwap(JValue* value);
   static void MaybeByteSwap(bool byte_swap, JValue* value) {
@@ -392,7 +392,7 @@
  public:
   explicit AtomicGetAccessor(JValue* result) : result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     StoreResult(atom->load(MO), result_);
   }
@@ -406,7 +406,7 @@
  public:
   explicit AtomicSetAccessor(T new_value) : new_value_(new_value) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     atom->store(new_value_, MO);
   }
@@ -431,7 +431,7 @@
   AtomicStrongCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
       : expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     bool success = atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
     StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -453,7 +453,7 @@
   AtomicStrongCompareAndExchangeAccessor(T expected_value, T desired_value, JValue* result)
       : expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
     StoreResult(expected_value_, result_);
@@ -475,7 +475,7 @@
   AtomicWeakCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
       : expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     bool success = atom->compare_exchange_weak(expected_value_, desired_value_, MOS, MOF);
     StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -496,7 +496,7 @@
  public:
   AtomicGetAndSetAccessor(T new_value, JValue* result) : new_value_(new_value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     T old_value = atom->exchange(new_value_, MO);
     StoreResult(old_value, result_);
@@ -540,7 +540,7 @@
  public:
   AtomicGetAndAddAccessor(T addend, JValue* result) : addend_(addend), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     constexpr bool kIsFloatingPoint = std::is_floating_point<T>::value;
     T old_value = AtomicGetAndAddOperator<T, kIsFloatingPoint, MO>::Apply(addr, addend_);
     StoreResult(old_value, result_);
@@ -562,7 +562,7 @@
  public:
   AtomicGetAndAddWithByteSwapAccessor(T value, JValue* result) : value_(value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* const atom = reinterpret_cast<std::atomic<T>*>(addr);
     T current_value = atom->load(std::memory_order_relaxed);
     T sum;
@@ -591,7 +591,7 @@
  public:
   AtomicGetAndBitwiseOrAccessor(T value, JValue* result) : value_(value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     T old_value = atom->fetch_or(value_, MO);
     StoreResult(old_value, result_);
@@ -610,7 +610,7 @@
  public:
   AtomicGetAndBitwiseAndAccessor(T value, JValue* result) : value_(value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     T old_value = atom->fetch_and(value_, MO);
     StoreResult(old_value, result_);
@@ -630,7 +630,7 @@
  public:
   AtomicGetAndBitwiseXorAccessor(T value, JValue* result) : value_(value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     T old_value = atom->fetch_xor(value_, MO);
     StoreResult(old_value, result_);
@@ -679,7 +679,7 @@
   explicit TypeAdaptorAccessor(Object::Accessor<U>* inner_accessor)
       : inner_accessor_(inner_accessor) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     static_assert(sizeof(T) == sizeof(U), "bad conversion");
     inner_accessor_->Access(reinterpret_cast<U*>(addr));
   }
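
The accessors above wrap each VarHandle operation as a small functor over std::atomic<T>, with the memory orders carried as template parameters. A reduced standalone version of the compare-and-set flavor (assumed shape, not the ART class hierarchy):

#include <atomic>

// Sketch: a strong compare-and-set accessor over raw storage, reporting success.
template <typename T,
          std::memory_order kSuccessOrder = std::memory_order_seq_cst,
          std::memory_order kFailureOrder = std::memory_order_seq_cst>
class CompareAndSetAccessor {
 public:
  CompareAndSetAccessor(T expected, T desired, bool* result)
      : expected_(expected), desired_(desired), result_(result) {}

  void Access(T* addr) {
    // Reinterpret the plain storage as an atomic, exactly as the accessors above do.
    std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
    *result_ = atom->compare_exchange_strong(expected_, desired_,
                                             kSuccessOrder, kFailureOrder);
  }

 private:
  T expected_;  // updated with the observed value on failure
  T desired_;
  bool* const result_;
};

The other flavors in the diff differ only in which atomic read-modify-write they issue (exchange, fetch_add, fetch_or, and so on) and in how the old value is stored back into the JValue result.
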
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 7eabe1d..02aa1a8 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -184,7 +184,7 @@
     if (locking_method_ != nullptr && UNLIKELY(locking_method_->IsProxyMethod())) {
       // Grab another frame. Proxy methods are not helpful for lock profiling. This should be rare
       // enough that it's OK to walk the stack twice.
-      struct NextMethodVisitor FINAL : public StackVisitor {
+      struct NextMethodVisitor final : public StackVisitor {
         explicit NextMethodVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
             : StackVisitor(thread,
                            nullptr,
@@ -193,7 +193,7 @@
               count_(0),
               method_(nullptr),
               dex_pc_(0) {}
-        bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+        bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
           ArtMethod* m = GetMethod();
           if (m->IsRuntimeMethod()) {
             // Continue if this is a runtime method.
@@ -271,7 +271,7 @@
 
 // Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
 
-struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
+struct NthCallerWithDexPcVisitor final : public StackVisitor {
   explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -279,7 +279,7 @@
         dex_pc_(0),
         current_frame_number_(0),
         wanted_frame_number_(frame) {}
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m == nullptr || m->IsRuntimeMethod()) {
       // Runtime method, upcall, or resolution issue. Skip.
@@ -514,7 +514,7 @@
                 if (should_dump_stacks) {
                   // Very long contention. Dump stacks.
                   struct CollectStackTrace : public Closure {
-                    void Run(art::Thread* thread) OVERRIDE
+                    void Run(art::Thread* thread) override
                         REQUIRES_SHARED(art::Locks::mutator_lock_) {
                       thread->DumpJavaStack(oss);
                     }
@@ -1577,7 +1577,7 @@
  public:
   MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
 
-  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+  mirror::Object* IsMarked(mirror::Object* object) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Monitor::Deflate(self_, object)) {
       DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
diff --git a/runtime/monitor_objects_stack_visitor.h b/runtime/monitor_objects_stack_visitor.h
index 5c962c3..c943402 100644
--- a/runtime/monitor_objects_stack_visitor.h
+++ b/runtime/monitor_objects_stack_visitor.h
@@ -54,7 +54,7 @@
     kEndStackWalk,
   };
 
-  bool VisitFrame() FINAL REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() final REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
       return true;
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index bff8d76..c88748f 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -34,7 +34,7 @@
 
 class MonitorTest : public CommonRuntimeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     // Use a smaller heap
     SetUpRuntimeOptionsForFillHeap(options);
 
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 7ac4086..6f98a6d 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -207,7 +207,7 @@
    public:
     explicit DumpClassVisitor(int dump_flags) : flags_(dump_flags) {}
 
-    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       klass->DumpClass(LOG_STREAM(ERROR), flags_);
       return true;
     }
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 9b3fd16..0e61940 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -332,7 +332,7 @@
   explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     ObjPtr<mirror::String> string = root->AsString();
     table_->operator[](string->ToModifiedUtf8()) = string;
   }
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 5b47eac..72dae47 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -120,9 +120,9 @@
         : StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
           class_set_(class_set) {}
 
-    ~NonDebuggableStacksVisitor() OVERRIDE {}
+    ~NonDebuggableStacksVisitor() override {}
 
-    bool VisitFrame() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES(Locks::mutator_lock_) {
       if (GetMethod()->IsRuntimeMethod()) {
         return true;
       }
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 9c777cc..496a6f3 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -21,22 +21,22 @@
 
 namespace art {
 
-class NoopCompilerCallbacks FINAL : public CompilerCallbacks {
+class NoopCompilerCallbacks final : public CompilerCallbacks {
  public:
   NoopCompilerCallbacks() : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp) {}
   ~NoopCompilerCallbacks() {}
 
-  void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
+  void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {
   }
 
-  void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
+  void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
 
   // This is only used by compilers which need to be able to run without relocation even when it
   // would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
   // to disable the relocation since both deal with writing out the images directly.
-  bool IsRelocationPossible() OVERRIDE { return false; }
+  bool IsRelocationPossible() override { return false; }
 
-  verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return nullptr; }
+  verifier::VerifierDeps* GetVerifierDeps() const override { return nullptr; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NoopCompilerCallbacks);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index c7daef8..8f84a4b 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -102,12 +102,12 @@
                                   const std::string& elf_filename,
                                   const std::string& location,
                                   uint8_t* requested_base,
-                                  uint8_t* oat_file_begin,
                                   bool writable,
                                   bool executable,
                                   bool low_4gb,
                                   const char* abs_dex_location,
-                                  std::string* error_msg);
+                                  /*inout*/MemMap* reservation,  // Where to load if not null.
+                                  /*out*/std::string* error_msg);
 
   template <typename kOatFileBaseSubType>
   static OatFileBase* OpenOatFile(int zip_fd,
@@ -116,12 +116,12 @@
                                   const std::string& vdex_filename,
                                   const std::string& oat_filename,
                                   uint8_t* requested_base,
-                                  uint8_t* oat_file_begin,
                                   bool writable,
                                   bool executable,
                                   bool low_4gb,
                                   const char* abs_dex_location,
-                                  std::string* error_msg);
+                                  /*inout*/MemMap* reservation,  // Where to load if not null.
+                                  /*out*/std::string* error_msg);
 
  protected:
   OatFileBase(const std::string& filename, bool executable) : OatFile(filename, executable) {}
@@ -143,18 +143,18 @@
                 std::string* error_msg);
 
   virtual bool Load(const std::string& elf_filename,
-                    uint8_t* oat_file_begin,
                     bool writable,
                     bool executable,
                     bool low_4gb,
-                    std::string* error_msg) = 0;
+                    /*inout*/MemMap* reservation,  // Where to load if not null.
+                    /*out*/std::string* error_msg) = 0;
 
   virtual bool Load(int oat_fd,
-                    uint8_t* oat_file_begin,
                     bool writable,
                     bool executable,
                     bool low_4gb,
-                    std::string* error_msg) = 0;
+                    /*inout*/MemMap* reservation,  // Where to load if not null.
+                    /*out*/std::string* error_msg) = 0;
 
   bool ComputeFields(uint8_t* requested_base,
                      const std::string& file_path,
@@ -188,21 +188,21 @@
                                       const std::string& elf_filename,
                                       const std::string& location,
                                       uint8_t* requested_base,
-                                      uint8_t* oat_file_begin,
                                       bool writable,
                                       bool executable,
                                       bool low_4gb,
                                       const char* abs_dex_location,
-                                      std::string* error_msg) {
+                                      /*inout*/MemMap* reservation,
+                                      /*out*/std::string* error_msg) {
   std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(location, executable));
 
   ret->PreLoad();
 
   if (!ret->Load(elf_filename,
-                 oat_file_begin,
                  writable,
                  executable,
                  low_4gb,
+                 reservation,
                  error_msg)) {
     return nullptr;
   }
@@ -231,19 +231,19 @@
                                       const std::string& vdex_location,
                                       const std::string& oat_location,
                                       uint8_t* requested_base,
-                                      uint8_t* oat_file_begin,
                                       bool writable,
                                       bool executable,
                                       bool low_4gb,
                                       const char* abs_dex_location,
-                                      std::string* error_msg) {
+                                      /*inout*/MemMap* reservation,
+                                      /*out*/std::string* error_msg) {
   std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(oat_location, executable));
 
   if (!ret->Load(oat_fd,
-                 oat_file_begin,
                  writable,
                  executable,
                  low_4gb,
+                 reservation,
                  error_msg)) {
     return nullptr;
   }
@@ -889,7 +889,7 @@
 // OatFile via dlopen //
 ////////////////////////
 
-class DlOpenOatFile FINAL : public OatFileBase {
+class DlOpenOatFile final : public OatFileBase {
  public:
   DlOpenOatFile(const std::string& filename, bool executable)
       : OatFileBase(filename, executable),
@@ -911,7 +911,7 @@
 
  protected:
   const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
-                                          std::string* error_msg) const OVERRIDE {
+                                          std::string* error_msg) const override {
     const uint8_t* ptr =
         reinterpret_cast<const uint8_t*>(dlsym(dlopen_handle_, symbol_name.c_str()));
     if (ptr == nullptr) {
@@ -920,26 +920,31 @@
     return ptr;
   }
 
-  void PreLoad() OVERRIDE;
+  void PreLoad() override;
 
   bool Load(const std::string& elf_filename,
-            uint8_t* oat_file_begin,
             bool writable,
             bool executable,
             bool low_4gb,
-            std::string* error_msg) OVERRIDE;
+            /*inout*/MemMap* reservation,  // Where to load if not null.
+            /*out*/std::string* error_msg) override;
 
-  bool Load(int, uint8_t*, bool, bool, bool, std::string*) {
+  bool Load(int oat_fd ATTRIBUTE_UNUSED,
+            bool writable ATTRIBUTE_UNUSED,
+            bool executable ATTRIBUTE_UNUSED,
+            bool low_4gb ATTRIBUTE_UNUSED,
+            /*inout*/MemMap* reservation ATTRIBUTE_UNUSED,
+            /*out*/std::string* error_msg ATTRIBUTE_UNUSED) override {
     return false;
   }
 
   // Ask the linker where it mmaped the file and notify our mmap wrapper of the regions.
-  void PreSetup(const std::string& elf_filename) OVERRIDE;
+  void PreSetup(const std::string& elf_filename) override;
 
  private:
   bool Dlopen(const std::string& elf_filename,
-              uint8_t* oat_file_begin,
-              std::string* error_msg);
+              /*inout*/MemMap* reservation,  // Where to load if not null.
+              /*out*/std::string* error_msg);
 
   // On the host, if the same library is loaded again with dlopen the same
   // file handle is returned. This differs from the behavior of dlopen on the
@@ -952,12 +957,13 @@
   // Guarded by host_dlopen_handles_lock_;
   static std::unordered_set<void*> host_dlopen_handles_;
 
+  // Reservation and dummy memory map objects corresponding to the regions mapped by dlopen.
+  // Note: Must be destroyed after dlclose() as it can hold the owning reservation.
+  std::vector<MemMap> dlopen_mmaps_;
+
   // dlopen handle during runtime.
   void* dlopen_handle_;  // TODO: Unique_ptr with custom deleter.
 
-  // Dummy memory map objects corresponding to the regions mapped by dlopen.
-  std::vector<MemMap> dlopen_mmaps_;
-
   // The number of shared objects the linker told us about before loading. Used to
   // (optimistically) optimize the PreSetup stage (see comment there).
   size_t shared_objects_before_;
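
For context on the member reordering above: C++ destroys non-static data members in reverse order of declaration, so after this change the dlopen_mmaps_ member (which may hold the owning reservation) outlives the dlopen_handle_ member during destruction. A minimal standalone sketch of that rule, with hypothetical types:

    #include <cstdio>

    struct Tracer {
      explicit Tracer(const char* n) : name(n) {}
      ~Tracer() { std::printf("destroying %s\n", name); }
      const char* name;
    };

    struct OatFileLike {
      Tracer mmaps{"dlopen_mmaps_"};    // Declared first, destroyed last.
      Tracer handle{"dlopen_handle_"};  // Declared last, destroyed first.
    };

    int main() {
      OatFileLike f;
      // Prints "destroying dlopen_handle_" and then "destroying dlopen_mmaps_".
    }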
@@ -975,9 +981,9 @@
 #else
   // Count the entries in dl_iterate_phdr we get at this point in time.
   struct dl_iterate_context {
-    static int callback(struct dl_phdr_info *info ATTRIBUTE_UNUSED,
+    static int callback(dl_phdr_info* info ATTRIBUTE_UNUSED,
                         size_t size ATTRIBUTE_UNUSED,
-                        void *data) {
+                        void* data) {
       reinterpret_cast<dl_iterate_context*>(data)->count++;
       return 0;  // Continue iteration.
     }
@@ -990,11 +996,11 @@
 }
 
 bool DlOpenOatFile::Load(const std::string& elf_filename,
-                         uint8_t* oat_file_begin,
                          bool writable,
                          bool executable,
                          bool low_4gb,
-                         std::string* error_msg) {
+                         /*inout*/MemMap* reservation,  // Where to load if not null.
+                         /*out*/std::string* error_msg) {
   // Use dlopen only when flagged to do so, and when it's OK to load things executable.
   // TODO: Also try when not executable? The issue here could be re-mapping as writable (as
   //       !executable is a sign that we may want to patch), which may not be allowed for
@@ -1027,19 +1033,19 @@
     }
   }
 
-  bool success = Dlopen(elf_filename, oat_file_begin, error_msg);
+  bool success = Dlopen(elf_filename, reservation, error_msg);
   DCHECK(dlopen_handle_ != nullptr || !success);
 
   return success;
 }
 
 bool DlOpenOatFile::Dlopen(const std::string& elf_filename,
-                           uint8_t* oat_file_begin,
-                           std::string* error_msg) {
+                           /*inout*/MemMap* reservation,
+                           /*out*/std::string* error_msg) {
 #ifdef __APPLE__
   // The dl_iterate_phdr syscall is missing.  There is similar API on OSX,
   // but let's fallback to the custom loading code for the time being.
-  UNUSED(elf_filename, oat_file_begin);
+  UNUSED(elf_filename, reservation);
   *error_msg = "Dlopen unsupported on Mac.";
   return false;
 #else
@@ -1056,15 +1062,85 @@
                                                                 //    times).
                     ANDROID_DLEXT_FORCE_FIXED_VADDR;            // Take a non-zero vaddr as absolute
                                                                 //   (non-pic boot image).
-    if (oat_file_begin != nullptr) {                            //
-      extinfo.flags |= ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS;     // Use the requested addr if
-      extinfo.reserved_addr = oat_file_begin;                   // vaddr = 0.
-    }                                                           //   (pic boot image).
+    if (reservation != nullptr) {
+      if (!reservation->IsValid()) {
+        *error_msg = StringPrintf("Invalid reservation for %s", elf_filename.c_str());
+        return false;
+      }
+      extinfo.flags |= ANDROID_DLEXT_RESERVED_ADDRESS;          // Use the reserved memory range.
+      extinfo.reserved_addr = reservation->Begin();
+      extinfo.reserved_size = reservation->Size();
+    }
     dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo);
+    if (reservation != nullptr && dlopen_handle_ != nullptr) {
+      // Find used pages from the reservation.
+      struct dl_iterate_context {
+        static int callback(dl_phdr_info* info, size_t size ATTRIBUTE_UNUSED, void* data) {
+          auto* context = reinterpret_cast<dl_iterate_context*>(data);
+          static_assert(std::is_same<Elf32_Half, Elf64_Half>::value, "Half must match");
+          using Elf_Half = Elf64_Half;
+
+          // See whether this callback corresponds to the file which we have just loaded.
+          uint8_t* reservation_begin = context->reservation->Begin();
+          bool contained_in_reservation = false;
+          for (Elf_Half i = 0; i < info->dlpi_phnum; i++) {
+            if (info->dlpi_phdr[i].p_type == PT_LOAD) {
+              uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
+                  info->dlpi_phdr[i].p_vaddr);
+              size_t memsz = info->dlpi_phdr[i].p_memsz;
+              size_t offset = static_cast<size_t>(vaddr - reservation_begin);
+              if (offset < context->reservation->Size()) {
+                contained_in_reservation = true;
+                DCHECK_LE(memsz, context->reservation->Size() - offset);
+              } else if (vaddr < reservation_begin) {
+                // Check that there's no overlap with the reservation.
+                DCHECK_LE(memsz, static_cast<size_t>(reservation_begin - vaddr));
+              }
+              break;  // It is sufficient to check the first PT_LOAD header.
+            }
+          }
+
+          if (contained_in_reservation) {
+            for (Elf_Half i = 0; i < info->dlpi_phnum; i++) {
+              if (info->dlpi_phdr[i].p_type == PT_LOAD) {
+                uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
+                    info->dlpi_phdr[i].p_vaddr);
+                size_t memsz = info->dlpi_phdr[i].p_memsz;
+                size_t offset = static_cast<size_t>(vaddr - reservation_begin);
+                DCHECK_LT(offset, context->reservation->Size());
+                DCHECK_LE(memsz, context->reservation->Size() - offset);
+                context->max_size = std::max(context->max_size, offset + memsz);
+              }
+            }
+
+            return 1;  // Stop iteration and return 1 from dl_iterate_phdr.
+          }
+          return 0;  // Continue iteration and return 0 from dl_iterate_phdr when finished.
+        }
+
+        const MemMap* const reservation;
+        size_t max_size = 0u;
+      };
+      dl_iterate_context context = { reservation };
+
+      if (dl_iterate_phdr(dl_iterate_context::callback, &context) == 0) {
+        LOG(FATAL) << "Could not find the shared object mmapped to the reservation.";
+        UNREACHABLE();
+      }
+
+      // Take ownership of the memory used by the shared object. dlopen() does not assume
+      // full ownership of this memory and dlclose() shall just remap it as zero pages with
+      // PROT_NONE. We need to unmap the memory when destroying this oat file.
+      dlopen_mmaps_.push_back(reservation->TakeReservedMemory(context.max_size));
+    }
 #else
-    UNUSED(oat_file_begin);
     static_assert(!kIsTargetBuild || kIsTargetLinux || kIsTargetFuchsia,
                   "host_dlopen_handles_ will leak handles");
+    if (reservation != nullptr) {
+      *error_msg = StringPrintf("dlopen() into reserved memory is unsupported on host for '%s'.",
+                                elf_filename.c_str());
+      return false;
+    }
     MutexLock mu(Thread::Current(), *Locks::host_dlopen_handles_lock_);
     dlopen_handle_ = dlopen(absolute_path.get(), RTLD_NOW);
     if (dlopen_handle_ != nullptr) {
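
The callback added above walks dl_iterate_phdr to find the object that the linker placed inside the reservation and the number of bytes it actually occupies (the largest PT_LOAD end offset), so TakeReservedMemory() can hand exactly that much of the reservation over to dlopen_mmaps_. A standalone sketch of the underlying libc API, outside ART, printing each loaded object's PT_LOAD extent:

    #include <elf.h>       // PT_LOAD
    #include <link.h>      // dl_iterate_phdr, dl_phdr_info, ElfW
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // For every loaded ELF object, report the load bias and the highest end
    // offset of its PT_LOAD segments (its memory footprint relative to the bias).
    static int PrintLoadExtent(dl_phdr_info* info, size_t /* size */, void* /* data */) {
      uintptr_t max_end = 0;
      for (ElfW(Half) i = 0; i < info->dlpi_phnum; ++i) {
        if (info->dlpi_phdr[i].p_type == PT_LOAD) {
          uintptr_t end = info->dlpi_phdr[i].p_vaddr + info->dlpi_phdr[i].p_memsz;
          max_end = std::max(max_end, end);
        }
      }
      std::printf("%s: bias %p, PT_LOAD extent %zu bytes\n",
                  info->dlpi_name, reinterpret_cast<void*>(info->dlpi_addr),
                  static_cast<size_t>(max_end));
      return 0;  // Non-zero would stop the walk, as the ART callback does on a match.
    }

    int main() {
      dl_iterate_phdr(PrintLoadExtent, nullptr);
    }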
@@ -1092,8 +1168,11 @@
   UNREACHABLE();
 #else
   struct dl_iterate_context {
-    static int callback(struct dl_phdr_info *info, size_t /* size */, void *data) {
+    static int callback(dl_phdr_info* info, size_t size ATTRIBUTE_UNUSED, void* data) {
       auto* context = reinterpret_cast<dl_iterate_context*>(data);
+      static_assert(std::is_same<Elf32_Half, Elf64_Half>::value, "Half must match");
+      using Elf_Half = Elf64_Half;
+
       context->shared_objects_seen++;
       if (context->shared_objects_seen < context->shared_objects_before) {
         // We haven't been called yet for anything we haven't seen before. Just continue.
@@ -1104,7 +1183,7 @@
 
       // See whether this callback corresponds to the file which we have just loaded.
       bool contains_begin = false;
-      for (int i = 0; i < info->dlpi_phnum; i++) {
+      for (Elf_Half i = 0; i < info->dlpi_phnum; i++) {
         if (info->dlpi_phdr[i].p_type == PT_LOAD) {
           uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
               info->dlpi_phdr[i].p_vaddr);
@@ -1117,7 +1196,7 @@
       }
       // Add dummy mmaps for this file.
       if (contains_begin) {
-        for (int i = 0; i < info->dlpi_phnum; i++) {
+        for (Elf_Half i = 0; i < info->dlpi_phnum; i++) {
           if (info->dlpi_phdr[i].p_type == PT_LOAD) {
             uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
                 info->dlpi_phdr[i].p_vaddr);
@@ -1156,20 +1235,19 @@
 // OatFile via our own ElfFile implementation //
 ////////////////////////////////////////////////
 
-class ElfOatFile FINAL : public OatFileBase {
+class ElfOatFile final : public OatFileBase {
  public:
   ElfOatFile(const std::string& filename, bool executable) : OatFileBase(filename, executable) {}
 
   static ElfOatFile* OpenElfFile(int zip_fd,
                                  File* file,
                                  const std::string& location,
-                                 uint8_t* requested_base,
-                                 uint8_t* oat_file_begin,  // Override base if not null
                                  bool writable,
                                  bool executable,
                                  bool low_4gb,
                                  const char* abs_dex_location,
-                                 std::string* error_msg);
+                                 /*inout*/MemMap* reservation,  // Where to load if not null.
+                                 /*out*/std::string* error_msg);
 
   bool InitializeFromElfFile(int zip_fd,
                              ElfFile* elf_file,
@@ -1179,7 +1257,7 @@
 
  protected:
   const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
-                                          std::string* error_msg) const OVERRIDE {
+                                          std::string* error_msg) const override {
     const uint8_t* ptr = elf_file_->FindDynamicSymbolAddress(symbol_name);
     if (ptr == nullptr) {
       *error_msg = "(Internal implementation could not find symbol)";
@@ -1187,33 +1265,33 @@
     return ptr;
   }
 
-  void PreLoad() OVERRIDE {
+  void PreLoad() override {
   }
 
   bool Load(const std::string& elf_filename,
-            uint8_t* oat_file_begin,  // Override where the file is loaded to if not null
             bool writable,
             bool executable,
             bool low_4gb,
-            std::string* error_msg) OVERRIDE;
+            /*inout*/MemMap* reservation,  // Where to load if not null.
+            /*out*/std::string* error_msg) override;
 
   bool Load(int oat_fd,
-            uint8_t* oat_file_begin,  // Override where the file is loaded to if not null
             bool writable,
             bool executable,
             bool low_4gb,
-            std::string* error_msg) OVERRIDE;
+            /*inout*/MemMap* reservation,  // Where to load if not null.
+            /*out*/std::string* error_msg) override;
 
-  void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) OVERRIDE {
+  void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) override {
   }
 
  private:
   bool ElfFileOpen(File* file,
-                   uint8_t* oat_file_begin,  // Override where the file is loaded to if not null
                    bool writable,
                    bool executable,
                    bool low_4gb,
-                   std::string* error_msg);
+                   /*inout*/MemMap* reservation,  // Where to load if not null.
+                   /*out*/std::string* error_msg);
 
  private:
   // Backing memory map for oat file during cross compilation.
@@ -1225,20 +1303,19 @@
 ElfOatFile* ElfOatFile::OpenElfFile(int zip_fd,
                                     File* file,
                                     const std::string& location,
-                                    uint8_t* requested_base,
-                                    uint8_t* oat_file_begin,  // Override base if not null
                                     bool writable,
                                     bool executable,
                                     bool low_4gb,
                                     const char* abs_dex_location,
-                                    std::string* error_msg) {
+                                    /*inout*/MemMap* reservation,  // Where to load if not null.
+                                    /*out*/std::string* error_msg) {
   ScopedTrace trace("Open elf file " + location);
   std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, executable));
   bool success = oat_file->ElfFileOpen(file,
-                                       oat_file_begin,
                                        writable,
                                        low_4gb,
                                        executable,
+                                       reservation,
                                        error_msg);
   if (!success) {
     CHECK(!error_msg->empty());
@@ -1246,7 +1323,7 @@
   }
 
   // Complete the setup.
-  if (!oat_file->ComputeFields(requested_base, file->GetPath(), error_msg)) {
+  if (!oat_file->ComputeFields(/* requested_base */ nullptr, file->GetPath(), error_msg)) {
     return nullptr;
   }
 
@@ -1279,11 +1356,11 @@
 }
 
 bool ElfOatFile::Load(const std::string& elf_filename,
-                      uint8_t* oat_file_begin,  // Override where the file is loaded to if not null
                       bool writable,
                       bool executable,
                       bool low_4gb,
-                      std::string* error_msg) {
+                      /*inout*/MemMap* reservation,
+                      /*out*/std::string* error_msg) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
   std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
   if (file == nullptr) {
@@ -1291,57 +1368,56 @@
     return false;
   }
   return ElfOatFile::ElfFileOpen(file.get(),
-                                 oat_file_begin,
                                  writable,
                                  executable,
                                  low_4gb,
+                                 reservation,
                                  error_msg);
 }
 
 bool ElfOatFile::Load(int oat_fd,
-                      uint8_t* oat_file_begin,  // Override where the file is loaded to if not null
                       bool writable,
                       bool executable,
                       bool low_4gb,
-                      std::string* error_msg) {
+                      /*inout*/MemMap* reservation,
+                      /*out*/std::string* error_msg) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
   if (oat_fd != -1) {
-    std::unique_ptr<File> file = std::make_unique<File>(oat_fd, false);
-    file->DisableAutoClose();
+    int duped_fd = DupCloexec(oat_fd);
+    std::unique_ptr<File> file = std::make_unique<File>(duped_fd, false);
     if (file == nullptr) {
       *error_msg = StringPrintf("Failed to open oat filename for reading: %s",
                                 strerror(errno));
       return false;
     }
     return ElfOatFile::ElfFileOpen(file.get(),
-                                   oat_file_begin,
                                    writable,
                                    executable,
                                    low_4gb,
+                                   reservation,
                                    error_msg);
   }
   return false;
 }
 
 bool ElfOatFile::ElfFileOpen(File* file,
-                             uint8_t* oat_file_begin,
                              bool writable,
                              bool executable,
                              bool low_4gb,
-                             std::string* error_msg) {
+                             /*inout*/MemMap* reservation,
+                             /*out*/std::string* error_msg) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
   // TODO: rename requested_base to oat_data_begin
   elf_file_.reset(ElfFile::Open(file,
                                 writable,
                                 /*program_header_only*/true,
                                 low_4gb,
-                                error_msg,
-                                oat_file_begin));
+                                error_msg));
   if (elf_file_ == nullptr) {
     DCHECK(!error_msg->empty());
     return false;
   }
-  bool loaded = elf_file_->Load(file, executable, low_4gb, error_msg);
+  bool loaded = elf_file_->Load(file, executable, low_4gb, reservation, error_msg);
   DCHECK(loaded || !error_msg->empty());
   return loaded;
 }
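
The fd handling in the hunk above changes from borrowing the caller's descriptor (and disabling auto-close) to duplicating it with DupCloexec, so the temporary File owns a descriptor it may close without disturbing the caller's. DupCloexec is an ART helper; a standalone sketch of the plain POSIX equivalent, with a hypothetical name:

    #include <fcntl.h>
    #include <unistd.h>

    // Duplicate fd with the close-on-exec flag set on the copy; the original
    // descriptor stays valid and still belongs to the caller.
    int DupWithCloexec(int fd) {
      return fcntl(fd, F_DUPFD_CLOEXEC, /* lowest acceptable fd */ 0);
    }

    int main() {
      int copy = DupWithCloexec(STDIN_FILENO);
      if (copy >= 0) {
        close(copy);  // Closing the copy does not affect the original descriptor.
      }
    }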
@@ -1392,11 +1468,11 @@
                        const std::string& oat_filename,
                        const std::string& oat_location,
                        uint8_t* requested_base,
-                       uint8_t* oat_file_begin,
                        bool executable,
                        bool low_4gb,
                        const char* abs_dex_location,
-                       std::string* error_msg) {
+                       /*inout*/MemMap* reservation,
+                       /*out*/std::string* error_msg) {
   ScopedTrace trace("Open oat file " + oat_location);
   CHECK(!oat_filename.empty()) << oat_location;
   CheckLocation(oat_location);
@@ -1419,11 +1495,11 @@
                                                                  oat_filename,
                                                                  oat_location,
                                                                  requested_base,
-                                                                 oat_file_begin,
                                                                  false /* writable */,
                                                                  executable,
                                                                  low_4gb,
                                                                  abs_dex_location,
+                                                                 reservation,
                                                                  error_msg);
   if (with_dlopen != nullptr) {
     return with_dlopen;
@@ -1449,11 +1525,11 @@
                                                                 oat_filename,
                                                                 oat_location,
                                                                 requested_base,
-                                                                oat_file_begin,
                                                                 false /* writable */,
                                                                 executable,
                                                                 low_4gb,
                                                                 abs_dex_location,
+                                                                reservation,
                                                                 error_msg);
   return with_internal;
 }
@@ -1463,11 +1539,11 @@
                        int oat_fd,
                        const std::string& oat_location,
                        uint8_t* requested_base,
-                       uint8_t* oat_file_begin,
                        bool executable,
                        bool low_4gb,
                        const char* abs_dex_location,
-                       std::string* error_msg) {
+                       /*inout*/MemMap* reservation,
+                       /*out*/std::string* error_msg) {
   CHECK(!oat_location.empty()) << oat_location;
 
   std::string vdex_location = GetVdexFilename(oat_location);
@@ -1478,11 +1554,11 @@
                                                                 vdex_location,
                                                                 oat_location,
                                                                 requested_base,
-                                                                oat_file_begin,
                                                                 false /* writable */,
                                                                 executable,
                                                                 low_4gb,
                                                                 abs_dex_location,
+                                                                reservation,
                                                                 error_msg);
   return with_internal;
 }
@@ -1496,12 +1572,11 @@
   return ElfOatFile::OpenElfFile(zip_fd,
                                  file,
                                  location,
-                                 nullptr,
-                                 nullptr,
-                                 true,
-                                 false,
+                                 /* writable */ true,
+                                 /* executable */ false,
                                  /*low_4gb*/false,
                                  abs_dex_location,
+                                 /* reservation */ nullptr,
                                  error_msg);
 }
 
@@ -1514,12 +1589,11 @@
   return ElfOatFile::OpenElfFile(zip_fd,
                                  file,
                                  location,
-                                 nullptr,
-                                 nullptr,
-                                 false,
-                                 false,
+                                 /* writable */ false,
+                                 /* executable */ false,
                                  /*low_4gb*/false,
                                  abs_dex_location,
+                                 /* reservation */ nullptr,
                                  error_msg);
 }
 
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 4ed26fa..f20c603 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -86,11 +86,11 @@
                        const std::string& filename,
                        const std::string& location,
                        uint8_t* requested_base,
-                       uint8_t* oat_file_begin,
                        bool executable,
                        bool low_4gb,
                        const char* abs_dex_location,
-                       std::string* error_msg);
+                       /*inout*/MemMap* reservation,  // Where to load if not null.
+                       /*out*/std::string* error_msg);
 
   // Similar to OatFile::Open(const std::string...), but accepts input vdex and
   // odex files as file descriptors. We also take zip_fd in case the vdex does not
@@ -100,11 +100,11 @@
                        int oat_fd,
                        const std::string& oat_location,
                        uint8_t* requested_base,
-                       uint8_t* oat_file_begin,
                        bool executable,
                        bool low_4gb,
                        const char* abs_dex_location,
-                       std::string* error_msg);
+                       /*inout*/MemMap* reservation,  // Where to load if not null.
+                       /*out*/std::string* error_msg);
 
   // Open an oat file from an already opened File.
   // Does not use dlopen underneath so cannot be used for runtime use
@@ -146,7 +146,7 @@
 
   const OatHeader& GetOatHeader() const;
 
-  class OatMethod FINAL {
+  class OatMethod final {
    public:
     void LinkMethod(ArtMethod* method) const;
 
@@ -201,7 +201,7 @@
     friend class OatClass;
   };
 
-  class OatClass FINAL {
+  class OatClass final {
    public:
     ClassStatus GetStatus() const {
       return status_;
@@ -444,7 +444,7 @@
 // support forward declarations of inner classes, and we want to
 // forward-declare OatDexFile so that we can store an opaque pointer to an
 // OatDexFile in DexFile.
-class OatDexFile FINAL {
+class OatDexFile final {
  public:
   // Opens the DexFile referred to by this OatDexFile from within the containing OatFile.
   std::unique_ptr<const DexFile> OpenDexFile(std::string* error_msg) const;
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index f7c74cc..4ed7e35 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -36,7 +36,6 @@
 #include "exec_utils.h"
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
-#include "hidden_api.h"
 #include "image.h"
 #include "oat.h"
 #include "runtime.h"
@@ -182,30 +181,6 @@
   return false;
 }
 
-bool OatFileAssistant::Lock(std::string* error_msg) {
-  CHECK(error_msg != nullptr);
-  CHECK(flock_.get() == nullptr) << "OatFileAssistant::Lock already acquired";
-
-  // Note the lock will only succeed for secondary dex files and in test
-  // environment.
-  //
-  // The lock *will fail* for all primary apks in a production environment.
-  // The app does not have permissions to create locks next to its dex location
-  // (be it system, data or vendor partition). We also cannot use the odex or
-  // oat location for the same reasoning.
-  //
-  // This is best effort and if it fails it's unlikely that we will be able
-  // to generate oat files anyway.
-  std::string lock_file_name = dex_location_ + "." + GetInstructionSetString(isa_) + ".flock";
-
-  flock_ = LockedFile::Open(lock_file_name.c_str(), error_msg);
-  if (flock_.get() == nullptr) {
-    unlink(lock_file_name.c_str());
-    return false;
-  }
-  return true;
-}
-
 int OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target,
                                       bool profile_changed,
                                       bool downgrade,
@@ -221,72 +196,10 @@
   return -dexopt_needed;
 }
 
-// Figure out the currently specified compile filter option in the runtime.
-// Returns true on success, false if the compiler filter is invalid, in which
-// case error_msg describes the problem.
-static bool GetRuntimeCompilerFilterOption(CompilerFilter::Filter* filter,
-                                           std::string* error_msg) {
-  CHECK(filter != nullptr);
-  CHECK(error_msg != nullptr);
-
-  *filter = OatFileAssistant::kDefaultCompilerFilterForDexLoading;
-  for (StringPiece option : Runtime::Current()->GetCompilerOptions()) {
-    if (option.starts_with("--compiler-filter=")) {
-      const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
-      if (!CompilerFilter::ParseCompilerFilter(compiler_filter_string, filter)) {
-        *error_msg = std::string("Unknown --compiler-filter value: ")
-                   + std::string(compiler_filter_string);
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
 bool OatFileAssistant::IsUpToDate() {
   return GetBestInfo().Status() == kOatUpToDate;
 }
 
-OatFileAssistant::ResultOfAttemptToUpdate
-OatFileAssistant::MakeUpToDate(bool profile_changed,
-                               ClassLoaderContext* class_loader_context,
-                               std::string* error_msg) {
-  // The method doesn't use zip_fd_ and directly opens dex files at dex_locations_.
-  CHECK_EQ(-1, zip_fd_) << "MakeUpToDate should not be called with zip_fd";
-
-  CompilerFilter::Filter target;
-  if (!GetRuntimeCompilerFilterOption(&target, error_msg)) {
-    return kUpdateNotAttempted;
-  }
-
-  OatFileInfo& info = GetBestInfo();
-  // TODO(calin, jeffhao): the context should really be passed to GetDexOptNeeded: b/62269291.
-  // This is actually not trivial in the current logic as it will interact with the collision
-  // check:
-  //   - currently, if the context does not match but we have no collisions we still accept the
-  //     oat file.
-  //   - if GetDexOptNeeded would return kDex2OatFromScratch for a context mismatch and we make
-  //     the oat code up to date the collision check becomes useless.
-  //   - however, MakeUpToDate will not always succeed (e.g. for primary apks, or for dex files
-  //     loaded in other processes). So it boils down to how far we want to complicate
-  //     the logic in order to enable the use of oat files. Maybe it's time to try to simplify it.
-  switch (info.GetDexOptNeeded(
-        target, profile_changed, /*downgrade*/ false, class_loader_context)) {
-    case kNoDexOptNeeded:
-      return kUpdateSucceeded;
-
-    // TODO: For now, don't bother with all the different ways we can call
-    // dex2oat to generate the oat file. Always generate the oat file as if it
-    // were kDex2OatFromScratch.
-    case kDex2OatFromScratch:
-    case kDex2OatForBootImage:
-    case kDex2OatForRelocation:
-    case kDex2OatForFilter:
-      return GenerateOatFileNoChecks(info, target, class_loader_context, error_msg);
-  }
-  UNREACHABLE();
-}
-
 std::unique_ptr<OatFile> OatFileAssistant::GetBestOatFile() {
   return GetBestInfo().ReleaseFileForUse();
 }
@@ -615,243 +528,6 @@
   return true;
 }
 
-// Prepare a subcomponent of the odex directory.
-// (i.e. create and set the expected permissions on the path `dir`).
-static bool PrepareDirectory(const std::string& dir, std::string* error_msg) {
-  struct stat dir_stat;
-  if (TEMP_FAILURE_RETRY(stat(dir.c_str(), &dir_stat)) == 0) {
-    // The directory exists. Check if it is indeed a directory.
-    if (!S_ISDIR(dir_stat.st_mode)) {
-      *error_msg = dir + " is not a dir";
-      return false;
-    } else {
-      // The dir is already on disk.
-      return true;
-    }
-  }
-
-  // Failed to stat. We need to create the directory.
-  if (errno != ENOENT) {
-    *error_msg = "Could not stat isa dir " + dir + ":" + strerror(errno);
-    return false;
-  }
-
-  mode_t mode = S_IRWXU | S_IXGRP | S_IXOTH;
-  if (mkdir(dir.c_str(), mode) != 0) {
-    *error_msg = "Could not create dir " + dir + ":" + strerror(errno);
-    return false;
-  }
-  if (chmod(dir.c_str(), mode) != 0) {
-    *error_msg = "Could not create the oat dir " + dir + ":" + strerror(errno);
-    return false;
-  }
-  return true;
-}
-
-// Prepares the odex directory for the given dex location.
-static bool PrepareOdexDirectories(const std::string& dex_location,
-                                   const std::string& expected_odex_location,
-                                   InstructionSet isa,
-                                   std::string* error_msg) {
-  std::string actual_odex_location;
-  std::string oat_dir;
-  std::string isa_dir;
-  if (!DexLocationToOdexNames(
-        dex_location, isa, &actual_odex_location, &oat_dir, &isa_dir, error_msg)) {
-    return false;
-  }
-  DCHECK_EQ(expected_odex_location, actual_odex_location);
-
-  if (!PrepareDirectory(oat_dir, error_msg)) {
-    return false;
-  }
-  if (!PrepareDirectory(isa_dir, error_msg)) {
-    return false;
-  }
-  return true;
-}
-
-class Dex2oatFileWrapper {
- public:
-  explicit Dex2oatFileWrapper(File* file)
-      : file_(file),
-        unlink_file_at_destruction_(true) {
-  }
-
-  ~Dex2oatFileWrapper() {
-    if (unlink_file_at_destruction_ && (file_ != nullptr)) {
-      file_->Erase(/*unlink*/ true);
-    }
-  }
-
-  File* GetFile() { return file_.get(); }
-
-  void DisableUnlinkAtDestruction() {
-    unlink_file_at_destruction_ = false;
-  }
-
- private:
-  std::unique_ptr<File> file_;
-  bool unlink_file_at_destruction_;
-};
-
-OatFileAssistant::ResultOfAttemptToUpdate OatFileAssistant::GenerateOatFileNoChecks(
-      OatFileAssistant::OatFileInfo& info,
-      CompilerFilter::Filter filter,
-      const ClassLoaderContext* class_loader_context,
-      std::string* error_msg) {
-  CHECK(error_msg != nullptr);
-
-  Runtime* runtime = Runtime::Current();
-  if (!runtime->IsDex2OatEnabled()) {
-    *error_msg = "Generation of oat file for dex location " + dex_location_
-      + " not attempted because dex2oat is disabled.";
-    return kUpdateNotAttempted;
-  }
-
-  if (info.Filename() == nullptr) {
-    *error_msg = "Generation of oat file for dex location " + dex_location_
-      + " not attempted because the oat file name could not be determined.";
-    return kUpdateNotAttempted;
-  }
-  const std::string& oat_file_name = *info.Filename();
-  const std::string& vdex_file_name = GetVdexFilename(oat_file_name);
-
-  // dex2oat ignores missing dex files and doesn't report an error.
-  // Check explicitly here so we can detect the error properly.
-  // TODO: Why does dex2oat behave that way?
-  struct stat dex_path_stat;
-  if (TEMP_FAILURE_RETRY(stat(dex_location_.c_str(), &dex_path_stat)) != 0) {
-    *error_msg = "Could not access dex location " + dex_location_ + ":" + strerror(errno);
-    return kUpdateNotAttempted;
-  }
-
-  // If this is the odex location, we need to create the odex file layout (../oat/isa/..)
-  if (!info.IsOatLocation()) {
-    if (!PrepareOdexDirectories(dex_location_, oat_file_name, isa_, error_msg)) {
-      return kUpdateNotAttempted;
-    }
-  }
-
-  // Set the permissions for the oat and the vdex files.
-  // The user always gets read and write while the group and others propagate
-  // the reading access of the original dex file.
-  mode_t file_mode = S_IRUSR | S_IWUSR |
-      (dex_path_stat.st_mode & S_IRGRP) |
-      (dex_path_stat.st_mode & S_IROTH);
-
-  Dex2oatFileWrapper vdex_file_wrapper(OS::CreateEmptyFile(vdex_file_name.c_str()));
-  File* vdex_file = vdex_file_wrapper.GetFile();
-  if (vdex_file == nullptr) {
-    *error_msg = "Generation of oat file " + oat_file_name
-      + " not attempted because the vdex file " + vdex_file_name
-      + " could not be opened.";
-    return kUpdateNotAttempted;
-  }
-
-  if (fchmod(vdex_file->Fd(), file_mode) != 0) {
-    *error_msg = "Generation of oat file " + oat_file_name
-      + " not attempted because the vdex file " + vdex_file_name
-      + " could not be made world readable.";
-    return kUpdateNotAttempted;
-  }
-
-  Dex2oatFileWrapper oat_file_wrapper(OS::CreateEmptyFile(oat_file_name.c_str()));
-  File* oat_file = oat_file_wrapper.GetFile();
-  if (oat_file == nullptr) {
-    *error_msg = "Generation of oat file " + oat_file_name
-      + " not attempted because the oat file could not be created.";
-    return kUpdateNotAttempted;
-  }
-
-  if (fchmod(oat_file->Fd(), file_mode) != 0) {
-    *error_msg = "Generation of oat file " + oat_file_name
-      + " not attempted because the oat file could not be made world readable.";
-    return kUpdateNotAttempted;
-  }
-
-  std::vector<std::string> args;
-  args.push_back("--dex-file=" + dex_location_);
-  args.push_back("--output-vdex-fd=" + std::to_string(vdex_file->Fd()));
-  args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
-  args.push_back("--oat-location=" + oat_file_name);
-  args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
-  const std::string dex2oat_context = class_loader_context == nullptr
-        ? OatFile::kSpecialSharedLibrary
-        : class_loader_context->EncodeContextForDex2oat(/*base_dir*/ "");
-  args.push_back("--class-loader-context=" + dex2oat_context);
-
-  if (!Dex2Oat(args, error_msg)) {
-    return kUpdateFailed;
-  }
-
-  if (vdex_file->FlushCloseOrErase() != 0) {
-    *error_msg = "Unable to close vdex file " + vdex_file_name;
-    return kUpdateFailed;
-  }
-
-  if (oat_file->FlushCloseOrErase() != 0) {
-    *error_msg = "Unable to close oat file " + oat_file_name;
-    return kUpdateFailed;
-  }
-
-  // Mark that the odex file has changed and we should try to reload.
-  info.Reset();
-  // We have compiled successfully. Disable the auto-unlink.
-  vdex_file_wrapper.DisableUnlinkAtDestruction();
-  oat_file_wrapper.DisableUnlinkAtDestruction();
-
-  return kUpdateSucceeded;
-}
-
-bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args,
-                               std::string* error_msg) {
-  Runtime* runtime = Runtime::Current();
-  std::string image_location = ImageLocation();
-  if (image_location.empty()) {
-    *error_msg = "No image location found for Dex2Oat.";
-    return false;
-  }
-
-  std::vector<std::string> argv;
-  argv.push_back(runtime->GetCompilerExecutable());
-  if (runtime->IsJavaDebuggable()) {
-    argv.push_back("--debuggable");
-  }
-  runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
-  if (!runtime->IsVerificationEnabled()) {
-    argv.push_back("--compiler-filter=verify-none");
-  }
-
-  if (runtime->GetHiddenApiEnforcementPolicy() != hiddenapi::EnforcementPolicy::kNoChecks) {
-    argv.push_back("--runtime-arg");
-    argv.push_back("-Xhidden-api-checks");
-  }
-
-  if (runtime->MustRelocateIfPossible()) {
-    argv.push_back("--runtime-arg");
-    argv.push_back("-Xrelocate");
-  } else {
-    argv.push_back("--runtime-arg");
-    argv.push_back("-Xnorelocate");
-  }
-
-  if (!kIsTargetBuild) {
-    argv.push_back("--host");
-  }
-
-  argv.push_back("--boot-image=" + image_location);
-
-  std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
-  argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
-  argv.insert(argv.end(), args.begin(), args.end());
-
-  std::string command_line(android::base::Join(argv, ' '));
-  return Exec(argv, error_msg);
-}
-
 bool OatFileAssistant::DexLocationToOdexFilename(const std::string& location,
                                                  InstructionSet isa,
                                                  std::string* odex_filename,
@@ -885,16 +561,6 @@
   return GetDalvikCacheFilename(location.c_str(), cache_dir.c_str(), oat_filename, error_msg);
 }
 
-std::string OatFileAssistant::ImageLocation() {
-  Runtime* runtime = Runtime::Current();
-  const std::vector<gc::space::ImageSpace*>& image_spaces =
-      runtime->GetHeap()->GetBootImageSpaces();
-  if (image_spaces.empty()) {
-    return "";
-  }
-  return image_spaces[0]->GetImageLocation();
-}
-
 const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
   if (!required_dex_checksums_attempted_) {
     required_dex_checksums_attempted_ = true;
@@ -1165,22 +831,22 @@
                                     vdex_fd_,
                                     oat_fd_,
                                     filename_.c_str(),
-                                    nullptr,
-                                    nullptr,
+                                    /* requested_base */ nullptr,
                                     executable,
-                                    false /* low_4gb */,
+                                    /* low_4gb */ false,
                                     oat_file_assistant_->dex_location_.c_str(),
+                                    /* reservation */ nullptr,
                                     &error_msg));
         }
       } else {
         file_.reset(OatFile::Open(/* zip_fd */ -1,
                                   filename_.c_str(),
                                   filename_.c_str(),
-                                  nullptr,
-                                  nullptr,
+                                  /* requested_base */ nullptr,
                                   executable,
-                                  false /* low_4gb */,
+                                  /* low_4gb */ false,
                                   oat_file_assistant_->dex_location_.c_str(),
+                                  /* reservation */ nullptr,
                                   &error_msg));
       }
       if (file_.get() == nullptr) {
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index a6d0961..dbfbdf9 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -48,11 +48,6 @@
 // dex location is in the boot class path.
 class OatFileAssistant {
  public:
-  // The default compile filter to use when optimizing dex file at load time if they
-  // are out of date.
-  static const CompilerFilter::Filter kDefaultCompilerFilterForDexLoading =
-      CompilerFilter::kQuicken;
-
   enum DexOptNeeded {
     // No dexopt should (or can) be done to update the apk/jar.
     // Matches Java: dalvik.system.DexFile.NO_DEXOPT_NEEDED = 0
@@ -144,24 +139,6 @@
   // path.
   bool IsInBootClassPath();
 
-  // Obtains a lock on the target oat file.
-  // Only one OatFileAssistant object can hold the lock for a target oat file
-  // at a time. The Lock is released automatically when the OatFileAssistant
-  // object goes out of scope. The Lock() method must not be called if the
-  // lock has already been acquired.
-  //
-  // Returns true on success.
-  // Returns false on error, in which case error_msg will contain more
-  // information on the error.
-  //
-  // The 'error_msg' argument must not be null.
-  //
-  // This is intended to be used to avoid race conditions when multiple
-  // processes generate oat files, such as when a foreground Activity and
-  // a background Service both use DexClassLoaders pointing to the same dex
-  // file.
-  bool Lock(std::string* error_msg);
-
   // Return what action needs to be taken to produce up-to-date code for this
   // dex location. If "downgrade" is set to false, it verifies if the current
   // compiler filter is at least as good as an oat file generated with the
@@ -187,33 +164,6 @@
   // irrespective of the compiler filter of the up-to-date code.
   bool IsUpToDate();
 
-  // Return code used when attempting to generate updated code.
-  enum ResultOfAttemptToUpdate {
-    kUpdateFailed,        // We tried making the code up to date, but
-                          // encountered an unexpected failure.
-    kUpdateNotAttempted,  // We wanted to update the code, but determined we
-                          // should not make the attempt.
-    kUpdateSucceeded      // We successfully made the code up to date
-                          // (possibly by doing nothing).
-  };
-
-  // Attempts to generate or relocate the oat file as needed to make it up to
-  // date based on the current runtime and compiler options.
-  // profile_changed should be true to indicate the profile has recently
-  // changed for this dex location.
-  //
-  // If the dex files need to be made up to date, class_loader_context will be
-  // passed to dex2oat.
-  //
-  // Returns the result of attempting to update the code.
-  //
-  // If the result is not kUpdateSucceeded, the value of error_msg will be set
-  // to a string describing why there was a failure or the update was not
-  // attempted. error_msg must not be null.
-  ResultOfAttemptToUpdate MakeUpToDate(bool profile_changed,
-                                       ClassLoaderContext* class_loader_context,
-                                       std::string* error_msg);
-
   // Returns an oat file that can be used for loading dex files.
   // Returns null if no suitable oat file was found.
   //
@@ -284,18 +234,6 @@
   // Returns the status of the oat file for the dex location.
   OatStatus OatFileStatus();
 
-  // Executes dex2oat using the current runtime configuration overridden with
-  // the given arguments. This does not check to see if dex2oat is enabled in
-  // the runtime configuration.
-  // Returns true on success.
-  //
-  // If there is a failure, the value of error_msg will be set to a string
-  // describing why there was failure. error_msg must not be null.
-  //
-  // TODO: The OatFileAssistant probably isn't the right place to have this
-  // function.
-  static bool Dex2Oat(const std::vector<std::string>& args, std::string* error_msg);
-
   // Constructs the odex file name for the given dex location.
   // Returns true on success, in which case odex_filename is set to the odex
   // file name.
@@ -436,20 +374,6 @@
     bool file_released_ = false;
   };
 
-  // Generate the oat file for the given info from the dex file using the
-  // current runtime compiler options, the specified filter and class loader
-  // context.
-  // This does not check the current status before attempting to generate the
-  // oat file.
-  //
-  // If the result is not kUpdateSucceeded, the value of error_msg will be set
-  // to a string describing why there was a failure or the update was not
-  // attempted. error_msg must not be null.
-  ResultOfAttemptToUpdate GenerateOatFileNoChecks(OatFileInfo& info,
-                                                  CompilerFilter::Filter target,
-                                                  const ClassLoaderContext* class_loader_context,
-                                                  std::string* error_msg);
-
   // Return info for the best oat file.
   OatFileInfo& GetBestInfo();
 
@@ -473,13 +397,6 @@
   // location.
   OatStatus GivenOatFileStatus(const OatFile& file);
 
-  // Returns the current image location.
-  // Returns an empty string if the image location could not be retrieved.
-  //
-  // TODO: This method should belong with an image file manager, not
-  // the oat file assistant.
-  static std::string ImageLocation();
-
   // Gets the dex checksums required for an up-to-date oat file.
   // Returns cached_required_dex_checksums if the required checksums were
   // located. Returns null if the required checksums were not found.  The
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 0b3c61d..5a29978 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -41,11 +41,6 @@
 
 namespace art {
 
-static const std::string kSpecialSharedLibrary = "&";  // NOLINT [runtime/string] [4]
-static ClassLoaderContext* kSpecialSharedLibraryContext = nullptr;
-
-static constexpr char kDex2oatCmdLineHiddenApiArg[] = " --runtime-arg -Xhidden-api-checks";
-
 class OatFileAssistantTest : public DexoptTest {
  public:
   void VerifyOptimizationStatus(const std::string& file,
@@ -68,14 +63,6 @@
   }
 };
 
-class OatFileAssistantNoDex2OatTest : public DexoptTest {
- public:
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
-    DexoptTest::SetUpRuntimeOptions(options);
-    options->push_back(std::make_pair("-Xnodex2oat", nullptr));
-  }
-};
-
 class ScopedNonWritable {
  public:
   explicit ScopedNonWritable(const std::string& dex_location) {
@@ -109,6 +96,97 @@
   return geteuid() == 0;
 }
 
+// Case: We have a MultiDEX file and up-to-date ODEX file for it with relative
+// encoded dex locations.
+// Expect: The oat file status is kNoDexOptNeeded.
+TEST_F(OatFileAssistantTest, RelativeEncodedDexLocation) {
+  std::string dex_location = GetScratchDir() + "/RelativeEncodedDexLocation.jar";
+  std::string odex_location = GetOdexDir() + "/RelativeEncodedDexLocation.odex";
+
+  // Create the dex file
+  Copy(GetMultiDexSrc1(), dex_location);
+
+  // Create the oat file with relative encoded dex location.
+  std::vector<std::string> args = {
+    "--dex-file=" + dex_location,
+    "--dex-location=" + std::string("RelativeEncodedDexLocation.jar"),
+    "--oat-file=" + odex_location,
+    "--compiler-filter=speed"
+  };
+
+  std::string error_msg;
+  ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+
+  // Verify we can load both dex files.
+  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+  ASSERT_TRUE(oat_file.get() != nullptr);
+  EXPECT_TRUE(oat_file->IsExecutable());
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+  EXPECT_EQ(2u, dex_files.size());
+}
+
+TEST_F(OatFileAssistantTest, MakeUpToDateWithContext) {
+  std::string dex_location = GetScratchDir() + "/TestDex.jar";
+  std::string odex_location = GetOdexDir() + "/TestDex.odex";
+  std::string context_location = GetScratchDir() + "/ContextDex.jar";
+  Copy(GetDexSrc1(), dex_location);
+  Copy(GetDexSrc2(), context_location);
+
+  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+  std::string context_str = "PCL[" + context_location + "]";
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
+  ASSERT_TRUE(context != nullptr);
+  ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
+
+  std::string error_msg;
+  std::vector<std::string> args;
+  args.push_back("--dex-file=" + dex_location);
+  args.push_back("--oat-file=" + odex_location);
+  args.push_back("--class-loader-context=" + context_str);
+  ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+
+  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+  EXPECT_NE(nullptr, oat_file.get());
+  EXPECT_EQ(context->EncodeContextForOatFile(""),
+            oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
+}
+
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
+  std::string dex_location = GetScratchDir() + "/TestDex.jar";
+  std::string odex_location = GetOdexDir() + "/TestDex.odex";
+  std::string context_location = GetScratchDir() + "/ContextDex.jar";
+  Copy(GetDexSrc1(), dex_location);
+  Copy(GetDexSrc2(), context_location);
+
+  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+  std::string context_str = "PCL[" + context_location + "]";
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
+  ASSERT_TRUE(context != nullptr);
+  ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
+
+  std::string error_msg;
+  std::vector<std::string> args;
+  args.push_back("--dex-file=" + dex_location);
+  args.push_back("--oat-file=" + odex_location);
+  args.push_back("--class-loader-context=" + context_str);
+  ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+
+  // A relative context simulates a dependent split context.
+  std::unique_ptr<ClassLoaderContext> relative_context =
+      ClassLoaderContext::Create("PCL[ContextDex.jar]");
+  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+            oat_file_assistant.GetDexOptNeeded(
+                CompilerFilter::kDefaultCompilerFilter,
+                /* downgrade */ false,
+                /* profile_changed */ false,
+                relative_context.get()));
+}
+
 // Case: We have a DEX file, but no OAT file for it.
 // Expect: The status is kDex2OatNeeded.
 TEST_F(OatFileAssistantTest, DexNoOat) {
@@ -145,11 +223,6 @@
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
   EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
 
-  // Trying to make the oat file up to date should not fail or crash.
-  std::string error_msg;
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
-          oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg));
-
   // Trying to get the best oat file should fail, but not crash.
   std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
   EXPECT_EQ(nullptr, oat_file.get());
@@ -584,37 +657,6 @@
   EXPECT_EQ(OatFileAssistant::kOatDexOutOfDate, oat_file_assistant.OatFileStatus());
 }
 
-// Case: We have a MultiDEX file and up-to-date ODEX file for it with relative
-// encoded dex locations.
-// Expect: The oat file status is kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, RelativeEncodedDexLocation) {
-  std::string dex_location = GetScratchDir() + "/RelativeEncodedDexLocation.jar";
-  std::string odex_location = GetOdexDir() + "/RelativeEncodedDexLocation.odex";
-
-  // Create the dex file
-  Copy(GetMultiDexSrc1(), dex_location);
-
-  // Create the oat file with relative encoded dex location.
-  std::vector<std::string> args;
-  args.push_back("--dex-file=" + dex_location);
-  args.push_back("--dex-location=" + std::string("RelativeEncodedDexLocation.jar"));
-  args.push_back("--oat-file=" + odex_location);
-  args.push_back("--compiler-filter=speed");
-
-  std::string error_msg;
-  ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
-
-  // Verify we can load both dex files.
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
-
-  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
-  ASSERT_TRUE(oat_file.get() != nullptr);
-  EXPECT_TRUE(oat_file->IsExecutable());
-  std::vector<std::unique_ptr<const DexFile>> dex_files;
-  dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
-  EXPECT_EQ(2u, dex_files.size());
-}
-
 // Case: We have a DEX file and an OAT file out of date with respect to the
 // dex checksum.
 TEST_F(OatFileAssistantTest, OatDexOutOfDate) {
@@ -872,13 +914,6 @@
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
   EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
 
-  // Make the oat file up to date. This should have no effect.
-  std::string error_msg;
-  Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
-      oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg)) <<
-          error_msg;
-
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
 
@@ -1037,35 +1072,6 @@
   EXPECT_EQ(1u, dex_files.size());
 }
 
-// Case: We don't have a DEX file and can't write the oat file.
-// Expect: We should fail to generate the oat file without crashing.
-TEST_F(OatFileAssistantTest, GenNoDex) {
-  if (IsExecutedAsRoot()) {
-    // We cannot simulate non writable locations when executed as root: b/38000545.
-    LOG(ERROR) << "Test skipped because it's running as root";
-    return;
-  }
-
-  std::string dex_location = GetScratchDir() + "/GenNoDex.jar";
-
-  ScopedNonWritable scoped_non_writable(dex_location);
-  ASSERT_TRUE(scoped_non_writable.IsSuccessful());
-
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
-  std::string error_msg;
-  Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
-  // We should get kUpdateSucceeded from MakeUpToDate since there's nothing
-  // that can be done in this situation.
-  ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
-      oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg));
-
-  // Verify it didn't create an oat in the default location (dalvik-cache).
-  OatFileAssistant ofm(dex_location.c_str(), kRuntimeISA, false);
-  EXPECT_EQ(OatFileAssistant::kOatCannotOpen, ofm.OatFileStatus());
-  // Verify it didn't create the odex file in the default location (../oat/isa/...odex)
-  EXPECT_EQ(OatFileAssistant::kOatCannotOpen, ofm.OdexFileStatus());
-}
-
 // Turn an absolute path into a path relative to the current working
 // directory.
 static std::string MakePathRelative(const std::string& target) {
@@ -1131,13 +1137,6 @@
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
   EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
-
-  // Trying to make it up to date should have no effect.
-  std::string error_msg;
-  Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
-      oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg));
-  EXPECT_TRUE(error_msg.empty());
 }
 
 // Case: Non-standard extension for dex file.
@@ -1156,11 +1155,12 @@
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
 }
 
+
 // A task to generate a dex location. Used by the RaceToGenerate test.
 class RaceGenerateTask : public Task {
  public:
-  explicit RaceGenerateTask(const std::string& dex_location, const std::string& oat_location)
-    : dex_location_(dex_location), oat_location_(oat_location), loaded_oat_file_(nullptr)
+  RaceGenerateTask(const std::string& dex_location, const std::string& oat_location)
+      : dex_location_(dex_location), oat_location_(oat_location), loaded_oat_file_(nullptr)
   {}
 
   void Run(Thread* self ATTRIBUTE_UNUSED) {
@@ -1169,6 +1169,21 @@
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     std::vector<std::string> error_msgs;
     const OatFile* oat_file = nullptr;
+    {
+      // Create the oat file.
+      std::vector<std::string> args;
+      args.push_back("--dex-file=" + dex_location_);
+      args.push_back("--oat-file=" + oat_location_);
+      std::string error_msg;
+      if (kIsTargetBuild) {
+        // Don't check whether dex2oat succeeded: since we're running kNumThreads invocations in
+        // parallel, the low-memory killer might kill some of them.
+        DexoptTest::Dex2Oat(args, &error_msg);
+      } else {
+        ASSERT_TRUE(DexoptTest::Dex2Oat(args, &error_msg)) << error_msg;
+      }
+    }
+
     dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
         dex_location_.c_str(),
         Runtime::Current()->GetSystemClassLoader(),
@@ -1176,8 +1191,9 @@
         &oat_file,
         &error_msgs);
     CHECK(!dex_files.empty()) << android::base::Join(error_msgs, '\n');
-    CHECK(dex_files[0]->GetOatDexFile() != nullptr) << dex_files[0]->GetLocation();
-    loaded_oat_file_ = dex_files[0]->GetOatDexFile()->GetOatFile();
+    if (dex_files[0]->GetOatDexFile() != nullptr) {
+      loaded_oat_file_ = dex_files[0]->GetOatDexFile()->GetOatFile();
+    }
     CHECK_EQ(loaded_oat_file_, oat_file);
   }
 
@@ -1191,12 +1207,8 @@
   const OatFile* loaded_oat_file_;
 };
 
-// Test the case where multiple processes race to generate an oat file.
-// This simulates multiple processes using multiple threads.
-//
-// We want unique Oat files to be loaded even when there is a race to load.
-// TODO: The test case no longer tests locking the way it was intended since we now get multiple
-// copies of the same Oat files mapped at different locations.
+// Test the case where dex2oat invocations race with multiple processes trying to
+// load the oat file.
 TEST_F(OatFileAssistantTest, RaceToGenerate) {
   std::string dex_location = GetScratchDir() + "/RaceToGenerate.jar";
   std::string oat_location = GetOdexDir() + "/RaceToGenerate.oat";
@@ -1209,31 +1221,32 @@
   // take a while to generate.
   Copy(GetLibCoreDexFileNames()[0], dex_location);
 
-  const int kNumThreads = 32;
+  const size_t kNumThreads = 32;
   Thread* self = Thread::Current();
   ThreadPool thread_pool("Oat file assistant test thread pool", kNumThreads);
   std::vector<std::unique_ptr<RaceGenerateTask>> tasks;
-  for (int i = 0; i < kNumThreads; i++) {
+  for (size_t i = 0; i < kNumThreads; i++) {
     std::unique_ptr<RaceGenerateTask> task(new RaceGenerateTask(dex_location, oat_location));
     thread_pool.AddTask(self, task.get());
     tasks.push_back(std::move(task));
   }
   thread_pool.StartWorkers(self);
-  thread_pool.Wait(self, true, false);
+  thread_pool.Wait(self, /* do_work */ true, /* may_hold_locks */ false);
 
-  // Verify every task got a unique oat file.
+  // Verify that each task that got an oat file got a unique one.
   std::set<const OatFile*> oat_files;
   for (auto& task : tasks) {
     const OatFile* oat_file = task->GetLoadedOatFile();
-    EXPECT_TRUE(oat_files.find(oat_file) == oat_files.end());
-    oat_files.insert(oat_file);
+    if (oat_file != nullptr) {
+      EXPECT_TRUE(oat_files.find(oat_file) == oat_files.end());
+      oat_files.insert(oat_file);
+    }
   }
 }
 
-// Case: We have a DEX file and an ODEX file, no OAT file, and dex2oat is
-// disabled.
+// Case: We have a DEX file and an ODEX file, and no OAT file.
 // Expect: We should load the odex file non-executable.
-TEST_F(OatFileAssistantNoDex2OatTest, LoadDexOdexNoOat) {
+TEST_F(DexoptTest, LoadDexOdexNoOat) {
   std::string dex_location = GetScratchDir() + "/LoadDexOdexNoOat.jar";
   std::string odex_location = GetOdexDir() + "/LoadDexOdexNoOat.odex";
 
@@ -1252,10 +1265,9 @@
   EXPECT_EQ(1u, dex_files.size());
 }
 
-// Case: We have a MultiDEX file and an ODEX file, no OAT file, and dex2oat is
-// disabled.
+// Case: We have a MultiDEX file and an ODEX file, and no OAT file.
 // Expect: We should load the odex file non-executable.
-TEST_F(OatFileAssistantNoDex2OatTest, LoadMultiDexOdexNoOat) {
+TEST_F(DexoptTest, LoadMultiDexOdexNoOat) {
   std::string dex_location = GetScratchDir() + "/LoadMultiDexOdexNoOat.jar";
   std::string odex_location = GetOdexDir() + "/LoadMultiDexOdexNoOat.odex";
 
@@ -1274,36 +1286,6 @@
   EXPECT_EQ(2u, dex_files.size());
 }
 
-TEST_F(OatFileAssistantTest, RuntimeCompilerFilterOptionUsed) {
-  std::string dex_location = GetScratchDir() + "/RuntimeCompilerFilterOptionUsed.jar";
-  Copy(GetDexSrc1(), dex_location);
-
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
-  std::string error_msg;
-  Runtime::Current()->AddCompilerOption("--compiler-filter=quicken");
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
-      oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg)) <<
-          error_msg;
-  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
-  EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
-
-  Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
-      oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg))
-          << error_msg;
-  EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
-  EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
-
-  Runtime::Current()->AddCompilerOption("--compiler-filter=bogus");
-  EXPECT_EQ(OatFileAssistant::kUpdateNotAttempted,
-      oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg));
-}
-
 TEST(OatFileAssistantUtilsTest, DexLocationToOdexFilename) {
   std::string error_msg;
   std::string odex_file;
@@ -1350,112 +1332,6 @@
   }
 }
 
-// Verify that when no compiler filter is passed the default one from OatFileAssistant is used.
-TEST_F(OatFileAssistantTest, DefaultMakeUpToDateFilter) {
-  std::string dex_location = GetScratchDir() + "/TestDex.jar";
-  Copy(GetDexSrc1(), dex_location);
-
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
-  const CompilerFilter::Filter default_filter =
-      OatFileAssistant::kDefaultCompilerFilterForDexLoading;
-  std::string error_msg;
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
-      oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg)) <<
-          error_msg;
-  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(default_filter));
-  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
-  EXPECT_NE(nullptr, oat_file.get());
-  EXPECT_EQ(default_filter, oat_file->GetCompilerFilter());
-}
-
-TEST_F(OatFileAssistantTest, MakeUpToDateWithSpecialSharedLibrary) {
-  std::string dex_location = GetScratchDir() + "/TestDex.jar";
-  Copy(GetDexSrc1(), dex_location);
-
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
-  const CompilerFilter::Filter default_filter =
-      OatFileAssistant::kDefaultCompilerFilterForDexLoading;
-  std::string error_msg;
-  int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(default_filter));
-  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
-  EXPECT_NE(nullptr, oat_file.get());
-  EXPECT_EQ(kSpecialSharedLibrary,
-            oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
-}
-
-TEST_F(OatFileAssistantTest, MakeUpToDateWithContext) {
-  std::string dex_location = GetScratchDir() + "/TestDex.jar";
-  std::string context_location = GetScratchDir() + "/ContextDex.jar";
-  Copy(GetDexSrc1(), dex_location);
-  Copy(GetDexSrc2(), context_location);
-
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
-  const CompilerFilter::Filter default_filter =
-      OatFileAssistant::kDefaultCompilerFilterForDexLoading;
-  std::string error_msg;
-  std::string context_str = "PCL[" + context_location + "]";
-  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
-  ASSERT_TRUE(context != nullptr);
-  ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
-
-  int status = oat_file_assistant.MakeUpToDate(false, context.get(), &error_msg);
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(default_filter, false, false, context.get()));
-
-  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
-  EXPECT_NE(nullptr, oat_file.get());
-  EXPECT_EQ(context->EncodeContextForOatFile(""),
-      oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
-}
-
-TEST_F(OatFileAssistantTest, MakeUpToDateWithHiddenApiDisabled) {
-  hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
-      hiddenapi::EnforcementPolicy::kNoChecks);
-
-  std::string dex_location = GetScratchDir() + "/TestDexHiddenApiDisabled.jar";
-  Copy(GetDexSrc1(), dex_location);
-
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-  std::string error_msg;
-  int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-
-  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
-  EXPECT_NE(nullptr, oat_file.get());
-
-  const char* cmd_line = oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
-  EXPECT_NE(nullptr, cmd_line);
-  EXPECT_EQ(nullptr, strstr(cmd_line, kDex2oatCmdLineHiddenApiArg));
-}
-
-TEST_F(OatFileAssistantTest, MakeUpToDateWithHiddenApiEnabled) {
-  hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
-      hiddenapi::EnforcementPolicy::kBlacklistOnly);
-
-  std::string dex_location = GetScratchDir() + "/TestDexHiddenApiEnabled.jar";
-  Copy(GetDexSrc1(), dex_location);
-
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-  std::string error_msg;
-  int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-
-  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
-  EXPECT_NE(nullptr, oat_file.get());
-
-  const char* cmd_line = oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
-  EXPECT_NE(nullptr, cmd_line);
-  EXPECT_NE(nullptr, strstr(cmd_line, kDex2oatCmdLineHiddenApiArg));
-}
-
 TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
   std::string dex_location = GetScratchDir() + "/TestDex.jar";
   std::string context_location = GetScratchDir() + "/ContextDex.jar";
@@ -1464,19 +1340,12 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
-  const CompilerFilter::Filter default_filter =
-      OatFileAssistant::kDefaultCompilerFilterForDexLoading;
   std::string error_msg;
   std::string context_str = "PCL[" + context_location + "]";
   std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
   ASSERT_TRUE(context != nullptr);
   ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
 
-  int status = oat_file_assistant.MakeUpToDate(false, context.get(), &error_msg);
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(default_filter, false, false, context.get()));
-
   // Update the context by overriding the jar file.
   Copy(GetMultiDexSrc2(), context_location);
   std::unique_ptr<ClassLoaderContext> updated_context = ClassLoaderContext::Create(context_str);
@@ -1484,88 +1353,10 @@
   // DexOptNeeded should advise compilation from scratch.
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
             oat_file_assistant.GetDexOptNeeded(
-                  default_filter, false, false, updated_context.get()));
-}
-
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
-  std::string dex_location = GetScratchDir() + "/TestDex.jar";
-  std::string context_location = GetScratchDir() + "/ContextDex.jar";
-  Copy(GetDexSrc1(), dex_location);
-  Copy(GetDexSrc2(), context_location);
-
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
-  const CompilerFilter::Filter default_filter =
-      OatFileAssistant::kDefaultCompilerFilterForDexLoading;
-  std::string error_msg;
-  std::string context_str = "PCL[" + context_location + "]";
-  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
-  ASSERT_TRUE(context != nullptr);
-  ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
-
-  int status = oat_file_assistant.MakeUpToDate(false, context.get(), &error_msg);
-  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-
-  // A relative context simulates a dependent split context.
-  std::unique_ptr<ClassLoaderContext> relative_context =
-      ClassLoaderContext::Create("PCL[ContextDex.jar]");
-  EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(
-                default_filter, false, false, relative_context.get()));
-}
-
-TEST_F(OatFileAssistantTest, SystemOdex) {
-  std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
-  std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
-  std::string system_location = GetAndroidRoot() + "/OatUpToDate.jar";
-
-  std::string error_msg;
-
-  Copy(GetDexSrc1(), dex_location);
-  EXPECT_FALSE(LocationIsOnSystem(dex_location.c_str()));
-
-  {
-    OatFileAssistant oat_file_assistant(dex_location.c_str(),
-                                        kRuntimeISA,
-                                        true,
-                                        false);
-    int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-    ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-    EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
-  }
-
-  {
-    OatFileAssistant oat_file_assistant(dex_location.c_str(),
-                                        kRuntimeISA,
-                                        true,
-                                        true);
-    int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-    ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-    EXPECT_FALSE(oat_file_assistant.GetBestOatFile()->IsExecutable());
-  }
-
-  Copy(GetDexSrc1(), system_location);
-  EXPECT_TRUE(LocationIsOnSystem(system_location.c_str()));
-
-  {
-    OatFileAssistant oat_file_assistant(system_location.c_str(),
-                                        kRuntimeISA,
-                                        true,
-                                        false);
-    int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-    ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-    EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
-  }
-
-  {
-    OatFileAssistant oat_file_assistant(system_location.c_str(),
-                                        kRuntimeISA,
-                                        true,
-                                        true);
-    int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-    ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
-    EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
-  }
+                  CompilerFilter::kDefaultCompilerFilter,
+                  /* downgrade */ false,
+                  /* profile_changed */ false,
+                  updated_context.get()));
 }
 
 // TODO: More Tests:
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 59a1045..bcad4a3 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -465,57 +465,15 @@
                                       !runtime->IsAotCompiler(),
                                       only_use_system_oat_files_);
 
-  // Lock the target oat location to avoid races generating and loading the
-  // oat file.
-  std::string error_msg;
-  if (!oat_file_assistant.Lock(/*out*/&error_msg)) {
-    // Don't worry too much if this fails. If it does fail, it's unlikely we
-    // can generate an oat file anyway.
-    VLOG(class_linker) << "OatFileAssistant::Lock: " << error_msg;
-  }
-
-  const OatFile* source_oat_file = nullptr;
-
-  if (!oat_file_assistant.IsUpToDate()) {
-    // Update the oat file on disk if we can, based on the --compiler-filter
-    // option derived from the current runtime options.
-    // This may fail, but that's okay. Best effort is all that matters here.
-    // TODO(calin): b/64530081 b/66984396. Pass a null context to verify and compile
-    // secondary dex files in isolation (and avoid to extract/verify the main apk
-    // if it's in the class path). Note this trades correctness for performance
-    // since the resulting slow down is unacceptable in some cases until b/64530081
-    // is fixed.
-    // We still pass the class loader context when the classpath string of the runtime
-    // is not empty, which is the situation when ART is invoked standalone.
-    ClassLoaderContext* actual_context = Runtime::Current()->GetClassPathString().empty()
-        ? nullptr
-        : context.get();
-    switch (oat_file_assistant.MakeUpToDate(/*profile_changed*/ false,
-                                            actual_context,
-                                            /*out*/ &error_msg)) {
-      case OatFileAssistant::kUpdateFailed:
-        LOG(WARNING) << error_msg;
-        break;
-
-      case OatFileAssistant::kUpdateNotAttempted:
-        // Avoid spamming the logs if we decided not to attempt making the oat
-        // file up to date.
-        VLOG(oat) << error_msg;
-        break;
-
-      case OatFileAssistant::kUpdateSucceeded:
-        // Nothing to do.
-        break;
-    }
-  }
-
   // Get the oat file on disk.
   std::unique_ptr<const OatFile> oat_file(oat_file_assistant.GetBestOatFile().release());
   VLOG(oat) << "OatFileAssistant(" << dex_location << ").GetBestOatFile()="
             << reinterpret_cast<uintptr_t>(oat_file.get())
             << " (executable=" << (oat_file != nullptr ? oat_file->IsExecutable() : false) << ")";
 
+  const OatFile* source_oat_file = nullptr;
   CheckCollisionResult check_collision_result = CheckCollisionResult::kPerformedHasCollisions;
+  std::string error_msg;
   if ((class_loader != nullptr || dex_elements != nullptr) && oat_file != nullptr) {
     // Prevent oat files from being loaded if no class_loader or dex_elements are provided.
     // This can happen when the deprecated DexFile.<init>(String) is called directly, and it
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index 12dfe20..51d8fca 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -77,11 +77,11 @@
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                    oat_location.c_str(),
                                                    oat_location.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/false,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    dex_location.c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file.get() != nullptr);
 
@@ -105,11 +105,11 @@
     std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
                                                      oat_location.c_str(),
                                                      oat_location.c_str(),
-                                                     nullptr,
-                                                     nullptr,
-                                                     false,
-                                                     /*low_4gb*/false,
+                                                     /* requested_base */ nullptr,
+                                                     /* executable */ false,
+                                                     /* low_4gb */ false,
                                                      dex_location.c_str(),
+                                                     /* reservation */ nullptr,
                                                      &error_msg));
     ASSERT_TRUE(odex_file != nullptr);
     ASSERT_EQ(2u, odex_file->GetOatDexFiles().size());
@@ -120,13 +120,13 @@
 
   // And try to load again.
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
-                                                   oat_location.c_str(),
-                                                   oat_location.c_str(),
-                                                   nullptr,
-                                                   nullptr,
-                                                   false,
-                                                   /*low_4gb*/false,
+                                                   oat_location,
+                                                   oat_location,
+                                                   /* requested_base */ nullptr,
+                                                   /* executable */ false,
+                                                   /* low_4gb */ false,
                                                    dex_location.c_str(),
+                                                   /* reservation */ nullptr,
                                                    &error_msg));
   EXPECT_TRUE(odex_file == nullptr);
   EXPECT_NE(std::string::npos, error_msg.find("expected 2 uncompressed dex files, but found 1"))
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index a44e5a4..4d16eb5 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -220,9 +220,6 @@
       .Define({"-Xrelocate", "-Xnorelocate"})
           .WithValues({true, false})
           .IntoKey(M::Relocate)
-      .Define({"-Xdex2oat", "-Xnodex2oat"})
-          .WithValues({true, false})
-          .IntoKey(M::Dex2Oat)
       .Define({"-Ximage-dex2oat", "-Xnoimage-dex2oat"})
           .WithValues({true, false})
           .IntoKey(M::ImageDex2Oat)
@@ -518,9 +515,12 @@
     LOG(INFO) << "setting boot class path to " << *args.Get(M::BootClassPath);
   }
 
-  if (args.GetOrDefault(M::UseJitCompilation) && args.GetOrDefault(M::Interpret)) {
-    Usage("-Xusejit:true and -Xint cannot be specified together");
-    Exit(0);
+  if (args.GetOrDefault(M::Interpret)) {
+    if (args.Exists(M::UseJitCompilation) && *args.Get(M::UseJitCompilation)) {
+      Usage("-Xusejit:true and -Xint cannot be specified together\n");
+      Exit(0);
+    }
+    args.Set(M::UseJitCompilation, false);
   }
 
   // Set a default boot class path if we didn't get an explicit one via command line.
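A minimal sketch of the option precedence the parsed_options.cc hunk above establishes
(hypothetical helper names, not the real parser API): an explicit -Xusejit:true combined
with -Xint is rejected as contradictory; otherwise -Xint simply turns the JIT off; and
without -Xint the explicit value, or the default (flipped to true elsewhere in this
change), wins.

    #include <optional>
    #include <stdexcept>

    // Illustrative only: resolve the effective JIT setting from -Xint and -Xusejit.
    bool ResolveUseJit(bool interpret,
                       std::optional<bool> explicit_use_jit,  // value of -Xusejit, if passed
                       bool default_use_jit) {                // runtime default (now true)
      if (interpret) {
        if (explicit_use_jit.value_or(false)) {
          // -Xusejit:true and -Xint contradict each other; the runtime prints usage and exits.
          throw std::invalid_argument("-Xusejit:true and -Xint cannot be specified together");
        }
        return false;  // -Xint implies no JIT compilation.
      }
      return explicit_use_jit.value_or(default_use_jit);
    }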
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 36dea60..f1e485b 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -30,7 +30,7 @@
 
 class ProxyTest : public CommonRuntimeTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
     // The creation of a Proxy class uses WellKnownClasses. These are not normally initialized by
     // CommonRuntimeTest so we need to do that now.
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 7f5717f..7b92151 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -58,7 +58,7 @@
       full_fragment_done_(false) {}
 
 // Finds catch handler.
-class CatchBlockStackVisitor FINAL : public StackVisitor {
+class CatchBlockStackVisitor final : public StackVisitor {
  public:
   CatchBlockStackVisitor(Thread* self,
                          Context* context,
@@ -72,7 +72,7 @@
         skip_frames_(skip_frames) {
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
     if (method == nullptr) {
@@ -350,7 +350,7 @@
 }
 
 // Prepares deoptimization.
-class DeoptimizeStackVisitor FINAL : public StackVisitor {
+class DeoptimizeStackVisitor final : public StackVisitor {
  public:
   DeoptimizeStackVisitor(Thread* self,
                          Context* context,
@@ -399,7 +399,7 @@
     }
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
     ArtMethod* method = GetMethod();
     if (method == nullptr || single_frame_done_) {
@@ -667,14 +667,14 @@
 }
 
 // Prints out methods with their type of frame.
-class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor {
+class DumpFramesWithTypeStackVisitor final : public StackVisitor {
  public:
   explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         show_details_(show_details) {}
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     if (show_details_) {
       LOG(INFO) << "|> pc   = " << std::hex << GetCurrentQuickFramePc();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 30d4587..6878cc0 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -216,7 +216,6 @@
       must_relocate_(false),
       is_concurrent_gc_enabled_(true),
       is_explicit_gc_disabled_(false),
-      dex2oat_enabled_(true),
       image_dex2oat_enabled_(true),
       default_stack_size_(0),
       heap_(nullptr),
@@ -1195,7 +1194,6 @@
   must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
   is_zygote_ = runtime_options.Exists(Opt::Zygote);
   is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
-  dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::Dex2Oat);
   image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
   dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);
 
@@ -2638,7 +2636,7 @@
   explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
       : instrumentation_(instrumentation) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
     auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
     for (auto& m : klass->GetMethods(pointer_size)) {
       const void* code = m.GetEntryPointFromQuickCompiledCode();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f98d7b9..f0bf754 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -142,10 +142,6 @@
     return must_relocate_;
   }
 
-  bool IsDex2OatEnabled() const {
-    return dex2oat_enabled_ && IsImageDex2OatEnabled();
-  }
-
   bool IsImageDex2OatEnabled() const {
     return image_dex2oat_enabled_;
   }
@@ -846,7 +842,6 @@
   bool must_relocate_;
   bool is_concurrent_gc_enabled_;
   bool is_explicit_gc_disabled_;
-  bool dex2oat_enabled_;
   bool image_dex2oat_enabled_;
 
   std::string compiler_executable_;
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index ed0472f..aaedb23 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -50,7 +50,7 @@
 
 class RuntimeCallbacksTest : public CommonRuntimeTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
 
     Thread* self = Thread::Current();
@@ -60,7 +60,7 @@
     AddListener();
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     {
       Thread* self = Thread::Current();
       ScopedObjectAccess soa(self);
@@ -101,10 +101,10 @@
   }
 
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddThreadLifecycleCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveThreadLifecycleCallback(&cb_);
   }
 
@@ -117,7 +117,7 @@
   };
 
   struct Callback : public ThreadLifecycleCallback {
-    void ThreadStart(Thread* self) OVERRIDE {
+    void ThreadStart(Thread* self) override {
       if (state == CallbackState::kBase) {
         state = CallbackState::kStarted;
         stored_self = self;
@@ -126,7 +126,7 @@
       }
     }
 
-    void ThreadDeath(Thread* self) OVERRIDE {
+    void ThreadDeath(Thread* self) override {
       if (state == CallbackState::kStarted && self == stored_self) {
         state = CallbackState::kDied;
       } else {
@@ -219,10 +219,10 @@
 
 class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddClassLoadCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveClassLoadCallback(&cb_);
   }
 
@@ -252,14 +252,14 @@
   }
 
   struct Callback : public ClassLoadCallback {
-    virtual void ClassPreDefine(const char* descriptor,
-                                Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
-                                Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
-                                const DexFile& initial_dex_file,
-                                const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
-                                /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
-                                /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
-        OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    void ClassPreDefine(const char* descriptor,
+                        Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
+                        Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
+                        const DexFile& initial_dex_file,
+                        const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+                        /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
+                        /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED) override
+        REQUIRES_SHARED(Locks::mutator_lock_) {
       const std::string& location = initial_dex_file.GetLocation();
       std::string event =
           std::string("PreDefine:") + descriptor + " <" +
@@ -267,14 +267,14 @@
       data.push_back(event);
     }
 
-    void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string tmp;
       std::string event = std::string("Load:") + klass->GetDescriptor(&tmp);
       data.push_back(event);
     }
 
     void ClassPrepare(Handle<mirror::Class> temp_klass,
-                      Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+                      Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string tmp, tmp2;
       std::string event = std::string("Prepare:") + klass->GetDescriptor(&tmp)
           + "[" + temp_klass->GetDescriptor(&tmp2) + "]";
@@ -319,15 +319,15 @@
 
 class RuntimeSigQuitCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddRuntimeSigQuitCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimeSigQuitCallback(&cb_);
   }
 
   struct Callback : public RuntimeSigQuitCallback {
-    void SigQuit() OVERRIDE {
+    void SigQuit() override {
       ++sigquit_count;
     }
 
@@ -362,20 +362,20 @@
 
 class RuntimePhaseCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&cb_);
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     // Bypass RuntimeCallbacksTest::TearDown, as the runtime is already gone.
     CommonRuntimeTest::TearDown();
   }
 
   struct Callback : public RuntimePhaseCallback {
-    void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) OVERRIDE {
+    void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) override {
       if (p == RuntimePhaseCallback::RuntimePhase::kInitialAgents) {
         if (start_seen > 0 || init_seen > 0 || death_seen > 0) {
           LOG(FATAL) << "Unexpected order";
@@ -434,10 +434,10 @@
 
 class MonitorWaitCallbacksTest : public RuntimeCallbacksTest {
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(&cb_);
   }
 
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index ef21f9f..ae1e08f 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -21,7 +21,7 @@
 
 // This file defines the list of keys for RuntimeOptions.
 // These can be used with RuntimeOptions.Get/Set/etc, for example:
-//         RuntimeOptions opt; bool* dex2oat_enabled = opt.Get(RuntimeOptions::Dex2Oat);
+//         RuntimeOptions opt; bool* image_dex2oat_enabled = opt.Get(RuntimeOptions::ImageDex2Oat);
 //
 // Column Descriptions:
 //                   <<Type>>             <<Key Name>>                  <<Default Value>>
@@ -69,7 +69,7 @@
 RUNTIME_OPTIONS_KEY (Unit,                LowMemoryMode)
 RUNTIME_OPTIONS_KEY (bool,                UseTLAB,                        (kUseTlab || kUseReadBarrier))
 RUNTIME_OPTIONS_KEY (bool,                EnableHSpaceCompactForOOM,      true)
-RUNTIME_OPTIONS_KEY (bool,                UseJitCompilation,              false)
+RUNTIME_OPTIONS_KEY (bool,                UseJitCompilation,              true)
 RUNTIME_OPTIONS_KEY (bool,                DumpNativeStackOnSigQuit,       true)
 RUNTIME_OPTIONS_KEY (bool,                MadviseRandomAccess,            false)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITCompileThreshold)
@@ -88,7 +88,6 @@
 RUNTIME_OPTIONS_KEY (std::string,         JniTrace)
 RUNTIME_OPTIONS_KEY (std::string,         PatchOat)
 RUNTIME_OPTIONS_KEY (bool,                Relocate,                       kDefaultMustRelocate)
-RUNTIME_OPTIONS_KEY (bool,                Dex2Oat,                        true)
 RUNTIME_OPTIONS_KEY (bool,                ImageDex2Oat,                   true)
 RUNTIME_OPTIONS_KEY (bool,                Interpret,                      false) // -Xint
                                                         // Disable the compiler for CC (for now).
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ce99fb9..eb9c661 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -461,7 +461,7 @@
     NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
         : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}
 
-    bool VisitFrame() OVERRIDE {
+    bool VisitFrame() override {
       frames++;
       return true;
     }
@@ -487,7 +487,7 @@
           next_dex_pc_(0) {
     }
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       if (found_frame_) {
         ArtMethod* method = GetMethod();
         if (method != nullptr && !method->IsRuntimeMethod()) {
@@ -520,7 +520,7 @@
     explicit DescribeStackVisitor(Thread* thread_in)
         : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
       return true;
     }
diff --git a/runtime/thread.cc b/runtime/thread.cc
index df7f19d..19fe4ea 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -25,6 +25,12 @@
 #include <sys/resource.h>
 #include <sys/time.h>
 
+#if __has_feature(hwaddress_sanitizer)
+#include <sanitizer/hwasan_interface.h>
+#else
+#define __hwasan_tag_pointer(p, t) (p)
+#endif
+
 #include <algorithm>
 #include <bitset>
 #include <cerrno>
@@ -623,7 +629,9 @@
 #endif
       volatile char space[kPageSize - (kAsanMultiplier * 256)];
       char sink ATTRIBUTE_UNUSED = space[zero];  // NOLINT
-      if (reinterpret_cast<uintptr_t>(space) >= target + kPageSize) {
+      // Remove the HWASan tag from the pointer. This is a no-op in non-HWASan builds.
+      uintptr_t addr = reinterpret_cast<uintptr_t>(__hwasan_tag_pointer(space, 0));
+      if (addr >= target + kPageSize) {
         Touch(target);
       }
       zero *= 2;  // Try to avoid tail recursion.
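Aside on the HWASan hunk above, as a sketch under the assumption that HWASan keeps a tag
in the top byte of pointers (this is not the actual ART code): a raw integer comparison
between a tagged stack address and an untagged target can give the wrong answer, so the
tag is cleared with __hwasan_tag_pointer(ptr, 0) first; the fallback macro makes that a
no-op in ordinary builds.

    #include <cstddef>
    #include <cstdint>
    #if __has_feature(hwaddress_sanitizer)
    #include <sanitizer/hwasan_interface.h>      // declares __hwasan_tag_pointer()
    #else
    #define __hwasan_tag_pointer(p, t) (p)       // no-op outside HWASan builds
    #endif

    // Illustrative only: compare a possibly tagged stack address against an
    // untagged target after clearing the HWASan tag.
    bool ReachedTarget(const volatile char* stack_ptr, uintptr_t target, size_t page_size) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(__hwasan_tag_pointer(stack_ptr, 0));
      return addr >= target + page_size;
    }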
@@ -1486,7 +1494,7 @@
  public:
   explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
 
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     wrapped_->Run(self);
     barrier_.Pass(self);
   }
@@ -1844,7 +1852,7 @@
   static constexpr size_t kMaxRepetition = 3u;
 
   VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
     ObjPtr<mirror::Class> c = m->GetDeclaringClass();
@@ -1883,24 +1891,24 @@
     return VisitMethodResult::kContinueMethod;
   }
 
-  VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+  VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
     return VisitMethodResult::kContinueMethod;
   }
 
   void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     PrintObject(obj, "  - waiting on ", ThreadList::kInvalidThreadId);
   }
   void VisitSleepingObject(mirror::Object* obj)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     PrintObject(obj, "  - sleeping on ", ThreadList::kInvalidThreadId);
   }
   void VisitBlockedOnObject(mirror::Object* obj,
                             ThreadState state,
                             uint32_t owner_tid)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     const char* msg;
     switch (state) {
@@ -1919,7 +1927,7 @@
     PrintObject(obj, msg, owner_tid);
   }
   void VisitLockedObject(mirror::Object* obj)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     PrintObject(obj, "  - locked ", ThreadList::kInvalidThreadId);
   }
@@ -2216,7 +2224,7 @@
 
   // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
   void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+      override NO_THREAD_SAFETY_ANALYSIS {
     if (self_->HoldsLock(entered_monitor)) {
       LOG(WARNING) << "Calling MonitorExit on object "
                    << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
@@ -2845,7 +2853,7 @@
 
    protected:
     VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
           soaa_, m, GetDexPc(/* abort on error */ false));
@@ -2856,7 +2864,7 @@
       return VisitMethodResult::kContinueMethod;
     }
 
-    VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+    VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
       lock_objects_.push_back({});
       lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
 
@@ -2866,24 +2874,24 @@
     }
 
     void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
     }
     void VisitSleepingObject(mirror::Object* obj)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
     }
     void VisitBlockedOnObject(mirror::Object* obj,
                               ThreadState state ATTRIBUTE_UNUSED,
                               uint32_t owner_tid ATTRIBUTE_UNUSED)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
     }
     void VisitLockedObject(mirror::Object* obj)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
     }
@@ -3450,7 +3458,7 @@
 
 // Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
 //       so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
-struct CurrentMethodVisitor FINAL : public StackVisitor {
+struct CurrentMethodVisitor final : public StackVisitor {
   CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : StackVisitor(thread,
@@ -3461,7 +3469,7 @@
         method_(nullptr),
         dex_pc_(0),
         abort_on_error_(abort_on_error) {}
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
       // Continue if this is a runtime method.
@@ -3857,7 +3865,7 @@
 class VerifyRootVisitor : public SingleRootVisitor {
  public:
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     VerifyObject(root);
   }
 };
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 9222024..cddc275 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -199,7 +199,7 @@
 static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;
 
 // A closure used by Thread::Dump.
-class DumpCheckpoint FINAL : public Closure {
+class DumpCheckpoint final : public Closure {
  public:
   DumpCheckpoint(std::ostream* os, bool dump_native_stack)
       : os_(os),
@@ -211,7 +211,7 @@
     }
   }
 
-  void Run(Thread* thread) OVERRIDE {
+  void Run(Thread* thread) override {
     // Note thread and self may not be equal if thread was already suspended at the point of the
     // request.
     Thread* self = Thread::Current();
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 1986eec..949fabe 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -383,9 +383,6 @@
     }
   };
   std::unique_ptr<File, decltype(deleter)> trace_file(trace_file_in.release(), deleter);
-  if (trace_file != nullptr) {
-    trace_file->DisableAutoClose();
-  }
 
   Thread* self = Thread::Current();
   {
@@ -420,7 +417,7 @@
     if (the_trace_ != nullptr) {
       LOG(ERROR) << "Trace already in progress, ignoring this request";
     } else {
-      enable_stats = (flags && kTraceCountAllocs) != 0;
+      enable_stats = (flags & kTraceCountAllocs) != 0;
       the_trace_ = new Trace(trace_file.release(), buffer_size, flags, output_mode, trace_mode);
       if (trace_mode == TraceMode::kSampling) {
         CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
@@ -608,7 +605,7 @@
   Runtime* runtime = Runtime::Current();
 
   // Enable count of allocs if specified in the flags.
-  bool enable_stats = (the_trace->flags_ && kTraceCountAllocs) != 0;
+  bool enable_stats = (the_trace->flags_ & kTraceCountAllocs) != 0;
 
   {
     gc::ScopedGCCriticalSection gcs(self,
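The two trace.cc hunks above are a logical-vs-bitwise AND fix: (flags && kTraceCountAllocs)
collapses to a boolean that is true whenever any flag at all is set, whereas
(flags & kTraceCountAllocs) tests the allocation-counting bit itself. A tiny self-contained
illustration; only kTraceCountAllocs = 1 comes from this patch, the second flag value is
assumed for the example:

    #include <cassert>
    #include <cstdint>

    enum TraceFlag : uint32_t {
      kTraceCountAllocs = 1,        // from trace.h below
      kIllustrativeOtherFlag = 2,   // assumed value, for the example only
    };

    int main() {
      uint32_t flags = kIllustrativeOtherFlag;          // allocation counting not requested
      bool buggy = (flags && kTraceCountAllocs) != 0;   // true: any non-zero flags pass
      bool fixed = (flags & kTraceCountAllocs) != 0;    // false: that particular bit is clear
      assert(buggy && !fixed);
      return 0;
    }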
diff --git a/runtime/trace.h b/runtime/trace.h
index 1fae250..5d96493 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -102,7 +102,7 @@
 // Class for recording event traces. Trace data is either collected
 // synchronously during execution (TracingMode::kMethodTracingActive),
 // or by a separate sampling thread (TracingMode::kSampleProfilingActive).
-class Trace FINAL : public instrumentation::InstrumentationListener {
+class Trace final : public instrumentation::InstrumentationListener {
  public:
   enum TraceFlag {
     kTraceCountAllocs = 1,
@@ -181,57 +181,57 @@
                      ArtMethod* method,
                      uint32_t dex_pc)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
-      OVERRIDE;
+      override;
   void MethodExited(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     const JValue& return_value)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
-      OVERRIDE;
+      override;
   void MethodUnwind(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
-      OVERRIDE;
+      override;
   void DexPcMoved(Thread* thread,
                   Handle<mirror::Object> this_object,
                   ArtMethod* method,
                   uint32_t new_dex_pc)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
-      OVERRIDE;
+      override;
   void FieldRead(Thread* thread,
                  Handle<mirror::Object> this_object,
                  ArtMethod* method,
                  uint32_t dex_pc,
                  ArtField* field)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void FieldWritten(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     ArtField* field,
                     const JValue& field_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void ExceptionThrown(Thread* thread,
                        Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void Branch(Thread* thread,
               ArtMethod* method,
               uint32_t dex_pc,
               int32_t dex_pc_offset)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void InvokeVirtualOrInterface(Thread* thread,
                                 Handle<mirror::Object> this_object,
                                 ArtMethod* caller,
                                 uint32_t dex_pc,
                                 ArtMethod* callee)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
-      REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) override;
   // Reuse an old stack trace if it exists, otherwise allocate a new one.
   static std::vector<ArtMethod*>* AllocStackTrace();
   // Clear and store an old stack trace for later use.
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 7adf140..de6edd2 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -39,7 +39,7 @@
 }  // namespace mirror
 class InternTable;
 
-class Transaction FINAL {
+class Transaction final {
  public:
   static constexpr const char* kAbortExceptionDescriptor = "dalvik.system.TransactionAbortError";
   static constexpr const char* kAbortExceptionSignature = "Ldalvik/system/TransactionAbortError;";
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index a7dc225..452cd8e 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -150,10 +150,11 @@
       (writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
       unquicken ? MAP_PRIVATE : MAP_SHARED,
       file_fd,
-      0 /* start offset */,
+      /* start */ 0u,
       low_4gb,
-      mmap_reuse,
       vdex_filename.c_str(),
+      mmap_reuse,
+      /* reservation */ nullptr,
       error_msg);
   if (!mmap.IsValid()) {
     *error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg;
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index e67067c..e5e71a4 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -25,7 +25,7 @@
 namespace art {
 namespace verifier {
 
-class InstructionFlags FINAL {
+class InstructionFlags final {
  public:
   InstructionFlags() : flags_(0) {}
 
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 29da376..3099b23 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -378,11 +378,11 @@
 };
 
 // Bottom type.
-class ConflictType FINAL : public RegType {
+class ConflictType final : public RegType {
  public:
-  bool IsConflict() const OVERRIDE { return true; }
+  bool IsConflict() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the singleton Conflict instance.
   static const ConflictType* GetInstance() PURE;
@@ -396,7 +396,7 @@
   // Destroy the singleton instance.
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kConflict;
   }
 
@@ -414,11 +414,11 @@
 // A variant of the bottom type used to specify an undefined value in the
 // incoming registers.
 // Merging with UndefinedType yields ConflictType which is the true bottom.
-class UndefinedType FINAL : public RegType {
+class UndefinedType final : public RegType {
  public:
-  bool IsUndefined() const OVERRIDE { return true; }
+  bool IsUndefined() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the singleton Undefined instance.
   static const UndefinedType* GetInstance() PURE;
@@ -432,7 +432,7 @@
   // Destroy the singleton instance.
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 
@@ -453,7 +453,7 @@
                 const StringPiece& descriptor,
                 uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 };
 
 class Cat1Type : public PrimitiveType {
@@ -462,10 +462,10 @@
            uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
-class IntegerType FINAL : public Cat1Type {
+class IntegerType final : public Cat1Type {
  public:
-  bool IsInteger() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsInteger() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const IntegerType* CreateInstance(ObjPtr<mirror::Class> klass,
                                            const StringPiece& descriptor,
                                            uint16_t cache_id)
@@ -473,7 +473,7 @@
   static const IntegerType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kInteger;
   }
 
@@ -487,10 +487,10 @@
   static const IntegerType* instance_;
 };
 
-class BooleanType FINAL : public Cat1Type {
+class BooleanType final : public Cat1Type {
  public:
-  bool IsBoolean() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsBoolean() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const BooleanType* CreateInstance(ObjPtr<mirror::Class> klass,
                                            const StringPiece& descriptor,
                                            uint16_t cache_id)
@@ -498,7 +498,7 @@
   static const BooleanType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kBoolean;
   }
 
@@ -513,10 +513,10 @@
   static const BooleanType* instance_;
 };
 
-class ByteType FINAL : public Cat1Type {
+class ByteType final : public Cat1Type {
  public:
-  bool IsByte() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsByte() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const ByteType* CreateInstance(ObjPtr<mirror::Class> klass,
                                         const StringPiece& descriptor,
                                         uint16_t cache_id)
@@ -524,7 +524,7 @@
   static const ByteType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kByte;
   }
 
@@ -538,10 +538,10 @@
   static const ByteType* instance_;
 };
 
-class ShortType FINAL : public Cat1Type {
+class ShortType final : public Cat1Type {
  public:
-  bool IsShort() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsShort() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const ShortType* CreateInstance(ObjPtr<mirror::Class> klass,
                                          const StringPiece& descriptor,
                                          uint16_t cache_id)
@@ -549,7 +549,7 @@
   static const ShortType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kShort;
   }
 
@@ -562,10 +562,10 @@
   static const ShortType* instance_;
 };
 
-class CharType FINAL : public Cat1Type {
+class CharType final : public Cat1Type {
  public:
-  bool IsChar() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsChar() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const CharType* CreateInstance(ObjPtr<mirror::Class> klass,
                                         const StringPiece& descriptor,
                                         uint16_t cache_id)
@@ -573,7 +573,7 @@
   static const CharType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kChar;
   }
 
@@ -587,10 +587,10 @@
   static const CharType* instance_;
 };
 
-class FloatType FINAL : public Cat1Type {
+class FloatType final : public Cat1Type {
  public:
-  bool IsFloat() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsFloat() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const FloatType* CreateInstance(ObjPtr<mirror::Class> klass,
                                          const StringPiece& descriptor,
                                          uint16_t cache_id)
@@ -598,7 +598,7 @@
   static const FloatType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kFloat;
   }
 
@@ -619,11 +619,11 @@
            uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
-class LongLoType FINAL : public Cat2Type {
+class LongLoType final : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsLongLo() const OVERRIDE { return true; }
-  bool IsLong() const OVERRIDE { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsLongLo() const override { return true; }
+  bool IsLong() const override { return true; }
   static const LongLoType* CreateInstance(ObjPtr<mirror::Class> klass,
                                           const StringPiece& descriptor,
                                           uint16_t cache_id)
@@ -631,7 +631,7 @@
   static const LongLoType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kLongLo;
   }
 
@@ -645,10 +645,10 @@
   static const LongLoType* instance_;
 };
 
-class LongHiType FINAL : public Cat2Type {
+class LongHiType final : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsLongHi() const OVERRIDE { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsLongHi() const override { return true; }
   static const LongHiType* CreateInstance(ObjPtr<mirror::Class> klass,
                                           const StringPiece& descriptor,
                                           uint16_t cache_id)
@@ -656,7 +656,7 @@
   static const LongHiType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 
@@ -670,11 +670,11 @@
   static const LongHiType* instance_;
 };
 
-class DoubleLoType FINAL : public Cat2Type {
+class DoubleLoType final : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsDoubleLo() const OVERRIDE { return true; }
-  bool IsDouble() const OVERRIDE { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsDoubleLo() const override { return true; }
+  bool IsDouble() const override { return true; }
   static const DoubleLoType* CreateInstance(ObjPtr<mirror::Class> klass,
                                             const StringPiece& descriptor,
                                             uint16_t cache_id)
@@ -682,7 +682,7 @@
   static const DoubleLoType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kDoubleLo;
   }
 
@@ -696,10 +696,10 @@
   static const DoubleLoType* instance_;
 };
 
-class DoubleHiType FINAL : public Cat2Type {
+class DoubleHiType final : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  virtual bool IsDoubleHi() const OVERRIDE { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsDoubleHi() const override { return true; }
   static const DoubleHiType* CreateInstance(ObjPtr<mirror::Class> klass,
                                             const StringPiece& descriptor,
                                             uint16_t cache_id)
@@ -707,7 +707,7 @@
   static const DoubleHiType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 
@@ -751,30 +751,30 @@
     }
   }
 
-  bool IsZero() const OVERRIDE {
+  bool IsZero() const override {
     return IsPreciseConstant() && ConstantValue() == 0;
   }
-  bool IsOne() const OVERRIDE {
+  bool IsOne() const override {
     return IsPreciseConstant() && ConstantValue() == 1;
   }
 
-  bool IsConstantChar() const OVERRIDE {
+  bool IsConstantChar() const override {
     return IsConstant() && ConstantValue() >= 0 &&
            ConstantValue() <= std::numeric_limits<uint16_t>::max();
   }
-  bool IsConstantByte() const OVERRIDE {
+  bool IsConstantByte() const override {
     return IsConstant() &&
            ConstantValue() >= std::numeric_limits<int8_t>::min() &&
            ConstantValue() <= std::numeric_limits<int8_t>::max();
   }
-  bool IsConstantShort() const OVERRIDE {
+  bool IsConstantShort() const override {
     return IsConstant() &&
            ConstantValue() >= std::numeric_limits<int16_t>::min() &&
            ConstantValue() <= std::numeric_limits<int16_t>::max();
   }
-  virtual bool IsConstantTypes() const OVERRIDE { return true; }
+  bool IsConstantTypes() const override { return true; }
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 
@@ -782,7 +782,7 @@
   const uint32_t constant_;
 };
 
-class PreciseConstType FINAL : public ConstantType {
+class PreciseConstType final : public ConstantType {
  public:
   PreciseConstType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -790,94 +790,94 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsPreciseConstant() const OVERRIDE { return true; }
+  bool IsPreciseConstant() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class PreciseConstLoType FINAL : public ConstantType {
+class PreciseConstLoType final : public ConstantType {
  public:
   PreciseConstLoType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsPreciseConstantLo() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsPreciseConstantLo() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class PreciseConstHiType FINAL : public ConstantType {
+class PreciseConstHiType final : public ConstantType {
  public:
   PreciseConstHiType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsPreciseConstantHi() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsPreciseConstantHi() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class ImpreciseConstType FINAL : public ConstantType {
+class ImpreciseConstType final : public ConstantType {
  public:
   ImpreciseConstType(uint32_t constat, uint16_t cache_id)
        REQUIRES_SHARED(Locks::mutator_lock_)
        : ConstantType(constat, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsImpreciseConstant() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsImpreciseConstant() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class ImpreciseConstLoType FINAL : public ConstantType {
+class ImpreciseConstLoType final : public ConstantType {
  public:
   ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsImpreciseConstantLo() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsImpreciseConstantLo() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class ImpreciseConstHiType FINAL : public ConstantType {
+class ImpreciseConstHiType final : public ConstantType {
  public:
   ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsImpreciseConstantHi() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsImpreciseConstantHi() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
 // Special "null" type that captures the semantics of null / bottom.
-class NullType FINAL : public RegType {
+class NullType final : public RegType {
  public:
-  bool IsNull() const OVERRIDE {
+  bool IsNull() const override {
     return true;
   }
 
@@ -892,15 +892,15 @@
 
   static void Destroy();
 
-  std::string Dump() const OVERRIDE {
+  std::string Dump() const override {
     return "null";
   }
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 
-  bool IsConstantTypes() const OVERRIDE {
+  bool IsConstantTypes() const override {
     return true;
   }
 
@@ -925,15 +925,15 @@
                     uint16_t cache_id)
       : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
 
-  bool IsUninitializedTypes() const OVERRIDE;
-  bool IsNonZeroReferenceTypes() const OVERRIDE;
+  bool IsUninitializedTypes() const override;
+  bool IsNonZeroReferenceTypes() const override;
 
   uint32_t GetAllocationPc() const {
     DCHECK(IsUninitializedTypes());
     return allocation_pc_;
   }
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 
@@ -942,7 +942,7 @@
 };
 
 // Similar to ReferenceType but not yet having been passed to a constructor.
-class UninitializedReferenceType FINAL : public UninitializedType {
+class UninitializedReferenceType final : public UninitializedType {
  public:
   UninitializedReferenceType(ObjPtr<mirror::Class> klass,
                              const StringPiece& descriptor,
@@ -953,16 +953,16 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUninitializedReference() const OVERRIDE { return true; }
+  bool IsUninitializedReference() const override { return true; }
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 // Similar to UnresolvedReferenceType but not yet having been passed to a
 // constructor.
-class UnresolvedUninitializedRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedRefType final : public UninitializedType {
  public:
   UnresolvedUninitializedRefType(const StringPiece& descriptor,
                                  uint32_t allocation_pc, uint16_t cache_id)
@@ -971,19 +971,19 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUnresolvedAndUninitializedReference() const OVERRIDE { return true; }
+  bool IsUnresolvedAndUninitializedReference() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 };
 
 // Similar to UninitializedReferenceType but special case for the this argument
 // of a constructor.
-class UninitializedThisReferenceType FINAL : public UninitializedType {
+class UninitializedThisReferenceType final : public UninitializedType {
  public:
   UninitializedThisReferenceType(ObjPtr<mirror::Class> klass,
                                  const StringPiece& descriptor,
@@ -993,17 +993,17 @@
     CheckConstructorInvariants(this);
   }
 
-  virtual bool IsUninitializedThisReference() const OVERRIDE { return true; }
+  bool IsUninitializedThisReference() const override { return true; }
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 };
 
-class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedThisRefType final : public UninitializedType {
  public:
   UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
                                      uint16_t cache_id)
@@ -1012,19 +1012,19 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUnresolvedAndUninitializedThisReference() const OVERRIDE { return true; }
+  bool IsUnresolvedAndUninitializedThisReference() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 };
 
 // A type of register holding a reference to an Object of type GetClass or a
 // sub-class.
-class ReferenceType FINAL : public RegType {
+class ReferenceType final : public RegType {
  public:
   ReferenceType(ObjPtr<mirror::Class> klass,
                 const StringPiece& descriptor,
@@ -1033,15 +1033,15 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsReference() const OVERRIDE { return true; }
+  bool IsReference() const override { return true; }
 
-  bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+  bool IsNonZeroReferenceTypes() const override { return true; }
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 };
@@ -1049,22 +1049,22 @@
 // A type of register holding a reference to an Object of type GetClass and only
 // an object of that
 // type.
-class PreciseReferenceType FINAL : public RegType {
+class PreciseReferenceType final : public RegType {
  public:
   PreciseReferenceType(ObjPtr<mirror::Class> klass,
                        const StringPiece& descriptor,
                        uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool IsPreciseReference() const OVERRIDE { return true; }
+  bool IsPreciseReference() const override { return true; }
 
-  bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+  bool IsNonZeroReferenceTypes() const override { return true; }
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 };
@@ -1076,9 +1076,9 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       : RegType(nullptr, descriptor, cache_id) {}
 
-  bool IsNonZeroReferenceTypes() const OVERRIDE;
+  bool IsNonZeroReferenceTypes() const override;
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 };
@@ -1086,7 +1086,7 @@
 // Similar to ReferenceType except the Class couldn't be loaded. Assignability
 // and other tests made
 // of this type must be conservative.
-class UnresolvedReferenceType FINAL : public UnresolvedType {
+class UnresolvedReferenceType final : public UnresolvedType {
  public:
   UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1094,18 +1094,18 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUnresolvedReference() const OVERRIDE { return true; }
+  bool IsUnresolvedReference() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 };
 
 // Type representing the super-class of an unresolved type.
-class UnresolvedSuperClass FINAL : public UnresolvedType {
+class UnresolvedSuperClass final : public UnresolvedType {
  public:
   UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
                        uint16_t cache_id)
@@ -1116,19 +1116,19 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUnresolvedSuperClass() const OVERRIDE { return true; }
+  bool IsUnresolvedSuperClass() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
   uint16_t GetUnresolvedSuperClassChildId() const {
     DCHECK(IsUnresolvedSuperClass());
     return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
   }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 
   const uint16_t unresolved_child_id_;
   const RegTypeCache* const reg_type_cache_;
@@ -1136,7 +1136,7 @@
 
 // A merge of unresolved (and resolved) types. If the types were resolved this may be
 // Conflict or another known ReferenceType.
-class UnresolvedMergedType FINAL : public UnresolvedType {
+class UnresolvedMergedType final : public UnresolvedType {
  public:
   // Note: the constructor will copy the unresolved BitVector, not use it directly.
   UnresolvedMergedType(const RegType& resolved,
@@ -1154,17 +1154,17 @@
     return unresolved_types_;
   }
 
-  bool IsUnresolvedMergedReference() const OVERRIDE { return true; }
+  bool IsUnresolvedMergedReference() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
-  bool IsArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsObjectArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsObjectArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 
   const RegTypeCache* const reg_type_cache_;
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 15a38f3..0430d20 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -1042,7 +1042,7 @@
 
 class RegTypeOOMTest : public RegTypeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     SetUpRuntimeOptionsForFillHeap(options);
 
     // We must not appear to be a compiler, or we'll abort on the host.
diff --git a/simulator/code_simulator_arm64.h b/simulator/code_simulator_arm64.h
index 8b66529..e726500 100644
--- a/simulator/code_simulator_arm64.h
+++ b/simulator/code_simulator_arm64.h
@@ -36,11 +36,11 @@
   static CodeSimulatorArm64* CreateCodeSimulatorArm64();
   virtual ~CodeSimulatorArm64();
 
-  void RunFrom(intptr_t code_buffer) OVERRIDE;
+  void RunFrom(intptr_t code_buffer) override;
 
-  bool GetCReturnBool() const OVERRIDE;
-  int32_t GetCReturnInt32() const OVERRIDE;
-  int64_t GetCReturnInt64() const OVERRIDE;
+  bool GetCReturnBool() const override;
+  int32_t GetCReturnInt32() const override;
+  int64_t GetCReturnInt64() const override;
 
  private:
   CodeSimulatorArm64();
diff --git a/test/071-dexfile-map-clean/run b/test/071-dexfile-map-clean/run
index 9c100ec..afa2ff7 100755
--- a/test/071-dexfile-map-clean/run
+++ b/test/071-dexfile-map-clean/run
@@ -14,13 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Run without dex2oat so that we don't create oat/vdex files
-# when trying to load the secondary dex file.
-
-# In this way, the secondary dex file will be forced to be
-# loaded directly.
-#
-# In addition, make sure we call 'sync'
+# Make sure we call 'sync'
 # before executing dalvikvm because otherwise
 # it's highly likely the pushed JAR files haven't
 # been committed to permanent storage yet,
@@ -28,4 +22,4 @@
 # the memory is dirty (despite being file-backed).
 # (Note: this was reproducible 100% of the time on
 # a target angler device).
-./default-run "$@" --no-dex2oat --sync
+./default-run "$@" --sync
diff --git a/test/071-dexfile-map-clean/src/Main.java b/test/071-dexfile-map-clean/src/Main.java
index 8a196dd..e784ac6 100644
--- a/test/071-dexfile-map-clean/src/Main.java
+++ b/test/071-dexfile-map-clean/src/Main.java
@@ -90,19 +90,6 @@
       return true;
     }
 
-    // This test takes relies on dex2oat being skipped.
-    // (enforced in 'run' file by using '--no-dex2oat'
-    //
-    // This could happen in a non-test situation
-    // if a secondary dex file is loaded (but not yet maintenance-mode compiled)
-    // with JIT.
-    //
-    // Or it could also happen if a secondary dex file is loaded and forced
-    // into running into the interpreter (e.g. duplicate classes).
-    //
-    // Rather than relying on those weird fallbacks,
-    // we force the runtime not to dex2oat the dex file to ensure
-    // this test is repeatable and less brittle.
     private static void testDexMemoryMaps() throws Exception {
         // Ensure that the secondary dex file is mapped clean (directly from JAR file).
         String smaps = new String(Files.readAllBytes(Paths.get("/proc/self/smaps")));
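
The test above decides cleanliness by parsing /proc/self/smaps for the secondary dex mapping. As a rough, standalone sketch of that kind of check (hypothetical class and helper names, not part of this change; the real test has its own parsing and assertions), one could sum the dirty counters reported for mappings whose header line mentions the JAR:

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.List;

    public class SmapsSketch {
      // Sum the Shared_Dirty/Private_Dirty kB of every mapping whose header
      // line mentions 'needle' (e.g. the pushed secondary ".jar").
      static long dirtyKbFor(String needle) throws Exception {
        List<String> lines = Files.readAllLines(Paths.get("/proc/self/smaps"));
        long dirtyKb = 0;
        boolean inTarget = false;
        for (String line : lines) {
          if (line.matches("^[0-9a-f]+-[0-9a-f]+ .*")) {
            // Header line of a new mapping: remember whether it names the file we care about.
            inTarget = line.contains(needle);
          } else if (inTarget
              && (line.startsWith("Shared_Dirty:") || line.startsWith("Private_Dirty:"))) {
            dirtyKb += Long.parseLong(line.trim().split("\\s+")[1]);  // value is in kB
          }
        }
        return dirtyKb;
      }

      public static void main(String[] args) throws Exception {
        System.out.println("Dirty kB for .jar mappings: " + dirtyKbFor(".jar"));
      }
    }
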
diff --git a/test/116-nodex2oat/expected.txt b/test/116-nodex2oat/expected.txt
index 157dfc4..c6c7daa 100644
--- a/test/116-nodex2oat/expected.txt
+++ b/test/116-nodex2oat/expected.txt
@@ -1,9 +1,2 @@
-Run -Xnodex2oat
 JNI_OnLoad called
-Has oat is false, is dex2oat enabled is false.
-Run -Xdex2oat
-JNI_OnLoad called
-Has oat is true, is dex2oat enabled is true.
-Run default
-JNI_OnLoad called
-Has oat is true, is dex2oat enabled is true.
+Has oat is false.
diff --git a/test/116-nodex2oat/run b/test/116-nodex2oat/run
index d7984ce..9063685 100755
--- a/test/116-nodex2oat/run
+++ b/test/116-nodex2oat/run
@@ -24,20 +24,4 @@
   exit 1
 fi
 
-# Make sure we can run without an oat file.
-echo "Run -Xnodex2oat"
-${RUN} ${flags} --runtime-option -Xnodex2oat
-return_status1=$?
-
-# Make sure we can run with the oat file.
-echo "Run -Xdex2oat"
-${RUN} ${flags} --runtime-option -Xdex2oat
-return_status2=$?
-
-# Make sure we can run with the default settings.
-echo "Run default"
 ${RUN} ${flags}
-return_status3=$?
-
-# Make sure we don't silently ignore an early failure.
-(exit $return_status1) && (exit $return_status2) && (exit $return_status3)
diff --git a/test/116-nodex2oat/src/Main.java b/test/116-nodex2oat/src/Main.java
index 229735f..5491c49 100644
--- a/test/116-nodex2oat/src/Main.java
+++ b/test/116-nodex2oat/src/Main.java
@@ -17,17 +17,8 @@
 public class Main {
   public static void main(String[] args) {
     System.loadLibrary(args[0]);
-    System.out.println(
-        "Has oat is " + hasOatFile() + ", is dex2oat enabled is " + isDex2OatEnabled() + ".");
-
-    if (hasOatFile() && !isDex2OatEnabled()) {
-      throw new Error("Application with dex2oat disabled runs with an oat file");
-    } else if (!hasOatFile() && isDex2OatEnabled()) {
-      throw new Error("Application with dex2oat enabled runs without an oat file");
-    }
+    System.out.println("Has oat is " + hasOatFile() + ".");
   }
 
   private native static boolean hasOatFile();
-
-  private native static boolean isDex2OatEnabled();
 }
diff --git a/test/117-nopatchoat/expected.txt b/test/117-nopatchoat/expected.txt
index 0cd4715..7a24e31 100644
--- a/test/117-nopatchoat/expected.txt
+++ b/test/117-nopatchoat/expected.txt
@@ -1,12 +1,3 @@
-Run without dex2oat/patchoat
 JNI_OnLoad called
-dex2oat & patchoat are disabled, has oat is true, has executable oat is expected.
-This is a function call
-Run with dexoat/patchoat
-JNI_OnLoad called
-dex2oat & patchoat are enabled, has oat is true, has executable oat is expected.
-This is a function call
-Run default
-JNI_OnLoad called
-dex2oat & patchoat are enabled, has oat is true, has executable oat is expected.
+Has oat is true, has executable oat is expected.
 This is a function call
diff --git a/test/117-nopatchoat/run b/test/117-nopatchoat/run
index 0627fe5..4c33f7a 100755
--- a/test/117-nopatchoat/run
+++ b/test/117-nopatchoat/run
@@ -34,20 +34,4 @@
   exit 1
 fi
 
-# Make sure we can run without relocation
-echo "Run without dex2oat/patchoat"
-${RUN} ${flags} --runtime-option -Xnodex2oat
-return_status1=$?
-
-# Make sure we can run with the oat file.
-echo "Run with dexoat/patchoat"
-${RUN} ${flags} --runtime-option -Xdex2oat
-return_status2=$?
-
-# Make sure we can run with the default settings.
-echo "Run default"
 ${RUN} ${flags}
-return_status3=$?
-
-# Make sure we don't silently ignore an early failure.
-(exit $return_status1) && (exit $return_status2) && (exit $return_status3)
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
index 816eb17..ef47ab9 100644
--- a/test/117-nopatchoat/src/Main.java
+++ b/test/117-nopatchoat/src/Main.java
@@ -23,18 +23,13 @@
     // Hitting this condition should be rare and ideally we would prevent it from happening but
     // there is no way to do so without major changes to the run-test framework.
     boolean executable_correct = (needsRelocation() ?
-        hasExecutableOat() == (isDex2OatEnabled() || isRelocationDeltaZero()) :
+        hasExecutableOat() == isRelocationDeltaZero() :
         hasExecutableOat() == true);
 
     System.out.println(
-        "dex2oat & patchoat are " + ((isDex2OatEnabled()) ? "enabled" : "disabled") +
-        ", has oat is " + hasOatFile() + ", has executable oat is " + (
+        "Has oat is " + hasOatFile() + ", has executable oat is " + (
         executable_correct ? "expected" : "not expected") + ".");
 
-    if (!hasOatFile() && isDex2OatEnabled()) {
-      throw new Error("Application with dex2oat enabled runs without an oat file");
-    }
-
     System.out.println(functionCall());
   }
 
@@ -47,8 +42,6 @@
     return ret.substring(0, ret.length() - 1);
   }
 
-  private native static boolean isDex2OatEnabled();
-
   private native static boolean needsRelocation();
 
   private native static boolean hasOatFile();
diff --git a/test/118-noimage-dex2oat/run b/test/118-noimage-dex2oat/run
index e1e2577..d68b0a0 100644
--- a/test/118-noimage-dex2oat/run
+++ b/test/118-noimage-dex2oat/run
@@ -47,12 +47,12 @@
 
 # Make sure we can run without an oat file.
 echo "Run -Xnoimage-dex2oat"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat --runtime-option -Xnodex2oat
+${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat
 return_status1=$?
 
 # Make sure we cannot run without an oat file without fallback.
 echo "Run -Xnoimage-dex2oat -Xno-dex-file-fallback"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat --runtime-option -Xnodex2oat \
+${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat \
   --runtime-option -Xno-dex-file-fallback
 return_status2=$?
 
diff --git a/test/134-nodex2oat-nofallback/run b/test/134-nodex2oat-nofallback/run
index 33265ac..359d22d 100755
--- a/test/134-nodex2oat-nofallback/run
+++ b/test/134-nodex2oat-nofallback/run
@@ -17,6 +17,6 @@
 flags="${@}"
 
 # Make sure we cannot run without an oat file without fallback.
-${RUN} ${flags} --runtime-option -Xnodex2oat --runtime-option -Xno-dex-file-fallback
+${RUN} ${flags} --runtime-option -Xno-dex-file-fallback
 # Suppress the exit value. This isn't expected to be successful.
 echo "Exit status:" $?
diff --git a/test/134-nodex2oat-nofallback/src/Main.java b/test/134-nodex2oat-nofallback/src/Main.java
index 086ffb9..73f67c4 100644
--- a/test/134-nodex2oat-nofallback/src/Main.java
+++ b/test/134-nodex2oat-nofallback/src/Main.java
@@ -17,17 +17,8 @@
 public class Main {
   public static void main(String[] args) {
     System.loadLibrary(args[0]);
-    System.out.println(
-        "Has oat is " + hasOat() + ", is dex2oat enabled is " + isDex2OatEnabled() + ".");
-
-    if (hasOat() && !isDex2OatEnabled()) {
-      throw new Error("Application with dex2oat disabled runs with an oat file");
-    } else if (!hasOat() && isDex2OatEnabled()) {
-      throw new Error("Application with dex2oat enabled runs without an oat file");
-    }
+    System.out.println("Has oat is " + hasOat());
   }
 
   private native static boolean hasOat();
-
-  private native static boolean isDex2OatEnabled();
 }
diff --git a/test/138-duplicate-classes-check2/run b/test/138-duplicate-classes-check2/run
deleted file mode 100755
index 8494ad9..0000000
--- a/test/138-duplicate-classes-check2/run
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# We want to run as no-dex-file-fallback to confirm that even though the -ex file has a symbolic
-# reference to A, there's no class-def, so we don't detect a collision.
-exec ${RUN} --runtime-option -Xno-dex-file-fallback "${@}"
diff --git a/test/147-stripped-dex-fallback/run b/test/147-stripped-dex-fallback/run
index 37c3e1f..1f1d22e 100755
--- a/test/147-stripped-dex-fallback/run
+++ b/test/147-stripped-dex-fallback/run
@@ -21,4 +21,4 @@
   exit 1
 fi
 
-${RUN} ${flags} --strip-dex --runtime-option -Xnodex2oat
+${RUN} ${flags} --strip-dex
diff --git a/test/167-visit-locks/visit_locks.cc b/test/167-visit-locks/visit_locks.cc
index e79c880..8955f5a 100644
--- a/test/167-visit-locks/visit_locks.cc
+++ b/test/167-visit-locks/visit_locks.cc
@@ -42,7 +42,7 @@
         : StackVisitor(thread, context, StackWalkKind::kIncludeInlinedFrames) {
     }
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       ArtMethod* m = GetMethod();
 
       // Ignore runtime methods.
diff --git a/test/1945-proxy-method-arguments/get_args.cc b/test/1945-proxy-method-arguments/get_args.cc
index 211ae10..859e229 100644
--- a/test/1945-proxy-method-arguments/get_args.cc
+++ b/test/1945-proxy-method-arguments/get_args.cc
@@ -27,7 +27,7 @@
 namespace {
 
 // Visit a proxy method Quick frame at a given depth.
-class GetProxyQuickFrameVisitor FINAL : public StackVisitor {
+class GetProxyQuickFrameVisitor final : public StackVisitor {
  public:
   GetProxyQuickFrameVisitor(art::Thread* target, art::Context* ctx, size_t frame_depth)
       REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -36,7 +36,7 @@
         frame_depth_(frame_depth),
         quick_frame_(nullptr) {}
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (GetMethod()->IsRuntimeMethod()) {
       return true;
     }
diff --git a/test/203-multi-checkpoint/multi_checkpoint.cc b/test/203-multi-checkpoint/multi_checkpoint.cc
index 0799b6e..424e9f1 100644
--- a/test/203-multi-checkpoint/multi_checkpoint.cc
+++ b/test/203-multi-checkpoint/multi_checkpoint.cc
@@ -28,7 +28,7 @@
   bool second_run;
   bool second_run_interleaved;
 
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
     if (!first_run_start) {
       CHECK(!second_run);
@@ -62,7 +62,7 @@
 }
 
 struct SetupClosure : public Closure {
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
     ScopedObjectAccess soa(self);
     MutexLock tscl_mu(self, *Locks::thread_suspend_count_lock_);
diff --git a/test/305-other-fault-handler/fault_handler.cc b/test/305-other-fault-handler/fault_handler.cc
index 093a93f..db3f1f4 100644
--- a/test/305-other-fault-handler/fault_handler.cc
+++ b/test/305-other-fault-handler/fault_handler.cc
@@ -29,7 +29,7 @@
 
 namespace art {
 
-class TestFaultHandler FINAL : public FaultHandler {
+class TestFaultHandler final : public FaultHandler {
  public:
   explicit TestFaultHandler(FaultManager* manager)
       : FaultHandler(manager),
@@ -40,6 +40,7 @@
                                          /* prot */ PROT_NONE,
                                          /* low_4gb */ false,
                                          /* reuse */ false,
+                                         /* reservation */ nullptr,
                                          /* error_msg */ &map_error_,
                                          /* use_ashmem */ false)),
         was_hit_(false) {
@@ -51,7 +52,7 @@
     manager_->RemoveHandler(this);
   }
 
-  bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+  bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) override {
     CHECK_EQ(sig, SIGSEGV);
     CHECK_EQ(reinterpret_cast<uint32_t*>(siginfo->si_addr),
              GetTargetPointer()) << "Segfault on unexpected address!";
diff --git a/test/565-checker-condition-liveness/src/Main.java b/test/565-checker-condition-liveness/src/Main.java
index 374e136..25ec3f5 100644
--- a/test/565-checker-condition-liveness/src/Main.java
+++ b/test/565-checker-condition-liveness/src/Main.java
@@ -30,26 +30,52 @@
   public static int p(float arg) {
     return (arg > 5.0f) ? 0 : -1;
   }
+  
+  /// CHECK-START-{ARM,ARM64}: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[23,25]
+  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,23,25]
+  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,23,25]
+  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[23,25]
+  /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
+  /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
+  /// CHECK-DAG:                    ArrayLength                                                               liveness:22
+  /// CHECK-DAG:                    BoundsCheck           env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:24
+  /// CHECK-DAG:                    TryBoundary
+  
+  /// CHECK-START-{ARM,ARM64}-DEBUGGABLE: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[11,23,25]
+  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,23,25]
+  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,23,25]
+  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[23,25]
+  /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
+  /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
+  /// CHECK-DAG:                    ArrayLength                                                               liveness:22
+  /// CHECK-DAG:                    BoundsCheck           env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:24
+  /// CHECK-DAG:                    TryBoundary
 
-  /// CHECK-START: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+  // X86 and X86_64 generate the ArrayLength at its use site, meaning only the BoundsCheck will have environment uses.
+  /// CHECK-START-{X86,X86_64}: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
   /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[25]
   /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,25]
   /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,25]
   /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[25]
   /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
   /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
+  /// CHECK-DAG:                    ArrayLength                                                               liveness:22
   /// CHECK-DAG:                    BoundsCheck           env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:24
   /// CHECK-DAG:                    TryBoundary
 
-  /// CHECK-START-DEBUGGABLE: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+  /// CHECK-START-{X86,X86_64}-DEBUGGABLE: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
   /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[11,25]
   /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,25]
   /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,25]
   /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[25]
   /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
   /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
+  /// CHECK-DAG:                    ArrayLength                                                               liveness:22
   /// CHECK-DAG:                    BoundsCheck           env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:24
   /// CHECK-DAG:                    TryBoundary
+
   //
   // A value live at a throwing instruction in a try block may be copied by
   // the exception handler to its location at the top of the catch block.
@@ -60,22 +86,44 @@
     }
   }
 
-  /// CHECK-START: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+  /// CHECK-START-{ARM,ARM64}: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[]
+  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,19,21]
+  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,19,21]
+  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[]
+  /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
+  /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:16
+  /// CHECK-DAG:                    ArrayLength                                                               liveness:18
+  /// CHECK-DAG:                    BoundsCheck           env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
+
+  /// CHECK-START-{ARM,ARM64}-DEBUGGABLE: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[11,19,21]
+  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,19,21]
+  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,19,21]
+  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[19,21]
+  /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
+  /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:16
+  /// CHECK-DAG:                    ArrayLength                                                               liveness:18
+  /// CHECK-DAG:                    BoundsCheck           env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
+
+  /// CHECK-START-{X86,X86_64}: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
   /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[]
   /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,21]
   /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,21]
   /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[]
   /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
   /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:16
+  /// CHECK-DAG:                    ArrayLength                                                               liveness:18
   /// CHECK-DAG:                    BoundsCheck           env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
 
-  /// CHECK-START-DEBUGGABLE: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+  /// CHECK-START-{X86,X86_64}-DEBUGGABLE: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
   /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[11,21]
   /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,21]
   /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,21]
   /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[21]
   /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
   /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:16
+  /// CHECK-DAG:                    ArrayLength                                                               liveness:18
   /// CHECK-DAG:                    BoundsCheck           env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
   public static void testBoundsCheck(int x, Object y, int[] a) {
     a[1] = x;
@@ -90,12 +138,22 @@
   /// CHECK-DAG:                    NullCheck             env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:18
   /// CHECK-DAG:                    ArrayLength                                                               liveness:20
   /// CHECK-DAG:                    Deoptimize            env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:24
-
-  /// CHECK-START-DEBUGGABLE: void Main.testDeoptimize(int, java.lang.Object, int[]) liveness (after)
+  
+  /// CHECK-START-{ARM,ARM64}-DEBUGGABLE: void Main.testDeoptimize(int, java.lang.Object, int[]) liveness (after)
   /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[13,21,25]
   /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[13,21,25]
   /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[13,21,25]
-  /// CHECK-DAG:  <<Const0:i\d+>>   IntConstant 0         env_uses:[19,25]
+  /// CHECK-DAG:  <<Const0:i\d+>>   IntConstant 0         env_uses:[21,25]
+  /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:12
+  /// CHECK-DAG:                    NullCheck             env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:18
+  /// CHECK-DAG:                    ArrayLength                                                               liveness:20
+  /// CHECK-DAG:                    Deoptimize            env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:24
+
+  /// CHECK-START-{X86,X86_64}-DEBUGGABLE: void Main.testDeoptimize(int, java.lang.Object, int[]) liveness (after)
+  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[13,21,25]
+  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[13,21,25]
+  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[13,21,25]
+  /// CHECK-DAG:  <<Const0:i\d+>>   IntConstant 0         env_uses:[21,25]
   /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:12
   /// CHECK-DAG:                    NullCheck             env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:18
   /// CHECK-DAG:                    ArrayLength                                                               liveness:20
diff --git a/test/616-cha-unloading/cha_unload.cc b/test/616-cha-unloading/cha_unload.cc
index b17be6b..b5166ce 100644
--- a/test/616-cha-unloading/cha_unload.cc
+++ b/test/616-cha-unloading/cha_unload.cc
@@ -35,7 +35,7 @@
   explicit FindPointerAllocatorVisitor(void* ptr) : is_found(false), ptr_(ptr) {}
 
   bool Visit(LinearAlloc* alloc)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     is_found = alloc->Contains(ptr_);
     return !is_found;
   }
diff --git a/test/638-checker-inline-cache-intrinsic/run b/test/638-checker-inline-cache-intrinsic/run
index f43681d..1540310 100644
--- a/test/638-checker-inline-cache-intrinsic/run
+++ b/test/638-checker-inline-cache-intrinsic/run
@@ -14,4 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Set threshold to 100 to match the iterations done in the test.
+# Pass --verbose-methods to only generate the CFG of these methods.
 exec ${RUN} --jit --runtime-option -Xjitthreshold:100 -Xcompiler-option --verbose-methods=inlineMonomorphic,knownReceiverType,stringEquals $@
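
For reference, the threshold only lines up because the test invokes its hot methods about a hundred times before inspecting the compiled code; a minimal warm-up sketch of that pattern (hypothetical method names, not the actual test code) is:

    public class WarmupSketch {
      // Hypothetical hot method; with -Xjitthreshold:100 it should become a
      // JIT candidate once it has been invoked roughly 100 times.
      static int $noinline$compute(int x) {
        return x * 31 + 7;
      }

      public static void main(String[] args) {
        int acc = 0;
        // 100 iterations to line up with -Xjitthreshold:100.
        for (int i = 0; i < 100; i++) {
          acc += $noinline$compute(i);
        }
        System.out.println(acc);
      }
    }
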
diff --git a/test/652-deopt-intrinsic/run b/test/652-deopt-intrinsic/run
index 97d1ff1..1acedf9 100755
--- a/test/652-deopt-intrinsic/run
+++ b/test/652-deopt-intrinsic/run
@@ -15,4 +15,8 @@
 # limitations under the License.
 
 # Ensure this test is not subject to code collection.
-exec ${RUN} "$@" --runtime-option -Xjitinitialsize:32M
+# We also need at least a few invocations of the method Main.$noinline$doCall
+# to ensure the inline cache sees the two types being passed to the method. Pass
+# a large number in case some invocation kinds are weighted differently (e.g.
+# compiler-to-interpreter transitions).
+exec ${RUN} "$@" --runtime-option -Xjitinitialsize:32M --runtime-option -Xjitthreshold:1000
diff --git a/test/667-jit-jni-stub/run b/test/667-jit-jni-stub/run
index f235c6b..b7ce913 100755
--- a/test/667-jit-jni-stub/run
+++ b/test/667-jit-jni-stub/run
@@ -16,4 +16,4 @@
 
 # Disable AOT compilation of JNI stubs.
 # Ensure this test is not subject to unexpected code collection.
-${RUN} "${@}" --no-prebuild --no-dex2oat --runtime-option -Xjitinitialsize:32M
+${RUN} "${@}" --no-prebuild --runtime-option -Xjitinitialsize:32M
diff --git a/test/667-jit-jni-stub/src/Main.java b/test/667-jit-jni-stub/src/Main.java
index 794308d..234c5da 100644
--- a/test/667-jit-jni-stub/src/Main.java
+++ b/test/667-jit-jni-stub/src/Main.java
@@ -18,7 +18,7 @@
   public static void main(String[] args) throws Exception {
     System.loadLibrary(args[0]);
     if (isAotCompiled(Main.class, "hasJit")) {
-      throw new Error("This test must be run with --no-prebuild --no-dex2oat!");
+      throw new Error("This test must be run with --no-prebuild!");
     }
     if (!hasJit()) {
       return;
diff --git a/test/677-fsi/expected.txt b/test/677-fsi/expected.txt
index c7fb8fe..2b07343 100644
--- a/test/677-fsi/expected.txt
+++ b/test/677-fsi/expected.txt
@@ -1,3 +1,2 @@
 oat file has dex code, but APK has uncompressed dex code
-oat file has dex code, but APK has uncompressed dex code
 Hello World
diff --git a/test/677-fsi2/expected.txt b/test/677-fsi2/expected.txt
index de00847..557db03 100644
--- a/test/677-fsi2/expected.txt
+++ b/test/677-fsi2/expected.txt
@@ -1,4 +1 @@
-Run default
-Hello World
-Run without dex2oat
 Hello World
diff --git a/test/677-fsi2/run b/test/677-fsi2/run
index 039a6a7..651f082 100644
--- a/test/677-fsi2/run
+++ b/test/677-fsi2/run
@@ -14,12 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-echo "Run default"
 ${RUN} $@ --runtime-option -Xonly-use-system-oat-files
-return_status1=$?
-
-echo "Run without dex2oat"
-${RUN} $@ --no-dex2oat --runtime-option -Xonly-use-system-oat-files
-return_status2=$?
-
-(exit $return_status1) && (exit $return_status2)
diff --git a/test/718-zipfile-finalizer/expected.txt b/test/718-zipfile-finalizer/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/718-zipfile-finalizer/expected.txt
diff --git a/test/718-zipfile-finalizer/info.txt b/test/718-zipfile-finalizer/info.txt
new file mode 100644
index 0000000..c8b827e
--- /dev/null
+++ b/test/718-zipfile-finalizer/info.txt
@@ -0,0 +1,2 @@
+Test that ZipFile.finalize doesn't throw exceptions
+when invoked on a partially constructed instance.
diff --git a/test/718-zipfile-finalizer/src/Main.java b/test/718-zipfile-finalizer/src/Main.java
new file mode 100644
index 0000000..3eb439b
--- /dev/null
+++ b/test/718-zipfile-finalizer/src/Main.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.zip.ZipFile;
+
+public class Main {
+    public static void main(String[] args) throws Exception {
+      // By throwing an exception when setting up arguments of
+      // the constructor, we end up with a not fully constructed
+      // ZipFile.
+      try {
+        new ZipFile(null, throwException(), null);
+        throw new Error("Expected Exception");
+      } catch (Exception e) {
+        // expected
+      }
+
+      // Run finalizers. The golden file of this test checks
+      // that no exception is thrown from finalizers.
+      System.gc();
+      System.runFinalization();
+    }
+
+    public static int throwException() throws Exception {
+      throw new Exception();
+    }
+}
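The defensive style such a finalizer needs is easy to state: every field it touches may still be null, because the constructor can throw before assigning anything. A minimal hypothetical sketch of that pattern (not the actual libcore ZipFile implementation):

    // Sketch: a finalizer that tolerates a partially constructed instance by
    // null-checking its fields. Class and field names are illustrative only.
    import java.io.Closeable;
    import java.io.IOException;

    class GuardedResource {
      private Closeable handle;  // Still null if the constructor threw early.

      GuardedResource(Closeable handle) throws IOException {
        if (handle == null) {
          throw new IOException("bad argument");  // Leaves this.handle == null.
        }
        this.handle = handle;
      }

      @Override
      protected void finalize() throws Throwable {
        try {
          if (handle != null) {  // The null check keeps the finalizer from throwing.
            handle.close();
          }
        } finally {
          super.finalize();
        }
      }
    }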
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index 57c0274..521f9a6 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -87,7 +87,7 @@
     jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
                 jlong size ATTRIBUTE_UNUSED,
                 jlong* tag_ptr ATTRIBUTE_UNUSED,
-                jint length ATTRIBUTE_UNUSED) OVERRIDE {
+                jint length ATTRIBUTE_UNUSED) override {
       counter++;
       if (counter == stop_after) {
         return JVMTI_VISIT_ABORT;
@@ -120,7 +120,7 @@
     jintArray lengths) {
   class DataIterationConfig : public IterationConfig {
    public:
-    jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) OVERRIDE {
+    jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) override {
       class_tags_.push_back(class_tag);
       sizes_.push_back(size);
       tags_.push_back(*tag_ptr);
@@ -164,7 +164,7 @@
     jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
                 jlong size ATTRIBUTE_UNUSED,
                 jlong* tag_ptr,
-                jint length ATTRIBUTE_UNUSED) OVERRIDE {
+                jint length ATTRIBUTE_UNUSED) override {
       jlong current_tag = *tag_ptr;
       if (current_tag != 0) {
         *tag_ptr = current_tag + 10;
@@ -418,5 +418,21 @@
   return (status & JVMTI_CLASS_STATUS_INITIALIZED) != 0;
 }
 
+extern "C" JNIEXPORT jint JNICALL Java_art_Test906_iterateOverInstancesCount(
+    JNIEnv* env, jclass, jclass target) {
+  jint cnt = 0;
+  auto count_func = [](jlong, jlong, jlong*, void* user_data) -> jvmtiIterationControl {
+    *reinterpret_cast<jint*>(user_data) += 1;
+    return JVMTI_ITERATION_CONTINUE;
+  };
+  JvmtiErrorToException(env,
+                        jvmti_env,
+                        jvmti_env->IterateOverInstancesOfClass(target,
+                                                               JVMTI_HEAP_OBJECT_EITHER,
+                                                               count_func,
+                                                               &cnt));
+  return cnt;
+}
+
 }  // namespace Test906IterateHeap
 }  // namespace art
diff --git a/test/906-iterate-heap/src/art/Test906.java b/test/906-iterate-heap/src/art/Test906.java
index be9663a..190f36f 100644
--- a/test/906-iterate-heap/src/art/Test906.java
+++ b/test/906-iterate-heap/src/art/Test906.java
@@ -18,6 +18,7 @@
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Random;
 
 public class Test906 {
   public static void run() throws Exception {
@@ -69,6 +70,40 @@
     throw lastThrow;
   }
 
+  private static Object[] GenTs(Class<?> k) throws Exception {
+    Object[] ret = new Object[new Random().nextInt(100) + 10];
+    for (int i = 0; i < ret.length; i++) {
+      ret[i] = k.newInstance();
+    }
+    return ret;
+  }
+
+  private static void checkEq(int a, int b) {
+    if (a != b) {
+      Error e = new Error("Failed: Expected equal " + a + " and " + b);
+      System.out.println(e);
+      e.printStackTrace(System.out);
+    }
+  }
+
+  public static class Foo {}
+  public static class Bar extends Foo {}
+  public static class Baz extends Bar {}
+  public static class Alpha extends Bar {}
+  public static class MISSING extends Baz {}
+  private static void testIterateOverInstances() throws Exception {
+    Object[] foos = GenTs(Foo.class);
+    Object[] bars = GenTs(Bar.class);
+    Object[] bazs = GenTs(Baz.class);
+    Object[] alphas = GenTs(Alpha.class);
+    checkEq(0, iterateOverInstancesCount(MISSING.class));
+    checkEq(alphas.length, iterateOverInstancesCount(Alpha.class));
+    checkEq(bazs.length, iterateOverInstancesCount(Baz.class));
+    checkEq(bazs.length + alphas.length + bars.length, iterateOverInstancesCount(Bar.class));
+    checkEq(bazs.length + alphas.length + bars.length + foos.length,
+        iterateOverInstancesCount(Foo.class));
+  }
+
   public static void doTest() throws Exception {
     A a = new A();
     B b = new B();
@@ -86,6 +121,8 @@
 
     testHeapCount();
 
+    testIterateOverInstances();
+
     long classTags[] = new long[100];
     long sizes[] = new long[100];
     long tags[] = new long[100];
@@ -308,6 +345,8 @@
     return Main.getTag(o);
   }
 
+  private static native int iterateOverInstancesCount(Class<?> klass);
+
   private static native boolean checkInitialized(Class<?> klass);
   private static native int iterateThroughHeapCount(int heapFilter,
       Class<?> klassFilter, int stopAfter);
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index b07554c..b0e0f07 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -41,8 +41,6 @@
 
 using android::base::StringPrintf;
 
-#define FINAL final
-#define OVERRIDE override
 #define UNREACHABLE  __builtin_unreachable
 
 extern "C" JNIEXPORT void JNICALL Java_art_Test913_forceGarbageCollection(
@@ -144,7 +142,7 @@
     jint stop_after,
     jint follow_set,
     jobject jniRef) {
-  class PrintIterationConfig FINAL : public IterationConfig {
+  class PrintIterationConfig final : public IterationConfig {
    public:
     PrintIterationConfig(jint _stop_after, jint _follow_set)
         : counter_(0),
@@ -160,7 +158,7 @@
                 jlong* tag_ptr,
                 jlong* referrer_tag_ptr,
                 jint length,
-                void* user_data ATTRIBUTE_UNUSED) OVERRIDE {
+                void* user_data ATTRIBUTE_UNUSED) override {
       jlong tag = *tag_ptr;
 
       // Ignore any jni-global roots with untagged classes. These can be from the environment,
@@ -303,7 +301,7 @@
       }
 
      protected:
-      std::string PrintArrowType() const OVERRIDE {
+      std::string PrintArrowType() const override {
         char* name = nullptr;
         if (info_.jni_local.method != nullptr) {
           jvmti_env->GetMethodName(info_.jni_local.method, &name, nullptr, nullptr);
@@ -349,7 +347,7 @@
       }
 
      protected:
-      std::string PrintArrowType() const OVERRIDE {
+      std::string PrintArrowType() const override {
         char* name = nullptr;
         if (info_.stack_local.method != nullptr) {
           jvmti_env->GetMethodName(info_.stack_local.method, &name, nullptr, nullptr);
@@ -391,7 +389,7 @@
           : Elem(referrer, referree, size, length), string_(string) {}
 
      protected:
-      std::string PrintArrowType() const OVERRIDE {
+      std::string PrintArrowType() const override {
         return string_;
       }
 
diff --git a/test/916-obsolete-jit/run b/test/916-obsolete-jit/run
index b6d406f..c6e62ae 100755
--- a/test/916-obsolete-jit/run
+++ b/test/916-obsolete-jit/run
@@ -14,12 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# We are testing the redefinition of compiled code but with jvmti we only allow
-# jitted compiled code so always add the --jit argument.
-if [[ "$@" == *"--jit"* ]]; then
-  other_args=""
-else
-  other_args="--jit"
-fi
-./default-run "$@" ${other_args} \
-                   --jvmti
+./default-run "$@" --jvmti
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 53d4c37..ffaa2cd 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -49,6 +49,7 @@
 
 TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
 TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
+TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
 TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
 TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
 TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 9344b24..da79164 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -71,13 +71,6 @@
   return Runtime::Current()->IsVerificationSoftFail() ? JNI_TRUE : JNI_FALSE;
 }
 
-// public static native boolean isDex2OatEnabled();
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv* env ATTRIBUTE_UNUSED,
-                                                                 jclass cls ATTRIBUTE_UNUSED) {
-  return Runtime::Current()->IsDex2OatEnabled();
-}
-
 // public static native boolean hasImage();
 
 extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv* env ATTRIBUTE_UNUSED,
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index 192274e..d74d2ef 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -77,7 +77,7 @@
         prev_was_runtime_(true),
         require_deoptable_(require_deoptable) {}
 
-  virtual bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (goal_ == GetMethod()) {
       method_is_interpreted_ = (require_deoptable_ && prev_was_runtime_) || IsShadowFrame();
       method_found_ = true;
diff --git a/test/dexdump/bytecodes.txt b/test/dexdump/bytecodes.txt
index e1a381e..1ed66e8 100644
--- a/test/dexdump/bytecodes.txt
+++ b/test/dexdump/bytecodes.txt
@@ -176,7 +176,7 @@
       ins           : 1
       outs          : 1
       insns size    : 4 16-bit code units
-0009a8:                                        |[0009a8] com.google.android.test.R.attr.<init>:()V
+0009a8:                                        |[0009a8] com.google.android.test.R$attr.<init>:()V
 0009b8: 7010 1900 0000                         |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@0019
 0009be: 0e00                                   |0003: return-void
       catches       : (none)
@@ -228,7 +228,7 @@
       ins           : 1
       outs          : 1
       insns size    : 4 16-bit code units
-0009c0:                                        |[0009c0] com.google.android.test.R.drawable.<init>:()V
+0009c0:                                        |[0009c0] com.google.android.test.R$drawable.<init>:()V
 0009d0: 7010 1900 0000                         |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@0019
 0009d6: 0e00                                   |0003: return-void
       catches       : (none)
diff --git a/test/dexdump/bytecodes.xml b/test/dexdump/bytecodes.xml
index d08c2e9..d4ee3a7 100755
--- a/test/dexdump/bytecodes.xml
+++ b/test/dexdump/bytecodes.xml
@@ -71,7 +71,7 @@
 >
 </constructor>
 </class>
-<class name="R.attr"
+<class name="R$attr"
  extends="java.lang.Object"
  interface="false"
  abstract="false"
@@ -79,15 +79,15 @@
  final="true"
  visibility="public"
 >
-<constructor name="R.attr"
- type="com.google.android.test.R.attr"
+<constructor name="R$attr"
+ type="com.google.android.test.R$attr"
  static="false"
  final="false"
  visibility="public"
 >
 </constructor>
 </class>
-<class name="R.drawable"
+<class name="R$drawable"
  extends="java.lang.Object"
  interface="false"
  abstract="false"
@@ -105,8 +105,8 @@
  value="2130837504"
 >
 </field>
-<constructor name="R.drawable"
- type="com.google.android.test.R.drawable"
+<constructor name="R$drawable"
+ type="com.google.android.test.R$drawable"
  static="false"
  final="false"
  visibility="public"
diff --git a/test/dexdump/checkers.xml b/test/dexdump/checkers.xml
index 4e56ea2..3d3bac2 100755
--- a/test/dexdump/checkers.xml
+++ b/test/dexdump/checkers.xml
@@ -181,7 +181,7 @@
  final="true"
  visibility="public"
 >
-<parameter name="arg0" type="android.content.SharedPreferences.Editor">
+<parameter name="arg0" type="android.content.SharedPreferences$Editor">
 </parameter>
 </method>
 <method name="a"
diff --git a/test/dexdump/invoke-custom.txt b/test/dexdump/invoke-custom.txt
index cfab248..1bfa053 100644
--- a/test/dexdump/invoke-custom.txt
+++ b/test/dexdump/invoke-custom.txt
@@ -58,7 +58,7 @@
       ins           : 2
       outs          : 2
       insns size    : 4 16-bit code units
-001b18:                                        |[001b18] TestBadBootstrapArguments.TestersConstantCallSite.<init>:(Ljava/lang/invoke/MethodHandle;)V
+001b18:                                        |[001b18] TestBadBootstrapArguments$TestersConstantCallSite.<init>:(Ljava/lang/invoke/MethodHandle;)V
 001b28: 7020 d200 1000                         |0000: invoke-direct {v0, v1}, Ljava/lang/invoke/ConstantCallSite;.<init>:(Ljava/lang/invoke/MethodHandle;)V // method@00d2
 001b2e: 0e00                                   |0003: return-void
       catches       : (none)
@@ -537,7 +537,7 @@
       ins           : 2
       outs          : 1
       insns size    : 4 16-bit code units
-002abc:                                        |[002abc] TestInvocationKinds.Widget.<init>:(I)V
+002abc:                                        |[002abc] TestInvocationKinds$Widget.<init>:(I)V
 002acc: 7010 bf00 0000                         |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@00bf
 002ad2: 0e00                                   |0003: return-void
       catches       : (none)
@@ -586,7 +586,7 @@
       ins           : 1
       outs          : 1
       insns size    : 4 16-bit code units
-002ee8:                                        |[002ee8] TestInvokeCustomWithConcurrentThreads.1.<init>:()V
+002ee8:                                        |[002ee8] TestInvokeCustomWithConcurrentThreads$1.<init>:()V
 002ef8: 7010 cf00 0000                         |0000: invoke-direct {v0}, Ljava/lang/ThreadLocal;.<init>:()V // method@00cf
 002efe: 0e00                                   |0003: return-void
       catches       : (none)
@@ -605,7 +605,7 @@
       ins           : 1
       outs          : 1
       insns size    : 13 16-bit code units
-002ea0:                                        |[002ea0] TestInvokeCustomWithConcurrentThreads.1.initialValue:()Ljava/lang/Integer;
+002ea0:                                        |[002ea0] TestInvokeCustomWithConcurrentThreads$1.initialValue:()Ljava/lang/Integer;
 002eb0: 7100 6500 0000                         |0000: invoke-static {}, LTestInvokeCustomWithConcurrentThreads;.access$000:()Ljava/util/concurrent/atomic/AtomicInteger; // method@0065
 002eb6: 0c00                                   |0003: move-result-object v0
 002eb8: 6e10 f100 0000                         |0004: invoke-virtual {v0}, Ljava/util/concurrent/atomic/AtomicInteger;.getAndIncrement:()I // method@00f1
@@ -628,7 +628,7 @@
       ins           : 1
       outs          : 1
       insns size    : 5 16-bit code units
-002ecc:                                        |[002ecc] TestInvokeCustomWithConcurrentThreads.1.initialValue:()Ljava/lang/Object;
+002ecc:                                        |[002ecc] TestInvokeCustomWithConcurrentThreads$1.initialValue:()Ljava/lang/Object;
 002edc: 6e10 6100 0100                         |0000: invoke-virtual {v1}, LTestInvokeCustomWithConcurrentThreads$1;.initialValue:()Ljava/lang/Integer; // method@0061
 002ee2: 0c00                                   |0003: move-result-object v0
 002ee4: 1100                                   |0004: return-object v0
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 713fd35..bd58ae3 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -59,7 +59,7 @@
 VERIFY="y" # y=yes,n=no,s=softfail
 ZYGOTE=""
 DEX_VERIFY=""
-USE_DEX2OAT_AND_PATCHOAT="y"
+USE_PATCHOAT="y"
 INSTRUCTION_SET_FEATURES=""
 ARGS=""
 EXTERNAL_LOG_TAGS="n" # if y respect externally set ANDROID_LOG_TAGS.
@@ -166,14 +166,9 @@
         shift
         BOOT_IMAGE="$1"
         shift
-    elif [ "x$1" = "x--no-dex2oat" ]; then
-        DEX2OAT="-Xcompiler:${FALSE_BIN}"
-        USE_DEX2OAT_AND_PATCHOAT="n"
-        PREBUILD="n" # Do not use prebuilt odex, either.
-        shift
     elif [ "x$1" = "x--no-patchoat" ]; then
         PATCHOAT="-Xpatchoat:${FALSE_BIN}"
-        USE_DEX2OAT_AND_PATCHOAT="n"
+        USE_PATCHOAT="n"
         shift
     elif [ "x$1" = "x--relocate" ]; then
         RELOCATE="y"
@@ -549,6 +544,7 @@
     fi
     bpath="${framework}/core-libart${bpath_suffix}.jar"
     bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
+    bpath="${bpath}:${framework}/core-simple${bpath_suffix}.jar"
     bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
     bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
     bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
@@ -588,7 +584,7 @@
 fi
 
 if [ "$INTERPRETER" = "y" ]; then
-    INT_OPTS="-Xint"
+    INT_OPTS="${INT_OPTS} -Xint"
     if [ "$VERIFY" = "y" ] ; then
       INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=quicken"
       COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=quicken"
@@ -604,7 +600,7 @@
 fi
 
 if [ "$JIT" = "y" ]; then
-    INT_OPTS="-Xusejit:true"
+    INT_OPTS="${INT_OPTS} -Xusejit:true"
     if [ "$VERIFY" = "y" ] ; then
       INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=quicken"
       COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=quicken"
@@ -613,6 +609,8 @@
       COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=assume-verified"
       DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
     fi
+else
+    INT_OPTS="${INT_OPTS} -Xusejit:false"
 fi
 
 JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
@@ -810,7 +808,7 @@
 if [ "$EXTERNAL_LOG_TAGS" = "n" ]; then
   if [ "$DEV_MODE" = "y" ]; then
       export ANDROID_LOG_TAGS='*:d'
-  elif [ "$USE_DEX2OAT_AND_PATCHOAT" = "n" ]; then
+  elif [ "$USE_PATCHOAT" = "n" ]; then
       # All tests would log the error of failing dex2oat/patchoat. Be silent here and only
       # log fatal events.
       export ANDROID_LOG_TAGS='*:s'
diff --git a/test/knownfailures.json b/test/knownfailures.json
index a280f85..0a179c7 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -76,7 +76,7 @@
     },
     {
         "tests" : "629-vdex-speed",
-        "variant": "interp-ac | no-dex2oat | interpreter | jit | relocate-npatchoat",
+        "variant": "interp-ac | interpreter | jit | relocate-npatchoat",
         "description": "629 requires compilation."
     },
     {
@@ -163,7 +163,7 @@
     },
     {
         "tests": "147-stripped-dex-fallback",
-        "variant": "no-dex2oat | no-image | relocate-npatchoat",
+        "variant": "no-image | relocate-npatchoat",
         "description": ["147-stripped-dex-fallback is disabled because it",
                         "requires --prebuild."]
     },
@@ -174,7 +174,7 @@
                   "119-noimage-patchoat",
                   "137-cfi",
                   "138-duplicate-classes-check2"],
-        "variant": "no-dex2oat | no-image | relocate-npatchoat",
+        "variant": "no-image | relocate-npatchoat",
         "description": ["All these tests check that we have sane behavior if we",
                         "don't have a patchoat or dex2oat. Therefore we",
                         "shouldn't run them in situations where we actually",
@@ -246,7 +246,7 @@
                   "613-inlining-dex-cache",
                   "626-set-resolved-string",
                   "638-checker-inline-cache-intrinsic"],
-        "variant": "trace  | stream",
+        "variant": "trace | stream",
         "description": ["These tests expect JIT compilation, which is",
                         "suppressed when tracing."]
     },
@@ -257,6 +257,11 @@
                         "suppressed when tracing."]
     },
     {
+        "tests": "638-checker-inline-cache-intrinsic",
+        "variant": "interpreter | interp-ac",
+        "description": ["Test expects JIT compilation"]
+    },
+    {
         "tests": "597-deopt-invoke-stub",
         "variant": "speed-profile | interp-ac | interpreter | optimizing | trace | stream",
         "description": ["This test expects JIT compilation and no AOT for",
@@ -644,9 +649,9 @@
     },
     {
         "tests": "660-clinit",
-        "variant": "no-image | no-dex2oat | no-prebuild | jvmti-stress | redefine-stress",
+        "variant": "no-image | no-prebuild | jvmti-stress | redefine-stress",
         "description": ["Tests <clinit> for app images, which --no-image, --no-prebuild, ",
-                        "--no-dex2oat, and --redefine-stress do not create"]
+                        "and --redefine-stress do not create"]
     },
     {
         "tests": ["961-default-iface-resolution-gen",
@@ -669,7 +674,7 @@
     },
     {
         "tests": "661-oat-writer-layout",
-        "variant": "interp-ac | interpreter | jit | no-dex2oat | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
+        "variant": "interp-ac | interpreter | jit | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
         "description": ["Test is designed to only check --compiler-filter=speed"]
     },
     {
@@ -1005,7 +1010,7 @@
     },
     {
         "tests": "677-fsi",
-        "variant": "no-dex2oat | no-image | no-prebuild | relocate-npatchoat | jvm",
+        "variant": "no-image | no-prebuild | relocate-npatchoat | jvm",
         "description": ["Test requires a successful dex2oat invocation"]
     },
     {
diff --git a/test/run-test b/test/run-test
index d90eccd..ef17302 100755
--- a/test/run-test
+++ b/test/run-test
@@ -148,7 +148,6 @@
 strace="false"
 always_clean="no"
 never_clean="no"
-have_dex2oat="yes"
 have_patchoat="yes"
 have_image="yes"
 multi_image_suffix=""
@@ -195,9 +194,6 @@
         lib="libdvm.so"
         runtime="dalvik"
         shift
-    elif [ "x$1" = "x--no-dex2oat" ]; then
-        have_dex2oat="no"
-        shift
     elif [ "x$1" = "x--no-patchoat" ]; then
         have_patchoat="no"
         shift
@@ -580,10 +576,6 @@
   run_args="${run_args} --no-patchoat"
 fi
 
-if [ "$have_dex2oat" = "no" ]; then
-  run_args="${run_args} --no-dex2oat"
-fi
-
 if [ ! "$runtime" = "jvm" ]; then
   run_args="${run_args} --lib $lib"
 fi
@@ -639,11 +631,6 @@
     usage="yes"
 fi
 
-if [ "$bisection_search" = "yes" -a "$have_dex2oat" = "no" ]; then
-    err_echo "--bisection-search and --no-dex2oat are mutually exclusive"
-    usage="yes"
-fi
-
 if [ "$bisection_search" = "yes" -a "$have_patchoat" = "no" ]; then
     err_echo "--bisection-search and --no-patchoat are mutually exclusive"
     usage="yes"
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 47b1e4e..84490bf 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -184,7 +184,7 @@
         'run-test' : ['--relocate-npatchoat']
     },
     'art-no-dex2oat' : {
-        'run-test' : ['--no-dex2oat']
+        # Deprecated configuration.
     },
     'art-heap-poisoning' : {
         'run-test' : ['--interpreter',
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index e8d4290..10c8619 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -145,7 +145,7 @@
   VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image', 'multipicimage'}
   VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
   VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
-  VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'no-dex2oat', 'prebuild'}
+  VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
   VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
   VARIANT_TYPE_DICT['relocate'] = {'relocate-npatchoat', 'relocate', 'no-relocate'}
   VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
@@ -414,8 +414,6 @@
         options_test += ' --prebuild'
       elif prebuild == 'no-prebuild':
         options_test += ' --no-prebuild'
-      elif prebuild == 'no-dex2oat':
-        options_test += ' --no-prebuild --no-dex2oat'
 
       if cdex_level:
         # Add option and remove the cdex- prefix.
diff --git a/test/ti-agent/ti_macros.h b/test/ti-agent/ti_macros.h
index d913383..a871270 100644
--- a/test/ti-agent/ti_macros.h
+++ b/test/ti-agent/ti_macros.h
@@ -19,8 +19,6 @@
 
 #include "android-base/macros.h"
 
-#define FINAL final
-#define OVERRIDE override
 #define UNREACHABLE  __builtin_unreachable
 
 #endif  // ART_TEST_TI_AGENT_TI_MACROS_H_
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 8c74817..3b5230f 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -165,6 +165,7 @@
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MODULE_TAGS := tests
 LOCAL_MODULE := ahat-tests
+LOCAL_TEST_CONFIG := ahat-tests.xml
 LOCAL_COMPATIBILITY_SUITE := general-tests
 include $(BUILD_HOST_JAVA_LIBRARY)
 AHAT_TEST_JAR := $(LOCAL_BUILT_MODULE)
diff --git a/tools/ahat/AndroidTest.xml b/tools/ahat/ahat-tests.xml
similarity index 100%
rename from tools/ahat/AndroidTest.xml
rename to tools/ahat/ahat-tests.xml
diff --git a/tools/art b/tools/art
index aebf5a6..9c032c0 100644
--- a/tools/art
+++ b/tools/art
@@ -16,28 +16,9 @@
 # shell dialect that should work on the host (e.g. bash), and
 # Android (e.g. mksh).
 
-# Globals
-ART_BINARY=dalvikvm
-DELETE_ANDROID_DATA="no"
-LAUNCH_WRAPPER=
-LIBART=libart.so
-JIT_PROFILE="no"
-ALLOW_DEFAULT_JDWP="no"
-VERBOSE="no"
-CLEAN_OAT_FILES="yes"
-EXTRA_OPTIONS=()
-
-# Follow all sym links to get the program name.
-if [ z"$BASH_SOURCE" != z ]; then
-  PROG_NAME="$BASH_SOURCE"
-else
-  PROG_NAME="$0"
-fi
-while [ -h "$PROG_NAME" ]; do
-  # On Mac OS, readlink -f doesn't work.
-  PROG_NAME="$(readlink "$PROG_NAME")"
-done
-
+######################################
+# Functions
+######################################
 function find_libdir() {
   # Get the actual file, $1 is the ART_BINARY_PATH and may be a symbolic link.
   # Use realpath instead of readlink because Android does not have a readlink.
@@ -48,29 +29,6 @@
   fi
 }
 
-function replace_compiler_filter_with_quicken() {
-  ARGS_WITH_QUICKEN=("$@")
-
-  found="false"
-  ((index=0))
-  while ((index <= $#)); do
-    what="${ARGS_WITH_QUICKEN[$index]}"
-
-    case "$what" in
-      --compiler-filter=*)
-        ARGS_WITH_QUICKEN[$index]="--compiler-filter=quicken"
-        found="true"
-        ;;
-    esac
-
-    ((index++))
-    shift
-  done
-  if [ "$found" != "true" ]; then
-    ARGS_WITH_QUICKEN=(-Xcompiler-option --compiler-filter=quicken "${ARGS_WITH_QUICKEN[@]}")
-  fi
-}
-
 function usage() {
   cat 1>&2 <<EOF
 Usage: art [OPTIONS] [--] [ART_OPTIONS] CLASS
@@ -224,19 +182,81 @@
   echo "$image_location"
 }
 
-# If android logging is not explicitly set, only print warnings and errors.
-if [ -z "$ANDROID_LOG_TAGS" ]; then
-  ANDROID_LOG_TAGS='*:w'
-fi
+function run_dex2oat() {
+  local class_loader_context=
+  for dex_file in "${DEX2OAT_CLASSPATH[@]}"
+  do
+    while [ -h "$dex_file" ]; do
+      # On Mac OS, readlink -f doesn't work.
+      dex_file="$(readlink "$dex_file")"
+    done
+    # Create oat file directory.
+    verbose_run mkdir -p $(dirname "$dex_file")/oat/$ISA
+    local oat_file=$(basename "$dex_file")
+    local oat_file=$(dirname "$dex_file")/oat/$ISA/${oat_file%.*}.odex
+    # When running dex2oat, use the exact same environment as when running
+    # dalvikvm (see the run_art function).
+    verbose_run ANDROID_DATA=$ANDROID_DATA                    \
+          ANDROID_ROOT=$ANDROID_ROOT                          \
+          LD_LIBRARY_PATH=$LD_LIBRARY_PATH                    \
+          PATH=$ANDROID_ROOT/bin:$PATH                        \
+          LD_USE_LOAD_BIAS=1                                  \
+          ANDROID_LOG_TAGS=$ANDROID_LOG_TAGS                  \
+          $DEX2OAT_BINARY_PATH                                \
+          --runtime-arg -Xnorelocate                          \
+          --boot-image=$DEX2OAT_BOOT_IMAGE                    \
+          --instruction-set=$ISA                              \
+          --class-loader-context="PCL[$class_loader_context]" \
+          "${DEX2OAT_FLAGS[@]}"                               \
+          --dex-file=$dex_file                                \
+          --oat-file=$oat_file
+    if [[ -n $class_loader_context ]]; then
+      class_loader_context+=":"
+    fi
+    class_loader_context+="$dex_file"
+  done
+}
+
+# Extract the dex2oat flags from the list of arguments.
+# -Xcompiler-option arguments are stored in the DEX2OAT_FLAGS array
+# -cp argument is split by ':' and stored in DEX2OAT_CLASSPATH
+# -Ximage argument is stored in DEX2OAT_BOOT_IMAGE
+function extract_dex2oat_flags() {
+  while [ $# -gt 0 ]; do
+    case $1 in
+      -Xcompiler-option)
+        DEX2OAT_FLAGS+=("$2")
+        shift
+        ;;
+      -Ximage:*)
+        DEX2OAT_BOOT_IMAGE=$1
+        # Remove '-Ximage:' from the argument.
+        DEX2OAT_BOOT_IMAGE=${DEX2OAT_BOOT_IMAGE##-Ximage:}
+        ;;
+      -cp)
+        # Reset any previously parsed classpath: like dalvikvm, we
+        # only support one -cp argument (the last one wins).
+        DEX2OAT_CLASSPATH=()
+        # TODO: support -classpath and CLASSPATH
+        local oifs=$IFS
+        IFS=':'
+        for classpath_elem in $2
+        do
+          DEX2OAT_CLASSPATH+=("$classpath_elem")
+        done
+        shift
+        IFS=$oifs
+        ;;
+    esac
+    shift
+  done
+}
 
 # Runs dalvikvm, returns its exit code.
 # (Oat directories are cleaned up in between runs)
 function run_art() {
-  local image_location="$(detect_boot_image_location)"
   local ret
 
-  # First cleanup any left-over 'oat' files from the last time dalvikvm was run.
-  cleanup_oat_directory_for_classpath "$@"
   # Run dalvikvm.
   verbose_run ANDROID_DATA="$ANDROID_DATA"                  \
               ANDROID_ROOT="$ANDROID_ROOT"                  \
@@ -247,7 +267,7 @@
               $LAUNCH_WRAPPER $ART_BINARY_PATH $lib         \
               -XXlib:"$LIBART"                              \
               -Xnorelocate                                  \
-              -Ximage:"$image_location"                     \
+              -Ximage:"$DEFAULT_IMAGE_LOCATION"             \
               "$@"
   ret=$?
 
@@ -258,6 +278,23 @@
   return $ret
 }
 
+######################################
+# Globals
+######################################
+ART_BINARY=dalvikvm
+DEX2OAT_BINARY=dex2oat
+DELETE_ANDROID_DATA="no"
+LAUNCH_WRAPPER=
+LIBART=libart.so
+JIT_PROFILE="no"
+ALLOW_DEFAULT_JDWP="no"
+VERBOSE="no"
+CLEAN_OAT_FILES="yes"
+EXTRA_OPTIONS=()
+DEX2OAT_FLAGS=()
+DEX2OAT_CLASSPATH=()
+
+# Parse arguments
 while [[ "$1" = "-"* ]]; do
   case "$1" in
   --)
@@ -275,6 +312,7 @@
     ;& # Fallthrough
   --debug)
     LIBART="libartd.so"
+    DEX2OAT_BINARY=dex2oatd
     # Expect that debug mode wants all checks.
     EXTRA_OPTIONS+=(-XX:SlowDebug=true)
     ;;
@@ -329,6 +367,17 @@
   exit 1
 fi
 
+# Follow all sym links to get the program name.
+if [[ -n "$BASH_SOURCE" ]]; then
+  PROG_NAME="$BASH_SOURCE"
+else
+  PROG_NAME="$0"
+fi
+while [ -h "$PROG_NAME" ]; do
+  # On Mac OS, readlink -f doesn't work.
+  PROG_NAME="$(readlink "$PROG_NAME")"
+done
+
 PROG_DIR="$(cd "${PROG_NAME%/*}" ; pwd -P)"
 ANDROID_ROOT=$PROG_DIR/..
 ART_BINARY_PATH=$ANDROID_ROOT/bin/$ART_BINARY
@@ -341,8 +390,32 @@
   exit 1
 fi
 
+DEX2OAT_BINARY_PATH=$ANDROID_ROOT/bin/$DEX2OAT_BINARY
+
+if [ ! -x "$DEX2OAT_BINARY_PATH" ]; then
+  echo "Warning: Android Compiler not found: $DEX2OAT_BINARY_PATH"
+fi
+
+######################################
+# Main program
+######################################
+
+# If android logging is not explicitly set, only print warnings and errors.
+if [ -z "$ANDROID_LOG_TAGS" ]; then
+  ANDROID_LOG_TAGS='*:w'
+fi
+
 LIBDIR="$(find_libdir $ART_BINARY_PATH)"
 LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBDIR
+DEFAULT_IMAGE_LOCATION="$(detect_boot_image_location)"
+DEX2OAT_BOOT_IMAGE="$DEFAULT_IMAGE_LOCATION"
+ISA=$(LD_LIBRARY_PATH=$LD_LIBRARY_PATH $ART_BINARY_PATH -showversion | (read art version number isa && echo $isa))
+
+# Extract the dex2oat flags from the list of arguments.
+# -Xcompiler-option arguments are stored in the DEX2OAT_FLAGS array
+# -cp argument is split by ':' and stored in DEX2OAT_CLASSPATH
+# -Ximage argument is stored in DEX2OAT_BOOT_IMAGE
+extract_dex2oat_flags "$@"
 
 # If ANDROID_DATA is the system ANDROID_DATA or is not set, use our own,
 # and ensure we delete it at the end.
@@ -360,31 +433,34 @@
 
 if [ "$PERF" != "" ]; then
   LAUNCH_WRAPPER="perf record -g --call-graph dwarf -F 10000 -o $ANDROID_DATA/perf.data -e cycles:u $LAUNCH_WRAPPER"
-  EXTRA_OPTIONS+=(-Xcompiler-option --generate-debug-info)
+  DEX2OAT_FLAGS+=(--generate-debug-info)
 fi
 
 if [ "$ALLOW_DEFAULT_JDWP" = "no" ]; then
   EXTRA_OPTIONS+=(-XjdwpProvider:none)
 fi
 
+# First cleanup any left-over 'oat' files from the last time dalvikvm was run.
+cleanup_oat_directory_for_classpath "$@"
+
+# Protect additional arguments in quotes to preserve whitespaces (used by
+# run-jdwp-test.sh when running on device), '$' (may be used as part of
+# classpath) and other special characters when evaluated.
+EXTRA_OPTIONS+=("$@")
+
 if [ "$JIT_PROFILE" = "yes" ]; then
   # Create the profile. The runtime expects profiles to be created before
   # execution.
   PROFILE_PATH="$ANDROID_DATA/primary.prof"
   touch "$PROFILE_PATH"
 
-  # Replace the compiler filter with quicken so that we
-  # can capture the profile.
-  ARGS_WITH_QUICKEN=
-  replace_compiler_filter_with_quicken "$@"
-
   run_art -Xjitsaveprofilinginfo               \
           -Xps-min-methods-to-save:1           \
           -Xps-min-classes-to-save:1           \
           -Xps-min-notification-before-wake:10 \
           -Xps-profile-path:$PROFILE_PATH      \
           -Xusejit:true                        \
-          "${ARGS_WITH_QUICKEN[@]}"            \
+          "${EXTRA_OPTIONS[@]}"                \
           &> "$ANDROID_DATA/profile_gen.log"
   EXIT_STATUS=$?
 
@@ -400,13 +476,20 @@
   rm -rf "$ANDROID_DATA/dalvik-cache"
 
   # Append arguments so next invocation of run_art uses the profile.
-  EXTRA_OPTIONS+=(-Xcompiler-option --profile-file="$PROFILE_PATH")
+  DEX2OAT_FLAGS+=(--profile-file="$PROFILE_PATH")
 fi
 
-# Protect additional arguments in quotes to preserve whitespaces (used by
-# run-jdwp-test.sh when running on device), '$' (may be used as part of
-# classpath) and other special characters when evaluated.
-EXTRA_OPTIONS+=("$@")
+if [ -x "$DEX2OAT_BINARY_PATH" ]; then
+  # Run dex2oat before launching ART to generate the oat files for the classpath.
+  run_dex2oat
+fi
+
+# Do not continue if dex2oat failed.
+EXIT_STATUS=$?
+if [ $EXIT_STATUS != 0 ]; then
+  echo "Failed dex2oat invocation" >&2
+  exit $EXIT_STATUS
+fi
 
 run_art "${EXTRA_OPTIONS[@]}"
 EXIT_STATUS=$?
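The --class-loader-context value built by run_dex2oat above grows by one entry per classpath element: the first dex file is compiled against an empty PCL[], the second against PCL[first], and so on. A small illustrative sketch of that accumulation (toy file names, not part of the script):

    // Sketch: how the PCL[...] context string grows per classpath entry.
    import java.util.ArrayList;
    import java.util.List;

    public class ClassLoaderContextSketch {
      public static void main(String[] args) {
        String[] classpath = {"a.jar", "b.jar", "c.jar"};  // Hypothetical entries.
        List<String> alreadyCompiled = new ArrayList<>();
        for (String dexFile : classpath) {
          String context = "PCL[" + String.join(":", alreadyCompiled) + "]";
          // A real run would invoke dex2oat here with --class-loader-context=<context>.
          System.out.println(dexFile + " -> " + context);
          alreadyCompiled.add(dexFile);
        }
        // Prints:
        //   a.jar -> PCL[]
        //   b.jar -> PCL[a.jar]
        //   c.jar -> PCL[a.jar:b.jar]
      }
    }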
diff --git a/tools/art_verifier/art_verifier.cc b/tools/art_verifier/art_verifier.cc
index 1a76a86..8f412bf 100644
--- a/tools/art_verifier/art_verifier.cc
+++ b/tools/art_verifier/art_verifier.cc
@@ -92,8 +92,7 @@
  protected:
   using Base = CmdlineArgs;
 
-  virtual ParseStatus ParseCustom(const StringPiece& option,
-                                  std::string* error_msg) OVERRIDE {
+  ParseStatus ParseCustom(const StringPiece& option, std::string* error_msg) override {
     {
       ParseStatus base_parse = Base::ParseCustom(option, error_msg);
       if (base_parse != kParseUnknownArgument) {
@@ -122,7 +121,7 @@
     return kParseOk;
   }
 
-  virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+  ParseStatus ParseChecks(std::string* error_msg) override {
     // Perform the parent checks.
     ParseStatus parent_checks = Base::ParseChecks(error_msg);
     if (parent_checks != kParseOk) {
@@ -172,16 +171,16 @@
 };
 
 struct MethodVerifierMain : public CmdlineMain<MethodVerifierArgs> {
-  bool NeedsRuntime() OVERRIDE {
+  bool NeedsRuntime() override {
     return true;
   }
 
-  bool ExecuteWithoutRuntime() OVERRIDE {
+  bool ExecuteWithoutRuntime() override {
     LOG(FATAL) << "Unreachable";
     UNREACHABLE();
   }
 
-  bool ExecuteWithRuntime(Runtime* runtime) OVERRIDE {
+  bool ExecuteWithRuntime(Runtime* runtime) override {
     CHECK(args_ != nullptr);
 
     const size_t dex_reps = args_->dex_file_verifier_
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
index 694a5f4..9262076 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -86,6 +86,12 @@
                 .withDescription("Enable debug")
                 .create("d"));
         options.addOption(OptionBuilder
+                .withLongOpt("dump-all-members")
+                .withDescription("Dump all members from jar files to stdout. Ignore annotations. " +
+                        "Do not use in conjunction with any other arguments.")
+                .hasArgs(0)
+                .create('m'));
+        options.addOption(OptionBuilder
                 .withLongOpt("help")
                 .hasArgs(0)
                 .withDescription("Show this help")
@@ -113,16 +119,21 @@
         }
 
         Status status = new Status(cmd.hasOption('d'));
-        try {
-            Class2Greylist c2gl = new Class2Greylist(
-                    status,
-                    cmd.getOptionValue('p', null),
-                    cmd.getOptionValues('g'),
-                    cmd.getOptionValue('w', null),
-                    jarFiles);
-            c2gl.main();
-        } catch (IOException e) {
-            status.error(e);
+
+        if (cmd.hasOption('m')) {
+            dumpAllMembers(status, jarFiles);
+        } else {
+            try {
+                Class2Greylist c2gl = new Class2Greylist(
+                        status,
+                        cmd.getOptionValue('p', null),
+                        cmd.getOptionValues('g'),
+                        cmd.getOptionValue('w', null),
+                        jarFiles);
+                c2gl.main();
+            } catch (IOException e) {
+                status.error(e);
+            }
         }
 
         if (status.ok()) {
@@ -221,6 +232,20 @@
         return map;
     }
 
+    private static void dumpAllMembers(Status status, String[] jarFiles) {
+        for (String jarFile : jarFiles) {
+            status.debug("Processing jar file %s", jarFile);
+            try {
+                JarReader reader = new JarReader(status, jarFile);
+                reader.stream().forEach(clazz -> new MemberDumpingVisitor(clazz, status)
+                        .visit());
+                reader.close();
+            } catch (IOException e) {
+                status.error(e);
+            }
+        }
+    }
+
     private static void help(Options options) {
         new HelpFormatter().printHelp(
                 "class2greylist path/to/classes.jar [classes2.jar ...]",
diff --git a/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java b/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
new file mode 100644
index 0000000..6677a3f
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
@@ -0,0 +1,47 @@
+package com.android.class2greylist;
+
+import org.apache.bcel.classfile.DescendingVisitor;
+import org.apache.bcel.classfile.EmptyVisitor;
+import org.apache.bcel.classfile.Field;
+import org.apache.bcel.classfile.FieldOrMethod;
+import org.apache.bcel.classfile.JavaClass;
+import org.apache.bcel.classfile.Method;
+
+/**
+ * A class file visitor that simply prints to stdout the signature of every member within the class.
+ */
+public class MemberDumpingVisitor extends EmptyVisitor {
+
+    private final Status mStatus;
+    private final DescendingVisitor mDescendingVisitor;
+
+    /**
+     * Creates a visitor for a class.
+     *
+     * @param clazz Class to visit
+     */
+    public MemberDumpingVisitor(JavaClass clazz, Status status) {
+        mStatus = status;
+        mDescendingVisitor = new DescendingVisitor(clazz, this);
+    }
+
+    public void visit() {
+        mDescendingVisitor.visit();
+    }
+
+    @Override
+    public void visitMethod(Method method) {
+        visitMember(method, "L%s;->%s%s");
+    }
+
+    @Override
+    public void visitField(Field field) {
+        visitMember(field, "L%s;->%s:%s");
+    }
+
+    private void visitMember(FieldOrMethod member, String signatureFormatString) {
+        AnnotationContext context = new AnnotationContext(mStatus, member,
+                (JavaClass) mDescendingVisitor.predecessor(), signatureFormatString);
+        System.out.println(context.getMemberDescriptor());
+    }
+}
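The two format strings used above produce standard dex-style member descriptors. A hypothetical example of the resulting output (values are illustrative, not taken from a real jar):

    // Sketch: what the method and field format strings expand to.
    public class DescriptorFormatSketch {
      public static void main(String[] args) {
        String internalClassName = "com/example/Foo";  // Hypothetical class.
        // Method descriptor: "L%s;->%s%s" filled with (class, name, signature).
        System.out.println(String.format("L%s;->%s%s", internalClassName, "bar", "(I)V"));
        // Field descriptor: "L%s;->%s:%s" filled with (class, name, type).
        System.out.println(String.format("L%s;->%s:%s", internalClassName, "mCount", "I"));
        // Prints:
        //   Lcom/example/Foo;->bar(I)V
        //   Lcom/example/Foo;->mCount:I
      }
    }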
diff --git a/tools/dexanalyze/Android.bp b/tools/dexanalyze/Android.bp
index 9515ca5..a85bf56 100644
--- a/tools/dexanalyze/Android.bp
+++ b/tools/dexanalyze/Android.bp
@@ -22,6 +22,7 @@
         "dexanalyze.cc",
         "dexanalyze_bytecode.cc",
         "dexanalyze_experiments.cc",
+        "dexanalyze_strings.cc",
     ],
     target: {
         android: {
diff --git a/tools/dexanalyze/dexanalyze.cc b/tools/dexanalyze/dexanalyze.cc
index 841719b..040f41b 100644
--- a/tools/dexanalyze/dexanalyze.cc
+++ b/tools/dexanalyze/dexanalyze.cc
@@ -23,6 +23,7 @@
 
 #include "dexanalyze_bytecode.h"
 #include "dexanalyze_experiments.h"
+#include "dexanalyze_strings.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/dex_file.h"
 #include "dex/dex_file_loader.h"
diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc
index e6e58c0..659a940 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.cc
+++ b/tools/dexanalyze/dexanalyze_bytecode.cc
@@ -264,18 +264,18 @@
         uint32_t receiver = inst->VRegB_22c();
         uint32_t first_arg_reg = code_item.RegistersSize() - code_item.InsSize();
         uint32_t out_reg = inst->VRegA_22c();
-          if (Enabled(kExperimentInstanceFieldSelf) &&
-              first_arg_reg == receiver &&
-              holder_type == current_class_type) {
-            if (count_types) {
-              ++current_type.fields_.FindOrAdd(dex_field_idx)->second;
-            } else {
-              uint32_t field_idx = types[holder_type.index_].fields_.Get(dex_field_idx);
-              ExtendPrefix(&out_reg, &field_idx);
-              CHECK(InstNibbles(new_opcode, {out_reg, field_idx}));
-              continue;
-            }
-          } else if (Enabled(kExperimentInstanceField)) {
+        if (Enabled(kExperimentInstanceFieldSelf) &&
+            first_arg_reg == receiver &&
+            holder_type == current_class_type) {
+          if (count_types) {
+            ++current_type.fields_.FindOrAdd(dex_field_idx)->second;
+          } else {
+            uint32_t field_idx = types[holder_type.index_].fields_.Get(dex_field_idx);
+            ExtendPrefix(&out_reg, &field_idx);
+            CHECK(InstNibbles(new_opcode, {out_reg, field_idx}));
+            continue;
+          }
+        } else if (Enabled(kExperimentInstanceField)) {
           if (count_types) {
             ++current_type.types_.FindOrAdd(holder_type.index_)->second;
             ++types[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second;
diff --git a/tools/dexanalyze/dexanalyze_experiments.cc b/tools/dexanalyze/dexanalyze_experiments.cc
index 1f6fe46..b124f43 100644
--- a/tools/dexanalyze/dexanalyze_experiments.cc
+++ b/tools/dexanalyze/dexanalyze_experiments.cc
@@ -208,137 +208,6 @@
      << Percent(total_unique_non_header_bytes_, total_size) << "\n";
 }
 
-void AnalyzeStrings::ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
-  std::set<std::string> unique_strings;
-  for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    for (size_t i = 0; i < dex_file->NumStringIds(); ++i) {
-      uint32_t length = 0;
-      const char* data = dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i), &length);
-      // Analyze if the string has any UTF16 chars.
-      bool have_wide_char = false;
-      const char* ptr = data;
-      for (size_t j = 0; j < length; ++j) {
-        have_wide_char = have_wide_char || GetUtf16FromUtf8(&ptr) >= 0x100;
-      }
-      if (have_wide_char) {
-        wide_string_bytes_ += 2 * length;
-      } else {
-        ascii_string_bytes_ += length;
-      }
-      string_data_bytes_ += ptr - data;
-      unique_strings.insert(data);
-    }
-  }
-  // Unique strings only since we want to exclude savings from multidex duplication.
-  std::vector<std::string> strings(unique_strings.begin(), unique_strings.end());
-  unique_strings.clear();
-
-  // Tunable parameters.
-  static const size_t kMinPrefixLen = 1;
-  static const size_t kMaxPrefixLen = 255;
-  static const size_t kPrefixConstantCost = 4;
-  static const size_t kPrefixIndexCost = 2;
-
-  // Calculate total shared prefix.
-  std::vector<size_t> shared_len;
-  prefixes_.clear();
-  for (size_t i = 0; i < strings.size(); ++i) {
-    size_t best_len = 0;
-    if (i > 0) {
-      best_len = std::max(best_len, PrefixLen(strings[i], strings[i - 1]));
-    }
-    if (i < strings.size() - 1) {
-      best_len = std::max(best_len, PrefixLen(strings[i], strings[i + 1]));
-    }
-    best_len = std::min(best_len, kMaxPrefixLen);
-    std::string prefix;
-    if (best_len >= kMinPrefixLen) {
-      prefix = strings[i].substr(0, best_len);
-      ++prefixes_[prefix];
-    }
-    total_prefix_index_cost_ += kPrefixIndexCost;
-  }
-  // Optimize the result by moving long prefixes to shorter ones if it causes savings.
-  while (true) {
-    bool have_savings = false;
-    auto it = prefixes_.begin();
-    std::vector<std::string> longest;
-    for (const auto& pair : prefixes_) {
-      longest.push_back(pair.first);
-    }
-    std::sort(longest.begin(), longest.end(), [](const std::string& a, const std::string& b) {
-      return a.length() > b.length();
-    });
-    // Do longest first since this provides the best results.
-    for (const std::string& s : longest) {
-      it = prefixes_.find(s);
-      CHECK(it != prefixes_.end());
-      const std::string& prefix = it->first;
-      int64_t best_savings = 0u;
-      int64_t best_len = -1;
-      for (int64_t len = prefix.length() - 1; len >= 0; --len) {
-        auto found = prefixes_.find(prefix.substr(0, len));
-        if (len != 0 && found == prefixes_.end()) {
-          continue;
-        }
-        // Calculate savings from downgrading the prefix.
-        int64_t savings = kPrefixConstantCost + prefix.length() -
-            (prefix.length() - len) * it->second;
-        if (savings > best_savings) {
-          best_savings = savings;
-          best_len = len;
-          break;
-        }
-      }
-      if (best_len != -1) {
-        prefixes_[prefix.substr(0, best_len)] += it->second;
-        it = prefixes_.erase(it);
-        optimization_savings_ += best_savings;
-        have_savings = true;
-      } else {
-        ++it;
-      }
-    }
-    if (!have_savings) {
-      break;
-    }
-  }
-  total_num_prefixes_ += prefixes_.size();
-  for (const auto& pair : prefixes_) {
-    // 4 bytes for an offset, one for length.
-    total_prefix_dict_ += pair.first.length();
-    total_prefix_table_ += kPrefixConstantCost;
-    total_prefix_savings_ += pair.first.length() * pair.second;
-  }
-}
-
-void AnalyzeStrings::Dump(std::ostream& os, uint64_t total_size) const {
-  os << "Total string data bytes " << Percent(string_data_bytes_, total_size) << "\n";
-  os << "UTF-16 string data bytes " << Percent(wide_string_bytes_, total_size) << "\n";
-  os << "ASCII string data bytes " << Percent(ascii_string_bytes_, total_size) << "\n";
-
-  // Prefix based strings.
-  os << "Total shared prefix bytes " << Percent(total_prefix_savings_, total_size) << "\n";
-  os << "Prefix dictionary cost " << Percent(total_prefix_dict_, total_size) << "\n";
-  os << "Prefix table cost " << Percent(total_prefix_table_, total_size) << "\n";
-  os << "Prefix index cost " << Percent(total_prefix_index_cost_, total_size) << "\n";
-  int64_t net_savings = total_prefix_savings_;
-  net_savings -= total_prefix_dict_;
-  net_savings -= total_prefix_table_;
-  net_savings -= total_prefix_index_cost_;
-  os << "Prefix dictionary elements " << total_num_prefixes_ << "\n";
-  os << "Optimization savings " << Percent(optimization_savings_, total_size) << "\n";
-  os << "Prefix net savings " << Percent(net_savings, total_size) << "\n";
-  if (verbose_level_ >= VerboseLevel::kEverything) {
-    std::vector<std::pair<std::string, size_t>> pairs(prefixes_.begin(), prefixes_.end());
-    // Sort lexicographically.
-    std::sort(pairs.begin(), pairs.end());
-    for (const auto& pair : pairs) {
-      os << pair.first << " : " << pair.second << "\n";
-    }
-  }
-}
-
 void CountDexIndices::ProcessDexFiles(
     const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
   std::set<std::string> unique_field_names;
diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h
index 4e66b3c..55d2f44 100644
--- a/tools/dexanalyze/dexanalyze_experiments.h
+++ b/tools/dexanalyze/dexanalyze_experiments.h
@@ -62,30 +62,11 @@
   VerboseLevel verbose_level_ = VerboseLevel::kNormal;
 };
 
-// Analyze string data and strings accessed from code.
-class AnalyzeStrings : public Experiment {
- public:
-  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
-  void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
-
- private:
-  int64_t wide_string_bytes_ = 0u;
-  int64_t ascii_string_bytes_ = 0u;
-  int64_t string_data_bytes_ = 0u;
-  int64_t total_prefix_savings_ = 0u;
-  int64_t total_prefix_dict_ = 0u;
-  int64_t total_prefix_table_ = 0u;
-  int64_t total_prefix_index_cost_ = 0u;
-  int64_t total_num_prefixes_ = 0u;
-  int64_t optimization_savings_ = 0u;
-  std::unordered_map<std::string, size_t> prefixes_;
-};
-
 // Analyze debug info sizes.
 class AnalyzeDebugInfo  : public Experiment {
  public:
-  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
-  void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
+  void Dump(std::ostream& os, uint64_t total_size) const override;
 
  private:
   int64_t total_bytes_ = 0u;
@@ -110,8 +91,8 @@
 // Count numbers of dex indices.
 class CountDexIndices : public Experiment {
  public:
-  void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
-  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
+  void ProcessDexFile(const DexFile& dex_file) override;
+  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
 
   void Dump(std::ostream& os, uint64_t total_size) const;
 
@@ -181,9 +162,9 @@
 // Measure various code metrics including args per invoke-virtual, fill/spill move patterns.
 class CodeMetrics : public Experiment {
  public:
-  void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
+  void ProcessDexFile(const DexFile& dex_file) override;
 
-  void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+  void Dump(std::ostream& os, uint64_t total_size) const override;
 
  private:
   static constexpr size_t kMaxArgCount = 6;
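The string-prefix analysis removed above reappears in the new dexanalyze_strings.cc below, with the same cost model: a prefix pays its own bytes once in the dictionary, a fixed 4-byte table entry, and a 2-byte index per string, and it saves its length for every string that shares it. A small worked example of that accounting (a simplified per-prefix view, not the exact bookkeeping in the tool):

    // Sketch: net bytes saved by factoring out one shared prefix, using the same
    // constants as the analysis (4-byte table entry, 2-byte per-string index).
    public class PrefixSavingsSketch {
      static final int PREFIX_CONSTANT_COST = 4;  // Per dictionary/table entry.
      static final int PREFIX_INDEX_COST = 2;     // Per string referencing a prefix.

      static long netSavings(int prefixLen, int stringCount) {
        long saved = (long) prefixLen * stringCount;             // Bytes removed from the strings.
        long dictCost = prefixLen;                               // Prefix stored once.
        long tableCost = PREFIX_CONSTANT_COST;                   // Fixed table entry.
        long indexCost = (long) PREFIX_INDEX_COST * stringCount; // Each string keeps an index.
        return saved - dictCost - tableCost - indexCost;
      }

      public static void main(String[] args) {
        System.out.println(netSavings(8, 100));  // 800 - 8 - 4 - 200 = 588 bytes saved.
        System.out.println(netSavings(2, 3));    // 6 - 2 - 4 - 6 = -6: not worth an entry.
      }
    }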
diff --git a/tools/dexanalyze/dexanalyze_strings.cc b/tools/dexanalyze/dexanalyze_strings.cc
new file mode 100644
index 0000000..863e4ee
--- /dev/null
+++ b/tools/dexanalyze/dexanalyze_strings.cc
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dexanalyze_strings.h"
+
+#include <algorithm>
+#include <iomanip>
+#include <iostream>
+#include <queue>
+
+#include "dex/class_accessor-inl.h"
+#include "dex/code_item_accessors-inl.h"
+#include "dex/dex_instruction-inl.h"
+
+namespace art {
+namespace dexanalyze {
+
+// Tunable parameters.
+static const size_t kMinPrefixLen = 1;
+static const size_t kMaxPrefixLen = 255;
+static const size_t kPrefixConstantCost = 4;
+static const size_t kPrefixIndexCost = 2;
+
+// Node value = (distance from root) * (occurrences - 1).
+class MatchTrie {
+ public:
+  void Add(const std::string& str) {
+    MatchTrie* node = this;
+    size_t depth = 0u;
+    for (uint8_t c : str) {
+      ++depth;
+      if (node->nodes_[c] == nullptr) {
+        MatchTrie* new_node = new MatchTrie();
+        node->nodes_[c].reset(new_node);
+        new_node->parent_ = node;
+        new_node->depth_ = depth;
+        new_node->incoming_ = c;
+        node = new_node;
+      } else {
+        node = node->nodes_[c].get();
+      }
+      ++node->count_;
+    }
+    node->is_end_ = true;
+  }
+
+  // Returns the length of the longest prefix and if it's a leaf node.
+  std::pair<size_t, bool> LongestPrefix(const std::string& str) const {
+    const MatchTrie* node = this;
+    const MatchTrie* best_node = this;
+    size_t depth = 0u;
+    size_t best_depth = 0u;
+    for (uint8_t c : str) {
+      if (node->nodes_[c] == nullptr) {
+        break;
+      }
+      node = node->nodes_[c].get();
+      ++depth;
+      if (node->is_end_) {
+        best_depth = depth;
+        best_node = node;
+      }
+    }
+    bool is_leaf = true;
+    for (const std::unique_ptr<MatchTrie>& cur_node : best_node->nodes_) {
+      if (cur_node != nullptr) {
+        is_leaf = false;
+      }
+    }
+    return {best_depth, is_leaf};
+  }
+
+  int32_t Savings() const {
+    int32_t cost = kPrefixConstantCost;
+    int32_t first_used = 0u;
+    if (chosen_suffix_count_ == 0u) {
+      cost += depth_;
+    }
+    uint32_t extra_savings = 0u;
+    for (MatchTrie* cur = parent_; cur != nullptr; cur = cur->parent_) {
+      if (cur->chosen_) {
+        first_used = cur->depth_;
+        if (cur->chosen_suffix_count_ == 0u) {
+          // First suffix for the chosen parent, remove the cost of the dictionary entry.
+          extra_savings += first_used;
+        }
+        break;
+      }
+    }
+    return count_ * (depth_ - first_used) - cost + extra_savings;
+  }
+
+  template <typename T, typename... Args, template <typename...> class Queue>
+  T PopRealTop(Queue<T, Args...>& queue) {
+    auto pair = queue.top();
+    queue.pop();
+    // Keep updating values until one sticks.
+    while (pair.second->Savings() != pair.first) {
+      pair.first = pair.second->Savings();
+      queue.push(pair);
+      pair = queue.top();
+      queue.pop();
+    }
+    return pair;
+  }
+
+  std::vector<std::string> ExtractPrefixes(size_t max) {
+    std::vector<std::string> ret;
+    // Make a priority queue and adaptively update it. Each node value is the savings from picking
+    // it. Insert all of the interesting nodes (more than one child, or the end of a prefix).
+    std::priority_queue<std::pair<int32_t, MatchTrie*>> queue;
+    // Add all of the nodes to the queue.
+    std::vector<MatchTrie*> work(1, this);
+    while (!work.empty()) {
+      MatchTrie* elem = work.back();
+      work.pop_back();
+      size_t num_childs = 0u;
+      for (const std::unique_ptr<MatchTrie>& child : elem->nodes_) {
+        if (child != nullptr) {
+          work.push_back(child.get());
+          ++num_childs;
+        }
+      }
+      if (num_childs > 1u || elem->is_end_) {
+        queue.emplace(elem->Savings(), elem);
+      }
+    }
+    std::priority_queue<std::pair<int32_t, MatchTrie*>> prefixes;
+    // The savings can only ever go down for a given node, never up.
+    while (max != 0u && !queue.empty()) {
+      std::pair<int32_t, MatchTrie*> pair = PopRealTop(queue);
+      if (pair.second != this && pair.first > 0) {
+        // Pick this node.
+        uint32_t count = pair.second->count_;
+        pair.second->chosen_ = true;
+        for (MatchTrie* cur = pair.second->parent_; cur != this; cur = cur->parent_) {
+          if (cur->chosen_) {
+            break;
+          }
+          cur->count_ -= count;
+        }
+        for (MatchTrie* cur = pair.second->parent_; cur != this; cur = cur->parent_) {
+          ++cur->chosen_suffix_count_;
+        }
+        prefixes.emplace(pair.first, pair.second);
+        --max;
+      } else {
+        // Negative or no EV, just delete the node.
+      }
+    }
+    while (!prefixes.empty()) {
+      std::pair<int32_t, MatchTrie*> pair = PopRealTop(prefixes);
+      if (pair.first <= 0) {
+        continue;
+      }
+      std::vector<uint8_t> chars;
+      for (MatchTrie* cur = pair.second; cur != this; cur = cur->parent_) {
+        chars.push_back(cur->incoming_);
+      }
+      ret.push_back(std::string(chars.rbegin(), chars.rend()));
+      // LOG(INFO) << pair.second->Savings() << " : " << ret.back();
+    }
+    return ret;
+  }
+
+  std::unique_ptr<MatchTrie> nodes_[256];
+  MatchTrie* parent_ = nullptr;
+  uint32_t count_ = 0u;
+  int32_t depth_ = 0u;
+  int32_t savings_ = 0u;
+  uint8_t incoming_ = 0u;
+  // If the current node is the end of a possible prefix.
+  bool is_end_ = false;
+  // If the current node is chosen to be a used prefix.
+  bool chosen_ = false;
+  // Number of chosen prefixes that the prefix ending at this node is a proper prefix of.
+  uint32_t chosen_suffix_count_ = 0u;
+};
+
+void AnalyzeStrings::ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
+  std::set<std::string> unique_strings;
+  // Accumulate the strings.
+  for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+    for (size_t i = 0; i < dex_file->NumStringIds(); ++i) {
+      uint32_t length = 0;
+      const char* data = dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i), &length);
+      // Analyze if the string has any UTF16 chars.
+      bool have_wide_char = false;
+      const char* ptr = data;
+      for (size_t j = 0; j < length; ++j) {
+        have_wide_char = have_wide_char || GetUtf16FromUtf8(&ptr) >= 0x100;
+      }
+      if (have_wide_char) {
+        wide_string_bytes_ += 2 * length;
+      } else {
+        ascii_string_bytes_ += length;
+      }
+      string_data_bytes_ += ptr - data;
+      unique_strings.insert(data);
+    }
+  }
+  // Unique strings only since we want to exclude savings from multidex duplication.
+  ProcessStrings(std::vector<std::string>(unique_strings.begin(), unique_strings.end()), 1);
+}
+
+void AnalyzeStrings::ProcessStrings(const std::vector<std::string>& strings, size_t iterations) {
+  if (iterations == 0u) {
+    return;
+  }
+  // Calculate total shared prefix.
+  std::vector<size_t> shared_len;
+  prefixes_.clear();
+  std::unique_ptr<MatchTrie> prefix_construct(new MatchTrie());
+  for (size_t i = 0; i < strings.size(); ++i) {
+    size_t best_len = 0;
+    if (i > 0) {
+      best_len = std::max(best_len, PrefixLen(strings[i], strings[i - 1]));
+    }
+    if (i < strings.size() - 1) {
+      best_len = std::max(best_len, PrefixLen(strings[i], strings[i + 1]));
+    }
+    best_len = std::min(best_len, kMaxPrefixLen);
+    std::string prefix;
+    if (best_len >= kMinPrefixLen) {
+      prefix = strings[i].substr(0, best_len);
+      prefix_construct->Add(prefix);
+      ++prefixes_[prefix];
+      total_shared_prefix_bytes_ += best_len;
+    }
+    total_prefix_index_cost_ += kPrefixIndexCost;
+  }
+
+  static constexpr size_t kPrefixBits = 15;
+  static constexpr size_t kShortLen = (1u << (15 - kPrefixBits)) - 1;
+  std::unique_ptr<MatchTrie> prefix_trie(new MatchTrie());
+  static constexpr bool kUseGreedyTrie = true;
+  if (kUseGreedyTrie) {
+    std::vector<std::string> prefixes(prefix_construct->ExtractPrefixes(1 << kPrefixBits));
+    for (auto&& str : prefixes) {
+      prefix_trie->Add(str);
+    }
+  } else {
+    // Optimize the result by moving long prefixes to shorter ones if it causes additional savings.
+    while (true) {
+      bool have_savings = false;
+      auto it = prefixes_.begin();
+      std::vector<std::string> longest;
+      for (const auto& pair : prefixes_) {
+        longest.push_back(pair.first);
+      }
+      std::sort(longest.begin(), longest.end(), [](const std::string& a, const std::string& b) {
+        return a.length() > b.length();
+      });
+      // Do longest first since this provides the best results.
+      for (const std::string& s : longest) {
+        it = prefixes_.find(s);
+        CHECK(it != prefixes_.end());
+        const std::string& prefix = it->first;
+        int64_t best_savings = 0u;
+        int64_t best_len = -1;
+        for (int64_t len = prefix.length() - 1; len >= 0; --len) {
+          auto found = prefixes_.find(prefix.substr(0, len));
+          if (len != 0 && found == prefixes_.end()) {
+            continue;
+          }
+          // Calculate savings from downgrading the prefix.
+          int64_t savings = kPrefixConstantCost + prefix.length() -
+              (prefix.length() - len) * it->second;
+          if (savings > best_savings) {
+            best_savings = savings;
+            best_len = len;
+            break;
+          }
+        }
+        if (best_len != -1) {
+          prefixes_[prefix.substr(0, best_len)] += it->second;
+          it = prefixes_.erase(it);
+          optimization_savings_ += best_savings;
+          have_savings = true;
+        } else {
+          ++it;
+        }
+      }
+      if (!have_savings) {
+        break;
+      }
+    }
+    for (auto&& pair : prefixes_) {
+      prefix_trie->Add(pair.first);
+    }
+  }
+
+  // Count longest prefixes.
+  std::set<std::string> used_prefixes;
+  std::vector<std::string> suffix;
+  for (const std::string& str : strings) {
+    auto pair = prefix_trie->LongestPrefix(str);
+    const size_t len = pair.first;
+    if (len >= kMinPrefixLen) {
+      ++strings_used_prefixed_;
+      total_prefix_savings_ += len;
+      used_prefixes.insert(str.substr(0, len));
+    }
+    suffix.push_back(str.substr(len));
+    if (suffix.back().size() < kShortLen) {
+      ++short_strings_;
+    } else {
+      ++long_strings_;
+    }
+  }
+  std::sort(suffix.begin(), suffix.end());
+  for (const std::string& prefix : used_prefixes) {
+    // 4 bytes for an offset, one for length.
+    auto pair = prefix_trie->LongestPrefix(prefix);
+    CHECK_EQ(pair.first, prefix.length());
+    if (pair.second) {
+      // Only need to add to dictionary if it's a leaf, otherwise we can reuse string data of the
+      // other prefix.
+      total_prefix_dict_ += prefix.size();
+    }
+    total_prefix_table_ += kPrefixConstantCost;
+  }
+  ProcessStrings(suffix, iterations - 1);
+}
+
+void AnalyzeStrings::Dump(std::ostream& os, uint64_t total_size) const {
+  os << "Total string data bytes " << Percent(string_data_bytes_, total_size) << "\n";
+  os << "UTF-16 string data bytes " << Percent(wide_string_bytes_, total_size) << "\n";
+  os << "ASCII string data bytes " << Percent(ascii_string_bytes_, total_size) << "\n";
+
+  // Prefix based strings.
+  os << "Total shared prefix bytes " << Percent(total_shared_prefix_bytes_, total_size) << "\n";
+  os << "Prefix dictionary cost " << Percent(total_prefix_dict_, total_size) << "\n";
+  os << "Prefix table cost " << Percent(total_prefix_table_, total_size) << "\n";
+  os << "Prefix index cost " << Percent(total_prefix_index_cost_, total_size) << "\n";
+  int64_t net_savings = total_prefix_savings_ + short_strings_;
+  net_savings -= total_prefix_dict_;
+  net_savings -= total_prefix_table_;
+  net_savings -= total_prefix_index_cost_;
+  os << "Prefix dictionary elements " << total_num_prefixes_ << "\n";
+  os << "Optimization savings " << Percent(optimization_savings_, total_size) << "\n";
+  os << "Prefix net savings " << Percent(net_savings, total_size) << "\n";
+  os << "Strings using prefix "
+     << Percent(strings_used_prefixed_, total_prefix_index_cost_ / kPrefixIndexCost) << "\n";
+  os << "Short strings " << Percent(short_strings_, short_strings_ + long_strings_) << "\n";
+  if (verbose_level_ >= VerboseLevel::kEverything) {
+    std::vector<std::pair<std::string, size_t>> pairs(prefixes_.begin(), prefixes_.end());
+    // Sort lexicographically.
+    std::sort(pairs.begin(), pairs.end());
+    for (const auto& pair : pairs) {
+      os << pair.first << " : " << pair.second << "\n";
+    }
+  }
+}
+
+}  // namespace dexanalyze
+}  // namespace art
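
A minimal sketch (not part of the change above) of the shared-prefix accounting that ProcessStrings() performs: after sorting, each string shares its longest prefix with one of its neighbours. SharedPrefixLen and the sample strings below are illustrative stand-ins for the tool's PrefixLen() helper and real dex string data.

// Sketch: sum the bytes that neighbouring sorted strings share, the quantity a
// prefix dictionary could avoid storing twice.
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

static size_t SharedPrefixLen(const std::string& a, const std::string& b) {
  size_t len = 0u;
  const size_t limit = std::min(a.length(), b.length());
  while (len < limit && a[len] == b[len]) {
    ++len;
  }
  return len;
}

int main() {
  std::vector<std::string> strings = {
      "Ljava/lang/Object;", "Ljava/util/List;", "Ljava/lang/String;"};
  std::sort(strings.begin(), strings.end());
  size_t total_shared = 0u;
  for (size_t i = 0; i < strings.size(); ++i) {
    size_t best = 0u;
    if (i > 0u) {
      best = std::max(best, SharedPrefixLen(strings[i], strings[i - 1]));
    }
    if (i + 1u < strings.size()) {
      best = std::max(best, SharedPrefixLen(strings[i], strings[i + 1]));
    }
    total_shared += best;  // Bytes recoverable by a prefix dictionary for this string.
  }
  std::cout << "total shared prefix bytes: " << total_shared << "\n";
  return 0;
}
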
diff --git a/tools/dexanalyze/dexanalyze_strings.h b/tools/dexanalyze/dexanalyze_strings.h
new file mode 100644
index 0000000..32702a6
--- /dev/null
+++ b/tools/dexanalyze/dexanalyze_strings.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TOOLS_DEXANALYZE_DEXANALYZE_STRINGS_H_
+#define ART_TOOLS_DEXANALYZE_DEXANALYZE_STRINGS_H_
+
+#include <array>
+#include <vector>
+#include <map>
+
+#include "base/safe_map.h"
+#include "dexanalyze_experiments.h"
+#include "dex/code_item_accessors.h"
+#include "dex/utf-inl.h"
+
+namespace art {
+namespace dexanalyze {
+
+// Analyze string data and strings accessed from code.
+class AnalyzeStrings : public Experiment {
+ public:
+  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
+  void Dump(std::ostream& os, uint64_t total_size) const override;
+
+ private:
+  void ProcessStrings(const std::vector<std::string>& strings, size_t iterations);
+
+  int64_t wide_string_bytes_ = 0u;
+  int64_t ascii_string_bytes_ = 0u;
+  int64_t string_data_bytes_ = 0u;
+  int64_t total_shared_prefix_bytes_ = 0u;
+  int64_t total_prefix_savings_ = 0u;
+  int64_t total_prefix_dict_ = 0u;
+  int64_t total_prefix_table_ = 0u;
+  int64_t total_prefix_index_cost_ = 0u;
+  int64_t total_num_prefixes_ = 0u;
+  int64_t optimization_savings_ = 0u;
+  int64_t strings_used_prefixed_ = 0u;
+  int64_t short_strings_ = 0u;
+  int64_t long_strings_ = 0u;
+  std::unordered_map<std::string, size_t> prefixes_;
+};
+
+}  // namespace dexanalyze
+}  // namespace art
+
+#endif  // ART_TOOLS_DEXANALYZE_DEXANALYZE_STRINGS_H_
diff --git a/tools/field-null-percent/Android.bp b/tools/field-null-percent/Android.bp
new file mode 100644
index 0000000..26bb1dc
--- /dev/null
+++ b/tools/field-null-percent/Android.bp
@@ -0,0 +1,56 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+cc_defaults {
+    name: "fieldnull-defaults",
+    host_supported: true,
+    srcs: ["fieldnull.cc"],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it must be
+    // the same ISA as the process it is attached to.
+    compile_multilib: "both",
+
+    shared_libs: [
+        "libbase",
+    ],
+    header_libs: [
+        "libopenjdkjvmti_headers",
+    ],
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+    symlink_preferred_arch: true,
+}
+
+art_cc_library {
+    name: "libfieldnull",
+    defaults: ["fieldnull-defaults"],
+}
+
+art_cc_library {
+    name: "libfieldnulld",
+    defaults: [
+        "art_debug_defaults",
+        "fieldnull-defaults",
+    ],
+}
diff --git a/tools/field-null-percent/README.md b/tools/field-null-percent/README.md
new file mode 100644
index 0000000..d8bc65d
--- /dev/null
+++ b/tools/field-null-percent/README.md
@@ -0,0 +1,51 @@
+# fieldnull
+
+fieldnull is a JVMTI agent that counts, for a given field, how many instances
+have that field set to null. This can be useful for determining which fields
+should be moved into side structures in cases where memory use is
+important.
+
+# Usage
+### Build
+>    `make libfieldnull`
+
+The libraries will be built for 32-bit, 64-bit, host and target. The examples
+below assume you want to use the 64-bit version.
+
+### Command Line
+
+The agent is loaded using -agentpath like normal. It takes arguments in the
+following format:
+>     `Lname/of/class;.nameOfField:Ltype/of/field;[,...]`
+
+#### ART
+>    `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so '-agentpath:libfieldnull.so=Lname/of/class;.nameOfField:Ltype/of/field;' -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti.
+
+>    `adb shell setenforce 0`
+>
+>    `adb push $ANDROID_PRODUCT_OUT/system/lib64/libfieldnull.so /data/local/tmp/`
+>
+>    `adb shell am start-activity --attach-agent '/data/local/tmp/libfieldnull.so=Ljava/lang/Class;.name:Ljava/lang/String;' some.debuggable.apps/.the.app.MainActivity`
+
+#### RI
+>    `java '-agentpath:libfieldnull.so=Lname/of/class;.nameOfField:Ltype/of/field;' -cp tmp/helloworld/classes helloworld`
+
+### Printing the Results
+All statistics gathered during the trace are printed automatically when the
+program exits normally. Android applications, however, are always killed rather
+than exiting normally, so the results need to be dumped manually.
+
+>    `kill -SIGQUIT $(pid com.littleinc.orm_benchmark)`
+
+This will initiate a dump of the counts (to logcat).
+
+The dump will look something like this.
+
+> `dalvikvm32 I 08-30 14:51:20 84818 84818 fieldnull.cc:96] Dumping counts of null fields.`
+>
+> `dalvikvm32 I 08-30 14:51:20 84818 84818 fieldnull.cc:97] 	Field name	null count	total count`
+>
+> `dalvikvm32 I 08-30 14:51:20 84818 84818 fieldnull.cc:135] 	Ljava/lang/Class;.name:Ljava/lang/String;	5	2936`
diff --git a/tools/field-null-percent/fieldnull.cc b/tools/field-null-percent/fieldnull.cc
new file mode 100644
index 0000000..86459d2
--- /dev/null
+++ b/tools/field-null-percent/fieldnull.cc
@@ -0,0 +1,218 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+
+#include <atomic>
+#include <iomanip>
+#include <iostream>
+#include <istream>
+#include <jni.h>
+#include <jvmti.h>
+#include <memory>
+#include <sstream>
+#include <string.h>
+#include <string>
+#include <vector>
+
+namespace fieldnull {
+
+#define CHECK_JVMTI(x) CHECK_EQ((x), JVMTI_ERROR_NONE)
+
+// Special art ti-version number. We will use this as a fallback if we cannot get a regular JVMTI
+// env.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
+static JavaVM* java_vm = nullptr;
+
+// Field is "Lclass/name/here;.field_name:Lfield/type/here;"
+static std::pair<jclass, jfieldID> SplitField(JNIEnv* env, const std::string& field_id) {
+  CHECK_EQ(field_id[0], 'L');
+  env->PushLocalFrame(1);
+  std::istringstream is(field_id);
+  std::string class_name;
+  std::string field_name;
+  std::string field_type;
+
+  std::getline(is, class_name, '.');
+  std::getline(is, field_name, ':');
+  std::getline(is, field_type, '\0');
+
+  jclass klass = reinterpret_cast<jclass>(
+      env->NewGlobalRef(env->FindClass(class_name.substr(1, class_name.size() - 2).c_str())));
+  jfieldID field = env->GetFieldID(klass, field_name.c_str(), field_type.c_str());
+  CHECK(klass != nullptr);
+  CHECK(field != nullptr);
+  LOG(INFO) << "listing field " << field_id;
+  env->PopLocalFrame(nullptr);
+  return std::make_pair(klass, field);
+}
+
+static std::vector<std::pair<jclass, jfieldID>> GetRequestedFields(JNIEnv* env,
+                                                                   const std::string& args) {
+  std::vector<std::pair<jclass, jfieldID>> res;
+  std::stringstream args_stream(args);
+  std::string item;
+  while (std::getline(args_stream, item, ',')) {
+    if (item == "") {
+      continue;
+    }
+    res.push_back(SplitField(env, item));
+  }
+  return res;
+}
+
+static jint SetupJvmtiEnv(JavaVM* vm, jvmtiEnv** jvmti) {
+  jint res = 0;
+  res = vm->GetEnv(reinterpret_cast<void**>(jvmti), JVMTI_VERSION_1_1);
+
+  if (res != JNI_OK || *jvmti == nullptr) {
+    LOG(ERROR) << "Unable to access JVMTI, error code " << res;
+    return vm->GetEnv(reinterpret_cast<void**>(jvmti), kArtTiVersion);
+  }
+  return res;
+}
+
+struct RequestList {
+  std::vector<std::pair<jclass, jfieldID>> fields_;
+};
+
+static void DataDumpRequestCb(jvmtiEnv* jvmti) {
+  JNIEnv* env = nullptr;
+  CHECK_EQ(java_vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6), JNI_OK);
+  LOG(INFO) << "Dumping counts of null fields.";
+  LOG(INFO) << "\t" << "Field name"
+            << "\t" << "null count"
+            << "\t" << "total count";
+  RequestList* list;
+  CHECK_JVMTI(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&list)));
+  for (std::pair<jclass, jfieldID>& p : list->fields_) {
+    jclass klass = p.first;
+    jfieldID field = p.second;
+    // Make sure all instances of the class are tagged with the klass ptr value. Since this is a
+    // global ref it's guaranteed to be unique.
+    CHECK_JVMTI(jvmti->IterateOverInstancesOfClass(
+        p.first,
+        // We need to do this to all objects every time since we might be looking for multiple
+        // fields in classes that are subtypes of each other.
+        JVMTI_HEAP_OBJECT_EITHER,
+        /* class_tag, size, tag_ptr, user_data*/
+        [](jlong, jlong, jlong* tag_ptr, void* klass) -> jvmtiIterationControl {
+          *tag_ptr = static_cast<jlong>(reinterpret_cast<intptr_t>(klass));
+          return JVMTI_ITERATION_CONTINUE;
+        },
+        klass));
+    jobject* obj_list;
+    jint obj_len;
+    jlong tag = static_cast<jlong>(reinterpret_cast<intptr_t>(klass));
+    CHECK_JVMTI(jvmti->GetObjectsWithTags(1, &tag, &obj_len, &obj_list, nullptr));
+
+    uint64_t null_cnt = 0;
+    for (jint i = 0; i < obj_len; i++) {
+      if (env->GetObjectField(obj_list[i], field) == nullptr) {
+        null_cnt++;
+      }
+    }
+
+    char* field_name;
+    char* field_sig;
+    char* class_name;
+    CHECK_JVMTI(jvmti->GetFieldName(klass, field, &field_name, &field_sig, nullptr));
+    CHECK_JVMTI(jvmti->GetClassSignature(klass, &class_name, nullptr));
+    LOG(INFO) << "\t" << class_name << "." << field_name << ":" << field_sig
+              << "\t" << null_cnt
+              << "\t" << obj_len;
+    CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(field_name)));
+    CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(field_sig)));
+    CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(class_name)));
+  }
+}
+
+static void VMDeathCb(jvmtiEnv* jvmti, JNIEnv* env ATTRIBUTE_UNUSED) {
+  DataDumpRequestCb(jvmti);
+  RequestList* list = nullptr;
+  CHECK_JVMTI(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&list)));
+  delete list;
+}
+
+static void CreateFieldList(jvmtiEnv* jvmti, JNIEnv* env, std::string args) {
+  RequestList* list = nullptr;
+  CHECK_JVMTI(jvmti->Allocate(sizeof(*list), reinterpret_cast<unsigned char**>(&list)));
+  new (list) RequestList { .fields_ = GetRequestedFields(env, args), };
+  CHECK_JVMTI(jvmti->SetEnvironmentLocalStorage(list));
+}
+
+static void VMInitCb(jvmtiEnv* jvmti, JNIEnv* env, jobject thr ATTRIBUTE_UNUSED) {
+  char* args = nullptr;
+  CHECK_JVMTI(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&args)));
+  CHECK_JVMTI(jvmti->SetEnvironmentLocalStorage(nullptr));
+  CreateFieldList(jvmti, env, args);
+  CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_DEATH, nullptr));
+  CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+                                              JVMTI_EVENT_DATA_DUMP_REQUEST,
+                                              nullptr));
+  CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(args)));
+}
+
+static jint AgentStart(JavaVM* vm, char* options, bool is_onload) {
+  android::base::InitLogging(/* argv */nullptr);
+  java_vm = vm;
+  jvmtiEnv* jvmti = nullptr;
+  if (SetupJvmtiEnv(vm, &jvmti) != JNI_OK) {
+    LOG(ERROR) << "Could not get JVMTI env or ArtTiEnv!";
+    return JNI_ERR;
+  }
+  jvmtiCapabilities caps { .can_tag_objects = 1, };
+  CHECK_JVMTI(jvmti->AddCapabilities(&caps));
+  jvmtiEventCallbacks cb {
+    .VMInit = VMInitCb,
+    .DataDumpRequest = DataDumpRequestCb,
+    .VMDeath = VMDeathCb,
+  };
+  CHECK_JVMTI(jvmti->SetEventCallbacks(&cb, sizeof(cb)));
+  if (is_onload) {
+    unsigned char* ptr = nullptr;
+    CHECK_JVMTI(jvmti->Allocate(strlen(options) + 1, &ptr));
+    strcpy(reinterpret_cast<char*>(ptr), options);
+    CHECK_JVMTI(jvmti->SetEnvironmentLocalStorage(ptr));
+    CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_INIT, nullptr));
+  } else {
+    JNIEnv* env = nullptr;
+    CHECK_EQ(vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6), JNI_OK);
+    CreateFieldList(jvmti, env, options);
+    CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_DEATH, nullptr));
+    CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+                                                JVMTI_EVENT_DATA_DUMP_REQUEST,
+                                                nullptr));
+  }
+  return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm,
+                                                 char* options,
+                                                 void* reserved ATTRIBUTE_UNUSED) {
+  return AgentStart(vm, options, /*is_onload*/false);
+}
+
+// Early attachment
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm,
+                                               char* options,
+                                               void* reserved ATTRIBUTE_UNUSED) {
+  return AgentStart(jvm, options, /*is_onload*/true);
+}
+
+}  // namespace fieldnull
+
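
For reference (not part of the patch), the agent option parsing shown above amounts to splitting the option string on ',' and then on '.' and ':'. The sketch below illustrates that without any JNI lookups; the FieldSpec struct and the sample option string are hypothetical, standing in for the pairs produced by GetRequestedFields()/SplitField().

// Sketch: split "Lclass/Name;.field:Ltype;[,...]" into (class, field, type) parts.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct FieldSpec {  // Hypothetical helper type, for illustration only.
  std::string class_descriptor;
  std::string field_name;
  std::string field_type;
};

int main() {
  const std::string options =
      "Ljava/lang/Class;.name:Ljava/lang/String;,Ljava/lang/Thread;.name:Ljava/lang/String;";
  std::vector<FieldSpec> specs;
  std::stringstream args_stream(options);
  std::string item;
  while (std::getline(args_stream, item, ',')) {
    if (item.empty()) {
      continue;
    }
    std::istringstream is(item);
    FieldSpec spec;
    std::getline(is, spec.class_descriptor, '.');  // e.g. "Ljava/lang/Class;"
    std::getline(is, spec.field_name, ':');        // e.g. "name"
    std::getline(is, spec.field_type, '\0');       // e.g. "Ljava/lang/String;"
    specs.push_back(spec);
  }
  for (const FieldSpec& s : specs) {
    std::cout << s.class_descriptor << " " << s.field_name << " " << s.field_type << "\n";
  }
  return 0;
}
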
diff --git a/tools/golem/build-target.sh b/tools/golem/build-target.sh
index 921a8cb..45c9125 100755
--- a/tools/golem/build-target.sh
+++ b/tools/golem/build-target.sh
@@ -367,7 +367,7 @@
     dirs_rooted+=("$root_dir/$tar_dir")
   done
 
-  execute tar -czf "${tarball}" "${dirs_rooted[@]}" --exclude .git --exclude .gitignore
+  execute tar -czf "${tarball}" --exclude ".git" --exclude ".gitignore" "${dirs_rooted[@]}"
   tar_result=$?
   if [[ $tar_result -ne 0 ]]; then
     [[ -f $tarball ]] && rm $tarball
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index bf8a1b7..6d9b6fb 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -235,7 +235,7 @@
   const bool is_method_;
 };
 
-class ClassPath FINAL {
+class ClassPath final {
  public:
   ClassPath(const std::vector<std::string>& dex_paths, bool open_writable) {
     OpenDexFiles(dex_paths, open_writable);
@@ -316,7 +316,7 @@
   std::vector<std::unique_ptr<const DexFile>> dex_files_;
 };
 
-class HierarchyClass FINAL {
+class HierarchyClass final {
  public:
   HierarchyClass() {}
 
@@ -455,7 +455,7 @@
   std::vector<HierarchyClass*> extended_by_;
 };
 
-class Hierarchy FINAL {
+class Hierarchy final {
  public:
   explicit Hierarchy(ClassPath& classpath) : classpath_(classpath) {
     BuildClassHierarchy();
@@ -559,7 +559,7 @@
   std::map<std::string, HierarchyClass> classes_;
 };
 
-class HiddenApi FINAL {
+class HiddenApi final {
  public:
   HiddenApi() {}
 
diff --git a/tools/jfuzz/Android.bp b/tools/jfuzz/Android.bp
new file mode 100644
index 0000000..f0d8b37
--- /dev/null
+++ b/tools/jfuzz/Android.bp
@@ -0,0 +1,29 @@
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Fuzzer tool.
+cc_binary_host {
+    name: "jfuzz",
+    srcs: ["jfuzz.cc"],
+    cflags: [
+        "-O0",
+        "-g",
+        "-Wall",
+    ],
+    target: {
+        windows: {
+            enabled: true,
+        },
+    },
+}
diff --git a/tools/jfuzz/Android.mk b/tools/jfuzz/Android.mk
deleted file mode 100644
index c7002d6..0000000
--- a/tools/jfuzz/Android.mk
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Fuzzer tool.
-
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := jfuzz.cc
-LOCAL_CFLAGS += -O0 -g -Wall
-LOCAL_MODULE_HOST_OS := darwin linux windows
-LOCAL_MODULE := jfuzz
-include $(BUILD_HOST_EXECUTABLE)
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 264217e..3ef78d5 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -224,5 +224,15 @@
           "libcore.javax.crypto.CipherInputStreamTest#testCloseTwice",
           "libcore.libcore.io.BlockGuardOsTest#test_android_getaddrinfo_networkPolicy",
           "libcore.libcore.io.BlockGuardOsTest#test_checkNewMethodsInPosix"]
+},
+{
+  description: "fdsan doesn't exist on the host",
+  result: EXEC_FAILED,
+  modes: [host],
+  bug: 113177877,
+  names: ["libcore.libcore.io.FdsanTest#testFileInputStream",
+          "libcore.libcore.io.FdsanTest#testFileOutputStream",
+          "libcore.libcore.io.FdsanTest#testRandomAccessFile",
+          "libcore.libcore.io.FdsanTest#testParcelFileDescriptor"]
 }
 ]
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 942a4e0..23533af 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -10,14 +10,26 @@
   modes: [device],
   names: ["jsr166.CompletableFutureTest#testCompleteOnTimeout_completed",
           "jsr166.CompletableFutureTest#testDelayedExecutor",
+          "jsr166.ExecutorsTest#testTimedCallable",
+          "jsr166.RecursiveActionTest#testJoinIgnoresInterruptsOutsideForkJoinPool",
           "libcore.libcore.icu.TransliteratorTest#testAll",
           "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_bug25821045",
           "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_bug25883157",
           "libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndTimeout",
+          "libcore.java.text.DecimalFormatTest#testWhitespaceError",
+          "libcore.java.text.DecimalFormatTest#testWhitespaceTolerated",
+          "libcore.java.text.DecimalFormatTest#test_exponentSeparator",
+          "libcore.java.text.DecimalFormatTest#test_setMaximumFractionDigitsAffectsRoundingMode",
+          "libcore.java.util.jar.OldJarFileTest#test_ConstructorLjava_io_File",
+          "libcore.java.util.jar.OldJarFileTest#test_ConstructorLjava_lang_StringZ",
+          "libcore.java.util.jar.OldJarInputStreamTest#test_read$ZII",
           "libcore.java.util.TimeZoneTest#testSetDefaultDeadlock",
           "libcore.javax.crypto.CipherBasicsTest#testBasicEncryption",
+          "org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_removeJ",
           "org.apache.harmony.tests.java.text.MessageFormatTest#test_parseLjava_lang_String",
-          "org.apache.harmony.tests.java.util.TimerTest#testThrowingTaskKillsTimerThread"]
+          "org.apache.harmony.tests.java.util.ControlTest#test_toBundleName_LStringLLocale",
+          "org.apache.harmony.tests.java.util.TimerTest#testThrowingTaskKillsTimerThread"
+        ]
 },
 {
   description: "Sometimes times out with gcstress and debug.",
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index a435f2e..b0b5810 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -311,10 +311,10 @@
   vm_args="$vm_args --vm-arg $plugin"
 fi
 
-if $use_jit; then
-  vm_args="$vm_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
-  debuggee_args="$debuggee_args -Xcompiler-option --compiler-filter=quicken"
-fi
+# Because we're running debuggable, we discard any AOT code.
+# Therefore we run dex2oat with 'quicken' to avoid spending time compiling.
+vm_args="$vm_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
+debuggee_args="$debuggee_args -Xcompiler-option --compiler-filter=quicken"
 
 if $instant_jit; then
   debuggee_args="$debuggee_args -Xjitthreshold:0"
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
index ed6ac3d..4ea5b2d 100644
--- a/tools/tracefast-plugin/tracefast.cc
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -38,7 +38,7 @@
 static constexpr bool kNeedsInterpreter = false;
 #endif  // TRACEFAST_INITERPRETER
 
-class Tracer FINAL : public art::instrumentation::InstrumentationListener {
+class Tracer final : public art::instrumentation::InstrumentationListener {
  public:
   Tracer() {}
 
@@ -46,40 +46,40 @@
                      art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                      art::ArtMethod* method ATTRIBUTE_UNUSED,
                      uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> return_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     const art::JValue& return_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void MethodUnwind(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void DexPcMoved(art::Thread* thread ATTRIBUTE_UNUSED,
                   art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                   art::ArtMethod* method ATTRIBUTE_UNUSED,
                   uint32_t new_dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void FieldRead(art::Thread* thread ATTRIBUTE_UNUSED,
                  art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                  art::ArtMethod* method ATTRIBUTE_UNUSED,
                  uint32_t dex_pc ATTRIBUTE_UNUSED,
                  art::ArtField* field ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
@@ -87,7 +87,7 @@
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     art::ArtField* field ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> field_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
@@ -95,32 +95,32 @@
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     art::ArtField* field ATTRIBUTE_UNUSED,
                     const art::JValue& field_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void ExceptionThrown(art::Thread* thread ATTRIBUTE_UNUSED,
                        art::Handle<art::mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void ExceptionHandled(art::Thread* self ATTRIBUTE_UNUSED,
                         art::Handle<art::mirror::Throwable> throwable ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void Branch(art::Thread* thread ATTRIBUTE_UNUSED,
               art::ArtMethod* method ATTRIBUTE_UNUSED,
               uint32_t dex_pc ATTRIBUTE_UNUSED,
               int32_t dex_pc_offset ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void InvokeVirtualOrInterface(art::Thread* thread ATTRIBUTE_UNUSED,
                                 art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                                 art::ArtMethod* caller ATTRIBUTE_UNUSED,
                                 uint32_t dex_pc ATTRIBUTE_UNUSED,
                                 art::ArtMethod* callee ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void WatchedFramePop(art::Thread* thread ATTRIBUTE_UNUSED,
                        const art::ShadowFrame& frame ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Tracer);
@@ -149,7 +149,7 @@
   TraceFastPhaseCB() {}
 
   void NextRuntimePhase(art::RuntimePhaseCallback::RuntimePhase phase)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (phase == art::RuntimePhaseCallback::RuntimePhase::kInit) {
       art::ScopedThreadSuspension sts(art::Thread::Current(),
                                       art::ThreadState::kWaitingForMethodTracingStart);
diff --git a/tools/veridex/flow_analysis.h b/tools/veridex/flow_analysis.h
index 9c86024..865b9df 100644
--- a/tools/veridex/flow_analysis.h
+++ b/tools/veridex/flow_analysis.h
@@ -192,8 +192,8 @@
     return uses_;
   }
 
-  RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
-  void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+  RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) override;
+  void AnalyzeFieldSet(const Instruction& instruction) override;
 
  private:
   // List of reflection uses found, concrete and abstract.
@@ -212,8 +212,8 @@
     return uses_;
   }
 
-  RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
-  void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+  RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) override;
+  void AnalyzeFieldSet(const Instruction& instruction) override;
 
  private:
   // List of reflection uses found, concrete and abstract.