Use 'final' and 'override' specifiers directly in ART.

Replace all uses of the 'FINAL' and 'OVERRIDE' macros with the 'final'
and 'override' specifiers, and remove the definitions of these macros
(sketched below), which were located in these files:
- libartbase/base/macros.h
- test/913-heaps/heaps.cc
- test/ti-agent/ti_macros.h
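
The removed definitions were thin aliases for the C++ keywords. A
sketch of what they looked like (the exact include guards and comments
in each file may have differed):

  #define OVERRIDE override
  #define FINAL final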

ART now uses C++14; the 'final' and 'override' specifiers were
introduced in C++11, so the macros are no longer needed.
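
For context, a minimal illustration of what the specifiers enforce
(hypothetical types, not code from this change): 'override' turns a
signature mismatch into a compile error, and 'final' forbids further
overriding or derivation.

  class Stream {
   public:
    virtual ~Stream() {}
    virtual bool Flush() { return true; }
  };

  class BufferedStream final : public Stream {  // No further subclassing.
   public:
    bool Flush() override { return true; }  // OK: matches Stream::Flush.
    // bool Flush(int) override;  // Would not compile: overrides nothing.
  };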

Test: mmma art
Change-Id: I256c7758155a71a2940ef2574925a44076feeebf
diff --git a/adbconnection/adbconnection.h b/adbconnection/adbconnection.h
index 04e39bf..c51f981 100644
--- a/adbconnection/adbconnection.h
+++ b/adbconnection/adbconnection.h
@@ -46,12 +46,12 @@
       : connection_(connection) {}
 
   // Begin running the debugger.
-  void StartDebugger() OVERRIDE;
+  void StartDebugger() override;
 
   // The debugger should begin shutting down since the runtime is ending.
-  void StopDebugger() OVERRIDE;
+  void StopDebugger() override;
 
-  bool IsDebuggerConfigured() OVERRIDE;
+  bool IsDebuggerConfigured() override;
 
  private:
   AdbConnectionState* connection_;
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 366489c..e6d1564 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -57,9 +57,9 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
-  void SetUp() OVERRIDE;
+  void SetUp() override;
 
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE;
+  void SetUpRuntimeOptions(RuntimeOptions* options) override;
 
   Compiler::Kind GetCompilerKind() const;
   void SetCompilerKind(Compiler::Kind compiler_kind);
@@ -73,7 +73,7 @@
     return CompilerFilter::kDefaultCompilerFilter;
   }
 
-  void TearDown() OVERRIDE;
+  void TearDown() override;
 
   void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index f880280..864ce58 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -104,7 +104,7 @@
   uint32_t packed_fields_;
 };
 
-class CompiledMethod FINAL : public CompiledCode {
+class CompiledMethod final : public CompiledCode {
  public:
   // Constructs a CompiledMethod.
   // Note: Consider using the static allocation methods below that will allocate the CompiledMethod
diff --git a/compiler/debug/dwarf/debug_abbrev_writer.h b/compiler/debug/dwarf/debug_abbrev_writer.h
index cccca25..63a049b 100644
--- a/compiler/debug/dwarf/debug_abbrev_writer.h
+++ b/compiler/debug/dwarf/debug_abbrev_writer.h
@@ -37,7 +37,7 @@
 // determines all the attributes and their format.
 // It is possible to think of them as type definitions.
 template <typename Vector = std::vector<uint8_t>>
-class DebugAbbrevWriter FINAL : private Writer<Vector> {
+class DebugAbbrevWriter final : private Writer<Vector> {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
 
  public:
diff --git a/compiler/debug/dwarf/debug_info_entry_writer.h b/compiler/debug/dwarf/debug_info_entry_writer.h
index 89d16f2..b198178 100644
--- a/compiler/debug/dwarf/debug_info_entry_writer.h
+++ b/compiler/debug/dwarf/debug_info_entry_writer.h
@@ -42,7 +42,7 @@
  *   EndTag();
  */
 template <typename Vector = std::vector<uint8_t>>
-class DebugInfoEntryWriter FINAL : private Writer<Vector> {
+class DebugInfoEntryWriter final : private Writer<Vector> {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
 
  public:
diff --git a/compiler/debug/dwarf/debug_line_opcode_writer.h b/compiler/debug/dwarf/debug_line_opcode_writer.h
index b4a4d63..bb4e87f 100644
--- a/compiler/debug/dwarf/debug_line_opcode_writer.h
+++ b/compiler/debug/dwarf/debug_line_opcode_writer.h
@@ -31,7 +31,7 @@
 //  * Keep track of current state and convert absolute values to deltas.
 //  * Divide by header-defined factors as appropriate.
 template<typename Vector = std::vector<uint8_t>>
-class DebugLineOpCodeWriter FINAL : private Writer<Vector> {
+class DebugLineOpCodeWriter final : private Writer<Vector> {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
 
  public:
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 8a07e9c..b7117bd 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -26,7 +26,7 @@
 class DexFile;
 class VerificationResults;
 
-class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
+class QuickCompilerCallbacks final : public CompilerCallbacks {
  public:
   explicit QuickCompilerCallbacks(CompilerCallbacks::CallbackMode mode)
       : CompilerCallbacks(mode), dex_files_(nullptr) {}
@@ -34,20 +34,20 @@
   ~QuickCompilerCallbacks() { }
 
   void MethodVerified(verifier::MethodVerifier* verifier)
-      REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) override;
 
-  void ClassRejected(ClassReference ref) OVERRIDE;
+  void ClassRejected(ClassReference ref) override;
 
   // We are running in an environment where we can call patchoat safely so we should.
-  bool IsRelocationPossible() OVERRIDE {
+  bool IsRelocationPossible() override {
     return true;
   }
 
-  verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE {
+  verifier::VerifierDeps* GetVerifierDeps() const override {
     return verifier_deps_.get();
   }
 
-  void SetVerifierDeps(verifier::VerifierDeps* deps) OVERRIDE {
+  void SetVerifierDeps(verifier::VerifierDeps* deps) override {
     verifier_deps_.reset(deps);
   }
 
@@ -55,18 +55,18 @@
     verification_results_ = verification_results;
   }
 
-  ClassStatus GetPreviousClassState(ClassReference ref) OVERRIDE;
+  ClassStatus GetPreviousClassState(ClassReference ref) override;
 
   void SetDoesClassUnloading(bool does_class_unloading, CompilerDriver* compiler_driver)
-      OVERRIDE {
+      override {
     does_class_unloading_ = does_class_unloading;
     compiler_driver_ = compiler_driver;
     DCHECK(!does_class_unloading || compiler_driver_ != nullptr);
   }
 
-  void UpdateClassState(ClassReference ref, ClassStatus state) OVERRIDE;
+  void UpdateClassState(ClassReference ref, ClassStatus state) override;
 
-  bool CanUseOatStatusForVerification(mirror::Class* klass) OVERRIDE
+  bool CanUseOatStatusForVerification(mirror::Class* klass) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetDexFiles(const std::vector<const DexFile*>* dex_files) {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6eca304..fd7a35f 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -971,7 +971,7 @@
  public:
   ResolveCatchBlockExceptionsClassVisitor() : classes_() {}
 
-  virtual bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  virtual bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
     classes_.push_back(c);
     return true;
   }
@@ -1034,7 +1034,7 @@
   explicit RecordImageClassesVisitor(HashSet<std::string>* image_classes)
       : image_classes_(image_classes) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     std::string temp;
     image_classes_->insert(klass->GetDescriptor(&temp));
     return true;
@@ -1210,7 +1210,7 @@
         : data_(data),
           hs_(hs) {}
 
-    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string temp;
       StringPiece name(klass->GetDescriptor(&temp));
       if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
@@ -1475,7 +1475,7 @@
           end_(end),
           fn_(fn) {}
 
-    void Run(Thread* self) OVERRIDE {
+    void Run(Thread* self) override {
       while (true) {
         const size_t index = manager_->NextIndex();
         if (UNLIKELY(index >= end_)) {
@@ -1486,7 +1486,7 @@
       }
     }
 
-    void Finalize() OVERRIDE {
+    void Finalize() override {
       delete this;
     }
 
@@ -1568,7 +1568,7 @@
   explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
       : manager_(manager) {}
 
-  void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+  void Visit(size_t class_def_index) override REQUIRES(!Locks::mutator_lock_) {
     ScopedTrace trace(__FUNCTION__);
     Thread* const self = Thread::Current();
     jobject jclass_loader = manager_->GetClassLoader();
@@ -1667,7 +1667,7 @@
  public:
   explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) {
   }
-  void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+  void Visit(size_t type_idx) override REQUIRES(!Locks::mutator_lock_) {
   // Class derived values are more complicated, they require the linker and loader.
     ScopedObjectAccess soa(Thread::Current());
     ClassLinker* class_linker = manager_->GetClassLinker();
@@ -1888,7 +1888,7 @@
   VerifyClassVisitor(const ParallelCompilationManager* manager, verifier::HardFailLogMode log_level)
      : manager_(manager), log_level_(log_level) {}
 
-  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) override {
     ScopedTrace trace(__FUNCTION__);
     ScopedObjectAccess soa(Thread::Current());
     const DexFile& dex_file = *manager_->GetDexFile();
@@ -2020,7 +2020,7 @@
  public:
   explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
 
-  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) override {
     ScopedTrace trace(__FUNCTION__);
     ScopedObjectAccess soa(Thread::Current());
     const DexFile& dex_file = *manager_->GetDexFile();
@@ -2085,7 +2085,7 @@
  public:
   explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
 
-  void Visit(size_t class_def_index) OVERRIDE {
+  void Visit(size_t class_def_index) override {
     ScopedTrace trace(__FUNCTION__);
     jobject jclass_loader = manager_->GetClassLoader();
     const DexFile& dex_file = *manager_->GetDexFile();
@@ -2470,7 +2470,7 @@
   explicit InitializeArrayClassesAndCreateConflictTablesVisitor(VariableSizedHandleScope& hs)
       : hs_(hs) {}
 
-  virtual bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE
+  virtual bool operator()(ObjPtr<mirror::Class> klass) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
       return true;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 2eeb439..fe1568d 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -186,7 +186,7 @@
 
 class CompilerDriverProfileTest : public CompilerDriverTest {
  protected:
-  ProfileCompilationInfo* GetProfileCompilationInfo() OVERRIDE {
+  ProfileCompilationInfo* GetProfileCompilationInfo() override {
     ScopedObjectAccess soa(Thread::Current());
     std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
 
@@ -200,7 +200,7 @@
     return &profile_info_;
   }
 
-  CompilerFilter::Filter GetCompilerFilter() const OVERRIDE {
+  CompilerFilter::Filter GetCompilerFilter() const override {
     // Use a profile based filter.
     return CompilerFilter::kSpeedProfile;
   }
@@ -278,7 +278,7 @@
 // which will be used for OatClass.
 class CompilerDriverVerifyTest : public CompilerDriverTest {
  protected:
-  CompilerFilter::Filter GetCompilerFilter() const OVERRIDE {
+  CompilerFilter::Filter GetCompilerFilter() const override {
     return CompilerFilter::kVerify;
   }
 
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 601c914..34aceba 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -43,7 +43,7 @@
 enum class InstructionSet;
 class InstructionSetFeatures;
 
-class CompilerOptions FINAL {
+class CompilerOptions final {
  public:
   // Guide heuristics to determine whether to compile method if profile data not available.
   static const size_t kDefaultHugeMethodThreshold = 10000;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 3cb4a65..92b9543 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -221,12 +221,12 @@
 
 class JniCompilerTest : public CommonCompilerTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonCompilerTest::SetUp();
     check_generic_jni_ = false;
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     android::ResetNativeLoader();
     CommonCompilerTest::TearDown();
   }
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 249f202..b327898 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -25,24 +25,24 @@
 
 constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
 
-class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class ArmManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k32) {}
-  ~ArmManagedRuntimeCallingConvention() OVERRIDE {}
+  ~ArmManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   ManagedRegisterEntrySpills entry_spills_;
@@ -50,37 +50,37 @@
   DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
 };
 
-class ArmJniCallingConvention FINAL : public JniCallingConvention {
+class ArmJniCallingConvention final : public JniCallingConvention {
  public:
   ArmJniCallingConvention(bool is_static,
                           bool is_synchronized,
                           bool is_critical_native,
                           const char* shorty);
-  ~ArmJniCallingConvention() OVERRIDE {}
+  ~ArmJniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  void Next() OVERRIDE;  // Override default behavior for AAPCS
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  void Next() override;  // Override default behavior for AAPCS
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // AAPCS mandates return values are extended.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return false;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   // Padding to ensure longs and doubles are not split in AAPCS
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 5618942..ed0ddeb 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -25,24 +25,24 @@
 
 constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
 
-class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class Arm64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k64) {}
-  ~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
+  ~Arm64ManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   ManagedRegisterEntrySpills entry_spills_;
@@ -50,36 +50,36 @@
   DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
 };
 
-class Arm64JniCallingConvention FINAL : public JniCallingConvention {
+class Arm64JniCallingConvention final : public JniCallingConvention {
  public:
   Arm64JniCallingConvention(bool is_static,
                             bool is_synchronized,
                             bool is_critical_native,
                             const char* shorty);
-  ~Arm64JniCallingConvention() OVERRIDE {}
+  ~Arm64JniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // aarch64 calling convention leaves upper bits undefined.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return true;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention);
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index ad3f118..165fc60 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -27,24 +27,24 @@
 static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k32),
               "Invalid frame pointer size");
 
-class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class MipsManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k32) {}
-  ~MipsManagedRuntimeCallingConvention() OVERRIDE {}
+  ~MipsManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   ManagedRegisterEntrySpills entry_spills_;
@@ -52,37 +52,37 @@
   DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
 };
 
-class MipsJniCallingConvention FINAL : public JniCallingConvention {
+class MipsJniCallingConvention final : public JniCallingConvention {
  public:
   MipsJniCallingConvention(bool is_static,
                            bool is_synchronized,
                            bool is_critical_native,
                            const char* shorty);
-  ~MipsJniCallingConvention() OVERRIDE {}
+  ~MipsJniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  void Next() OVERRIDE;  // Override default behavior for o32.
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  void Next() override;  // Override default behavior for o32.
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // Mips does not need to extend small return types.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return false;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   // Padding to ensure longs and doubles are not split in o32.
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index faedaef..d87f73a 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -27,24 +27,24 @@
 static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k64),
               "Invalid frame pointer size");
 
-class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class Mips64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k64) {}
-  ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
+  ~Mips64ManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   ManagedRegisterEntrySpills entry_spills_;
@@ -52,36 +52,36 @@
   DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
 };
 
-class Mips64JniCallingConvention FINAL : public JniCallingConvention {
+class Mips64JniCallingConvention final : public JniCallingConvention {
  public:
   Mips64JniCallingConvention(bool is_static,
                              bool is_synchronized,
                              bool is_critical_native,
                              const char* shorty);
-  ~Mips64JniCallingConvention() OVERRIDE {}
+  ~Mips64JniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // Mips64 does not need to extend small return types.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return false;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index be83cda..d0c6198 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -25,7 +25,7 @@
 
 constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
 
-class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class X86ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
@@ -33,17 +33,17 @@
                                         shorty,
                                         PointerSize::k32),
         gpr_arg_count_(0) {}
-  ~X86ManagedRuntimeCallingConvention() OVERRIDE {}
+  ~X86ManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
 
  private:
   int gpr_arg_count_;
@@ -53,36 +53,36 @@
 };
 
 // Implements the x86 cdecl calling convention.
-class X86JniCallingConvention FINAL : public JniCallingConvention {
+class X86JniCallingConvention final : public JniCallingConvention {
  public:
   X86JniCallingConvention(bool is_static,
                           bool is_synchronized,
                           bool is_critical_native,
                           const char* shorty);
-  ~X86JniCallingConvention() OVERRIDE {}
+  ~X86JniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // x86 needs to extend small return types.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return true;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention);
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index cdba334..dfab41b 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -23,59 +23,59 @@
 namespace art {
 namespace x86_64 {
 
-class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+class X86_64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
       : ManagedRuntimeCallingConvention(is_static,
                                         is_synchronized,
                                         shorty,
                                         PointerSize::k64) {}
-  ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
+  ~X86_64ManagedRuntimeCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // Managed runtime calling convention
-  ManagedRegister MethodRegister() OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
-  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+  ManagedRegister MethodRegister() override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
+  const ManagedRegisterEntrySpills& EntrySpills() override;
  private:
   ManagedRegisterEntrySpills entry_spills_;
   DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention);
 };
 
-class X86_64JniCallingConvention FINAL : public JniCallingConvention {
+class X86_64JniCallingConvention final : public JniCallingConvention {
  public:
   X86_64JniCallingConvention(bool is_static,
                              bool is_synchronized,
                              bool is_critical_native,
                              const char* shorty);
-  ~X86_64JniCallingConvention() OVERRIDE {}
+  ~X86_64JniCallingConvention() override {}
   // Calling convention
-  ManagedRegister ReturnRegister() OVERRIDE;
-  ManagedRegister IntReturnRegister() OVERRIDE;
-  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  ManagedRegister ReturnRegister() override;
+  ManagedRegister IntReturnRegister() override;
+  ManagedRegister InterproceduralScratchRegister() override;
   // JNI calling convention
-  size_t FrameSize() OVERRIDE;
-  size_t OutArgSize() OVERRIDE;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
-  ManagedRegister ReturnScratchRegister() const OVERRIDE;
-  uint32_t CoreSpillMask() const OVERRIDE;
-  uint32_t FpSpillMask() const OVERRIDE;
-  bool IsCurrentParamInRegister() OVERRIDE;
-  bool IsCurrentParamOnStack() OVERRIDE;
-  ManagedRegister CurrentParamRegister() OVERRIDE;
-  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  size_t FrameSize() override;
+  size_t OutArgSize() override;
+  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
+  ManagedRegister ReturnScratchRegister() const override;
+  uint32_t CoreSpillMask() const override;
+  uint32_t FpSpillMask() const override;
+  bool IsCurrentParamInRegister() override;
+  bool IsCurrentParamOnStack() override;
+  ManagedRegister CurrentParamRegister() override;
+  FrameOffset CurrentParamStackOffset() override;
 
   // x86-64 needs to extend small return types.
-  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+  bool RequiresSmallResultTypeExtension() const override {
     return true;
   }
 
  protected:
-  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+  size_t NumberOfOutgoingStackArgs() override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention);
diff --git a/compiler/linker/buffered_output_stream.h b/compiler/linker/buffered_output_stream.h
index 512409c..cb1c44b 100644
--- a/compiler/linker/buffered_output_stream.h
+++ b/compiler/linker/buffered_output_stream.h
@@ -26,17 +26,17 @@
 namespace art {
 namespace linker {
 
-class BufferedOutputStream FINAL : public OutputStream {
+class BufferedOutputStream final : public OutputStream {
  public:
   explicit BufferedOutputStream(std::unique_ptr<OutputStream> out);
 
-  ~BufferedOutputStream() OVERRIDE;
+  ~BufferedOutputStream() override;
 
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE;
+  bool WriteFully(const void* buffer, size_t byte_count) override;
 
-  off_t Seek(off_t offset, Whence whence) OVERRIDE;
+  off_t Seek(off_t offset, Whence whence) override;
 
-  bool Flush() OVERRIDE;
+  bool Flush() override;
 
  private:
   static const size_t kBufferSize = 8 * KB;
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 974c590..81ecc17 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -75,7 +75,7 @@
 // The debug sections are written last for easier stripping.
 //
 template <typename ElfTypes>
-class ElfBuilder FINAL {
+class ElfBuilder final {
  public:
   static constexpr size_t kMaxProgramHeaders = 16;
   // SHA-1 digest.  Not using SHA_DIGEST_LENGTH from openssl/sha.h to avoid
@@ -173,21 +173,21 @@
 
     // This function always succeeds to simplify code.
     // Use builder's Good() to check the actual status.
-    bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+    bool WriteFully(const void* buffer, size_t byte_count) override {
       CHECK(owner_->current_section_ == this);
       return owner_->stream_.WriteFully(buffer, byte_count);
     }
 
     // This function always succeeds to simplify code.
     // Use builder's Good() to check the actual status.
-    off_t Seek(off_t offset, Whence whence) OVERRIDE {
+    off_t Seek(off_t offset, Whence whence) override {
       // Forward the seek as-is and trust the caller to use it reasonably.
       return owner_->stream_.Seek(offset, whence);
     }
 
     // This function flushes the output and returns whether it succeeded.
     // If there was a previous failure, this does nothing and returns false, i.e. failed.
-    bool Flush() OVERRIDE {
+    bool Flush() override {
       return owner_->stream_.Flush();
     }
 
@@ -271,7 +271,7 @@
   };
 
   // Writer of .dynstr section.
-  class CachedStringSection FINAL : public CachedSection {
+  class CachedStringSection final : public CachedSection {
    public:
     CachedStringSection(ElfBuilder<ElfTypes>* owner,
                         const std::string& name,
@@ -295,7 +295,7 @@
   };
 
   // Writer of .strtab and .shstrtab sections.
-  class StringSection FINAL : public Section {
+  class StringSection final : public Section {
    public:
     StringSection(ElfBuilder<ElfTypes>* owner,
                   const std::string& name,
@@ -338,7 +338,7 @@
   };
 
   // Writer of .dynsym and .symtab sections.
-  class SymbolSection FINAL : public Section {
+  class SymbolSection final : public Section {
    public:
     SymbolSection(ElfBuilder<ElfTypes>* owner,
                   const std::string& name,
@@ -410,7 +410,7 @@
     std::vector<Elf_Sym> syms_;  // Buffered/cached content of the whole section.
   };
 
-  class AbiflagsSection FINAL : public Section {
+  class AbiflagsSection final : public Section {
    public:
     // Section with Mips abiflag info.
     static constexpr uint8_t MIPS_AFL_REG_NONE =         0;  // no registers
@@ -480,7 +480,7 @@
     } abiflags_;
   };
 
-  class BuildIdSection FINAL : public Section {
+  class BuildIdSection final : public Section {
    public:
     BuildIdSection(ElfBuilder<ElfTypes>* owner,
                    const std::string& name,
diff --git a/compiler/linker/error_delaying_output_stream.h b/compiler/linker/error_delaying_output_stream.h
index 659f1dc..cadd71c 100644
--- a/compiler/linker/error_delaying_output_stream.h
+++ b/compiler/linker/error_delaying_output_stream.h
@@ -27,7 +27,7 @@
 namespace linker {
 
 // OutputStream wrapper that delays reporting an error until Flush().
-class ErrorDelayingOutputStream FINAL : public OutputStream {
+class ErrorDelayingOutputStream final : public OutputStream {
  public:
   explicit ErrorDelayingOutputStream(OutputStream* output)
       : OutputStream(output->GetLocation()),
@@ -37,7 +37,7 @@
 
   // This function always succeeds to simplify code.
   // Use Good() to check the actual status of the output stream.
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+  bool WriteFully(const void* buffer, size_t byte_count) override {
     if (output_good_) {
       if (!output_->WriteFully(buffer, byte_count)) {
         PLOG(ERROR) << "Failed to write " << byte_count
@@ -51,7 +51,7 @@
 
   // This function always succeeds to simplify code.
   // Use Good() to check the actual status of the output stream.
-  off_t Seek(off_t offset, Whence whence) OVERRIDE {
+  off_t Seek(off_t offset, Whence whence) override {
     // We keep shadow copy of the offset so that we return
     // the expected value even if the output stream failed.
     off_t new_offset;
@@ -81,7 +81,7 @@
 
   // Flush the output and return whether all operations have succeeded.
   // Do nothing if we already have a pending error.
-  bool Flush() OVERRIDE {
+  bool Flush() override {
     if (output_good_) {
       output_good_ = output_->Flush();
     }
diff --git a/compiler/linker/file_output_stream.h b/compiler/linker/file_output_stream.h
index deb051f..1417132 100644
--- a/compiler/linker/file_output_stream.h
+++ b/compiler/linker/file_output_stream.h
@@ -24,17 +24,17 @@
 namespace art {
 namespace linker {
 
-class FileOutputStream FINAL : public OutputStream {
+class FileOutputStream final : public OutputStream {
  public:
   explicit FileOutputStream(File* file);
 
-  ~FileOutputStream() OVERRIDE {}
+  ~FileOutputStream() override {}
 
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE;
+  bool WriteFully(const void* buffer, size_t byte_count) override;
 
-  off_t Seek(off_t offset, Whence whence) OVERRIDE;
+  off_t Seek(off_t offset, Whence whence) override;
 
-  bool Flush() OVERRIDE;
+  bool Flush() override;
 
  private:
   File* const file_;
diff --git a/compiler/linker/output_stream_test.cc b/compiler/linker/output_stream_test.cc
index f93ea7a..bcb129c 100644
--- a/compiler/linker/output_stream_test.cc
+++ b/compiler/linker/output_stream_test.cc
@@ -106,20 +106,20 @@
     CheckingOutputStream()
         : OutputStream("dummy"),
           flush_called(false) { }
-    ~CheckingOutputStream() OVERRIDE {}
+    ~CheckingOutputStream() override {}
 
     bool WriteFully(const void* buffer ATTRIBUTE_UNUSED,
-                    size_t byte_count ATTRIBUTE_UNUSED) OVERRIDE {
+                    size_t byte_count ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
     }
 
-    off_t Seek(off_t offset ATTRIBUTE_UNUSED, Whence whence ATTRIBUTE_UNUSED) OVERRIDE {
+    off_t Seek(off_t offset ATTRIBUTE_UNUSED, Whence whence ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
     }
 
-    bool Flush() OVERRIDE {
+    bool Flush() override {
       flush_called = true;
       return true;
     }
diff --git a/compiler/linker/vector_output_stream.h b/compiler/linker/vector_output_stream.h
index 92caf59..0d34da6 100644
--- a/compiler/linker/vector_output_stream.h
+++ b/compiler/linker/vector_output_stream.h
@@ -26,13 +26,13 @@
 namespace art {
 namespace linker {
 
-class VectorOutputStream FINAL : public OutputStream {
+class VectorOutputStream final : public OutputStream {
  public:
   VectorOutputStream(const std::string& location, std::vector<uint8_t>* vector);
 
-  ~VectorOutputStream() OVERRIDE {}
+  ~VectorOutputStream() override {}
 
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+  bool WriteFully(const void* buffer, size_t byte_count) override {
     if (static_cast<size_t>(offset_) == vector_->size()) {
       const uint8_t* start = reinterpret_cast<const uint8_t*>(buffer);
       vector_->insert(vector_->end(), &start[0], &start[byte_count]);
@@ -46,9 +46,9 @@
     return true;
   }
 
-  off_t Seek(off_t offset, Whence whence) OVERRIDE;
+  off_t Seek(off_t offset, Whence whence) override;
 
-  bool Flush() OVERRIDE {
+  bool Flush() override {
     return true;
   }
 
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index dfefa52..1c3660c 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -388,10 +388,10 @@
     return induction_variable_->GetBlock();
   }
 
-  MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; }
+  MonotonicValueRange* AsMonotonicValueRange() override { return this; }
 
   // If it's certain that this value range fits in other_range.
-  bool FitsIn(ValueRange* other_range) const OVERRIDE {
+  bool FitsIn(ValueRange* other_range) const override {
     if (other_range == nullptr) {
       return true;
     }
@@ -402,7 +402,7 @@
   // Try to narrow this MonotonicValueRange given another range.
   // Ideally it will return a normal ValueRange. But due to
   // possible overflow/underflow, that may not be possible.
-  ValueRange* Narrow(ValueRange* range) OVERRIDE {
+  ValueRange* Narrow(ValueRange* range) override {
     if (range == nullptr) {
       return this;
     }
@@ -530,7 +530,7 @@
         induction_range_(induction_analysis),
         next_(nullptr) {}
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     DCHECK(!IsAddedBlock(block));
     first_index_bounds_check_map_.clear();
     // Visit phis and instructions using a safe iterator. The iteration protects
@@ -820,7 +820,7 @@
     }
   }
 
-  void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+  void VisitBoundsCheck(HBoundsCheck* bounds_check) override {
     HBasicBlock* block = bounds_check->GetBlock();
     HInstruction* index = bounds_check->InputAt(0);
     HInstruction* array_length = bounds_check->InputAt(1);
@@ -945,7 +945,7 @@
     return true;
   }
 
-  void VisitPhi(HPhi* phi) OVERRIDE {
+  void VisitPhi(HPhi* phi) override {
     if (phi->IsLoopHeaderPhi()
         && (phi->GetType() == DataType::Type::kInt32)
         && HasSameInputAtBackEdges(phi)) {
@@ -992,14 +992,14 @@
     }
   }
 
-  void VisitIf(HIf* instruction) OVERRIDE {
+  void VisitIf(HIf* instruction) override {
     if (instruction->InputAt(0)->IsCondition()) {
       HCondition* cond = instruction->InputAt(0)->AsCondition();
       HandleIf(instruction, cond->GetLeft(), cond->GetRight(), cond->GetCondition());
     }
   }
 
-  void VisitAdd(HAdd* add) OVERRIDE {
+  void VisitAdd(HAdd* add) override {
     HInstruction* right = add->GetRight();
     if (right->IsIntConstant()) {
       ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock());
@@ -1013,7 +1013,7 @@
     }
   }
 
-  void VisitSub(HSub* sub) OVERRIDE {
+  void VisitSub(HSub* sub) override {
     HInstruction* left = sub->GetLeft();
     HInstruction* right = sub->GetRight();
     if (right->IsIntConstant()) {
@@ -1115,19 +1115,19 @@
     }
   }
 
-  void VisitDiv(HDiv* div) OVERRIDE {
+  void VisitDiv(HDiv* div) override {
     FindAndHandlePartialArrayLength(div);
   }
 
-  void VisitShr(HShr* shr) OVERRIDE {
+  void VisitShr(HShr* shr) override {
     FindAndHandlePartialArrayLength(shr);
   }
 
-  void VisitUShr(HUShr* ushr) OVERRIDE {
+  void VisitUShr(HUShr* ushr) override {
     FindAndHandlePartialArrayLength(ushr);
   }
 
-  void VisitAnd(HAnd* instruction) OVERRIDE {
+  void VisitAnd(HAnd* instruction) override {
     if (instruction->GetRight()->IsIntConstant()) {
       int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue();
       if (constant > 0) {
@@ -1142,7 +1142,7 @@
     }
   }
 
-  void VisitRem(HRem* instruction) OVERRIDE {
+  void VisitRem(HRem* instruction) override {
     HInstruction* left = instruction->GetLeft();
     HInstruction* right = instruction->GetRight();
 
@@ -1202,7 +1202,7 @@
     }
   }
 
-  void VisitNewArray(HNewArray* new_array) OVERRIDE {
+  void VisitNewArray(HNewArray* new_array) override {
     HInstruction* len = new_array->GetLength();
     if (!len->IsIntConstant()) {
       HInstruction *left;
@@ -1240,7 +1240,7 @@
     * has occurred (see AddCompareWithDeoptimization()), since in those cases it would be
     * unsafe to hoist array references across their deoptimization instruction inside a loop.
     */
-  void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+  void VisitArrayGet(HArrayGet* array_get) override {
     if (!has_dom_based_dynamic_bce_ && array_get->IsInLoop()) {
       HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
       if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 92ab798..ef08877 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -34,7 +34,7 @@
         side_effects_(side_effects),
         induction_analysis_(induction_analysis) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kBoundsCheckEliminationPassName = "BCE";
 
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index bdc395b..c6232ef 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -44,9 +44,9 @@
     GetGraph()->SetNumberOfCHAGuards(0);
   }
 
-  void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) OVERRIDE;
+  void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) override;
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+  void VisitBasicBlock(HBasicBlock* block) override;
 
  private:
   void RemoveGuard(HShouldDeoptimizeFlag* flag);
diff --git a/compiler/optimizing/cha_guard_optimization.h b/compiler/optimizing/cha_guard_optimization.h
index d2c5a34..440d51a 100644
--- a/compiler/optimizing/cha_guard_optimization.h
+++ b/compiler/optimizing/cha_guard_optimization.h
@@ -30,7 +30,7 @@
                                 const char* name = kCHAGuardOptimizationPassName)
       : HOptimization(graph, name) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kCHAGuardOptimizationPassName = "cha_guard_optimization";
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a460f77..d56f7aa 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -247,7 +247,7 @@
  public:
   explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
 
@@ -273,9 +273,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
@@ -285,16 +285,16 @@
  public:
   explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     arm64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
@@ -308,7 +308,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -349,7 +349,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM64"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathARM64"; }
 
  private:
   // The class this slow path will load.
@@ -363,7 +363,7 @@
   explicit LoadStringSlowPathARM64(HLoadString* instruction)
       : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -384,7 +384,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
@@ -394,7 +394,7 @@
  public:
   explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -408,9 +408,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
@@ -421,7 +421,7 @@
   SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeARM64(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -445,7 +445,7 @@
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathARM64"; }
 
  private:
   // If not null, the block to branch to after the suspend check.
@@ -462,7 +462,7 @@
   TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeARM64(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
 
     DCHECK(instruction_->IsCheckCast()
@@ -503,8 +503,8 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathARM64"; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -517,7 +517,7 @@
   explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
       : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -529,7 +529,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
@@ -539,7 +539,7 @@
  public:
   explicit ArraySetSlowPathARM64(HInstruction* instruction) : SlowPathCodeARM64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -570,7 +570,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathARM64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
@@ -628,7 +628,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
@@ -754,7 +754,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
+  const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
 
  private:
   Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -794,7 +794,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
     DCHECK(locations->CanCall());
@@ -831,7 +831,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM64"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARM64"; }
 
  private:
   const Location out_;
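
Aside: the slow-path classes above are where 'override' earns its keep. Every
EmitNativeCode, IsFatal, and GetDescription must exactly match a virtual in the
SlowPathCode base, and the specifier turns any silent signature mismatch into a
compile error. A minimal sketch of that failure mode, using stripped-down
stand-ins rather than ART's real SlowPathCode hierarchy:

    #include <iostream>

    class SlowPathCode {
     public:
      virtual ~SlowPathCode() = default;
      virtual bool IsFatal() const { return false; }
    };

    class MySlowPath : public SlowPathCode {
     public:
      // 'override' makes the compiler verify this really overrides a base
      // virtual. Drop the 'const' and compilation fails instead of silently
      // declaring a new, unrelated function:
      //   bool IsFatal() override;  // error: does not override
      bool IsFatal() const override { return true; }
    };

    int main() {
      MySlowPath path;
      const SlowPathCode* base = &path;
      std::cout << std::boolalpha << base->IsFatal() << '\n';  // prints: true
      return 0;
    }

Without 'override', the mismatched variant would compile cleanly, hide the base
virtual, and IsFatal() through a base pointer would keep returning false.
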
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 4f6a44f..2e7a20b 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -125,8 +125,8 @@
   vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
   vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }
 
-  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
-  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
 
  private:
   vixl::aarch64::Label entry_label_;
@@ -216,11 +216,11 @@
   InvokeDexCallingConventionVisitorARM64() {}
   virtual ~InvokeDexCallingConventionVisitorARM64() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type return_type) const OVERRIDE {
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type return_type) const override {
     return calling_convention.GetReturnLocation(return_type);
   }
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -232,22 +232,22 @@
  public:
   FieldAccessCallingConventionARM64() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return helpers::LocationFrom(vixl::aarch64::x1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return helpers::LocationFrom(vixl::aarch64::x0);
   }
-  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return helpers::LocationFrom(vixl::aarch64::x0);
   }
   Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
-                               bool is_instance) const OVERRIDE {
+                               bool is_instance) const override {
     return is_instance
         ? helpers::LocationFrom(vixl::aarch64::x2)
         : helpers::LocationFrom(vixl::aarch64::x1);
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return helpers::LocationFrom(vixl::aarch64::d0);
   }
 
@@ -260,7 +260,7 @@
   InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -268,7 +268,7 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -360,7 +360,7 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
@@ -368,7 +368,7 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -393,11 +393,11 @@
       : ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {}
 
  protected:
-  void PrepareForEmitNativeCode() OVERRIDE;
-  void FinishEmitNativeCode() OVERRIDE;
-  Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE;
-  void FreeScratchLocation(Location loc) OVERRIDE;
-  void EmitMove(size_t index) OVERRIDE;
+  void PrepareForEmitNativeCode() override;
+  void FinishEmitNativeCode() override;
+  Location AllocateScratchLocationFor(Location::Kind kind) override;
+  void FreeScratchLocation(Location loc) override;
+  void EmitMove(size_t index) override;
 
  private:
   Arm64Assembler* GetAssembler() const;
@@ -418,39 +418,39 @@
                      OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorARM64() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
 
   vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
   vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;
 
-  void Bind(HBasicBlock* block) OVERRIDE;
+  void Bind(HBasicBlock* block) override;
 
   vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
     block = FirstNonEmptyBlock(block);
     return &(block_labels_[block->GetBlockId()]);
   }
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return kArm64WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 2 * kArm64WordSize   // 16 bytes == 2 arm64 words for each spill
        : 1 * kArm64WordSize;  //  8 bytes == 1 arm64 word for each spill
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
     DCHECK(block_entry_label->IsBound());
     return block_entry_label->GetLocation();
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
-  Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
-  const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+  Arm64Assembler* GetAssembler() override { return &assembler_; }
+  const Arm64Assembler& GetAssembler() const override { return assembler_; }
   vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
 
   // Emit a write barrier.
@@ -462,12 +462,12 @@
 
   // Register allocation.
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
   // The number of registers that can be allocated. The register allocator may
   // decide to reserve and not use a few of them.
@@ -479,35 +479,35 @@
   static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
   static constexpr int kNumberOfAllocatableRegisterPairs = 0;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kArm64;
   }
 
   const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const;
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_.resize(GetGraph()->GetBlocks().size());
   }
 
   // We want to use the STP and LDP instructions to spill and restore registers for slow paths.
   // These instructions can only encode offsets that are multiples of the register size accessed.
-  uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; }
+  uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; }
 
   JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
     jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
     return jump_tables_.back().get();
   }
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
   // Code generation helpers.
   void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
   void Load(DataType::Type type,
             vixl::aarch64::CPURegister dst,
@@ -529,7 +529,7 @@
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -537,35 +537,35 @@
                                            HInstruction* instruction,
                                            SlowPathCode* slow_path);
 
-  ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+  ParallelMoveResolverARM64* GetMoveResolver() override { return &move_resolver_; }
 
-  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return false;
   }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                              DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+                              DataType::Type type ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL);
   }
 
@@ -652,13 +652,13 @@
   void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference);
   void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
 
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+  bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
   void EmitThunkCode(const linker::LinkerPatch& patch,
                      /*out*/ ArenaVector<uint8_t>* code,
-                     /*out*/ std::string* debug_name) OVERRIDE;
+                     /*out*/ std::string* debug_name) override;
 
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Generate a GC root reference load:
   //
@@ -765,10 +765,10 @@
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
-  void GenerateNop() OVERRIDE;
+  void GenerateNop() override;
 
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
  private:
   // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
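
Aside: several overrides above, such as InvokeRuntime and
GenerateStaticOrDirectCall, repeat the 'slow_path = nullptr' default from the
base declaration. That repetition matters because default arguments on virtual
functions are bound to the static type at the call site, not resolved
dynamically. A hedged illustration with invented CodeGen classes, not ART's
actual ones:

    #include <iostream>

    class CodeGen {
     public:
      virtual ~CodeGen() = default;
      virtual void InvokeRuntime(int entrypoint, const char* note = "base") {
        std::cout << "base " << entrypoint << " via " << note << '\n';
      }
    };

    class CodeGenArm64 final : public CodeGen {
     public:
      void InvokeRuntime(int entrypoint, const char* note = "arm64") override {
        std::cout << "arm64 " << entrypoint << " via " << note << '\n';
      }
    };

    int main() {
      CodeGenArm64 cg;
      CodeGen* base = &cg;
      // The body dispatches virtually, but the default argument comes from
      // the static type of 'base': this prints "arm64 1 via base".
      base->InvokeRuntime(1);
      cg.InvokeRuntime(2);  // prints "arm64 2 via arm64"
      return 0;
    }

Keeping the derived default identical to the base default, as the declarations
above do, sidesteps this mismatch entirely.
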
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 8c5eafd..3580975 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -383,7 +383,7 @@
  public:
   explicit NullCheckSlowPathARMVIXL(HNullCheck* instruction) : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -397,9 +397,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARMVIXL);
@@ -410,16 +410,16 @@
   explicit DivZeroCheckSlowPathARMVIXL(HDivZeroCheck* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARMVIXL);
@@ -430,7 +430,7 @@
   SuspendCheckSlowPathARMVIXL(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeARMVIXL(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     arm_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
@@ -451,7 +451,7 @@
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathARMVIXL"; }
 
  private:
   // If not null, the block to branch to after the suspend check.
@@ -468,7 +468,7 @@
   explicit BoundsCheckSlowPathARMVIXL(HBoundsCheck* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
 
@@ -495,9 +495,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARMVIXL);
@@ -511,7 +511,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -549,7 +549,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathARMVIXL"; }
 
  private:
   // The class this slow path will load.
@@ -563,7 +563,7 @@
   explicit LoadStringSlowPathARMVIXL(HLoadString* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     DCHECK(instruction_->IsLoadString());
     DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
     LocationSummary* locations = instruction_->GetLocations();
@@ -585,7 +585,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARMVIXL);
@@ -596,7 +596,7 @@
   TypeCheckSlowPathARMVIXL(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeARMVIXL(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -640,9 +640,9 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathARMVIXL"; }
 
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -655,7 +655,7 @@
   explicit DeoptimizationSlowPathARMVIXL(HDeoptimize* instruction)
       : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -668,7 +668,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARMVIXL);
@@ -678,7 +678,7 @@
  public:
   explicit ArraySetSlowPathARMVIXL(HInstruction* instruction) : SlowPathCodeARMVIXL(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -709,7 +709,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathARMVIXL"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARMVIXL);
@@ -744,7 +744,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     vixl32::Register reg_out = RegisterFrom(out_);
@@ -868,7 +868,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierForHeapReferenceSlowPathARMVIXL";
   }
 
@@ -910,7 +910,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     vixl32::Register reg_out = RegisterFrom(out_);
     DCHECK(locations->CanCall());
@@ -936,7 +936,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARMVIXL"; }
 
  private:
   const Location out_;
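
Aside: 'override' is only half of this commit; the other specifier, 'final',
closes a class (or a single virtual) to further derivation and gives the
compiler license to devirtualize calls whose receiver type is provably known.
A small sketch under assumed names, not taken from ART:

    class Visitor {
     public:
      virtual ~Visitor() = default;
      virtual int Visit(int x) { return x; }
    };

    // 'final' closes the hierarchy: deriving from VisitorArm is rejected,
    // and a call through a VisitorArm whose dynamic type is known can be
    // bound statically instead of going through the vtable.
    class VisitorArm final : public Visitor {
     public:
      int Visit(int x) override { return x * 2; }
    };

    // class Broken : public VisitorArm {};  // error: base is 'final'

    int main() {
      VisitorArm v;
      return v.Visit(21) == 42 ? 0 : 1;  // direct, devirtualizable call
    }
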
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index cb131a7..33502d4 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -178,9 +178,9 @@
   InvokeDexCallingConventionVisitorARMVIXL() {}
   virtual ~InvokeDexCallingConventionVisitorARMVIXL() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConventionARMVIXL calling_convention;
@@ -193,25 +193,25 @@
  public:
   FieldAccessCallingConventionARMVIXL() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return helpers::LocationFrom(vixl::aarch32::r1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return helpers::LocationFrom(vixl::aarch32::r0);
   }
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
         ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
         : helpers::LocationFrom(vixl::aarch32::r0);
   }
-  Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
     return DataType::Is64BitType(type)
         ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
         : (is_instance
             ? helpers::LocationFrom(vixl::aarch32::r2)
             : helpers::LocationFrom(vixl::aarch32::r1));
   }
-  Location GetFpuLocation(DataType::Type type) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
         ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
         : helpers::LocationFrom(vixl::aarch32::s0);
@@ -229,8 +229,8 @@
   vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
   vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }
 
-  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
-  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
+  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
 
  private:
   vixl::aarch32::Label entry_label_;
@@ -244,10 +244,10 @@
   ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   ArmVIXLAssembler* GetAssembler() const;
 
@@ -266,7 +266,7 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -274,7 +274,7 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -304,7 +304,7 @@
   InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
@@ -312,7 +312,7 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -432,48 +432,48 @@
                        OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorARMVIXL() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
-  void Bind(HBasicBlock* block) OVERRIDE;
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
+  void Bind(HBasicBlock* block) override;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return static_cast<size_t>(kArmPointerSize);
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }
+  size_t GetFloatingPointSpillSlotSize() const override { return vixl::aarch32::kRegSizeInBytes; }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
 
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
 
-  ArmVIXLAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+  ArmVIXLAssembler* GetAssembler() override { return &assembler_; }
 
-  const ArmVIXLAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+  const ArmVIXLAssembler& GetAssembler() const override { return assembler_; }
 
   ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     vixl::aarch32::Label* block_entry_label = GetLabelOf(block);
     DCHECK(block_entry_label->IsBound());
     return block_entry_label->GetLocation();
   }
 
   void FixJumpTables();
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
-  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }
+  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
+  InstructionSet GetInstructionSet() const override { return InstructionSet::kThumb2; }
 
   const ArmInstructionSetFeatures& GetInstructionSetFeatures() const;
 
@@ -495,7 +495,7 @@
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -519,42 +519,42 @@
 
   vixl32::Label* GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label);
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_.resize(GetGraph()->GetBlocks().size());
   }
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
-  bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type) const override {
     return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64;
   }
 
-  void ComputeSpillMask() OVERRIDE;
+  void ComputeSpillMask() override;
 
   vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
-  void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+  void MoveFromReturnRegister(Location trg, DataType::Type type) override;
 
   // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
   // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
@@ -604,13 +604,13 @@
   void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference);
   void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
 
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+  bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
   void EmitThunkCode(const linker::LinkerPatch& patch,
                      /*out*/ ArenaVector<uint8_t>* code,
-                     /*out*/ std::string* debug_name) OVERRIDE;
+                     /*out*/ std::string* debug_name) override;
 
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Generate a GC root reference load:
   //
@@ -722,10 +722,10 @@
   // artReadBarrierForRootSlow.
   void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
 
-  void GenerateNop() OVERRIDE;
+  void GenerateNop() override;
 
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
     jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
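
Aside: the DECLARE_VISIT_INSTRUCTION blocks above stamp 'override' into every
macro expansion, so one wrong parameter type in the macro breaks the build for
all instructions at once instead of silently hiding dozens of base virtuals.
A self-contained sketch of the pattern, with a toy two-instruction hierarchy
standing in for ART's FOR_EACH_CONCRETE_INSTRUCTION lists:

    #include <iostream>

    struct HAdd {};
    struct HMul {};

    class HGraphVisitor {
     public:
      virtual ~HGraphVisitor() = default;
      virtual void VisitAdd(HAdd* instr) { (void)instr; }
      virtual void VisitMul(HMul* instr) { (void)instr; }
    };

    // Each expansion declares an override; any signature drift in the macro
    // fails to compile for every instruction in the list.
    #define DECLARE_VISIT_INSTRUCTION(name) \
      void Visit##name(H##name* instr) override;

    class ArmVisitor final : public HGraphVisitor {
     public:
      DECLARE_VISIT_INSTRUCTION(Add)
      DECLARE_VISIT_INSTRUCTION(Mul)
    };

    #undef DECLARE_VISIT_INSTRUCTION

    void ArmVisitor::VisitAdd(HAdd* /* instr */) { std::cout << "add\n"; }
    void ArmVisitor::VisitMul(HMul* /* instr */) { std::cout << "mul\n"; }

    int main() {
      ArmVisitor visitor;
      HGraphVisitor* base = &visitor;
      HAdd add;
      base->VisitAdd(&add);  // dynamic dispatch reaches ArmVisitor::VisitAdd
      return 0;
    }
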
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index aed334b..d74a7a7 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -176,7 +176,7 @@
  public:
   explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
@@ -201,9 +201,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
@@ -213,16 +213,16 @@
  public:
   explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
     mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
@@ -236,7 +236,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -280,7 +280,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathMIPS"; }
 
  private:
   // The class this slow path will load.
@@ -294,7 +294,7 @@
   explicit LoadStringSlowPathMIPS(HLoadString* instruction)
       : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     DCHECK(instruction_->IsLoadString());
     DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
     LocationSummary* locations = instruction_->GetLocations();
@@ -318,7 +318,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
@@ -328,7 +328,7 @@
  public:
   explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -342,9 +342,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
@@ -355,7 +355,7 @@
   SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeMIPS(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
@@ -375,7 +375,7 @@
     return &return_label_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS"; }
 
   HBasicBlock* GetSuccessor() const {
     return successor_;
@@ -396,7 +396,7 @@
   explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
@@ -435,9 +435,9 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathMIPS"; }
 
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -450,7 +450,7 @@
   explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
     : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -462,7 +462,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
@@ -472,7 +472,7 @@
  public:
   explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -503,7 +503,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathMIPS"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS);
@@ -533,9 +533,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -627,11 +627,11 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS";
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -798,7 +798,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     Register reg_out = out_.AsRegister<Register>();
@@ -922,7 +922,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
 
  private:
   Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -965,7 +965,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register reg_out = out_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -995,7 +995,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS"; }
 
  private:
   const Location out_;
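
Aside: a recurring detail in these headers is a parameter tagged
ATTRIBUTE_UNUSED on an override; the signature must still match the base
virtual exactly even when the implementation ignores an argument. A minimal
stand-in using a plain unnamed parameter in place of ART's ATTRIBUTE_UNUSED
macro:

    class Convention {
     public:
      virtual ~Convention() = default;
      virtual int GetReturnLocation(int type) const { return type; }
    };

    class FixedConvention final : public Convention {
     public:
      // Always register 0; the parameter is intentionally unused, and
      // leaving it unnamed silences unused-parameter warnings without
      // changing the override's type.
      int GetReturnLocation(int /* type */) const override { return 0; }
    };

    int main() {
      FixedConvention c;
      return c.GetReturnLocation(7);  // 0
    }
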
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 4830ac9..bf95893 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -81,9 +81,9 @@
   InvokeDexCallingConventionVisitorMIPS() {}
   virtual ~InvokeDexCallingConventionVisitorMIPS() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -110,23 +110,23 @@
  public:
   FieldAccessCallingConventionMIPS() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return Location::RegisterLocation(A1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return Location::RegisterLocation(A0);
   }
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
         ? Location::RegisterPairLocation(V0, V1)
         : Location::RegisterLocation(V0);
   }
-  Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
     return DataType::Is64BitType(type)
         ? Location::RegisterPairLocation(A2, A3)
         : (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::FpuRegisterLocation(F0);
   }
 
@@ -139,10 +139,10 @@
   ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   void Exchange(int index1, int index2, bool double_slot);
   void ExchangeQuadSlots(int index1, int index2);
@@ -176,14 +176,14 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -210,14 +210,14 @@
   InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -374,35 +374,35 @@
                     OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorMIPS() {}
 
-  void ComputeSpillMask() OVERRIDE;
-  bool HasAllocatedCalleeSaveRegisters() const OVERRIDE;
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
+  void ComputeSpillMask() override;
+  bool HasAllocatedCalleeSaveRegisters() const override;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
 
-  void Bind(HBasicBlock* block) OVERRIDE;
+  void Bind(HBasicBlock* block) override;
 
   void MoveConstant(Location location, HConstant* c);
 
-  size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }
+  size_t GetWordSize() const override { return kMipsWordSize; }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 2 * kMipsDoublewordSize   // 16 bytes for each spill.
         : 1 * kMipsDoublewordSize;  //  8 bytes for each spill.
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     return assembler_.GetLabelLocation(GetLabelOf(block));
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
-  MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
-  const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+  MipsAssembler* GetAssembler() override { return &assembler_; }
+  const MipsAssembler& GetAssembler() const override { return assembler_; }
 
   // Emit linker patches.
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
@@ -493,20 +493,20 @@
 
   // Register allocation.
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
   void ClobberRA() {
     clobbered_ra_ = true;
   }
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }
+  InstructionSet GetInstructionSet() const override { return InstructionSet::kMips; }
 
   const MipsInstructionSetFeatures& GetInstructionSetFeatures() const;
 
@@ -514,25 +514,25 @@
     return CommonGetLabelOf<MipsLabel>(block_labels_, block);
   }
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_ = CommonInitializeLabels<MipsLabel>();
   }
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
   // Code generation helpers.
 
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
 
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void MoveConstant(Location destination, int32_t value) override;
 
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
   // Generate code to invoke a runtime entry point.
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -543,41 +543,41 @@
 
   void GenerateInvokeRuntime(int32_t entry_point_offset, bool direct);
 
-  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
 
-  bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type) const override {
     return type == DataType::Type::kInt64;
   }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                              DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+                              DataType::Type type ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
   }
 
-  void GenerateNop() OVERRIDE;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateNop() override;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
   // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
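
Aside, on why the substitution above is worth doing at all: with the plain C++11 specifier, a signature mismatch is a hard compile error instead of a silently added overload. A minimal sketch, using hypothetical types rather than the ART classes:

#include <cstddef>

// Hypothetical base/derived pair, not ART code.
class CodeGeneratorBase {
 public:
  virtual ~CodeGeneratorBase() {}
  virtual std::size_t GetWordSize() const { return 4u; }
};

class CodeGeneratorSketch final : public CodeGeneratorBase {
 public:
  // Matches the base declaration exactly, so 'override' compiles.
  std::size_t GetWordSize() const override { return 8u; }

  // Dropping 'const' would mean this no longer overrides anything; with the
  // specifier that is rejected at compile time:
  //   std::size_t GetWordSize() override;  // error: does not override
};
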
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 72318e9..7c89808 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -128,7 +128,7 @@
  public:
   explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -153,9 +153,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
@@ -166,16 +166,16 @@
   explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction)
       : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
     mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
@@ -189,7 +189,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -233,7 +233,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathMIPS64"; }
 
  private:
   // The class this slow path will load.
@@ -247,7 +247,7 @@
   explicit LoadStringSlowPathMIPS64(HLoadString* instruction)
       : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     DCHECK(instruction_->IsLoadString());
     DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
     LocationSummary* locations = instruction_->GetLocations();
@@ -274,7 +274,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
@@ -284,7 +284,7 @@
  public:
   explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -298,9 +298,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
@@ -311,7 +311,7 @@
   SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCodeMIPS64(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -331,7 +331,7 @@
     return &return_label_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS64"; }
 
   HBasicBlock* GetSuccessor() const {
     return successor_;
@@ -352,7 +352,7 @@
   explicit TypeCheckSlowPathMIPS64(HInstruction* instruction, bool is_fatal)
       : SlowPathCodeMIPS64(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
 
     uint32_t dex_pc = instruction_->GetDexPc();
@@ -392,9 +392,9 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathMIPS64"; }
 
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -407,7 +407,7 @@
   explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
     : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -419,7 +419,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
@@ -429,7 +429,7 @@
  public:
   explicit ArraySetSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -460,7 +460,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathMIPS64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS64);
@@ -490,9 +490,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
     DCHECK(locations->CanCall());
@@ -583,11 +583,11 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS64";
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
     DCHECK(locations->CanCall());
@@ -744,7 +744,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
@@ -864,7 +864,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierForHeapReferenceSlowPathMIPS64";
   }
 
@@ -909,7 +909,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DataType::Type type = DataType::Type::kReference;
     GpuRegister reg_out = out_.AsRegister<GpuRegister>();
@@ -938,7 +938,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS64"; }
 
  private:
   const Location out_;
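
The mips64 translation unit above is almost entirely slow-path classes following a single pattern; a self-contained sketch of that shape (simplified names, not the actual ART hierarchy) shows where the specifiers land:

#include <iostream>

class SlowPathSketch {
 public:
  virtual ~SlowPathSketch() {}
  virtual void EmitNativeCode() = 0;
  virtual const char* GetDescription() const = 0;
  virtual bool IsFatal() const { return false; }
};

class NullCheckSlowPathSketch final : public SlowPathSketch {
 public:
  void EmitNativeCode() override { std::cout << "emit throw-NPE stub\n"; }
  const char* GetDescription() const override { return "NullCheckSlowPathSketch"; }
  bool IsFatal() const override { return true; }  // A throwing path never returns.
};
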
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index fc0908b..ddc154d 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -79,9 +79,9 @@
   InvokeDexCallingConventionVisitorMIPS64() {}
   virtual ~InvokeDexCallingConventionVisitorMIPS64() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -108,22 +108,22 @@
  public:
   FieldAccessCallingConventionMIPS64() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return Location::RegisterLocation(A1);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return Location::RegisterLocation(A0);
   }
-  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::RegisterLocation(V0);
   }
   Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
-                               bool is_instance) const OVERRIDE {
+                               bool is_instance) const override {
     return is_instance
         ? Location::RegisterLocation(A2)
         : Location::RegisterLocation(A1);
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::FpuRegisterLocation(F0);
   }
 
@@ -136,10 +136,10 @@
   ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   void Exchange(int index1, int index2, bool double_slot);
   void ExchangeQuadSlots(int index1, int index2);
@@ -173,14 +173,14 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -207,14 +207,14 @@
   InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -356,31 +356,31 @@
                       OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorMIPS64() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
 
-  void Bind(HBasicBlock* block) OVERRIDE;
+  void Bind(HBasicBlock* block) override;
 
-  size_t GetWordSize() const OVERRIDE { return kMips64DoublewordSize; }
+  size_t GetWordSize() const override { return kMips64DoublewordSize; }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 2 * kMips64DoublewordSize   // 16 bytes for each spill.
         : 1 * kMips64DoublewordSize;  //  8 bytes for each spill.
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     return assembler_.GetLabelLocation(GetLabelOf(block));
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
-  Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
-  const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
+  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+  Mips64Assembler* GetAssembler() override { return &assembler_; }
+  const Mips64Assembler& GetAssembler() const override { return assembler_; }
 
   // Emit linker patches.
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
@@ -471,17 +471,17 @@
 
   // Register allocation.
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; }
+  InstructionSet GetInstructionSet() const override { return InstructionSet::kMips64; }
 
   const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const;
 
@@ -489,22 +489,22 @@
     return CommonGetLabelOf<Mips64Label>(block_labels_, block);
   }
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_ = CommonInitializeLabels<Mips64Label>();
   }
 
   // We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
   // at aligned locations.
-  uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return kMips64DoublewordSize; }
+  uint32_t GetPreferredSlotsAlignment() const override { return kMips64DoublewordSize; }
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
   // Code generation helpers.
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
 
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void MoveConstant(Location destination, int32_t value) override;
 
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
 
   void SwapLocations(Location loc1, Location loc2, DataType::Type type);
@@ -513,7 +513,7 @@
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -523,39 +523,39 @@
 
   void GenerateInvokeRuntime(int32_t entry_point_offset);
 
-  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
 
-  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; }
+  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override { return false; }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                              DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+                              DataType::Type type ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
   }
 
-  void GenerateNop() OVERRIDE;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateNop() override;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
   // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
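
The 'final' half of the change is symmetric: no further overriding (or deriving) is possible, which also lets the compiler devirtualize calls. Again an illustrative sketch, not the real Mips64Assembler:

class AssemblerBase {
 public:
  virtual ~AssemblerBase() {}
  virtual void FinalizeCode() {}
};

class AssemblerSketch final : public AssemblerBase {
 public:
  // No subclass of AssemblerSketch can exist, so calls through an
  // AssemblerSketch* may be dispatched directly.
  void FinalizeCode() override {}
};

// class Broken : public AssemblerSketch {};  // error: base is marked 'final'
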
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index df00ec7..6a27081 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -72,7 +72,7 @@
  public:
   explicit NullCheckSlowPathX86(HNullCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -86,9 +86,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
@@ -98,16 +98,16 @@
  public:
   explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86);
@@ -118,7 +118,7 @@
   DivRemMinusOneSlowPathX86(HInstruction* instruction, Register reg, bool is_div)
       : SlowPathCode(instruction), reg_(reg), is_div_(is_div) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     __ Bind(GetEntryLabel());
     if (is_div_) {
       __ negl(reg_);
@@ -128,7 +128,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86"; }
+  const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86"; }
 
  private:
   Register reg_;
@@ -140,7 +140,7 @@
  public:
   explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
@@ -187,9 +187,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
@@ -200,7 +200,7 @@
   SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCode(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
@@ -224,7 +224,7 @@
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathX86"; }
 
  private:
   HBasicBlock* const successor_;
@@ -237,7 +237,7 @@
  public:
   explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -256,7 +256,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
@@ -270,7 +270,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -308,7 +308,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathX86"; }
 
  private:
   // The class this slow path will load.
@@ -322,7 +322,7 @@
   TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal)
       : SlowPathCode(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -375,8 +375,8 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86"; }
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathX86"; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -389,7 +389,7 @@
   explicit DeoptimizationSlowPathX86(HDeoptimize* instruction)
     : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -402,7 +402,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
@@ -412,7 +412,7 @@
  public:
   explicit ArraySetSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -443,7 +443,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
@@ -471,9 +471,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -558,9 +558,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -724,7 +724,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     Register reg_out = out_.AsRegister<Register>();
@@ -843,7 +843,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathX86"; }
 
  private:
   Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
@@ -883,7 +883,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Register reg_out = out_.AsRegister<Register>();
     DCHECK(locations->CanCall());
@@ -909,7 +909,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86"; }
 
  private:
   const Location out_;
@@ -8100,7 +8100,7 @@
   HX86ComputeBaseMethodAddress* base_method_address_;
 
  private:
-  void Process(const MemoryRegion& region, int pos) OVERRIDE {
+  void Process(const MemoryRegion& region, int pos) override {
     // Patch the correct offset for the instruction.  The place to patch is the
     // last 4 bytes of the instruction.
     // The value to patch is the distance from the offset in the constant area
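
The comment closing the x86 hunk above describes a byte-patching fixup; stripped of the MemoryRegion plumbing, the operation is an in-place 32-bit write, roughly (assumed helper name, little-endian target as on x86):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Sketch of the fixup described above, not ART's MemoryRegion API: overwrite
// the last 4 bytes of an instruction ending at byte index 'pos' with a 32-bit
// displacement. x86 is little-endian, so copying the host value byte-for-byte
// is correct on the usual build hosts.
void PatchDisplacement(std::vector<uint8_t>* code, std::size_t pos, int32_t disp) {
  std::memcpy(code->data() + pos - 4u, &disp, sizeof(disp));
}
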
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index cb58e92..6154771 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -83,9 +83,9 @@
   InvokeDexCallingConventionVisitorX86() {}
   virtual ~InvokeDexCallingConventionVisitorX86() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -97,18 +97,18 @@
  public:
   FieldAccessCallingConventionX86() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return Location::RegisterLocation(ECX);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return Location::RegisterLocation(EAX);
   }
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type) const override {
     return DataType::Is64BitType(type)
         ? Location::RegisterPairLocation(EAX, EDX)
         : Location::RegisterLocation(EAX);
   }
-  Location GetSetValueLocation(DataType::Type type, bool is_instance) const OVERRIDE {
+  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
     return DataType::Is64BitType(type)
         ? (is_instance
             ? Location::RegisterPairLocation(EDX, EBX)
@@ -117,7 +117,7 @@
             ? Location::RegisterLocation(EDX)
             : Location::RegisterLocation(ECX));
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::FpuRegisterLocation(XMM0);
   }
 
@@ -130,10 +130,10 @@
   ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   X86Assembler* GetAssembler() const;
 
@@ -155,14 +155,14 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -186,14 +186,14 @@
   InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -320,23 +320,23 @@
                    OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorX86() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
-  void Bind(HBasicBlock* block) OVERRIDE;
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
+  void Bind(HBasicBlock* block) override;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
   // Generate code to invoke a runtime entry point.
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -346,46 +346,46 @@
 
   void GenerateInvokeRuntime(int32_t entry_point_offset);
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return kX86WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 4 * kX86WordSize   // 16 bytes == 4 words for each spill
         : 2 * kX86WordSize;  //  8 bytes == 2 words for each spill
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE {
+  HGraphVisitor* GetLocationBuilder() override {
     return &location_builder_;
   }
 
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+  HGraphVisitor* GetInstructionVisitor() override {
     return &instruction_visitor_;
   }
 
-  X86Assembler* GetAssembler() OVERRIDE {
+  X86Assembler* GetAssembler() override {
     return &assembler_;
   }
 
-  const X86Assembler& GetAssembler() const OVERRIDE {
+  const X86Assembler& GetAssembler() const override {
     return assembler_;
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     return GetLabelOf(block)->Position();
   }
 
-  void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const override;
 
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
 
-  ParallelMoveResolverX86* GetMoveResolver() OVERRIDE {
+  ParallelMoveResolverX86* GetMoveResolver() override {
     return &move_resolver_;
   }
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kX86;
   }
 
@@ -399,25 +399,25 @@
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   // Generate a call to a static or direct method.
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   // Generate a call to a virtual method.
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
                                      uint32_t intrinsic_data);
@@ -442,16 +442,16 @@
                               dex::TypeIndex type_index,
                               Handle<mirror::Class> handle);
 
-  void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+  void MoveFromReturnRegister(Location trg, DataType::Type type) override;
 
   // Emit linker patches.
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
 
   void PatchJitRootUse(uint8_t* code,
                        const uint8_t* roots_data,
                        const PatchInfo<Label>& info,
                        uint64_t index_in_table) const;
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Emit a write barrier.
   void MarkGCCard(Register temp,
@@ -466,15 +466,15 @@
     return CommonGetLabelOf<Label>(block_labels_, block);
   }
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_ = CommonInitializeLabels<Label>();
   }
 
-  bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type) const override {
     return type == DataType::Type::kInt64;
   }
 
-  bool ShouldSplitLongMoves() const OVERRIDE { return true; }
+  bool ShouldSplitLongMoves() const override { return true; }
 
   Label* GetFrameEntryLabel() { return &frame_entry_label_; }
 
@@ -513,7 +513,7 @@
 
   Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
 
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void Finalize(CodeAllocator* allocator) override;
 
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
@@ -609,9 +609,9 @@
     }
   }
 
-  void GenerateNop() OVERRIDE;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateNop() override;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.
   // The correct value will be inserted when processing Assembler fixups.
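
The kDummy32BitOffset comment just above is the other half of the same mechanism: emit a placeholder now, remember where it sits, and rewrite it once the constant area's final position is known. Sketched with assumed names:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative names only; this is not the ART AssemblerFixup machinery.
struct PendingFixup {
  std::size_t position;       // Where the 4-byte placeholder was emitted.
  int32_t offset_into_area;   // Offset of the value within the constant area.
};

void ApplyFixups(std::vector<uint8_t>* code,
                 const std::vector<PendingFixup>& fixups,
                 int32_t constant_area_start) {
  for (const PendingFixup& fixup : fixups) {
    int32_t final_offset = constant_area_start + fixup.offset_into_area;
    std::memcpy(code->data() + fixup.position, &final_offset, sizeof(final_offset));
  }
}
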
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ae2a000..489652b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -71,7 +71,7 @@
  public:
   explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     if (instruction_->CanThrowIntoCatchBlock()) {
@@ -85,9 +85,9 @@
     CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "NullCheckSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
@@ -97,16 +97,16 @@
  public:
   explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     x86_64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "DivZeroCheckSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64);
@@ -117,7 +117,7 @@
   DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, DataType::Type type, bool is_div)
       : SlowPathCode(at), cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     __ Bind(GetEntryLabel());
     if (type_ == DataType::Type::kInt32) {
       if (is_div_) {
@@ -137,7 +137,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86_64"; }
+  const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86_64"; }
 
  private:
   const CpuRegister cpu_reg_;
@@ -151,7 +151,7 @@
   SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
       : SlowPathCode(instruction), successor_(successor) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -175,7 +175,7 @@
     return successor_;
   }
 
-  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "SuspendCheckSlowPathX86_64"; }
 
  private:
   HBasicBlock* const successor_;
@@ -189,7 +189,7 @@
   explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction)
     : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -236,9 +236,9 @@
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
   }
 
-  bool IsFatal() const OVERRIDE { return true; }
+  bool IsFatal() const override { return true; }
 
-  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "BoundsCheckSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
@@ -252,7 +252,7 @@
     DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     Location out = locations->Out();
     const uint32_t dex_pc = instruction_->GetDexPc();
@@ -291,7 +291,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86_64"; }
+  const char* GetDescription() const override { return "LoadClassSlowPathX86_64"; }
 
  private:
   // The class this slow path will load.
@@ -304,7 +304,7 @@
  public:
   explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -326,7 +326,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; }
+  const char* GetDescription() const override { return "LoadStringSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
@@ -337,7 +337,7 @@
   TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal)
       : SlowPathCode(instruction), is_fatal_(is_fatal) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
@@ -385,9 +385,9 @@
     }
   }
 
-  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86_64"; }
+  const char* GetDescription() const override { return "TypeCheckSlowPathX86_64"; }
 
-  bool IsFatal() const OVERRIDE { return is_fatal_; }
+  bool IsFatal() const override { return is_fatal_; }
 
  private:
   const bool is_fatal_;
@@ -400,7 +400,7 @@
   explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction)
       : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     LocationSummary* locations = instruction_->GetLocations();
@@ -413,7 +413,7 @@
     CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
   }
 
-  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
+  const char* GetDescription() const override { return "DeoptimizationSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
@@ -423,7 +423,7 @@
  public:
   explicit ArraySetSlowPathX86_64(HInstruction* instruction) : SlowPathCode(instruction) {}
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -454,7 +454,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; }
+  const char* GetDescription() const override { return "ArraySetSlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
@@ -482,9 +482,9 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86_64"; }
+  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86_64"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
     Register ref_reg = ref_cpu_reg.AsRegister();
@@ -573,11 +573,11 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierMarkAndUpdateFieldSlowPathX86_64";
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
     Register ref_reg = ref_cpu_reg.AsRegister();
@@ -745,7 +745,7 @@
     DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     CpuRegister reg_out = out_.AsRegister<CpuRegister>();
@@ -864,7 +864,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierForHeapReferenceSlowPathX86_64";
   }
 
@@ -906,7 +906,7 @@
     DCHECK(kEmitCompilerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
@@ -931,7 +931,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86_64"; }
+  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86_64"; }
 
  private:
   const Location out_;
@@ -7395,7 +7395,7 @@
   CodeGeneratorX86_64* codegen_;
 
  private:
-  void Process(const MemoryRegion& region, int pos) OVERRIDE {
+  void Process(const MemoryRegion& region, int pos) override {
     // Patch the correct offset for the instruction.  We use the address of the
     // 'next' instruction, which is 'pos' (patch the 4 bytes before).
     int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
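
The Process() hook above backpatches the 4 bytes that immediately precede 'pos' once the constant area's final start address is known. For illustration, a minimal standalone sketch of that fixup (hypothetical helper; a plain byte buffer stands in for MemoryRegion, and little-endian x86-64 layout is assumed):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // 'pos' is the offset of the *next* instruction, so the 4-byte
    // displacement to patch ends exactly at 'pos'.
    void PatchConstantOffset(std::vector<uint8_t>& code, size_t pos, int32_t constant_offset) {
      std::memcpy(code.data() + pos - 4, &constant_offset, sizeof(constant_offset));
    }
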
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 5ba7f9c..f77a5c8 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -83,22 +83,22 @@
  public:
   FieldAccessCallingConventionX86_64() {}
 
-  Location GetObjectLocation() const OVERRIDE {
+  Location GetObjectLocation() const override {
     return Location::RegisterLocation(RSI);
   }
-  Location GetFieldIndexLocation() const OVERRIDE {
+  Location GetFieldIndexLocation() const override {
     return Location::RegisterLocation(RDI);
   }
-  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::RegisterLocation(RAX);
   }
   Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, bool is_instance)
-      const OVERRIDE {
+      const override {
     return is_instance
         ? Location::RegisterLocation(RDX)
         : Location::RegisterLocation(RSI);
   }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return Location::FpuRegisterLocation(XMM0);
   }
 
@@ -112,9 +112,9 @@
   InvokeDexCallingConventionVisitorX86_64() {}
   virtual ~InvokeDexCallingConventionVisitorX86_64() {}
 
-  Location GetNextLocation(DataType::Type type) OVERRIDE;
-  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
-  Location GetMethodLocation() const OVERRIDE;
+  Location GetNextLocation(DataType::Type type) override;
+  Location GetReturnLocation(DataType::Type type) const override;
+  Location GetMethodLocation() const override;
 
  private:
   InvokeDexCallingConvention calling_convention;
@@ -129,10 +129,10 @@
   ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
       : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
 
-  void EmitMove(size_t index) OVERRIDE;
-  void EmitSwap(size_t index) OVERRIDE;
-  void SpillScratch(int reg) OVERRIDE;
-  void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) override;
+  void EmitSwap(size_t index) override;
+  void SpillScratch(int reg) override;
+  void RestoreScratch(int reg) override;
 
   X86_64Assembler* GetAssembler() const;
 
@@ -157,14 +157,14 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -188,14 +188,14 @@
   InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) OVERRIDE;
+  void Visit##name(H##name* instr) override;
 
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
 
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
                << " (id " << instruction->GetId() << ")";
   }
@@ -300,23 +300,23 @@
                   OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGeneratorX86_64() {}
 
-  void GenerateFrameEntry() OVERRIDE;
-  void GenerateFrameExit() OVERRIDE;
-  void Bind(HBasicBlock* block) OVERRIDE;
-  void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
-  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+  void GenerateFrameEntry() override;
+  void GenerateFrameExit() override;
+  void Bind(HBasicBlock* block) override;
+  void MoveConstant(Location destination, int32_t value) override;
+  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
 
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
 
   // Generate code to invoke a runtime entry point.
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
                      HInstruction* instruction,
                      uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) OVERRIDE;
+                     SlowPathCode* slow_path = nullptr) override;
 
   // Generate code to invoke a runtime entry point, but do not record
   // PC-related information in a stack map.
@@ -326,46 +326,46 @@
 
   void GenerateInvokeRuntime(int32_t entry_point_offset);
 
-  size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const override {
     return kX86_64WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
+  size_t GetFloatingPointSpillSlotSize() const override {
     return GetGraph()->HasSIMD()
         ? 2 * kX86_64WordSize   // 16 bytes == 2 x86_64 words for each spill
        : 1 * kX86_64WordSize;  //  8 bytes == 1 x86_64 word for each spill
   }
 
-  HGraphVisitor* GetLocationBuilder() OVERRIDE {
+  HGraphVisitor* GetLocationBuilder() override {
     return &location_builder_;
   }
 
-  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+  HGraphVisitor* GetInstructionVisitor() override {
     return &instruction_visitor_;
   }
 
-  X86_64Assembler* GetAssembler() OVERRIDE {
+  X86_64Assembler* GetAssembler() override {
     return &assembler_;
   }
 
-  const X86_64Assembler& GetAssembler() const OVERRIDE {
+  const X86_64Assembler& GetAssembler() const override {
     return assembler_;
   }
 
-  ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
+  ParallelMoveResolverX86_64* GetMoveResolver() override {
     return &move_resolver_;
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) override {
     return GetLabelOf(block)->Position();
   }
 
-  void SetupBlockedRegisters() const OVERRIDE;
-  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
-  void Finalize(CodeAllocator* allocator) OVERRIDE;
+  void SetupBlockedRegisters() const override;
+  void DumpCoreRegister(std::ostream& stream, int reg) const override;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
+  void Finalize(CodeAllocator* allocator) override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kX86_64;
   }
 
@@ -387,34 +387,34 @@
     return CommonGetLabelOf<Label>(block_labels_, block);
   }
 
-  void Initialize() OVERRIDE {
+  void Initialize() override {
     block_labels_ = CommonInitializeLabels<Label>();
   }
 
-  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
     return false;
   }
 
   // Check if the desired_string_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+      HLoadString::LoadKind desired_string_load_kind) override;
 
   // Check if the desired_class_load_kind is supported. If it is, return it,
   // otherwise return a fall-back kind that should be used instead.
   HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+      HLoadClass::LoadKind desired_class_load_kind) override;
 
   // Check if the desired_dispatch_info is supported. If it is, return it,
   // otherwise return a fall-back info that should be used instead.
   HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) OVERRIDE;
+      HInvokeStaticOrDirect* invoke) override;
 
   void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
   void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
 
   void RecordBootImageIntrinsicPatch(uint32_t intrinsic_data);
   void RecordBootImageRelRoPatch(uint32_t boot_image_offset);
@@ -434,14 +434,14 @@
   void LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference);
   void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
 
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
+  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
 
   void PatchJitRootUse(uint8_t* code,
                        const uint8_t* roots_data,
                        const PatchInfo<Label>& info,
                        uint64_t index_in_table) const;
 
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
 
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference field load when Baker's read barriers are used.
@@ -565,7 +565,7 @@
   // Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
   void Store64BitValueToStack(Location dest, int64_t value);
 
-  void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+  void MoveFromReturnRegister(Location trg, DataType::Type type) override;
 
   // Assign a 64 bit constant to an address.
   void MoveInt64ToAddress(const Address& addr_low,
@@ -585,9 +585,9 @@
     }
   }
 
-  void GenerateNop() OVERRIDE;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+  void GenerateNop() override;
+  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
+  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.
   // We will fix this up in the linker later to have the right value.
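
The kDummy32BitOffset comment above describes a two-phase scheme: emit a placeholder displacement now, remember where it was emitted, and overwrite it at link time once the real value is known. A rough sketch of that scheme, not the actual linker code (all names hypothetical, placeholder value arbitrary):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    constexpr int32_t kDummyOffset = 0;  // any placeholder works; it is overwritten

    struct PendingPatch {
      size_t code_pos;  // where the dummy displacement was emitted
      int32_t target;   // the value computed later, at link time
    };

    void ResolvePatches(std::vector<uint8_t>& code, const std::vector<PendingPatch>& patches) {
      for (const PendingPatch& p : patches) {
        std::memcpy(code.data() + p.code_pos, &p.target, sizeof(p.target));
      }
    }
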
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h
index 5db0b6d..8eb3a52 100644
--- a/compiler/optimizing/code_sinking.h
+++ b/compiler/optimizing/code_sinking.h
@@ -33,7 +33,7 @@
               const char* name = kCodeSinkingPassName)
       : HOptimization(graph, name, stats) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kCodeSinkingPassName = "code_sinking";
 
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 8c062f0..0289e9c 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -101,7 +101,7 @@
     AddAllocatedRegister(Location::RegisterLocation(arm::R7));
   }
 
-  void SetupBlockedRegisters() const OVERRIDE {
+  void SetupBlockedRegisters() const override {
     arm::CodeGeneratorARMVIXL::SetupBlockedRegisters();
     blocked_core_registers_[arm::R4] = true;
     blocked_core_registers_[arm::R6] = false;
@@ -109,7 +109,7 @@
   }
 
   void MaybeGenerateMarkingRegisterCheck(int code ATTRIBUTE_UNUSED,
-                                         Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
+                                         Location temp_loc ATTRIBUTE_UNUSED) override {
     // When turned on, the marking register checks in
     // CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck expects the
     // Thread Register and the Marking Register to be set to
@@ -141,7 +141,7 @@
       : arm64::CodeGeneratorARM64(graph, compiler_options) {}
 
   void MaybeGenerateMarkingRegisterCheck(int code ATTRIBUTE_UNUSED,
-                                         Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
+                                         Location temp_loc ATTRIBUTE_UNUSED) override {
     // When turned on, the marking register checks in
     // CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck expect the
     // Thread Register and the Marking Register to be set to
@@ -161,7 +161,7 @@
     AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
   }
 
-  void SetupBlockedRegisters() const OVERRIDE {
+  void SetupBlockedRegisters() const override {
     x86::CodeGeneratorX86::SetupBlockedRegisters();
     // ebx is a callee-save register in C, but caller-save for ART.
     blocked_core_registers_[x86::EBX] = true;
@@ -183,7 +183,7 @@
   }
 
   size_t GetSize() const { return size_; }
-  ArrayRef<const uint8_t> GetMemory() const OVERRIDE {
+  ArrayRef<const uint8_t> GetMemory() const override {
     return ArrayRef<const uint8_t>(memory_.get(), size_);
   }
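
The Test* code generators above override virtual hooks of the real backends to pin down register allocation and to disable the marking-register check under test. This is exactly where the 'override' specifier earns its keep: a signature mismatch now fails to compile instead of silently declaring a new, never-called method. A minimal sketch of the pattern (hypothetical classes):

    class CodeGen {
     public:
      virtual ~CodeGen() {}
      virtual void SetupBlockedRegisters() const {}
    };

    class TestCodeGen final : public CodeGen {
     public:
      // With 'override', dropping 'const' here (or misspelling the name)
      // becomes a hard error instead of a silently unused new method.
      void SetupBlockedRegisters() const override {}
    };
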
 
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index bb78c23..09e7cab 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -26,13 +26,13 @@
       : HGraphDelegateVisitor(graph) {}
 
  private:
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+  void VisitBasicBlock(HBasicBlock* block) override;
 
-  void VisitUnaryOperation(HUnaryOperation* inst) OVERRIDE;
-  void VisitBinaryOperation(HBinaryOperation* inst) OVERRIDE;
+  void VisitUnaryOperation(HUnaryOperation* inst) override;
+  void VisitBinaryOperation(HBinaryOperation* inst) override;
 
-  void VisitTypeConversion(HTypeConversion* inst) OVERRIDE;
-  void VisitDivZeroCheck(HDivZeroCheck* inst) OVERRIDE;
+  void VisitTypeConversion(HTypeConversion* inst) override;
+  void VisitDivZeroCheck(HDivZeroCheck* inst) override;
 
   DISALLOW_COPY_AND_ASSIGN(HConstantFoldingVisitor);
 };
@@ -47,24 +47,24 @@
  private:
   void VisitShift(HBinaryOperation* shift);
 
-  void VisitEqual(HEqual* instruction) OVERRIDE;
-  void VisitNotEqual(HNotEqual* instruction) OVERRIDE;
+  void VisitEqual(HEqual* instruction) override;
+  void VisitNotEqual(HNotEqual* instruction) override;
 
-  void VisitAbove(HAbove* instruction) OVERRIDE;
-  void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
-  void VisitBelow(HBelow* instruction) OVERRIDE;
-  void VisitBelowOrEqual(HBelowOrEqual* instruction) OVERRIDE;
+  void VisitAbove(HAbove* instruction) override;
+  void VisitAboveOrEqual(HAboveOrEqual* instruction) override;
+  void VisitBelow(HBelow* instruction) override;
+  void VisitBelowOrEqual(HBelowOrEqual* instruction) override;
 
-  void VisitAnd(HAnd* instruction) OVERRIDE;
-  void VisitCompare(HCompare* instruction) OVERRIDE;
-  void VisitMul(HMul* instruction) OVERRIDE;
-  void VisitOr(HOr* instruction) OVERRIDE;
-  void VisitRem(HRem* instruction) OVERRIDE;
-  void VisitShl(HShl* instruction) OVERRIDE;
-  void VisitShr(HShr* instruction) OVERRIDE;
-  void VisitSub(HSub* instruction) OVERRIDE;
-  void VisitUShr(HUShr* instruction) OVERRIDE;
-  void VisitXor(HXor* instruction) OVERRIDE;
+  void VisitAnd(HAnd* instruction) override;
+  void VisitCompare(HCompare* instruction) override;
+  void VisitMul(HMul* instruction) override;
+  void VisitOr(HOr* instruction) override;
+  void VisitRem(HRem* instruction) override;
+  void VisitShl(HShl* instruction) override;
+  void VisitShr(HShr* instruction) override;
+  void VisitSub(HSub* instruction) override;
+  void VisitUShr(HUShr* instruction) override;
+  void VisitXor(HXor* instruction) override;
 };
 
 
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index f4dbc80..72bd95b 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -41,7 +41,7 @@
  public:
   HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kConstantFoldingPassName = "constant_folding";
 
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 54bff22..3cb8bf2 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -34,7 +34,7 @@
         candidate_fence_targets_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
         stats_(stats) {}
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     // Visit all instructions in block.
     HGraphVisitor::VisitBasicBlock(block);
 
@@ -43,7 +43,7 @@
     MergeCandidateFences();
   }
 
-  void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE {
+  void VisitConstructorFence(HConstructorFence* constructor_fence) override {
     candidate_fences_.push_back(constructor_fence);
 
     for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) {
@@ -51,29 +51,29 @@
     }
   }
 
-  void VisitBoundType(HBoundType* bound_type) OVERRIDE {
+  void VisitBoundType(HBoundType* bound_type) override {
     VisitAlias(bound_type);
   }
 
-  void VisitNullCheck(HNullCheck* null_check) OVERRIDE {
+  void VisitNullCheck(HNullCheck* null_check) override {
     VisitAlias(null_check);
   }
 
-  void VisitSelect(HSelect* select) OVERRIDE {
+  void VisitSelect(HSelect* select) override {
     VisitAlias(select);
   }
 
-  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
     HInstruction* value = instruction->InputAt(1);
     VisitSetLocation(instruction, value);
   }
 
-  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
     HInstruction* value = instruction->InputAt(1);
     VisitSetLocation(instruction, value);
   }
 
-  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+  void VisitArraySet(HArraySet* instruction) override {
     HInstruction* value = instruction->InputAt(2);
     VisitSetLocation(instruction, value);
   }
@@ -83,46 +83,46 @@
     MergeCandidateFences();
   }
 
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+  void VisitInvokeInterface(HInvokeInterface* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+  void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+  void VisitClinitCheck(HClinitCheck* clinit) override {
     HandleInvoke(clinit);
   }
 
-  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
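
The visitor above accumulates candidate fences and conservatively flushes them at anything that may publish the object, treating every invoke and every unresolved field access alike. A minimal sketch of that bookkeeping (hypothetical types, not the real pass):

    #include <vector>

    struct Fence {};

    class FenceTracker {
     public:
      void AddCandidate(Fence* fence) { candidates_.push_back(fence); }
      // Once control may escape, earlier fences are no longer removal
      // candidates; invokes and unresolved accesses both land here.
      void HandleEscape() { candidates_.clear(); }
     private:
      std::vector<Fence*> candidates_;
    };
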
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.h b/compiler/optimizing/constructor_fence_redundancy_elimination.h
index 367d9f2..014b342 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.h
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.h
@@ -52,7 +52,7 @@
                                         const char* name = kCFREPassName)
       : HOptimization(graph, name, stats) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kCFREPassName = "constructor_fence_redundancy_elimination";
 
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 90caa53..799721a 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -32,7 +32,7 @@
   HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name)
       : HOptimization(graph, name, stats) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination";
 
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 293c1ab..63a370a 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -27,7 +27,7 @@
 
 class EmitSwapMipsTest : public OptimizingUnitTest {
  public:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     instruction_set_ = InstructionSet::kMips;
     instruction_set_features_ = MipsInstructionSetFeatures::FromCppDefines();
     OptimizingUnitTest::SetUp();
@@ -46,7 +46,7 @@
                                         GetAssemblyHeader()));
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     test_helper_.reset();
     codegen_.reset();
     graph_ = nullptr;
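
SetUp() and TearDown() above are the standard gtest fixture hooks; marking them 'override' lets the compiler confirm they really redefine the virtual methods of ::testing::Test. A minimal fixture in the same shape (hypothetical test, unrelated to MIPS):

    #include <gtest/gtest.h>

    class SwapTest : public ::testing::Test {
     protected:
      void SetUp() override { scratch_ = new int[16](); }  // runs before each TEST_F
      void TearDown() override { delete[] scratch_; }      // runs after each TEST_F

      int* scratch_ = nullptr;
    };

    TEST_F(SwapTest, ScratchIsAllocated) {
      ASSERT_NE(scratch_, nullptr);
    }
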
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 3a2bb7a..d085609 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -44,30 +44,30 @@
   // and return value pass along the observed graph sizes.
   size_t Run(bool pass_change = true, size_t last_size = 0);
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+  void VisitBasicBlock(HBasicBlock* block) override;
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE;
-  void VisitPhi(HPhi* phi) OVERRIDE;
+  void VisitInstruction(HInstruction* instruction) override;
+  void VisitPhi(HPhi* phi) override;
 
-  void VisitBinaryOperation(HBinaryOperation* op) OVERRIDE;
-  void VisitBooleanNot(HBooleanNot* instruction) OVERRIDE;
-  void VisitBoundType(HBoundType* instruction) OVERRIDE;
-  void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
-  void VisitCheckCast(HCheckCast* check) OVERRIDE;
-  void VisitCondition(HCondition* op) OVERRIDE;
-  void VisitConstant(HConstant* instruction) OVERRIDE;
-  void VisitDeoptimize(HDeoptimize* instruction) OVERRIDE;
-  void VisitIf(HIf* instruction) OVERRIDE;
-  void VisitInstanceOf(HInstanceOf* check) OVERRIDE;
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
-  void VisitLoadException(HLoadException* load) OVERRIDE;
-  void VisitNeg(HNeg* instruction) OVERRIDE;
-  void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE;
-  void VisitReturn(HReturn* ret) OVERRIDE;
-  void VisitReturnVoid(HReturnVoid* ret) OVERRIDE;
-  void VisitSelect(HSelect* instruction) OVERRIDE;
-  void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE;
-  void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
+  void VisitBinaryOperation(HBinaryOperation* op) override;
+  void VisitBooleanNot(HBooleanNot* instruction) override;
+  void VisitBoundType(HBoundType* instruction) override;
+  void VisitBoundsCheck(HBoundsCheck* check) override;
+  void VisitCheckCast(HCheckCast* check) override;
+  void VisitCondition(HCondition* op) override;
+  void VisitConstant(HConstant* instruction) override;
+  void VisitDeoptimize(HDeoptimize* instruction) override;
+  void VisitIf(HIf* instruction) override;
+  void VisitInstanceOf(HInstanceOf* check) override;
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
+  void VisitLoadException(HLoadException* load) override;
+  void VisitNeg(HNeg* instruction) override;
+  void VisitPackedSwitch(HPackedSwitch* instruction) override;
+  void VisitReturn(HReturn* ret) override;
+  void VisitReturnVoid(HReturnVoid* ret) override;
+  void VisitSelect(HSelect* instruction) override;
+  void VisitTryBoundary(HTryBoundary* try_boundary) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
 
   void CheckTypeCheckBitstringInput(HTypeCheckInstruction* check,
                                     size_t input_pos,
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index d65ad40..31db8c2 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -333,7 +333,7 @@
     return output_;
   }
 
-  void VisitParallelMove(HParallelMove* instruction) OVERRIDE {
+  void VisitParallelMove(HParallelMove* instruction) override {
     StartAttributeStream("liveness") << instruction->GetLifetimePosition();
     StringList moves;
     for (size_t i = 0, e = instruction->NumMoves(); i < e; ++i) {
@@ -346,36 +346,36 @@
     StartAttributeStream("moves") <<  moves;
   }
 
-  void VisitIntConstant(HIntConstant* instruction) OVERRIDE {
+  void VisitIntConstant(HIntConstant* instruction) override {
     StartAttributeStream() << instruction->GetValue();
   }
 
-  void VisitLongConstant(HLongConstant* instruction) OVERRIDE {
+  void VisitLongConstant(HLongConstant* instruction) override {
     StartAttributeStream() << instruction->GetValue();
   }
 
-  void VisitFloatConstant(HFloatConstant* instruction) OVERRIDE {
+  void VisitFloatConstant(HFloatConstant* instruction) override {
     StartAttributeStream() << instruction->GetValue();
   }
 
-  void VisitDoubleConstant(HDoubleConstant* instruction) OVERRIDE {
+  void VisitDoubleConstant(HDoubleConstant* instruction) override {
     StartAttributeStream() << instruction->GetValue();
   }
 
-  void VisitPhi(HPhi* phi) OVERRIDE {
+  void VisitPhi(HPhi* phi) override {
     StartAttributeStream("reg") << phi->GetRegNumber();
     StartAttributeStream("is_catch_phi") << std::boolalpha << phi->IsCatchPhi() << std::noboolalpha;
   }
 
-  void VisitMemoryBarrier(HMemoryBarrier* barrier) OVERRIDE {
+  void VisitMemoryBarrier(HMemoryBarrier* barrier) override {
     StartAttributeStream("kind") << barrier->GetBarrierKind();
   }
 
-  void VisitMonitorOperation(HMonitorOperation* monitor) OVERRIDE {
+  void VisitMonitorOperation(HMonitorOperation* monitor) override {
     StartAttributeStream("kind") << (monitor->IsEnter() ? "enter" : "exit");
   }
 
-  void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+  void VisitLoadClass(HLoadClass* load_class) override {
     StartAttributeStream("load_kind") << load_class->GetLoadKind();
     const char* descriptor = load_class->GetDexFile().GetTypeDescriptor(
         load_class->GetDexFile().GetTypeId(load_class->GetTypeIndex()));
@@ -386,19 +386,19 @@
         << load_class->NeedsAccessCheck() << std::noboolalpha;
   }
 
-  void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) OVERRIDE {
+  void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) override {
     StartAttributeStream("load_kind") << "RuntimeCall";
     StartAttributeStream("method_handle_index") << load_method_handle->GetMethodHandleIndex();
   }
 
-  void VisitLoadMethodType(HLoadMethodType* load_method_type) OVERRIDE {
+  void VisitLoadMethodType(HLoadMethodType* load_method_type) override {
     StartAttributeStream("load_kind") << "RuntimeCall";
     const DexFile& dex_file = load_method_type->GetDexFile();
     const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
     StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id);
   }
 
-  void VisitLoadString(HLoadString* load_string) OVERRIDE {
+  void VisitLoadString(HLoadString* load_string) override {
     StartAttributeStream("load_kind") << load_string->GetLoadKind();
   }
 
@@ -413,15 +413,15 @@
     }
   }
 
-  void VisitCheckCast(HCheckCast* check_cast) OVERRIDE {
+  void VisitCheckCast(HCheckCast* check_cast) override {
     HandleTypeCheckInstruction(check_cast);
   }
 
-  void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE {
+  void VisitInstanceOf(HInstanceOf* instance_of) override {
     HandleTypeCheckInstruction(instance_of);
   }
 
-  void VisitArrayLength(HArrayLength* array_length) OVERRIDE {
+  void VisitArrayLength(HArrayLength* array_length) override {
     StartAttributeStream("is_string_length") << std::boolalpha
         << array_length->IsStringLength() << std::noboolalpha;
     if (array_length->IsEmittedAtUseSite()) {
@@ -429,31 +429,31 @@
     }
   }
 
-  void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
+  void VisitBoundsCheck(HBoundsCheck* bounds_check) override {
     StartAttributeStream("is_string_char_at") << std::boolalpha
         << bounds_check->IsStringCharAt() << std::noboolalpha;
   }
 
-  void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
+  void VisitArrayGet(HArrayGet* array_get) override {
     StartAttributeStream("is_string_char_at") << std::boolalpha
         << array_get->IsStringCharAt() << std::noboolalpha;
   }
 
-  void VisitArraySet(HArraySet* array_set) OVERRIDE {
+  void VisitArraySet(HArraySet* array_set) override {
     StartAttributeStream("value_can_be_null") << std::boolalpha
         << array_set->GetValueCanBeNull() << std::noboolalpha;
     StartAttributeStream("needs_type_check") << std::boolalpha
         << array_set->NeedsTypeCheck() << std::noboolalpha;
   }
 
-  void VisitCompare(HCompare* compare) OVERRIDE {
+  void VisitCompare(HCompare* compare) override {
     ComparisonBias bias = compare->GetBias();
     StartAttributeStream("bias") << (bias == ComparisonBias::kGtBias
                                      ? "gt"
                                      : (bias == ComparisonBias::kLtBias ? "lt" : "none"));
   }
 
-  void VisitInvoke(HInvoke* invoke) OVERRIDE {
+  void VisitInvoke(HInvoke* invoke) override {
     StartAttributeStream("dex_file_index") << invoke->GetDexMethodIndex();
     ArtMethod* method = invoke->GetResolvedMethod();
     // We don't print signatures, which conflict with c1visualizer format.
@@ -470,12 +470,12 @@
                                           << std::noboolalpha;
   }
 
-  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) override {
     VisitInvoke(invoke);
     StartAttributeStream("invoke_type") << invoke->GetInvokeType();
   }
 
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
     VisitInvoke(invoke);
     StartAttributeStream("method_load_kind") << invoke->GetMethodLoadKind();
     StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
@@ -484,96 +484,96 @@
     }
   }
 
-  void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
     VisitInvoke(invoke);
     StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
   }
 
-  void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+  void VisitInvokePolymorphic(HInvokePolymorphic* invoke) override {
     VisitInvoke(invoke);
     StartAttributeStream("invoke_type") << "InvokePolymorphic";
   }
 
-  void VisitInstanceFieldGet(HInstanceFieldGet* iget) OVERRIDE {
+  void VisitInstanceFieldGet(HInstanceFieldGet* iget) override {
     StartAttributeStream("field_name") <<
         iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
                                                       /* with type */ false);
     StartAttributeStream("field_type") << iget->GetFieldType();
   }
 
-  void VisitInstanceFieldSet(HInstanceFieldSet* iset) OVERRIDE {
+  void VisitInstanceFieldSet(HInstanceFieldSet* iset) override {
     StartAttributeStream("field_name") <<
         iset->GetFieldInfo().GetDexFile().PrettyField(iset->GetFieldInfo().GetFieldIndex(),
                                                       /* with type */ false);
     StartAttributeStream("field_type") << iset->GetFieldType();
   }
 
-  void VisitStaticFieldGet(HStaticFieldGet* sget) OVERRIDE {
+  void VisitStaticFieldGet(HStaticFieldGet* sget) override {
     StartAttributeStream("field_name") <<
         sget->GetFieldInfo().GetDexFile().PrettyField(sget->GetFieldInfo().GetFieldIndex(),
                                                       /* with type */ false);
     StartAttributeStream("field_type") << sget->GetFieldType();
   }
 
-  void VisitStaticFieldSet(HStaticFieldSet* sset) OVERRIDE {
+  void VisitStaticFieldSet(HStaticFieldSet* sset) override {
     StartAttributeStream("field_name") <<
         sset->GetFieldInfo().GetDexFile().PrettyField(sset->GetFieldInfo().GetFieldIndex(),
                                                       /* with type */ false);
     StartAttributeStream("field_type") << sset->GetFieldType();
   }
 
-  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE {
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) override {
     StartAttributeStream("field_type") << field_access->GetFieldType();
   }
 
-  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE {
+  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) override {
     StartAttributeStream("field_type") << field_access->GetFieldType();
   }
 
-  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE {
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) override {
     StartAttributeStream("field_type") << field_access->GetFieldType();
   }
 
-  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE {
+  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) override {
     StartAttributeStream("field_type") << field_access->GetFieldType();
   }
 
-  void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
+  void VisitTryBoundary(HTryBoundary* try_boundary) override {
     StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
   }
 
-  void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE {
+  void VisitDeoptimize(HDeoptimize* deoptimize) override {
     StartAttributeStream("kind") << deoptimize->GetKind();
   }
 
-  void VisitVecOperation(HVecOperation* vec_operation) OVERRIDE {
+  void VisitVecOperation(HVecOperation* vec_operation) override {
     StartAttributeStream("packed_type") << vec_operation->GetPackedType();
   }
 
-  void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) OVERRIDE {
+  void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) override {
     StartAttributeStream("alignment") << vec_mem_operation->GetAlignment().ToString();
   }
 
-  void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
+  void VisitVecHalvingAdd(HVecHalvingAdd* hadd) override {
     VisitVecBinaryOperation(hadd);
     StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
   }
 
-  void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
+  void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) override {
     VisitVecOperation(instruction);
     StartAttributeStream("kind") << instruction->GetOpKind();
   }
 
 #if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
-  void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) OVERRIDE {
+  void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) override {
     StartAttributeStream("kind") << instruction->GetOpKind();
   }
 
-  void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) OVERRIDE {
+  void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) override {
     StartAttributeStream("kind") << instruction->GetOpKind();
   }
 
-  void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) OVERRIDE {
+  void VisitDataProcWithShifterOp(HDataProcWithShifterOp* instruction) override {
     StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind();
     if (HDataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())) {
       StartAttributeStream("shift") << instruction->GetShiftAmount();
@@ -814,7 +814,7 @@
     Flush();
   }
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     StartTag("block");
     PrintProperty("name", "B", block->GetBlockId());
     if (block->GetLifetimeStart() != kNoLifetime) {
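
Each Visit* override above contributes 'key:value' attributes to the c1visualizer dump for its instruction. A toy version of the attribute-stream idiom, writing to stdout rather than the dump file (hypothetical printer):

    #include <iostream>

    class AttributePrinter {
     public:
      std::ostream& StartAttributeStream(const char* name) {
        return std::cout << ' ' << name << ':';
      }
      void VisitMonitorOperation(bool is_enter) {
        StartAttributeStream("kind") << (is_enter ? "enter" : "exit");
      }
    };
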
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 75cfff2..bbf2265 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -31,7 +31,7 @@
                   const char* pass_name = kGlobalValueNumberingPassName)
       : HOptimization(graph, pass_name), side_effects_(side_effects) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kGlobalValueNumberingPassName = "GVN";
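
Like the other passes touched by this change, GVN derives from HOptimization, overrides 'bool Run()', and exposes a pass-name constant used for logging and timing. The shape, reduced to a standalone sketch (hypothetical base class mirroring HOptimization):

    class Optimization {
     public:
      explicit Optimization(const char* pass_name) : pass_name_(pass_name) {}
      virtual ~Optimization() {}
      virtual bool Run() = 0;  // returns whether the graph was changed

     protected:
      const char* const pass_name_;  // keys logging and pass timing
    };

    class MyPass final : public Optimization {
     public:
      MyPass() : Optimization(kMyPassName) {}
      bool Run() override { return false; }  // no transformation in this sketch

      static constexpr const char* kMyPassName = "my_pass";
    };
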
 
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 89fed2e..a48aa90 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -37,7 +37,7 @@
  public:
   explicit HInductionVarAnalysis(HGraph* graph, const char* name = kInductionPassName);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kInductionPassName = "induction_var_analysis";
 
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 2fdf6a1..6fd0c20 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -60,7 +60,7 @@
         handles_(handles),
         inline_stats_(nullptr) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kInlinerPassName = "inliner";
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f493b66..2757f7b 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -66,44 +66,44 @@
   bool TryCombineVecMultiplyAccumulate(HVecMul* mul);
 
   void VisitShift(HBinaryOperation* shift);
-  void VisitEqual(HEqual* equal) OVERRIDE;
-  void VisitNotEqual(HNotEqual* equal) OVERRIDE;
-  void VisitBooleanNot(HBooleanNot* bool_not) OVERRIDE;
-  void VisitInstanceFieldSet(HInstanceFieldSet* equal) OVERRIDE;
-  void VisitStaticFieldSet(HStaticFieldSet* equal) OVERRIDE;
-  void VisitArraySet(HArraySet* equal) OVERRIDE;
-  void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
-  void VisitNullCheck(HNullCheck* instruction) OVERRIDE;
-  void VisitArrayLength(HArrayLength* instruction) OVERRIDE;
-  void VisitCheckCast(HCheckCast* instruction) OVERRIDE;
-  void VisitAbs(HAbs* instruction) OVERRIDE;
-  void VisitAdd(HAdd* instruction) OVERRIDE;
-  void VisitAnd(HAnd* instruction) OVERRIDE;
-  void VisitCondition(HCondition* instruction) OVERRIDE;
-  void VisitGreaterThan(HGreaterThan* condition) OVERRIDE;
-  void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) OVERRIDE;
-  void VisitLessThan(HLessThan* condition) OVERRIDE;
-  void VisitLessThanOrEqual(HLessThanOrEqual* condition) OVERRIDE;
-  void VisitBelow(HBelow* condition) OVERRIDE;
-  void VisitBelowOrEqual(HBelowOrEqual* condition) OVERRIDE;
-  void VisitAbove(HAbove* condition) OVERRIDE;
-  void VisitAboveOrEqual(HAboveOrEqual* condition) OVERRIDE;
-  void VisitDiv(HDiv* instruction) OVERRIDE;
-  void VisitMul(HMul* instruction) OVERRIDE;
-  void VisitNeg(HNeg* instruction) OVERRIDE;
-  void VisitNot(HNot* instruction) OVERRIDE;
-  void VisitOr(HOr* instruction) OVERRIDE;
-  void VisitShl(HShl* instruction) OVERRIDE;
-  void VisitShr(HShr* instruction) OVERRIDE;
-  void VisitSub(HSub* instruction) OVERRIDE;
-  void VisitUShr(HUShr* instruction) OVERRIDE;
-  void VisitXor(HXor* instruction) OVERRIDE;
-  void VisitSelect(HSelect* select) OVERRIDE;
-  void VisitIf(HIf* instruction) OVERRIDE;
-  void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
-  void VisitInvoke(HInvoke* invoke) OVERRIDE;
-  void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
-  void VisitVecMul(HVecMul* instruction) OVERRIDE;
+  void VisitEqual(HEqual* equal) override;
+  void VisitNotEqual(HNotEqual* equal) override;
+  void VisitBooleanNot(HBooleanNot* bool_not) override;
+  void VisitInstanceFieldSet(HInstanceFieldSet* equal) override;
+  void VisitStaticFieldSet(HStaticFieldSet* equal) override;
+  void VisitArraySet(HArraySet* equal) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
+  void VisitNullCheck(HNullCheck* instruction) override;
+  void VisitArrayLength(HArrayLength* instruction) override;
+  void VisitCheckCast(HCheckCast* instruction) override;
+  void VisitAbs(HAbs* instruction) override;
+  void VisitAdd(HAdd* instruction) override;
+  void VisitAnd(HAnd* instruction) override;
+  void VisitCondition(HCondition* instruction) override;
+  void VisitGreaterThan(HGreaterThan* condition) override;
+  void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) override;
+  void VisitLessThan(HLessThan* condition) override;
+  void VisitLessThanOrEqual(HLessThanOrEqual* condition) override;
+  void VisitBelow(HBelow* condition) override;
+  void VisitBelowOrEqual(HBelowOrEqual* condition) override;
+  void VisitAbove(HAbove* condition) override;
+  void VisitAboveOrEqual(HAboveOrEqual* condition) override;
+  void VisitDiv(HDiv* instruction) override;
+  void VisitMul(HMul* instruction) override;
+  void VisitNeg(HNeg* instruction) override;
+  void VisitNot(HNot* instruction) override;
+  void VisitOr(HOr* instruction) override;
+  void VisitShl(HShl* instruction) override;
+  void VisitShr(HShr* instruction) override;
+  void VisitSub(HSub* instruction) override;
+  void VisitUShr(HUShr* instruction) override;
+  void VisitXor(HXor* instruction) override;
+  void VisitSelect(HSelect* select) override;
+  void VisitIf(HIf* instruction) override;
+  void VisitInstanceOf(HInstanceOf* instruction) override;
+  void VisitInvoke(HInvoke* invoke) override;
+  void VisitDeoptimize(HDeoptimize* deoptimize) override;
+  void VisitVecMul(HVecMul* instruction) override;
 
   bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const;
 
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 2d134e0..982a24a 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -46,7 +46,7 @@
 
   static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
  private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 37fcdb9..24fbb6c 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -56,7 +56,7 @@
    * (2) Since statements can be removed in a "forward" fashion,
    *     the visitor should test if each statement is still there.
    */
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     // TODO: fragile iteration, provide more robust iterators?
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* instruction = it.Current();
@@ -66,15 +66,15 @@
     }
   }
 
-  void VisitAnd(HAnd* instruction) OVERRIDE;
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
-  void VisitArraySet(HArraySet* instruction) OVERRIDE;
-  void VisitMul(HMul* instruction) OVERRIDE;
-  void VisitOr(HOr* instruction) OVERRIDE;
-  void VisitShl(HShl* instruction) OVERRIDE;
-  void VisitShr(HShr* instruction) OVERRIDE;
-  void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
-  void VisitUShr(HUShr* instruction) OVERRIDE;
+  void VisitAnd(HAnd* instruction) override;
+  void VisitArrayGet(HArrayGet* instruction) override;
+  void VisitArraySet(HArraySet* instruction) override;
+  void VisitMul(HMul* instruction) override;
+  void VisitOr(HOr* instruction) override;
+  void VisitShl(HShl* instruction) override;
+  void VisitShr(HShr* instruction) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
+  void VisitUShr(HUShr* instruction) override;
 
   OptimizingCompilerStats* stats_;
 };
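
The VisitBasicBlock comment above flags the iteration hazard: a visit may remove instructions, so the loop must never hold an iterator that the removal could invalidate. The classic removal-tolerant loop shape, with std::list standing in for the instruction list:

    #include <list>

    void SimplifyAll(std::list<int>& instructions) {
      for (auto it = instructions.begin(); it != instructions.end();) {
        auto current = it++;            // advance first...
        if (*current % 2 == 0) {
          instructions.erase(current);  // ...so erasing 'current' is safe
        }
      }
    }
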
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index f1a16ef..fca9341 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -30,7 +30,7 @@
 
   static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 };
 
 }  // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index e0a6279..b536cb4 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -58,7 +58,7 @@
    * (2) Since statements can be removed in a "forward" fashion,
    *     the visitor should test if each statement is still there.
    */
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     // TODO: fragile iteration, provide more robust iterators?
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* instruction = it.Current();
@@ -69,18 +69,18 @@
   }
 
   // HInstruction visitors, sorted alphabetically.
-  void VisitAnd(HAnd* instruction) OVERRIDE;
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
-  void VisitArraySet(HArraySet* instruction) OVERRIDE;
-  void VisitMul(HMul* instruction) OVERRIDE;
-  void VisitOr(HOr* instruction) OVERRIDE;
-  void VisitShl(HShl* instruction) OVERRIDE;
-  void VisitShr(HShr* instruction) OVERRIDE;
-  void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
-  void VisitUShr(HUShr* instruction) OVERRIDE;
-  void VisitXor(HXor* instruction) OVERRIDE;
-  void VisitVecLoad(HVecLoad* instruction) OVERRIDE;
-  void VisitVecStore(HVecStore* instruction) OVERRIDE;
+  void VisitAnd(HAnd* instruction) override;
+  void VisitArrayGet(HArrayGet* instruction) override;
+  void VisitArraySet(HArraySet* instruction) override;
+  void VisitMul(HMul* instruction) override;
+  void VisitOr(HOr* instruction) override;
+  void VisitShl(HShl* instruction) override;
+  void VisitShr(HShr* instruction) override;
+  void VisitTypeConversion(HTypeConversion* instruction) override;
+  void VisitUShr(HUShr* instruction) override;
+  void VisitXor(HXor* instruction) override;
+  void VisitVecLoad(HVecLoad* instruction) override;
+  void VisitVecStore(HVecStore* instruction) override;
 
   OptimizingCompilerStats* stats_;
 };
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 8659c1f..8d93c01 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -30,7 +30,7 @@
 
   static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 };
 
 }  // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index 3bdf90f..5d0c63b 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -39,8 +39,8 @@
   bool TryExtractArrayAccessIndex(HInstruction* access,
                                   HInstruction* index,
                                   DataType::Type packed_type);
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
-  void VisitArraySet(HArraySet* instruction) OVERRIDE;
+  void VisitArrayGet(HArrayGet* instruction) override;
+  void VisitArraySet(HArraySet* instruction) override;
 
   OptimizingCompilerStats* stats_;
   CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 94ef73d..b431334 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -35,7 +35,7 @@
 
   static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
  private:
   CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 993648f..06e2fbb 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -42,7 +42,7 @@
                        const char* name = kIntrinsicsRecognizerPassName)
       : HOptimization(graph, name, stats) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   // Static helper that recognizes intrinsic call. Returns true on success.
   // If it fails due to invoke type mismatch, wrong_invoke_type is set.
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index a657b58..1abfcb0 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -112,7 +112,7 @@
   explicit IntrinsicSlowPathARM64(HInvoke* invoke)
       : SlowPathCodeARM64(invoke), invoke_(invoke) { }
 
-  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen_in) override {
     CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
     __ Bind(GetEntryLabel());
 
@@ -145,7 +145,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathARM64"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPathARM64"; }
 
  private:
   // The instruction where this slow path is happening.
@@ -163,7 +163,7 @@
     DCHECK(kUseBakerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen_in) override {
     CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
@@ -216,7 +216,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathARM64"; }
+  const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathARM64"; }
 
  private:
   Location tmp_;
@@ -1006,9 +1006,9 @@
   explicit BakerReadBarrierCasSlowPathARM64(HInvoke* invoke)
       : SlowPathCodeARM64(invoke) {}
 
-  const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARM64"; }
+  const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARM64"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     Arm64Assembler* assembler = arm64_codegen->GetAssembler();
     MacroAssembler* masm = assembler->GetVIXLAssembler();
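
Every slow path in this file follows the same contract: EmitNativeCode() binds the entry label, performs the out-of-line work, and branches back to the exit label, while GetDescription() names the path for debugging. Reduced to a sketch (hypothetical base; the real SlowPathCodeARM64 also carries labels and register-save logic):

    class SlowPath {
     public:
      virtual ~SlowPath() {}
      virtual void EmitNativeCode() = 0;
      virtual const char* GetDescription() const = 0;
    };

    class MySlowPath final : public SlowPath {
     public:
      void EmitNativeCode() override {
        // Real paths: __ Bind(GetEntryLabel()); ... slow work ...; __ B(GetExitLabel());
      }
      const char* GetDescription() const override { return "MySlowPath"; }
    };
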
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 033a644..9c46efd 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -37,7 +37,7 @@
 
 class CodeGeneratorARM64;
 
-class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderARM64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
       : allocator_(allocator), codegen_(codegen) {}
@@ -45,7 +45,7 @@
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -63,14 +63,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
 };
 
-class IntrinsicCodeGeneratorARM64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorARM64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorARM64(CodeGeneratorARM64* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
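
Each backend declares one visitor method per intrinsic by expanding OPTIMIZING_INTRINSICS over INTRINSICS_LIST, so the single 'override' written in the macro body lands on every generated declaration. A reduced illustration of that X-macro expansion (the two intrinsic names and the list below are stand-ins; the real list lives in intrinsics_list.h):

    class HInvoke;  // stand-in forward declaration

    #define INTRINSICS_LIST_SKETCH(V) \
      V(MathAbsInt)                   \
      V(StringLength)

    class IntrinsicVisitorSketch {
     public:
      virtual ~IntrinsicVisitorSketch() {}
    #define OPTIMIZING_INTRINSICS(Name) \
      virtual void Visit ## Name(HInvoke* invoke) { /* default: ignore */ }
      INTRINSICS_LIST_SKETCH(OPTIMIZING_INTRINSICS)
    #undef OPTIMIZING_INTRINSICS
    };

    class BackendVisitorSketch final : public IntrinsicVisitorSketch {
     public:
    #define OPTIMIZING_INTRINSICS(Name) \
      void Visit ## Name(HInvoke* invoke) override { /* backend handling */ }
      INTRINSICS_LIST_SKETCH(OPTIMIZING_INTRINSICS)
    #undef OPTIMIZING_INTRINSICS
    };
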
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 74a779d..1127fb8 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -85,7 +85,7 @@
     return calling_convention_visitor.GetMethodLocation();
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     ArmVIXLAssembler* assembler = down_cast<ArmVIXLAssembler*>(codegen->GetAssembler());
     __ Bind(GetEntryLabel());
 
@@ -111,7 +111,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPath"; }
 
  private:
   // The instruction where this slow path is happening.
@@ -173,7 +173,7 @@
     DCHECK(kUseBakerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
     LocationSummary* locations = instruction_->GetLocations();
@@ -233,7 +233,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE {
+  const char* GetDescription() const override {
     return "ReadBarrierSystemArrayCopySlowPathARMVIXL";
   }
 
@@ -969,9 +969,9 @@
   explicit BakerReadBarrierCasSlowPathARMVIXL(HInvoke* invoke)
       : SlowPathCodeARMVIXL(invoke) {}
 
-  const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARMVIXL"; }
+  const char* GetDescription() const override { return "BakerReadBarrierCasSlowPathARMVIXL"; }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
     ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
     __ Bind(GetEntryLabel());
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 9c02d0a..1fea776 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -27,14 +27,14 @@
 class ArmVIXLAssembler;
 class CodeGeneratorARMVIXL;
 
-class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderARMVIXL final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -54,14 +54,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL);
 };
 
-class IntrinsicCodeGeneratorARMVIXL FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorARMVIXL final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorARMVIXL(CodeGeneratorARMVIXL* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 01d9f96..771714b 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -108,7 +108,7 @@
  public:
   explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : SlowPathCodeMIPS(invoke), invoke_(invoke) { }
 
-  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen_in) override {
     CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in);
 
     __ Bind(GetEntryLabel());
@@ -137,7 +137,7 @@
     __ B(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPathMIPS"; }
 
  private:
   // The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index 1c1ba40..08d4e82 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -30,14 +30,14 @@
 class CodeGeneratorMIPS;
 class MipsAssembler;
 
-class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderMIPS final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -55,14 +55,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
 };
 
-class IntrinsicCodeGeneratorMIPS FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorMIPS final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorMIPS(CodeGeneratorMIPS* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 0bd69c6..4a1bd5b 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -97,7 +97,7 @@
   explicit IntrinsicSlowPathMIPS64(HInvoke* invoke)
      : SlowPathCodeMIPS64(invoke), invoke_(invoke) { }
 
-  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen_in) override {
     CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
 
     __ Bind(GetEntryLabel());
@@ -126,7 +126,7 @@
     __ Bc(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPathMIPS64"; }
 
  private:
   // The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 748b0b0..ca8bc8f 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -30,14 +30,14 @@
 class CodeGeneratorMIPS64;
 class Mips64Assembler;
 
-class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderMIPS64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -55,14 +55,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
 };
 
-class IntrinsicCodeGeneratorMIPS64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorMIPS64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorMIPS64(CodeGeneratorMIPS64* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index 8c69d9b..41947f1 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -47,7 +47,7 @@
     return calling_convention_visitor.GetMethodLocation();
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     Assembler* assembler = codegen->GetAssembler();
     assembler->Bind(GetEntryLabel());
 
@@ -73,7 +73,7 @@
     assembler->Jump(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }
+  const char* GetDescription() const override { return "IntrinsicSlowPath"; }
 
  private:
   // The instruction where this slow path is happening.
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 5c7be54..d33c0c3 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -82,7 +82,7 @@
     DCHECK(kUseBakerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
@@ -160,7 +160,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86"; }
+  const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86);
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index e3555e7..ae150da 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -30,14 +30,14 @@
 class CodeGeneratorX86;
 class X86Assembler;
 
-class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderX86 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -55,14 +55,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
 };
 
-class IntrinsicCodeGeneratorX86 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorX86 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorX86(CodeGeneratorX86* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index b5afe93..ae88974 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -80,7 +80,7 @@
     DCHECK(kUseBakerReadBarrier);
   }
 
-  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) override {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
@@ -118,7 +118,7 @@
     __ jmp(GetExitLabel());
   }
 
-  const char* GetDescription() const OVERRIDE { return "ReadBarrierSystemArrayCopySlowPathX86_64"; }
+  const char* GetDescription() const override { return "ReadBarrierSystemArrayCopySlowPathX86_64"; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathX86_64);
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index 5cb601e..199cfed 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -30,14 +30,14 @@
 class CodeGeneratorX86_64;
 class X86_64Assembler;
 
-class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor {
+class IntrinsicLocationsBuilderX86_64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicLocationsBuilderX86_64(CodeGeneratorX86_64* codegen);
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
@@ -55,14 +55,14 @@
   DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
 };
 
-class IntrinsicCodeGeneratorX86_64 FINAL : public IntrinsicVisitor {
+class IntrinsicCodeGeneratorX86_64 final : public IntrinsicVisitor {
  public:
   explicit IntrinsicCodeGeneratorX86_64(CodeGeneratorX86_64* codegen) : codegen_(codegen) {}
 
   // Define visitor methods.
 
 #define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+  void Visit ## Name(HInvoke* invoke) override;
 #include "intrinsics_list.h"
   INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
 #undef INTRINSICS_LIST
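
Marking the per-backend builder and generator classes 'final' records that nothing derives from them and allows the compiler to devirtualize calls made through a pointer or reference of the exact class type, the same guarantee the FINAL macro spelled out before. A minimal sketch of what the specifier enforces (stand-in names):

    class IntrinsicVisitorBase {  // stand-in for IntrinsicVisitor
     public:
      virtual ~IntrinsicVisitorBase() {}
    };

    class LocationsBuilderSketch final : public IntrinsicVisitorBase {};

    // class DerivedSketch : public LocationsBuilderSketch {};
    //   ^ would not compile: a 'final' class cannot be used as a base.
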
diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h
index f72d195..9cafddb 100644
--- a/compiler/optimizing/licm.h
+++ b/compiler/optimizing/licm.h
@@ -33,7 +33,7 @@
       : HOptimization(graph, name, stats),
         side_effects_(side_effects) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kLoopInvariantCodeMotionPassName = "licm";
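
This pass, like the load-store and loop passes below, plugs into the shared optimization interface through 'bool Run() override'. A minimal sketch of that shape, assuming the convention that the boolean reports whether the pass actually modified the graph (hypothetical names, not the real pass implementation):

    class HOptimizationSketch {
     public:
      virtual ~HOptimizationSketch() {}
      virtual bool Run() = 0;  // assumed: true iff the graph was changed
    };

    class LicmSketch final : public HOptimizationSketch {
     public:
      bool Run() override {
        bool changed = false;
        // ... hoist loop-invariant instructions out of loops,
        //     setting 'changed' whenever code is actually moved ...
        return changed;
      }
    };
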
 
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 769a3f1..08d9309 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -492,12 +492,12 @@
                             HeapLocation::kDeclaringClassDefIndexForArrays);
   }
 
-  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
     VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
     HeapLocation* location = VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
     has_heap_stores_ = true;
     if (location->GetReferenceInfo()->IsSingleton()) {
@@ -523,12 +523,12 @@
     }
   }
 
-  void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+  void VisitStaticFieldGet(HStaticFieldGet* instruction) override {
     VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
     VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
     has_heap_stores_ = true;
   }
@@ -536,7 +536,7 @@
   // We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses
   // since we cannot accurately track the fields.
 
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+  void VisitArrayGet(HArrayGet* instruction) override {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
     DataType::Type type = instruction->GetType();
@@ -544,7 +544,7 @@
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+  void VisitArraySet(HArraySet* instruction) override {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
     DataType::Type type = instruction->GetComponentType();
@@ -552,7 +552,7 @@
     has_heap_stores_ = true;
   }
 
-  void VisitVecLoad(HVecLoad* instruction) OVERRIDE {
+  void VisitVecLoad(HVecLoad* instruction) override {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
     DataType::Type type = instruction->GetPackedType();
@@ -560,7 +560,7 @@
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitVecStore(HVecStore* instruction) OVERRIDE {
+  void VisitVecStore(HVecStore* instruction) override {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
     DataType::Type type = instruction->GetPackedType();
@@ -568,7 +568,7 @@
     has_heap_stores_ = true;
   }
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     // Any new-instance or new-array cannot alias with references that
     // pre-exist the new-instance/new-array. We append entries into
     // ref_info_array_ which keeps track of the order of creation
@@ -580,7 +580,7 @@
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
+  void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) override {
     has_monitor_operations_ = true;
   }
 
@@ -605,7 +605,7 @@
     return heap_location_collector_;
   }
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kLoadStoreAnalysisPassName = "load_store_analysis";
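
The heap location collector above overrides only the handlers it needs (field and array accesses, vector loads and stores, monitor operations) plus the VisitInstruction catch-all; every other opcode falls back to the base visitor's defaults. A reduced sketch of that dispatch scheme, assuming the usual layout in which each per-opcode default funnels into the catch-all (stand-in names):

    class HInstructionSketch {
     public:
      virtual ~HInstructionSketch() {}
    };
    class HArrayGetSketch : public HInstructionSketch {};

    class GraphVisitorSketch {
     public:
      virtual ~GraphVisitorSketch() {}
      virtual void VisitInstruction(HInstructionSketch* instr) {}
      // Per-opcode default funnels into the catch-all:
      virtual void VisitArrayGet(HArrayGetSketch* instr) { VisitInstruction(instr); }
    };

    class CollectorSketch final : public GraphVisitorSketch {
     public:
      void VisitArrayGet(HArrayGetSketch* instr) override {
        // record the heap location touched by this array read
      }
      void VisitInstruction(HInstructionSketch* instr) override {
        // catch-all: e.g. note new-instance/new-array creation order
      }
    };
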
 
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 28ac942..7f71745 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -107,7 +107,7 @@
         singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)) {
   }
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     // Populate the heap_values array for this block.
     // TODO: try to reuse the heap_values array from one predecessor if possible.
     if (block->IsLoopHeader()) {
@@ -656,13 +656,13 @@
     }
   }
 
-  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
     HInstruction* object = instruction->InputAt(0);
     const FieldInfo& field = instruction->GetFieldInfo();
     VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(object, &field));
   }
 
-  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
     HInstruction* object = instruction->InputAt(0);
     const FieldInfo& field = instruction->GetFieldInfo();
     HInstruction* value = instruction->InputAt(1);
@@ -670,24 +670,24 @@
     VisitSetLocation(instruction, idx, value);
   }
 
-  void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+  void VisitStaticFieldGet(HStaticFieldGet* instruction) override {
     HInstruction* cls = instruction->InputAt(0);
     const FieldInfo& field = instruction->GetFieldInfo();
     VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(cls, &field));
   }
 
-  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) override {
     HInstruction* cls = instruction->InputAt(0);
     const FieldInfo& field = instruction->GetFieldInfo();
     size_t idx = heap_location_collector_.GetFieldHeapLocation(cls, &field);
     VisitSetLocation(instruction, idx, instruction->InputAt(1));
   }
 
-  void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+  void VisitArrayGet(HArrayGet* instruction) override {
     VisitGetLocation(instruction, heap_location_collector_.GetArrayHeapLocation(instruction));
   }
 
-  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+  void VisitArraySet(HArraySet* instruction) override {
     size_t idx = heap_location_collector_.GetArrayHeapLocation(instruction);
     VisitSetLocation(instruction, idx, instruction->InputAt(2));
   }
@@ -743,15 +743,15 @@
     }
   }
 
-  void VisitReturn(HReturn* instruction) OVERRIDE {
+  void VisitReturn(HReturn* instruction) override {
     HandleExit(instruction->GetBlock());
   }
 
-  void VisitReturnVoid(HReturnVoid* return_void) OVERRIDE {
+  void VisitReturnVoid(HReturnVoid* return_void) override {
     HandleExit(return_void->GetBlock());
   }
 
-  void VisitThrow(HThrow* throw_instruction) OVERRIDE {
+  void VisitThrow(HThrow* throw_instruction) override {
     HandleExit(throw_instruction->GetBlock());
   }
 
@@ -777,35 +777,35 @@
     }
   }
 
-  void VisitInvoke(HInvoke* invoke) OVERRIDE {
+  void VisitInvoke(HInvoke* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+  void VisitClinitCheck(HClinitCheck* clinit) override {
     HandleInvoke(clinit);
   }
 
-  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) override {
     // Conservatively treat it as an invocation.
     HandleInvoke(instruction);
   }
 
-  void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+  void VisitNewInstance(HNewInstance* new_instance) override {
     ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_instance);
     if (ref_info == nullptr) {
       // new_instance isn't used for field accesses. No need to process it.
@@ -829,7 +829,7 @@
     }
   }
 
-  void VisitNewArray(HNewArray* new_array) OVERRIDE {
+  void VisitNewArray(HNewArray* new_array) override {
     ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_array);
     if (ref_info == nullptr) {
       // new_array isn't used for array accesses. No need to process it.
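
Note the conservative treatment above: clinit checks and unresolved field accesses are routed through the same HandleInvoke path as real calls, because anything that might run arbitrary code can rewrite the heap behind the pass's back. Roughly, that means discarding what the pass thought it knew about tracked heap locations (the real pass is finer-grained, e.g. around non-escaping singletons); a heavily simplified sketch:

    #include <vector>

    // Stand-in for the per-block array of known heap values.
    struct HeapValuesSketch {
      std::vector<int> values;
      void InvalidateAll() { values.assign(values.size(), /*kUnknown=*/-1); }
    };

    // On an invoke, or anything conservatively treated as one,
    // cached knowledge about the heap is discarded.
    void HandleInvokeSketch(HeapValuesSketch* heap_values) {
      heap_values->InvalidateAll();
    }
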
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 408386b..f7ba41a 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -35,7 +35,7 @@
         side_effects_(side_effects),
         lsa_(lsa) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
 
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index d355ced..2ae3683 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -87,14 +87,14 @@
   // Maximum number of instructions to be created as a result of full unrolling.
   static constexpr uint32_t kScalarHeuristicFullyUnrolledMaxInstrThreshold = 35;
 
-  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const override {
     return analysis_info->HasLongTypeInstructions() ||
            IsLoopTooBig(analysis_info,
                         kScalarHeuristicMaxBodySizeInstr,
                         kScalarHeuristicMaxBodySizeBlocks);
   }
 
-  uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const OVERRIDE {
+  uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const override {
     int64_t trip_count = analysis_info->GetTripCount();
     // Unroll only loops with known trip count.
     if (trip_count == LoopAnalysisInfo::kUnknownTripCount) {
@@ -108,9 +108,9 @@
     return desired_unrolling_factor;
   }
 
-  bool IsLoopPeelingEnabled() const OVERRIDE { return true; }
+  bool IsLoopPeelingEnabled() const override { return true; }
 
-  bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+  bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const override {
     int64_t trip_count = analysis_info->GetTripCount();
     // We assume that trip count is known.
     DCHECK_NE(trip_count, LoopAnalysisInfo::kUnknownTripCount);
@@ -144,7 +144,7 @@
   // Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
   static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeBlocks = 8;
 
-  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const OVERRIDE {
+  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const override {
     return IsLoopTooBig(loop_analysis_info,
                         kArm64ScalarHeuristicMaxBodySizeInstr,
                         kArm64ScalarHeuristicMaxBodySizeBlocks);
@@ -153,7 +153,7 @@
   uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
                                   int64_t trip_count,
                                   uint32_t max_peel,
-                                  uint32_t vector_length) const OVERRIDE {
+                                  uint32_t vector_length) const override {
     // Don't unroll with insufficient iterations.
     // TODO: Unroll loops with unknown trip count.
     DCHECK_NE(vector_length, 0u);
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 644b740..2b202fd 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -43,7 +43,7 @@
                     OptimizingCompilerStats* stats,
                     const char* name = kLoopOptimizationPassName);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kLoopOptimizationPassName = "loop_optimization";
 
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d88b036..748e21f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1529,12 +1529,12 @@
   private:                                                                \
   H##type& operator=(const H##type&) = delete;                            \
   public:                                                                 \
-  const char* DebugName() const OVERRIDE { return #type; }                \
-  HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE {             \
+  const char* DebugName() const override { return #type; }                \
+  HInstruction* Clone(ArenaAllocator* arena) const override {             \
     DCHECK(IsClonable());                                                 \
     return new (arena) H##type(*this->As##type());                        \
   }                                                                       \
-  void Accept(HGraphVisitor* visitor) OVERRIDE
+  void Accept(HGraphVisitor* visitor) override
 
 #define DECLARE_ABSTRACT_INSTRUCTION(type)                              \
   private:                                                              \
@@ -2595,7 +2595,7 @@
 class HVariableInputSizeInstruction : public HInstruction {
  public:
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
     return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
   }
 
@@ -2645,7 +2645,7 @@
   virtual ~HExpression() {}
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
     return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
   }
 
@@ -2667,7 +2667,7 @@
   virtual ~HExpression() {}
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
     return ArrayRef<HUserRecord<HInstruction*>>();
   }
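
Where the old macros appeared together as OVERRIDE FINAL, the replacement is 'override final': GetInputRecords both overrides the base declaration and is sealed against further overriding in subclasses of HExpression. A minimal sketch of the combined specifiers (stand-in names):

    class Base {
     public:
      virtual ~Base() {}
      virtual int GetInputCount() const { return 0; }
    };

    class Mid : public Base {
     public:
      int GetInputCount() const override final { return 1; }
    };

    // class Leaf : public Mid {
    //  public:
    //   int GetInputCount() const override { return 2; }
    //     ^ would not compile: Mid::GetInputCount is 'final'.
    // };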
 
@@ -2680,13 +2680,13 @@
 
 // Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
 // instruction that branches to the exit block.
-class HReturnVoid FINAL : public HExpression<0> {
+class HReturnVoid final : public HExpression<0> {
  public:
   explicit HReturnVoid(uint32_t dex_pc = kNoDexPc)
       : HExpression(kReturnVoid, SideEffects::None(), dex_pc) {
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   DECLARE_INSTRUCTION(ReturnVoid);
 
@@ -2696,14 +2696,14 @@
 
 // Represents dex's RETURN opcodes. A HReturn is a control flow
 // instruction that branches to the exit block.
-class HReturn FINAL : public HExpression<1> {
+class HReturn final : public HExpression<1> {
  public:
   explicit HReturn(HInstruction* value, uint32_t dex_pc = kNoDexPc)
       : HExpression(kReturn, SideEffects::None(), dex_pc) {
     SetRawInputAt(0, value);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   DECLARE_INSTRUCTION(Return);
 
@@ -2711,7 +2711,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Return);
 };
 
-class HPhi FINAL : public HVariableInputSizeInstruction {
+class HPhi final : public HVariableInputSizeInstruction {
  public:
   HPhi(ArenaAllocator* allocator,
        uint32_t reg_number,
@@ -2735,7 +2735,7 @@
     SetPackedFlag<kFlagCanBeNull>(true);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   // Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
   static DataType::Type ToPhiType(DataType::Type type) {
@@ -2755,7 +2755,7 @@
     SetPackedField<TypeField>(new_type);
   }
 
-  bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+  bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
   void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
 
   uint32_t GetRegNumber() const { return reg_number_; }
@@ -2813,13 +2813,13 @@
 // The exit instruction is the only instruction of the exit block.
 // Instructions aborting the method (HThrow and HReturn) must branch to the
 // exit block.
-class HExit FINAL : public HExpression<0> {
+class HExit final : public HExpression<0> {
  public:
   explicit HExit(uint32_t dex_pc = kNoDexPc)
       : HExpression(kExit, SideEffects::None(), dex_pc) {
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   DECLARE_INSTRUCTION(Exit);
 
@@ -2828,14 +2828,14 @@
 };
 
 // Jumps from one block to another.
-class HGoto FINAL : public HExpression<0> {
+class HGoto final : public HExpression<0> {
  public:
   explicit HGoto(uint32_t dex_pc = kNoDexPc)
       : HExpression(kGoto, SideEffects::None(), dex_pc) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool IsControlFlow() const override { return true; }
 
   HBasicBlock* GetSuccessor() const {
     return GetBlock()->GetSingleSuccessor();
@@ -2853,7 +2853,7 @@
       : HExpression(kind, type, SideEffects::None(), dex_pc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   // Is this constant -1 in the arithmetic sense?
   virtual bool IsMinusOne() const { return false; }
@@ -2872,15 +2872,15 @@
   DEFAULT_COPY_CONSTRUCTOR(Constant);
 };
 
-class HNullConstant FINAL : public HConstant {
+class HNullConstant final : public HConstant {
  public:
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  uint64_t GetValueAsUint64() const OVERRIDE { return 0; }
+  uint64_t GetValueAsUint64() const override { return 0; }
 
-  size_t ComputeHashCode() const OVERRIDE { return 0; }
+  size_t ComputeHashCode() const override { return 0; }
 
   // The null constant representation is a 0-bit pattern.
   virtual bool IsZeroBitPattern() const { return true; }
@@ -2900,25 +2900,25 @@
 
 // Constants of the type int. Those can be from Dex instructions, or
 // synthesized (for example with the if-eqz instruction).
-class HIntConstant FINAL : public HConstant {
+class HIntConstant final : public HConstant {
  public:
   int32_t GetValue() const { return value_; }
 
-  uint64_t GetValueAsUint64() const OVERRIDE {
+  uint64_t GetValueAsUint64() const override {
     return static_cast<uint64_t>(static_cast<uint32_t>(value_));
   }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsIntConstant()) << other->DebugName();
     return other->AsIntConstant()->value_ == value_;
   }
 
-  size_t ComputeHashCode() const OVERRIDE { return GetValue(); }
+  size_t ComputeHashCode() const override { return GetValue(); }
 
-  bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
-  bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; }
-  bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; }
-  bool IsOne() const OVERRIDE { return GetValue() == 1; }
+  bool IsMinusOne() const override { return GetValue() == -1; }
+  bool IsArithmeticZero() const override { return GetValue() == 0; }
+  bool IsZeroBitPattern() const override { return GetValue() == 0; }
+  bool IsOne() const override { return GetValue() == 1; }
 
   // Integer constants are used to encode Boolean values as well,
   // where 1 means true and 0 means false.
@@ -2946,23 +2946,23 @@
   ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
 };
 
-class HLongConstant FINAL : public HConstant {
+class HLongConstant final : public HConstant {
  public:
   int64_t GetValue() const { return value_; }
 
-  uint64_t GetValueAsUint64() const OVERRIDE { return value_; }
+  uint64_t GetValueAsUint64() const override { return value_; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsLongConstant()) << other->DebugName();
     return other->AsLongConstant()->value_ == value_;
   }
 
-  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
 
-  bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
-  bool IsArithmeticZero() const OVERRIDE { return GetValue() == 0; }
-  bool IsZeroBitPattern() const OVERRIDE { return GetValue() == 0; }
-  bool IsOne() const OVERRIDE { return GetValue() == 1; }
+  bool IsMinusOne() const override { return GetValue() == -1; }
+  bool IsArithmeticZero() const override { return GetValue() == 0; }
+  bool IsZeroBitPattern() const override { return GetValue() == 0; }
+  bool IsOne() const override { return GetValue() == 1; }
 
   DECLARE_INSTRUCTION(LongConstant);
 
@@ -2980,25 +2980,25 @@
   friend class HGraph;
 };
 
-class HFloatConstant FINAL : public HConstant {
+class HFloatConstant final : public HConstant {
  public:
   float GetValue() const { return value_; }
 
-  uint64_t GetValueAsUint64() const OVERRIDE {
+  uint64_t GetValueAsUint64() const override {
     return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_));
   }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsFloatConstant()) << other->DebugName();
     return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64();
   }
 
-  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
 
-  bool IsMinusOne() const OVERRIDE {
+  bool IsMinusOne() const override {
     return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>((-1.0f));
   }
-  bool IsArithmeticZero() const OVERRIDE {
+  bool IsArithmeticZero() const override {
     return std::fpclassify(value_) == FP_ZERO;
   }
   bool IsArithmeticPositiveZero() const {
@@ -3007,10 +3007,10 @@
   bool IsArithmeticNegativeZero() const {
     return IsArithmeticZero() && std::signbit(value_);
   }
-  bool IsZeroBitPattern() const OVERRIDE {
+  bool IsZeroBitPattern() const override {
     return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(0.0f);
   }
-  bool IsOne() const OVERRIDE {
+  bool IsOne() const override {
     return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(1.0f);
   }
   bool IsNaN() const {
@@ -3039,23 +3039,23 @@
   friend class HGraph;
 };
 
-class HDoubleConstant FINAL : public HConstant {
+class HDoubleConstant final : public HConstant {
  public:
   double GetValue() const { return value_; }
 
-  uint64_t GetValueAsUint64() const OVERRIDE { return bit_cast<uint64_t, double>(value_); }
+  uint64_t GetValueAsUint64() const override { return bit_cast<uint64_t, double>(value_); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsDoubleConstant()) << other->DebugName();
     return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
   }
 
-  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const override { return static_cast<size_t>(GetValue()); }
 
-  bool IsMinusOne() const OVERRIDE {
+  bool IsMinusOne() const override {
     return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((-1.0));
   }
-  bool IsArithmeticZero() const OVERRIDE {
+  bool IsArithmeticZero() const override {
     return std::fpclassify(value_) == FP_ZERO;
   }
   bool IsArithmeticPositiveZero() const {
@@ -3064,10 +3064,10 @@
   bool IsArithmeticNegativeZero() const {
     return IsArithmeticZero() && std::signbit(value_);
   }
-  bool IsZeroBitPattern() const OVERRIDE {
+  bool IsZeroBitPattern() const override {
     return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((0.0));
   }
-  bool IsOne() const OVERRIDE {
+  bool IsOne() const override {
     return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>(1.0);
   }
   bool IsNaN() const {
@@ -3098,15 +3098,15 @@
 
 // Conditional branch. A block ending with an HIf instruction must have
 // two successors.
-class HIf FINAL : public HExpression<1> {
+class HIf final : public HExpression<1> {
  public:
   explicit HIf(HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HExpression(kIf, SideEffects::None(), dex_pc) {
     SetRawInputAt(0, input);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool IsControlFlow() const override { return true; }
 
   HBasicBlock* IfTrueSuccessor() const {
     return GetBlock()->GetSuccessors()[0];
@@ -3128,7 +3128,7 @@
 // non-exceptional control flow.
 // Normal-flow successor is stored at index zero, exception handlers under
 // higher indices in no particular order.
-class HTryBoundary FINAL : public HExpression<0> {
+class HTryBoundary final : public HExpression<0> {
  public:
   enum class BoundaryKind {
     kEntry,
@@ -3141,7 +3141,7 @@
     SetPackedField<BoundaryKindField>(kind);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   // Returns the block's non-exceptional successor (index zero).
   HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors()[0]; }
@@ -3187,7 +3187,7 @@
 };
 
 // Deoptimize to interpreter, upon checking a condition.
-class HDeoptimize FINAL : public HVariableInputSizeInstruction {
+class HDeoptimize final : public HVariableInputSizeInstruction {
  public:
   // Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move
   // across.
@@ -3207,7 +3207,7 @@
     SetRawInputAt(0, cond);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   // Use this constructor when the `HDeoptimize` guards an instruction, and any user
   // that relies on the deoptimization to pass should have its input be the `HDeoptimize`
@@ -3233,15 +3233,15 @@
     SetRawInputAt(1, guard);
   }
 
-  bool CanBeMoved() const OVERRIDE { return GetPackedFlag<kFieldCanBeMoved>(); }
+  bool CanBeMoved() const override { return GetPackedFlag<kFieldCanBeMoved>(); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return (other->CanBeMoved() == CanBeMoved()) && (other->AsDeoptimize()->GetKind() == GetKind());
   }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   DeoptimizationKind GetDeoptimizationKind() const { return GetPackedField<DeoptimizeKindField>(); }
 
@@ -3281,7 +3281,7 @@
 // if it's true, starts to do deoptimization.
 // It has a 4-byte slot on stack.
 // TODO: allocate a register for this flag.
-class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
+class HShouldDeoptimizeFlag final : public HVariableInputSizeInstruction {
  public:
   // CHA guards are only optimized in a separate pass and it has no side effects
   // with regard to other passes.
@@ -3299,7 +3299,7 @@
   // further guard elimination/motion since a guard might have been used for justification
   // of the elimination of another guard. Therefore, we pretend this guard cannot be moved
   // to avoid other optimizations trying to move it.
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(ShouldDeoptimizeFlag);
 
@@ -3310,7 +3310,7 @@
 // Represents the ArtMethod that was passed as a first argument to
 // the method. It is used by instructions that depend on it, like
 // instructions that work with the dex cache.
-class HCurrentMethod FINAL : public HExpression<0> {
+class HCurrentMethod final : public HExpression<0> {
  public:
   explicit HCurrentMethod(DataType::Type type, uint32_t dex_pc = kNoDexPc)
       : HExpression(kCurrentMethod, type, SideEffects::None(), dex_pc) {
@@ -3324,7 +3324,7 @@
 
 // Fetches an ArtMethod from the virtual table or the interface method table
 // of a class.
-class HClassTableGet FINAL : public HExpression<1> {
+class HClassTableGet final : public HExpression<1> {
  public:
   enum class TableKind {
     kVTable,
@@ -3342,9 +3342,9 @@
     SetRawInputAt(0, cls);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return other->AsClassTableGet()->GetIndex() == index_ &&
         other->AsClassTableGet()->GetPackedFields() == GetPackedFields();
   }
@@ -3373,7 +3373,7 @@
 // PackedSwitch (jump table). A block ending with a PackedSwitch instruction will
 // have one successor for each entry in the switch table, and the final successor
 // will be the block containing the next Dex opcode.
-class HPackedSwitch FINAL : public HExpression<1> {
+class HPackedSwitch final : public HExpression<1> {
  public:
   HPackedSwitch(int32_t start_value,
                 uint32_t num_entries,
@@ -3385,9 +3385,9 @@
     SetRawInputAt(0, input);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   int32_t GetStartValue() const { return start_value_; }
 
@@ -3418,13 +3418,13 @@
   }
 
   // All of the UnaryOperation instructions are clonable.
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   HInstruction* GetInput() const { return InputAt(0); }
   DataType::Type GetResultType() const { return GetType(); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
@@ -3459,7 +3459,7 @@
   }
 
   // All of the BinaryOperation instructions are clonable.
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   HInstruction* GetLeft() const { return InputAt(0); }
   HInstruction* GetRight() const { return InputAt(1); }
@@ -3499,8 +3499,8 @@
     }
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
@@ -3581,7 +3581,7 @@
   ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); }
   void SetBias(ComparisonBias bias) { SetPackedField<ComparisonBiasField>(bias); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return GetPackedFields() == other->AsCondition()->GetPackedFields();
   }
 
@@ -3638,42 +3638,42 @@
 };
 
 // Instruction to check if two inputs are equal to each other.
-class HEqual FINAL : public HCondition {
+class HEqual final : public HCondition {
  public:
   HEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kEqual, first, second, dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
-                      HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HNullConstant* y ATTRIBUTE_UNUSED) const override {
     return MakeConstantCondition(true, GetDexPc());
   }
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HEqual instruction; evaluate it as
   // `Compare(x, y) == 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0),
                                  GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(Equal);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondEQ;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondNE;
   }
 
@@ -3684,42 +3684,42 @@
   template <typename T> static bool Compute(T x, T y) { return x == y; }
 };
 
-class HNotEqual FINAL : public HCondition {
+class HNotEqual final : public HCondition {
  public:
   HNotEqual(HInstruction* first, HInstruction* second,
             uint32_t dex_pc = kNoDexPc)
       : HCondition(kNotEqual, first, second, dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
-                      HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HNullConstant* y ATTRIBUTE_UNUSED) const override {
     return MakeConstantCondition(false, GetDexPc());
   }
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HNotEqual instruction; evaluate it as
   // `Compare(x, y) != 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(NotEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondNE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondEQ;
   }
 
@@ -3730,36 +3730,36 @@
   template <typename T> static bool Compute(T x, T y) { return x != y; }
 };
 
-class HLessThan FINAL : public HCondition {
+class HLessThan final : public HCondition {
  public:
   HLessThan(HInstruction* first, HInstruction* second,
             uint32_t dex_pc = kNoDexPc)
       : HCondition(kLessThan, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HLessThan instruction; evaluate it as
   // `Compare(x, y) < 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(LessThan);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondLT;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondGE;
   }
 
@@ -3770,36 +3770,36 @@
   template <typename T> static bool Compute(T x, T y) { return x < y; }
 };
 
-class HLessThanOrEqual FINAL : public HCondition {
+class HLessThanOrEqual final : public HCondition {
  public:
   HLessThanOrEqual(HInstruction* first, HInstruction* second,
                    uint32_t dex_pc = kNoDexPc)
       : HCondition(kLessThanOrEqual, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HLessThanOrEqual instruction; evaluate it as
   // `Compare(x, y) <= 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(LessThanOrEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondLE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondGT;
   }
 
@@ -3810,35 +3810,35 @@
   template <typename T> static bool Compute(T x, T y) { return x <= y; }
 };
 
-class HGreaterThan FINAL : public HCondition {
+class HGreaterThan final : public HCondition {
  public:
   HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kGreaterThan, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, a HCompare instruction has
   // been merged into this HGreaterThan instruction; evaluate it as
   // `Compare(x, y) > 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(GreaterThan);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondGT;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondLE;
   }
 
@@ -3849,35 +3849,35 @@
   template <typename T> static bool Compute(T x, T y) { return x > y; }
 };
 
-class HGreaterThanOrEqual FINAL : public HCondition {
+class HGreaterThanOrEqual final : public HCondition {
  public:
   HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kGreaterThanOrEqual, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // In the following Evaluate methods, an HCompare instruction has
   // been merged into this HGreaterThanOrEqual instruction; evaluate it as
   // `Compare(x, y) >= 0`.
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
   }
 
   DECLARE_INSTRUCTION(GreaterThanOrEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondGE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondLT;
   }
 
@@ -3888,36 +3888,36 @@
   template <typename T> static bool Compute(T x, T y) { return x >= y; }
 };
 
-class HBelow FINAL : public HCondition {
+class HBelow final : public HCondition {
  public:
   HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kBelow, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
 
   DECLARE_INSTRUCTION(Below);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondB;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondAE;
   }
 
@@ -3930,36 +3930,36 @@
   }
 };
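// HBelow is the unsigned counterpart of HLessThan: both operands are viewed
// as unsigned before comparing, which is also why the float/double overloads
// above bail out. A minimal standalone sketch of the difference (values
// chosen for illustration):

#include <cstdint>
static_assert(static_cast<uint32_t>(-1) > 1u,
              "-1 viewed as unsigned (0xFFFFFFFF) is above 1, so "
              "Below(-1, 1) is false even though LessThan(-1, 1) is true");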
 
-class HBelowOrEqual FINAL : public HCondition {
+class HBelowOrEqual final : public HCondition {
  public:
   HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kBelowOrEqual, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
 
   DECLARE_INSTRUCTION(BelowOrEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondBE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondA;
   }
 
@@ -3972,36 +3972,36 @@
   }
 };
 
-class HAbove FINAL : public HCondition {
+class HAbove final : public HCondition {
  public:
   HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kAbove, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
 
   DECLARE_INSTRUCTION(Above);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondA;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondBE;
   }
 
@@ -4014,36 +4014,36 @@
   }
 };
 
-class HAboveOrEqual FINAL : public HCondition {
+class HAboveOrEqual final : public HCondition {
  public:
   HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
       : HCondition(kAboveOrEqual, first, second, dex_pc) {
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
 
   DECLARE_INSTRUCTION(AboveOrEqual);
 
-  IfCondition GetCondition() const OVERRIDE {
+  IfCondition GetCondition() const override {
     return kCondAE;
   }
 
-  IfCondition GetOppositeCondition() const OVERRIDE {
+  IfCondition GetOppositeCondition() const override {
     return kCondB;
   }
 
@@ -4058,7 +4058,7 @@
 
 // Instruction to check how two inputs compare to each other.
 // Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
-class HCompare FINAL : public HBinaryOperation {
+class HCompare final : public HBinaryOperation {
  public:
   // Note that `comparison_type` is the type of comparison performed
   // between the comparison's inputs, not the type of the instantiated
@@ -4090,7 +4090,7 @@
     return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y);
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     // Note that there is no "cmp-int" Dex instruction, so we shouldn't
     // reach this code path when processing a freshly built HIR
     // graph. However, HCompare integer instructions can be synthesized
@@ -4098,17 +4098,17 @@
     // IntegerSignum intrinsics, so we have to handle this case.
     return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return GetPackedFields() == other->AsCompare()->GetPackedFields();
   }
 
@@ -4147,7 +4147,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Compare);
 };
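// The unordered arm in ComputeFP above encodes the Dex cmpg/cmpl bias: if
// either operand is NaN, a gt-bias compare folds to 1 and an lt-bias compare
// folds to -1. A standalone sketch mirroring that rule (not part of this
// header):

#include <cmath>
static int CompareFpSketch(double x, double y, bool gt_bias) {
  if (std::isunordered(x, y)) {
    return gt_bias ? 1 : -1;  // NaN involved: result is fixed by the bias
  }
  return (x == y) ? 0 : ((x > y) ? 1 : -1);
}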
 
-class HNewInstance FINAL : public HExpression<1> {
+class HNewInstance final : public HExpression<1> {
  public:
   HNewInstance(HInstruction* cls,
                uint32_t dex_pc,
@@ -4166,16 +4166,16 @@
     SetRawInputAt(0, cls);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   dex::TypeIndex GetTypeIndex() const { return type_index_; }
   const DexFile& GetDexFile() const { return dex_file_; }
 
   // Calls runtime so needs an environment.
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
   // Can throw errors when out of memory or when the class is not instantiable/accessible.
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   bool NeedsChecks() const {
     return entrypoint_ == kQuickAllocObjectWithChecks;
@@ -4183,7 +4183,7 @@
 
   bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
   QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; }
 
@@ -4237,7 +4237,7 @@
 
 class HInvoke : public HVariableInputSizeInstruction {
  public:
-  bool NeedsEnvironment() const OVERRIDE;
+  bool NeedsEnvironment() const override;
 
   void SetArgumentAt(size_t index, HInstruction* argument) {
     SetRawInputAt(index, argument);
@@ -4270,15 +4270,15 @@
 
   void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
 
-  bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
+  bool CanThrow() const override { return GetPackedFlag<kFlagCanThrow>(); }
 
   void SetAlwaysThrows(bool always_throws) { SetPackedFlag<kFlagAlwaysThrows>(always_throws); }
 
-  bool AlwaysThrows() const OVERRIDE { return GetPackedFlag<kFlagAlwaysThrows>(); }
+  bool AlwaysThrows() const override { return GetPackedFlag<kFlagAlwaysThrows>(); }
 
-  bool CanBeMoved() const OVERRIDE { return IsIntrinsic() && !DoesAnyWrite(); }
+  bool CanBeMoved() const override { return IsIntrinsic() && !DoesAnyWrite(); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return intrinsic_ != Intrinsics::kNone && intrinsic_ == other->AsInvoke()->intrinsic_;
   }
 
@@ -4344,7 +4344,7 @@
   uint32_t intrinsic_optimizations_;
 };
 
-class HInvokeUnresolved FINAL : public HInvoke {
+class HInvokeUnresolved final : public HInvoke {
  public:
   HInvokeUnresolved(ArenaAllocator* allocator,
                     uint32_t number_of_arguments,
@@ -4363,7 +4363,7 @@
                 invoke_type) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   DECLARE_INSTRUCTION(InvokeUnresolved);
 
@@ -4371,7 +4371,7 @@
   DEFAULT_COPY_CONSTRUCTOR(InvokeUnresolved);
 };
 
-class HInvokePolymorphic FINAL : public HInvoke {
+class HInvokePolymorphic final : public HInvoke {
  public:
   HInvokePolymorphic(ArenaAllocator* allocator,
                      uint32_t number_of_arguments,
@@ -4389,7 +4389,7 @@
                 kVirtual) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   DECLARE_INSTRUCTION(InvokePolymorphic);
 
@@ -4397,7 +4397,7 @@
   DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic);
 };
 
-class HInvokeCustom FINAL : public HInvoke {
+class HInvokeCustom final : public HInvoke {
  public:
   HInvokeCustom(ArenaAllocator* allocator,
                 uint32_t number_of_arguments,
@@ -4418,7 +4418,7 @@
 
   uint32_t GetCallSiteIndex() const { return call_site_index_; }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   DECLARE_INSTRUCTION(InvokeCustom);
 
@@ -4429,7 +4429,7 @@
   uint32_t call_site_index_;
 };
 
-class HInvokeStaticOrDirect FINAL : public HInvoke {
+class HInvokeStaticOrDirect final : public HInvoke {
  public:
   // Requirements of this method call regarding the class
   // initialization (clinit) check of its declaring class.
@@ -4518,7 +4518,7 @@
     SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   void SetDispatchInfo(const DispatchInfo& dispatch_info) {
     bool had_current_method_input = HasCurrentMethodInput();
@@ -4548,7 +4548,7 @@
   }
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
     ArrayRef<HUserRecord<HInstruction*>> input_records = HInvoke::GetInputRecords();
     if (kIsDebugBuild && IsStaticWithExplicitClinitCheck()) {
       DCHECK(!input_records.empty());
@@ -4566,13 +4566,13 @@
     return input_records;
   }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
     // We access the method via the dex cache, so we can't do an implicit null check.
     // TODO: for intrinsics we can generate implicit null checks.
     return false;
   }
 
-  bool CanBeNull() const OVERRIDE {
+  bool CanBeNull() const override {
     return GetType() == DataType::Type::kReference && !IsStringInit();
   }
 
@@ -4587,7 +4587,7 @@
   MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
   CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
   bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE;
+  bool NeedsDexCacheOfDeclaringClass() const override;
   bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
   bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
   bool HasPcRelativeMethodLoadKind() const {
@@ -4688,7 +4688,7 @@
 std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs);
 std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs);
 
-class HInvokeVirtual FINAL : public HInvoke {
+class HInvokeVirtual final : public HInvoke {
  public:
   HInvokeVirtual(ArenaAllocator* allocator,
                  uint32_t number_of_arguments,
@@ -4709,9 +4709,9 @@
         vtable_index_(vtable_index) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool CanBeNull() const OVERRIDE {
+  bool CanBeNull() const override {
     switch (GetIntrinsic()) {
       case Intrinsics::kThreadCurrentThread:
       case Intrinsics::kStringBufferAppend:
@@ -4724,7 +4724,7 @@
     }
   }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     // TODO: Add implicit null checks in intrinsics.
     return (obj == InputAt(0)) && !IsIntrinsic();
   }
@@ -4741,7 +4741,7 @@
   const uint32_t vtable_index_;
 };
 
-class HInvokeInterface FINAL : public HInvoke {
+class HInvokeInterface final : public HInvoke {
  public:
   HInvokeInterface(ArenaAllocator* allocator,
                    uint32_t number_of_arguments,
@@ -4762,14 +4762,14 @@
         imt_index_(imt_index) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     // TODO: Add implicit null checks in intrinsics.
     return (obj == InputAt(0)) && !IsIntrinsic();
   }
 
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+  bool NeedsDexCacheOfDeclaringClass() const override {
     // The assembly stub currently needs it.
     return true;
   }
@@ -4786,7 +4786,7 @@
   const uint32_t imt_index_;
 };
 
-class HNeg FINAL : public HUnaryOperation {
+class HNeg final : public HUnaryOperation {
  public:
   HNeg(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HUnaryOperation(kNeg, result_type, input, dex_pc) {
@@ -4795,16 +4795,16 @@
 
   template <typename T> static T Compute(T x) { return -x; }
 
-  HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x) const override {
     return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x) const override {
     return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue()), GetDexPc());
   }
 
@@ -4814,7 +4814,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Neg);
 };
 
-class HNewArray FINAL : public HExpression<2> {
+class HNewArray final : public HExpression<2> {
  public:
   HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
       : HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
@@ -4822,15 +4822,15 @@
     SetRawInputAt(1, length);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   // Calls runtime so needs an environment.
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
   // May throw NegativeArraySizeException, OutOfMemoryError, etc.
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
   HLoadClass* GetLoadClass() const {
     DCHECK(InputAt(0)->IsLoadClass());
@@ -4847,7 +4847,7 @@
   DEFAULT_COPY_CONSTRUCTOR(NewArray);
 };
 
-class HAdd FINAL : public HBinaryOperation {
+class HAdd final : public HBinaryOperation {
  public:
   HAdd(DataType::Type result_type,
        HInstruction* left,
@@ -4856,23 +4856,23 @@
       : HBinaryOperation(kAdd, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x + y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -4883,7 +4883,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Add);
 };
 
-class HSub FINAL : public HBinaryOperation {
+class HSub final : public HBinaryOperation {
  public:
   HSub(DataType::Type result_type,
        HInstruction* left,
@@ -4894,19 +4894,19 @@
 
   template <typename T> static T Compute(T x, T y) { return x - y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -4917,7 +4917,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Sub);
 };
 
-class HMul FINAL : public HBinaryOperation {
+class HMul final : public HBinaryOperation {
  public:
   HMul(DataType::Type result_type,
        HInstruction* left,
@@ -4926,23 +4926,23 @@
       : HBinaryOperation(kMul, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x * y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -4953,7 +4953,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Mul);
 };
 
-class HDiv FINAL : public HBinaryOperation {
+class HDiv final : public HBinaryOperation {
  public:
   HDiv(DataType::Type result_type,
        HInstruction* left,
@@ -4978,19 +4978,19 @@
     return x / y;
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -5001,7 +5001,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Div);
 };
 
-class HRem FINAL : public HBinaryOperation {
+class HRem final : public HBinaryOperation {
  public:
   HRem(DataType::Type result_type,
        HInstruction* left,
@@ -5026,19 +5026,19 @@
     return std::fmod(x, y);
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
   }
@@ -5049,7 +5049,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Rem);
 };
 
-class HMin FINAL : public HBinaryOperation {
+class HMin final : public HBinaryOperation {
  public:
   HMin(DataType::Type result_type,
        HInstruction* left,
@@ -5057,26 +5057,26 @@
        uint32_t dex_pc)
       : HBinaryOperation(kMin, result_type, left, right, SideEffects::None(), dex_pc) {}
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   // Evaluation for integral values.
   template <typename T> static T ComputeIntegral(T x, T y) {
     return (x <= y) ? x : y;
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // TODO: Evaluation for floating-point values.
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
 
   DECLARE_INSTRUCTION(Min);
 
@@ -5084,7 +5084,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Min);
 };
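// Why the float/double Evaluate overloads of Min/Max return nullptr instead
// of folding (the reason is inferred here; the source only carries a TODO):
// the integral rule (x <= y) ? x : y does not match Java semantics for
// floating point, where NaN propagates and -0.0 is below +0.0. For example:
//
//   (0.0 <= -0.0) ? 0.0 : -0.0  // yields +0.0, but Math.min(0.0, -0.0) is -0.0
//   (NaN <= 1.0)  ? NaN : 1.0   // yields 1.0,  but Math.min(NaN, 1.0) is NaN
//
// Returning nullptr simply tells the constant folder to skip these cases.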
 
-class HMax FINAL : public HBinaryOperation {
+class HMax final : public HBinaryOperation {
  public:
   HMax(DataType::Type result_type,
        HInstruction* left,
@@ -5092,26 +5092,26 @@
        uint32_t dex_pc)
       : HBinaryOperation(kMax, result_type, left, right, SideEffects::None(), dex_pc) {}
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   // Evaluation for integral values.
   template <typename T> static T ComputeIntegral(T x, T y) {
     return (x >= y) ? x : y;
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
   }
   // TODO: Evaluation for floating-point values.
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override { return nullptr; }
 
   DECLARE_INSTRUCTION(Max);
 
@@ -5119,7 +5119,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Max);
 };
 
-class HAbs FINAL : public HUnaryOperation {
+class HAbs final : public HUnaryOperation {
  public:
   HAbs(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HUnaryOperation(kAbs, result_type, input, dex_pc) {}
@@ -5139,17 +5139,17 @@
     return bit_cast<T, S>(bits & std::numeric_limits<S>::max());
   }
 
-  HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x) const override {
     return GetBlock()->GetGraph()->GetIntConstant(ComputeIntegral(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x) const override {
     return GetBlock()->GetGraph()->GetLongConstant(ComputeIntegral(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x) const override {
     return GetBlock()->GetGraph()->GetFloatConstant(
         ComputeFP<float, int32_t>(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x) const override {
     return GetBlock()->GetGraph()->GetDoubleConstant(
         ComputeFP<double, int64_t>(x->GetValue()), GetDexPc());
   }
@@ -5160,7 +5160,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Abs);
 };
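// ComputeFP above clears the sign bit through the integer view of the value:
// std::numeric_limits<S>::max() is 0x7FFFFFFF (int32_t) or
// 0x7FFFFFFFFFFFFFFF (int64_t), so the AND maps -0.0 to +0.0 and leaves NaN
// payloads intact. A standalone sketch of the same trick, with std::memcpy
// standing in for art::bit_cast:

#include <cstdint>
#include <cstring>
static float AbsBitsSketch(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= 0x7FFFFFFFu;  // clear the sign bit only
  std::memcpy(&x, &bits, sizeof(bits));
  return x;  // AbsBitsSketch(-0.0f) == +0.0f; NaN stays NaN
}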
 
-class HDivZeroCheck FINAL : public HExpression<1> {
+class HDivZeroCheck final : public HExpression<1> {
  public:
   // `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException`
   // constructor.
@@ -5169,15 +5169,15 @@
     SetRawInputAt(0, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DECLARE_INSTRUCTION(DivZeroCheck);
 
@@ -5185,7 +5185,7 @@
   DEFAULT_COPY_CONSTRUCTOR(DivZeroCheck);
 };
 
-class HShl FINAL : public HBinaryOperation {
+class HShl final : public HBinaryOperation {
  public:
   HShl(DataType::Type result_type,
        HInstruction* value,
@@ -5201,26 +5201,26 @@
     return value << (distance & max_shift_distance);
   }
 
-  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
   }
   HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
-                      HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HLongConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
     UNREACHABLE();
   }
   HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
-                      HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
-                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5231,7 +5231,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Shl);
 };
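// As in Java, the shift distance is masked before use; assuming the usual
// masks (kMaxIntShiftDistance == 0x1f, kMaxLongShiftDistance == 0x3f), the
// distance is taken modulo the register width:

static_assert((1 << (33 & 0x1f)) == 2, "int shift distance taken mod 32");
static_assert((1LL << (65 & 0x3f)) == 2, "long shift distance taken mod 64");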
 
-class HShr FINAL : public HBinaryOperation {
+class HShr final : public HBinaryOperation {
  public:
   HShr(DataType::Type result_type,
        HInstruction* value,
@@ -5247,26 +5247,26 @@
     return value >> (distance & max_shift_distance);
   }
 
-  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
   }
   HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
-                      HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HLongConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
     UNREACHABLE();
   }
   HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
-                      HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
-                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5277,7 +5277,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Shr);
 };
 
-class HUShr FINAL : public HBinaryOperation {
+class HUShr final : public HBinaryOperation {
  public:
   HUShr(DataType::Type result_type,
         HInstruction* value,
@@ -5295,26 +5295,26 @@
     return static_cast<T>(ux >> (distance & max_shift_distance));
   }
 
-  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
   }
   HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
-                      HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HLongConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
     UNREACHABLE();
   }
   HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
-                      HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
-                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5325,7 +5325,7 @@
   DEFAULT_COPY_CONSTRUCTOR(UShr);
 };
 
-class HAnd FINAL : public HBinaryOperation {
+class HAnd final : public HBinaryOperation {
  public:
   HAnd(DataType::Type result_type,
        HInstruction* left,
@@ -5334,25 +5334,25 @@
       : HBinaryOperation(kAnd, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x & y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5363,7 +5363,7 @@
   DEFAULT_COPY_CONSTRUCTOR(And);
 };
 
-class HOr FINAL : public HBinaryOperation {
+class HOr final : public HBinaryOperation {
  public:
   HOr(DataType::Type result_type,
       HInstruction* left,
@@ -5372,25 +5372,25 @@
       : HBinaryOperation(kOr, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x | y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5401,7 +5401,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Or);
 };
 
-class HXor FINAL : public HBinaryOperation {
+class HXor final : public HBinaryOperation {
  public:
   HXor(DataType::Type result_type,
        HInstruction* left,
@@ -5410,25 +5410,25 @@
       : HBinaryOperation(kXor, result_type, left, right, SideEffects::None(), dex_pc) {
   }
 
-  bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const override { return true; }
 
   template <typename T> static T Compute(T x, T y) { return x ^ y; }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5439,7 +5439,7 @@
   DEFAULT_COPY_CONSTRUCTOR(Xor);
 };
 
-class HRor FINAL : public HBinaryOperation {
+class HRor final : public HBinaryOperation {
  public:
   HRor(DataType::Type result_type, HInstruction* value, HInstruction* distance)
       : HBinaryOperation(kRor, result_type, value, distance) {
@@ -5460,26 +5460,26 @@
     }
   }
 
-  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxIntShiftDistance), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* value, HIntConstant* distance) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(value->GetValue(), distance->GetValue(), kMaxLongShiftDistance), GetDexPc());
   }
   HConstant* Evaluate(HLongConstant* value ATTRIBUTE_UNUSED,
-                      HLongConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HLongConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for the (long, long) case.";
     UNREACHABLE();
   }
   HConstant* Evaluate(HFloatConstant* value ATTRIBUTE_UNUSED,
-                      HFloatConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* value ATTRIBUTE_UNUSED,
-                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* distance ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
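// HRor rotates right: bits shifted out at the low end re-enter at the high
// end, with the distance masked like the shifts above. A minimal standalone
// sketch of the semantics (helper name invented for illustration):

#include <cstdint>
static uint32_t RorSketch(uint32_t value, int distance) {
  distance &= 31;  // distance taken mod the register width
  if (distance == 0) return value;
  return (value >> distance) | (value << (32 - distance));
}
// e.g. RorSketch(0x00000001u, 1) == 0x80000000u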
@@ -5492,7 +5492,7 @@
 
 // The value of a parameter in this method. Its location depends on
 // the calling convention.
-class HParameterValue FINAL : public HExpression<0> {
+class HParameterValue final : public HExpression<0> {
  public:
   HParameterValue(const DexFile& dex_file,
                   dex::TypeIndex type_index,
@@ -5512,7 +5512,7 @@
   uint8_t GetIndex() const { return index_; }
   bool IsThis() const { return GetPackedFlag<kFlagIsThis>(); }
 
-  bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+  bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
   void SetCanBeNull(bool can_be_null) { SetPackedFlag<kFlagCanBeNull>(can_be_null); }
 
   DECLARE_INSTRUCTION(ParameterValue);
@@ -5535,30 +5535,30 @@
   const uint8_t index_;
 };
 
-class HNot FINAL : public HUnaryOperation {
+class HNot final : public HUnaryOperation {
  public:
   HNot(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HUnaryOperation(kNot, result_type, input, dex_pc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
   template <typename T> static T Compute(T x) { return ~x; }
 
-  HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x) const override {
     return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x) const override {
     return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
-  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5569,14 +5569,14 @@
   DEFAULT_COPY_CONSTRUCTOR(Not);
 };
 
-class HBooleanNot FINAL : public HUnaryOperation {
+class HBooleanNot final : public HUnaryOperation {
  public:
   explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HUnaryOperation(kBooleanNot, DataType::Type::kBool, input, dex_pc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
@@ -5585,18 +5585,18 @@
     return !x;
   }
 
-  HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x) const override {
     return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for long values";
     UNREACHABLE();
   }
-  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
-  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -5607,7 +5607,7 @@
   DEFAULT_COPY_CONSTRUCTOR(BooleanNot);
 };
 
-class HTypeConversion FINAL : public HExpression<1> {
+class HTypeConversion final : public HExpression<1> {
  public:
   // Instantiate a type conversion of `input` to `result_type`.
   HTypeConversion(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
@@ -5621,9 +5621,9 @@
   DataType::Type GetInputType() const { return GetInput()->GetType(); }
   DataType::Type GetResultType() const { return GetType(); }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
@@ -5639,7 +5639,7 @@
 
 static constexpr uint32_t kNoRegNumber = -1;
 
-class HNullCheck FINAL : public HExpression<1> {
+class HNullCheck final : public HExpression<1> {
  public:
   // `HNullCheck` can trigger GC, as it may call the `NullPointerException`
   // constructor.
@@ -5648,17 +5648,17 @@
     SetRawInputAt(0, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
   DECLARE_INSTRUCTION(NullCheck);
 
@@ -5703,7 +5703,7 @@
   const DexFile& dex_file_;
 };
 
-class HInstanceFieldGet FINAL : public HExpression<1> {
+class HInstanceFieldGet final : public HExpression<1> {
  public:
   HInstanceFieldGet(HInstruction* value,
                     ArtField* field,
@@ -5728,19 +5728,19 @@
     SetRawInputAt(0, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return !IsVolatile(); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     const HInstanceFieldGet* other_get = other->AsInstanceFieldGet();
     return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
   }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
   }
 
-  size_t ComputeHashCode() const OVERRIDE {
+  size_t ComputeHashCode() const override {
     return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
   }
 
@@ -5765,7 +5765,7 @@
   const FieldInfo field_info_;
 };
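// Implicit null checks replace the explicit test with a faulting load: if
// the field offset is small enough that null + offset still lands in the
// unmapped page at address zero, the segfault handler can rewrite the fault
// into a NullPointerException. That is presumably what
// art::CanDoImplicitNullCheckOn(offset) gates on; a hedged sketch with an
// assumed page size:
//
//   constexpr uint32_t kAssumedPageSize = 4096;  // illustrative value only
//   inline bool CanDoImplicitNullCheckOnSketch(uint32_t offset) {
//     return offset < kAssumedPageSize;  // load from null+offset must fault
//   }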
 
-class HInstanceFieldSet FINAL : public HExpression<2> {
+class HInstanceFieldSet final : public HExpression<2> {
  public:
   HInstanceFieldSet(HInstruction* object,
                     HInstruction* value,
@@ -5792,9 +5792,9 @@
     SetRawInputAt(1, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
   }
 
@@ -5820,7 +5820,7 @@
   const FieldInfo field_info_;
 };
 
-class HArrayGet FINAL : public HExpression<2> {
+class HArrayGet final : public HExpression<2> {
  public:
   HArrayGet(HInstruction* array,
             HInstruction* index,
@@ -5846,12 +5846,12 @@
     SetRawInputAt(1, index);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
     // TODO: We can be smarter here.
     // Currently, unless the array is the result of NewArray, the array access is always
     // preceded by some form of NullCheck necessary for the bounds check, usually
@@ -5911,7 +5911,7 @@
                 "Too many packed fields.");
 };
 
-class HArraySet FINAL : public HExpression<3> {
+class HArraySet final : public HExpression<3> {
  public:
   HArraySet(HInstruction* array,
             HInstruction* index,
@@ -5943,17 +5943,17 @@
     SetRawInputAt(2, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     // We call a runtime method to throw ArrayStoreException.
     return NeedsTypeCheck();
   }
 
   // Can throw ArrayStoreException.
-  bool CanThrow() const OVERRIDE { return NeedsTypeCheck(); }
+  bool CanThrow() const override { return NeedsTypeCheck(); }
 
-  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
     // TODO: Same as for ArrayGet.
     return false;
   }
@@ -6030,7 +6030,7 @@
       BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>;
 };
 
-class HArrayLength FINAL : public HExpression<1> {
+class HArrayLength final : public HExpression<1> {
  public:
   HArrayLength(HInstruction* array, uint32_t dex_pc, bool is_string_length = false)
       : HExpression(kArrayLength, DataType::Type::kInt32, SideEffects::None(), dex_pc) {
@@ -6040,12 +6040,12 @@
     SetRawInputAt(0, array);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+  bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
     return obj == InputAt(0);
   }
 
@@ -6068,7 +6068,7 @@
                 "Too many packed fields.");
 };
 
-class HBoundsCheck FINAL : public HExpression<2> {
+class HBoundsCheck final : public HExpression<2> {
  public:
   // `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
   // constructor.
@@ -6083,15 +6083,15 @@
     SetRawInputAt(1, length);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
 
@@ -6106,16 +6106,16 @@
   static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
 };
 
-class HSuspendCheck FINAL : public HExpression<0> {
+class HSuspendCheck final : public HExpression<0> {
  public:
   explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc)
       : HExpression(kSuspendCheck, SideEffects::CanTriggerGC(), dex_pc),
         slow_path_(nullptr) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     return true;
   }
 
@@ -6141,7 +6141,7 @@
       : HExpression<0>(kNativeDebugInfo, SideEffects::None(), dex_pc) {
   }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     return true;
   }
 
@@ -6154,7 +6154,7 @@
 /**
  * Instruction to load a Class object.
  */
-class HLoadClass FINAL : public HInstruction {
+class HLoadClass final : public HInstruction {
  public:
   // Determines how to load the Class.
   enum class LoadKind {
@@ -6217,7 +6217,7 @@
     SetPackedFlag<kFlagValidLoadedClassRTI>(false);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   void SetLoadKind(LoadKind load_kind);
 
@@ -6231,15 +6231,15 @@
            GetLoadKind() == LoadKind::kBssEntry;
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   bool InstructionDataEquals(const HInstruction* other) const;
 
-  size_t ComputeHashCode() const OVERRIDE { return type_index_.index_; }
+  size_t ComputeHashCode() const override { return type_index_.index_; }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     return CanCallRuntime();
   }
 
@@ -6257,7 +6257,7 @@
            GetLoadKind() == LoadKind::kBssEntry;
   }
 
-  bool CanThrow() const OVERRIDE {
+  bool CanThrow() const override {
     return NeedsAccessCheck() ||
            MustGenerateClinitCheck() ||
            // If the class is in the boot image, the lookup in the runtime call cannot throw.
@@ -6284,7 +6284,7 @@
   dex::TypeIndex GetTypeIndex() const { return type_index_; }
   const DexFile& GetDexFile() const { return dex_file_; }
 
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+  bool NeedsDexCacheOfDeclaringClass() const override {
     return GetLoadKind() == LoadKind::kRuntimeCall;
   }
 
@@ -6311,7 +6311,7 @@
   void AddSpecialInput(HInstruction* special_input);
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
     return ArrayRef<HUserRecord<HInstruction*>>(
         &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
   }
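
GetInputRecords() carries both specifiers: 'override' because it overrides the
base declaration, and 'final' because subclasses must not override it again. A
minimal sketch of the combination (illustrative names, not ART code):

    class Base {
     public:
      virtual ~Base() {}
      virtual int Records() { return 0; }
    };

    class Mid : public Base {
     public:
      int Records() override final { return 1; }  // Overrides Base::Records and seals it here.
    };

    class Leaf : public Mid {
      // int Records() override { return 2; }  // Would not compile: Records() is final in Mid.
    };
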
@@ -6392,7 +6392,7 @@
   special_input->AddUseAt(this, 0);
 }
 
-class HLoadString FINAL : public HInstruction {
+class HLoadString final : public HInstruction {
  public:
   // Determines how to load the String.
   enum class LoadKind {
@@ -6436,7 +6436,7 @@
     SetPackedField<LoadKindField>(LoadKind::kRuntimeCall);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   void SetLoadKind(LoadKind load_kind);
 
@@ -6466,15 +6466,15 @@
     string_ = str;
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
+  bool InstructionDataEquals(const HInstruction* other) const override;
 
-  size_t ComputeHashCode() const OVERRIDE { return string_index_.index_; }
+  size_t ComputeHashCode() const override { return string_index_.index_; }
 
   // Will call the runtime if we need to load the string through
   // the dex cache and the string is not guaranteed to be there yet.
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     LoadKind load_kind = GetLoadKind();
     if (load_kind == LoadKind::kBootImageLinkTimePcRelative ||
         load_kind == LoadKind::kBootImageRelRo ||
@@ -6485,12 +6485,12 @@
     return true;
   }
 
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+  bool NeedsDexCacheOfDeclaringClass() const override {
     return GetLoadKind() == LoadKind::kRuntimeCall;
   }
 
-  bool CanBeNull() const OVERRIDE { return false; }
-  bool CanThrow() const OVERRIDE { return NeedsEnvironment(); }
+  bool CanBeNull() const override { return false; }
+  bool CanThrow() const override { return NeedsEnvironment(); }
 
   static SideEffects SideEffectsForArchRuntimeCalls() {
     return SideEffects::CanTriggerGC();
@@ -6499,7 +6499,7 @@
   void AddSpecialInput(HInstruction* special_input);
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
     return ArrayRef<HUserRecord<HInstruction*>>(
         &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
   }
@@ -6561,7 +6561,7 @@
   special_input->AddUseAt(this, 0);
 }
 
-class HLoadMethodHandle FINAL : public HInstruction {
+class HLoadMethodHandle final : public HInstruction {
  public:
   HLoadMethodHandle(HCurrentMethod* current_method,
                     uint16_t method_handle_idx,
@@ -6577,12 +6577,12 @@
   }
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
     return ArrayRef<HUserRecord<HInstruction*>>(
         &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   uint16_t GetMethodHandleIndex() const { return method_handle_idx_; }
 
@@ -6605,7 +6605,7 @@
   const DexFile& dex_file_;
 };
 
-class HLoadMethodType FINAL : public HInstruction {
+class HLoadMethodType final : public HInstruction {
  public:
   HLoadMethodType(HCurrentMethod* current_method,
                   dex::ProtoIndex proto_index,
@@ -6621,12 +6621,12 @@
   }
 
   using HInstruction::GetInputRecords;  // Keep the const version visible.
-  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override final {
     return ArrayRef<HUserRecord<HInstruction*>>(
         &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   dex::ProtoIndex GetProtoIndex() const { return proto_index_; }
 
@@ -6652,7 +6652,7 @@
 /**
  * Performs an initialization check on its Class object input.
  */
-class HClinitCheck FINAL : public HExpression<1> {
+class HClinitCheck final : public HExpression<1> {
  public:
   HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
       : HExpression(
@@ -6663,17 +6663,17 @@
     SetRawInputAt(0, constant);
   }
   // TODO: Make ClinitCheck clonable.
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     // May call runtime to initialize the class.
     return true;
   }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   HLoadClass* GetLoadClass() const {
     DCHECK(InputAt(0)->IsLoadClass());
@@ -6687,7 +6687,7 @@
   DEFAULT_COPY_CONSTRUCTOR(ClinitCheck);
 };
 
-class HStaticFieldGet FINAL : public HExpression<1> {
+class HStaticFieldGet final : public HExpression<1> {
  public:
   HStaticFieldGet(HInstruction* cls,
                   ArtField* field,
@@ -6713,15 +6713,15 @@
   }
 
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return !IsVolatile(); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     const HStaticFieldGet* other_get = other->AsStaticFieldGet();
     return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
   }
 
-  size_t ComputeHashCode() const OVERRIDE {
+  size_t ComputeHashCode() const override {
     return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
   }
 
@@ -6746,7 +6746,7 @@
   const FieldInfo field_info_;
 };
 
-class HStaticFieldSet FINAL : public HExpression<2> {
+class HStaticFieldSet final : public HExpression<2> {
  public:
   HStaticFieldSet(HInstruction* cls,
                   HInstruction* value,
@@ -6773,7 +6773,7 @@
     SetRawInputAt(1, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
   const FieldInfo& GetFieldInfo() const { return field_info_; }
   MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
   DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
@@ -6797,7 +6797,7 @@
   const FieldInfo field_info_;
 };
 
-class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
+class HUnresolvedInstanceFieldGet final : public HExpression<1> {
  public:
   HUnresolvedInstanceFieldGet(HInstruction* obj,
                               DataType::Type field_type,
@@ -6811,9 +6811,9 @@
     SetRawInputAt(0, obj);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DataType::Type GetFieldType() const { return GetType(); }
   uint32_t GetFieldIndex() const { return field_index_; }
@@ -6827,7 +6827,7 @@
   const uint32_t field_index_;
 };
 
-class HUnresolvedInstanceFieldSet FINAL : public HExpression<2> {
+class HUnresolvedInstanceFieldSet final : public HExpression<2> {
  public:
   HUnresolvedInstanceFieldSet(HInstruction* obj,
                               HInstruction* value,
@@ -6842,9 +6842,9 @@
     SetRawInputAt(1, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
   uint32_t GetFieldIndex() const { return field_index_; }
@@ -6867,7 +6867,7 @@
   const uint32_t field_index_;
 };
 
-class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
+class HUnresolvedStaticFieldGet final : public HExpression<0> {
  public:
   HUnresolvedStaticFieldGet(DataType::Type field_type,
                             uint32_t field_index,
@@ -6879,9 +6879,9 @@
         field_index_(field_index) {
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DataType::Type GetFieldType() const { return GetType(); }
   uint32_t GetFieldIndex() const { return field_index_; }
@@ -6895,7 +6895,7 @@
   const uint32_t field_index_;
 };
 
-class HUnresolvedStaticFieldSet FINAL : public HExpression<1> {
+class HUnresolvedStaticFieldSet final : public HExpression<1> {
  public:
   HUnresolvedStaticFieldSet(HInstruction* value,
                             DataType::Type field_type,
@@ -6908,9 +6908,9 @@
     SetRawInputAt(0, value);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE { return true; }
-  bool CanThrow() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override { return true; }
+  bool CanThrow() const override { return true; }
 
   DataType::Type GetFieldType() const { return GetPackedField<FieldTypeField>(); }
   uint32_t GetFieldIndex() const { return field_index_; }
@@ -6934,13 +6934,13 @@
 };
 
 // Implement the move-exception DEX instruction.
-class HLoadException FINAL : public HExpression<0> {
+class HLoadException final : public HExpression<0> {
  public:
   explicit HLoadException(uint32_t dex_pc = kNoDexPc)
       : HExpression(kLoadException, DataType::Type::kReference, SideEffects::None(), dex_pc) {
   }
 
-  bool CanBeNull() const OVERRIDE { return false; }
+  bool CanBeNull() const override { return false; }
 
   DECLARE_INSTRUCTION(LoadException);
 
@@ -6950,7 +6950,7 @@
 
 // Implicit part of move-exception which clears thread-local exception storage.
 // Must not be removed because the runtime expects the TLS to get cleared.
-class HClearException FINAL : public HExpression<0> {
+class HClearException final : public HExpression<0> {
  public:
   explicit HClearException(uint32_t dex_pc = kNoDexPc)
       : HExpression(kClearException, SideEffects::AllWrites(), dex_pc) {
@@ -6962,20 +6962,20 @@
   DEFAULT_COPY_CONSTRUCTOR(ClearException);
 };
 
-class HThrow FINAL : public HExpression<1> {
+class HThrow final : public HExpression<1> {
  public:
   HThrow(HInstruction* exception, uint32_t dex_pc)
       : HExpression(kThrow, SideEffects::CanTriggerGC(), dex_pc) {
     SetRawInputAt(0, exception);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
-  bool AlwaysThrows() const OVERRIDE { return true; }
+  bool AlwaysThrows() const override { return true; }
 
   DECLARE_INSTRUCTION(Throw);
 
@@ -7062,10 +7062,10 @@
     return static_cast<uint32_t>(mask->AsIntConstant()->GetValue());
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsInstanceOf() || other->IsCheckCast()) << other->DebugName();
     return GetPackedFields() == down_cast<const HTypeCheckInstruction*>(other)->GetPackedFields();
   }
@@ -7110,7 +7110,7 @@
   Handle<mirror::Class> klass_;
 };
 
-class HInstanceOf FINAL : public HTypeCheckInstruction {
+class HInstanceOf final : public HTypeCheckInstruction {
  public:
   HInstanceOf(HInstruction* object,
               HInstruction* target_class_or_null,
@@ -7132,9 +7132,9 @@
                               bitstring_mask,
                               SideEffectsForArchRuntimeCalls(check_kind)) {}
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
-  bool NeedsEnvironment() const OVERRIDE {
+  bool NeedsEnvironment() const override {
     return CanCallRuntime(GetTypeCheckKind());
   }
 
@@ -7153,7 +7153,7 @@
   DEFAULT_COPY_CONSTRUCTOR(InstanceOf);
 };
 
-class HBoundType FINAL : public HExpression<1> {
+class HBoundType final : public HExpression<1> {
  public:
   explicit HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
       : HExpression(kBoundType, DataType::Type::kReference, SideEffects::None(), dex_pc),
@@ -7164,8 +7164,8 @@
     SetRawInputAt(0, input);
   }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
-  bool IsClonable() const OVERRIDE { return true; }
+  bool InstructionDataEquals(const HInstruction* other) const override;
+  bool IsClonable() const override { return true; }
 
   // {Get,Set}Upper* should only be used in reference type propagation.
   const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
@@ -7177,7 +7177,7 @@
     SetPackedFlag<kFlagCanBeNull>(can_be_null);
   }
 
-  bool CanBeNull() const OVERRIDE { return GetPackedFlag<kFlagCanBeNull>(); }
+  bool CanBeNull() const override { return GetPackedFlag<kFlagCanBeNull>(); }
 
   DECLARE_INSTRUCTION(BoundType);
 
@@ -7201,7 +7201,7 @@
   ReferenceTypeInfo upper_bound_;
 };
 
-class HCheckCast FINAL : public HTypeCheckInstruction {
+class HCheckCast final : public HTypeCheckInstruction {
  public:
   HCheckCast(HInstruction* object,
              HInstruction* target_class_or_null,
@@ -7223,13 +7223,13 @@
                               bitstring_mask,
                               SideEffects::CanTriggerGC()) {}
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool NeedsEnvironment() const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool NeedsEnvironment() const override {
     // Instruction may throw a CheckCastError.
     return true;
   }
 
-  bool CanThrow() const OVERRIDE { return true; }
+  bool CanThrow() const override { return true; }
 
   DECLARE_INSTRUCTION(CheckCast);
 
@@ -7263,7 +7263,7 @@
 };
 std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
 
-class HMemoryBarrier FINAL : public HExpression<0> {
+class HMemoryBarrier final : public HExpression<0> {
  public:
   explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc)
       : HExpression(kMemoryBarrier,
@@ -7272,7 +7272,7 @@
     SetPackedField<BarrierKindField>(barrier_kind);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); }
 
@@ -7348,7 +7348,7 @@
 // * CompilerDriver::RequiresConstructorBarrier
 // * QuasiAtomic::ThreadFenceForConstructor
 //
-class HConstructorFence FINAL : public HVariableInputSizeInstruction {
+class HConstructorFence final : public HVariableInputSizeInstruction {
                                   // A fence has variable inputs because the inputs can be removed
                                   // after prepare_for_register_allocation phase.
                                   // (TODO: In the future a fence could freeze multiple objects
@@ -7445,7 +7445,7 @@
   DEFAULT_COPY_CONSTRUCTOR(ConstructorFence);
 };
 
-class HMonitorOperation FINAL : public HExpression<1> {
+class HMonitorOperation final : public HExpression<1> {
  public:
   enum class OperationKind {
     kEnter,
@@ -7462,9 +7462,9 @@
   }
 
   // Instruction may go into runtime, so we need an environment.
-  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool NeedsEnvironment() const override { return true; }
 
-  bool CanThrow() const OVERRIDE {
+  bool CanThrow() const override {
     // Verifier guarantees that monitor-exit cannot throw.
     // This is important because it allows the HGraphBuilder to remove
     // a dead throw-catch loop generated for `synchronized` blocks/methods.
@@ -7490,7 +7490,7 @@
   using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>;
 };
 
-class HSelect FINAL : public HExpression<3> {
+class HSelect final : public HExpression<3> {
  public:
   HSelect(HInstruction* condition,
           HInstruction* true_value,
@@ -7508,17 +7508,17 @@
     SetRawInputAt(2, condition);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
   HInstruction* GetFalseValue() const { return InputAt(0); }
   HInstruction* GetTrueValue() const { return InputAt(1); }
   HInstruction* GetCondition() const { return InputAt(2); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
 
-  bool CanBeNull() const OVERRIDE {
+  bool CanBeNull() const override {
     return GetTrueValue()->CanBeNull() || GetFalseValue()->CanBeNull();
   }
 
@@ -7606,7 +7606,7 @@
 
 static constexpr size_t kDefaultNumberOfMoves = 4;
 
-class HParallelMove FINAL : public HExpression<0> {
+class HParallelMove final : public HExpression<0> {
  public:
   explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc)
       : HExpression(kParallelMove, SideEffects::None(), dex_pc),
@@ -7668,7 +7668,7 @@
 // never used across anything that can trigger GC.
 // The result of this instruction is not a pointer in the sense of `DataType::Type::kReference`.
 // So we represent it by the type `DataType::Type::kInt32`.
-class HIntermediateAddress FINAL : public HExpression<2> {
+class HIntermediateAddress final : public HExpression<2> {
  public:
   HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
       : HExpression(kIntermediateAddress,
@@ -7682,12 +7682,12 @@
     SetRawInputAt(1, offset);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool IsActualObject() const OVERRIDE { return false; }
+  bool IsActualObject() const override { return false; }
 
   HInstruction* GetBaseAddress() const { return InputAt(0); }
   HInstruction* GetOffset() const { return InputAt(1); }
@@ -7760,7 +7760,7 @@
 
   // Visit functions that delegate to the super class.
 #define DECLARE_VISIT_INSTRUCTION(name, super)                                        \
-  void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); }
+  void Visit##name(H##name* instr) override { Visit##super(instr); }
 
   FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
@@ -7782,7 +7782,7 @@
   explicit CloneAndReplaceInstructionVisitor(HGraph* graph)
       : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {}
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     if (instruction->IsClonable()) {
       ReplaceInstrOrPhiByClone(instruction);
       instr_replaced_by_clones_count_++;
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index 05b27a7..4993f57 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -30,7 +30,7 @@
                     kNoDexPc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(MipsComputeBaseMethodAddress);
 
@@ -39,7 +39,7 @@
 };
 
 // Mips version of HPackedSwitch that holds a pointer to the base method address.
-class HMipsPackedSwitch FINAL : public HExpression<2> {
+class HMipsPackedSwitch final : public HExpression<2> {
  public:
   HMipsPackedSwitch(int32_t start_value,
                     int32_t num_entries,
@@ -53,7 +53,7 @@
     SetRawInputAt(1, method_base);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   int32_t GetStartValue() const { return start_value_; }
 
@@ -91,7 +91,7 @@
 //
 // Note: as the instruction doesn't involve the base array address in its computations, it has
 // no side effects.
-class HIntermediateArrayAddressIndex FINAL : public HExpression<2> {
+class HIntermediateArrayAddressIndex final : public HExpression<2> {
  public:
   HIntermediateArrayAddressIndex(HInstruction* index, HInstruction* shift, uint32_t dex_pc)
       : HExpression(kIntermediateArrayAddressIndex,
@@ -102,11 +102,11 @@
     SetRawInputAt(1, shift);
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool IsActualObject() const OVERRIDE { return false; }
+  bool IsActualObject() const override { return false; }
 
   HInstruction* GetIndex() const { return InputAt(0); }
   HInstruction* GetShift() const { return InputAt(1); }
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 29358e1..7dcac17 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -24,7 +24,7 @@
 
 namespace art {
 
-class HMultiplyAccumulate FINAL : public HExpression<3> {
+class HMultiplyAccumulate final : public HExpression<3> {
  public:
   HMultiplyAccumulate(DataType::Type type,
                       InstructionKind op,
@@ -39,14 +39,14 @@
     SetRawInputAt(kInputMulRightIndex, mul_right);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
+  bool IsClonable() const override { return true; }
 
   static constexpr int kInputAccumulatorIndex = 0;
   static constexpr int kInputMulLeftIndex = 1;
   static constexpr int kInputMulRightIndex = 2;
 
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other) const override {
     return op_kind_ == other->AsMultiplyAccumulate()->op_kind_;
   }
 
@@ -62,7 +62,7 @@
   const InstructionKind op_kind_;
 };
 
-class HBitwiseNegatedRight FINAL : public HBinaryOperation {
+class HBitwiseNegatedRight final : public HBinaryOperation {
  public:
   HBitwiseNegatedRight(DataType::Type result_type,
                        InstructionKind op,
@@ -97,21 +97,21 @@
     }
   }
 
-  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
     return GetBlock()->GetGraph()->GetIntConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
-  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
     return GetBlock()->GetGraph()->GetLongConstant(
         Compute(x->GetValue(), y->GetValue()), GetDexPc());
   }
   HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
-                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for float values";
     UNREACHABLE();
   }
   HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
-                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
     LOG(FATAL) << DebugName() << " is not defined for double values";
     UNREACHABLE();
   }
@@ -145,7 +145,7 @@
 //
 // Note: as the instruction doesn't involve the base array address in its computations, it has
 // no side effects (in contrast to HIntermediateAddress).
-class HIntermediateAddressIndex FINAL : public HExpression<3> {
+class HIntermediateAddressIndex final : public HExpression<3> {
  public:
   HIntermediateAddressIndex(
       HInstruction* index, HInstruction* offset, HInstruction* shift, uint32_t dex_pc)
@@ -158,12 +158,12 @@
     SetRawInputAt(2, shift);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
     return true;
   }
-  bool IsActualObject() const OVERRIDE { return false; }
+  bool IsActualObject() const override { return false; }
 
   HInstruction* GetIndex() const { return InputAt(0); }
   HInstruction* GetOffset() const { return InputAt(1); }
@@ -175,7 +175,7 @@
   DEFAULT_COPY_CONSTRUCTOR(IntermediateAddressIndex);
 };
 
-class HDataProcWithShifterOp FINAL : public HExpression<2> {
+class HDataProcWithShifterOp final : public HExpression<2> {
  public:
   enum OpKind {
     kLSL,   // Logical shift left.
@@ -212,9 +212,9 @@
     SetRawInputAt(1, right);
   }
 
-  bool IsClonable() const OVERRIDE { return true; }
-  bool CanBeMoved() const OVERRIDE { return true; }
-  bool InstructionDataEquals(const HInstruction* other_instr) const OVERRIDE {
+  bool IsClonable() const override { return true; }
+  bool CanBeMoved() const override { return true; }
+  bool InstructionDataEquals(const HInstruction* other_instr) const override {
     const HDataProcWithShifterOp* other = other_instr->AsDataProcWithShifterOp();
     return instr_kind_ == other->instr_kind_ &&
         op_kind_ == other->op_kind_ &&
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 95fb5ab..c7539f2 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -117,12 +117,12 @@
   // Note: For newly introduced vector instructions HScheduler${ARCH}::IsSchedulingBarrier must be
   // altered to return true if the instruction might reside outside the SIMD loop body since SIMD
   // registers are not kept alive across vector loop boundaries (yet).
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   // Tests if all data of a vector node (vector length and packed type) is equal.
   // Each concrete implementation that adds more fields should test equality of
   // those fields in its own method *and* call all super methods.
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecOperation());
     const HVecOperation* o = other->AsVecOperation();
     return GetVectorLength() == o->GetVectorLength() && GetPackedType() == o->GetPackedType();
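
The convention described in the comment above, sketched on a hypothetical
two-level hierarchy (not the real HVec classes): each subclass tests equality of
the fields it adds and also calls its super method.

    struct Node {
      virtual ~Node() {}
      virtual bool DataEquals(const Node* other) const { return true; }
    };

    struct VecNode : Node {
      size_t length = 0;
      bool DataEquals(const Node* other) const override {
        const VecNode* o = static_cast<const VecNode*>(other);
        // Compare this class's own fields *and* call the super method.
        return Node::DataEquals(other) && length == o->length;
      }
    };
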
@@ -280,7 +280,7 @@
   HInstruction* GetArray() const { return InputAt(0); }
   HInstruction* GetIndex() const { return InputAt(1); }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecMemoryOperation());
     const HVecMemoryOperation* o = other->AsVecMemoryOperation();
     return HVecOperation::InstructionDataEquals(o) && GetAlignment() == o->GetAlignment();
@@ -315,7 +315,7 @@
 
 // Replicates the given scalar into a vector,
 // viz. replicate(x) = [ x, .. , x ].
-class HVecReplicateScalar FINAL : public HVecUnaryOperation {
+class HVecReplicateScalar final : public HVecUnaryOperation {
  public:
   HVecReplicateScalar(ArenaAllocator* allocator,
                       HInstruction* scalar,
@@ -329,7 +329,7 @@
 
   // A replicate needs to stay in place, since SIMD registers are not
   // kept alive across vector loop boundaries (yet).
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(VecReplicateScalar);
 
@@ -341,7 +341,7 @@
 // viz. extract[ x1, .. , xn ] = x_i.
 //
 // TODO: for now only i == 1 case supported.
-class HVecExtractScalar FINAL : public HVecUnaryOperation {
+class HVecExtractScalar final : public HVecUnaryOperation {
  public:
   HVecExtractScalar(ArenaAllocator* allocator,
                     HInstruction* input,
@@ -361,7 +361,7 @@
 
   // An extract needs to stay in place, since SIMD registers are not
   // kept alive across vector loop boundaries (yet).
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(VecExtractScalar);
 
@@ -372,7 +372,7 @@
 // Reduces the given vector into the first element as sum/min/max,
 // viz. sum-reduce[ x1, .. , xn ] = [ y, ---- ], where y = sum xi
 // and the "-" denotes "don't care" (implementation dependent).
-class HVecReduce FINAL : public HVecUnaryOperation {
+class HVecReduce final : public HVecUnaryOperation {
  public:
   enum ReductionKind {
     kSum = 1,
@@ -393,9 +393,9 @@
 
   ReductionKind GetKind() const { return kind_; }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecReduce());
     const HVecReduce* o = other->AsVecReduce();
     return HVecOperation::InstructionDataEquals(o) && GetKind() == o->GetKind();
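
A scalar model of the three reduction kinds; lanes other than the first become
"don't care" values and are simply left untouched here. The helper is
illustrative, not the actual codegen:

    #include <algorithm>
    #include <vector>

    enum class ReductionKind { kSum, kMin, kMax };

    // Reduce v into lane 0; lanes 1..n-1 are unspecified afterwards.
    // Assumes v is non-empty.
    void Reduce(std::vector<int>& v, ReductionKind kind) {
      int acc = v[0];
      for (size_t i = 1; i < v.size(); ++i) {
        switch (kind) {
          case ReductionKind::kSum: acc += v[i]; break;
          case ReductionKind::kMin: acc = std::min(acc, v[i]); break;
          case ReductionKind::kMax: acc = std::max(acc, v[i]); break;
        }
      }
      v[0] = acc;
    }
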
@@ -412,7 +412,7 @@
 
 // Converts every component in the vector,
 // viz. cnv[ x1, .. , xn ]  = [ cnv(x1), .. , cnv(xn) ].
-class HVecCnv FINAL : public HVecUnaryOperation {
+class HVecCnv final : public HVecUnaryOperation {
  public:
   HVecCnv(ArenaAllocator* allocator,
           HInstruction* input,
@@ -427,7 +427,7 @@
   DataType::Type GetInputType() const { return InputAt(0)->AsVecOperation()->GetPackedType(); }
   DataType::Type GetResultType() const { return GetPackedType(); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecCnv);
 
@@ -437,7 +437,7 @@
 
 // Negates every component in the vector,
 // viz. neg[ x1, .. , xn ]  = [ -x1, .. , -xn ].
-class HVecNeg FINAL : public HVecUnaryOperation {
+class HVecNeg final : public HVecUnaryOperation {
  public:
   HVecNeg(ArenaAllocator* allocator,
           HInstruction* input,
@@ -448,7 +448,7 @@
     DCHECK(HasConsistentPackedTypes(input, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecNeg);
 
@@ -459,7 +459,7 @@
 // Takes absolute value of every component in the vector,
 // viz. abs[ x1, .. , xn ]  = [ |x1|, .. , |xn| ]
 // for signed operand x.
-class HVecAbs FINAL : public HVecUnaryOperation {
+class HVecAbs final : public HVecUnaryOperation {
  public:
   HVecAbs(ArenaAllocator* allocator,
           HInstruction* input,
@@ -470,7 +470,7 @@
     DCHECK(HasConsistentPackedTypes(input, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecAbs);
 
@@ -481,7 +481,7 @@
 // Bitwise- or boolean-nots every component in the vector,
 // viz. not[ x1, .. , xn ]  = [ ~x1, .. , ~xn ], or
 //      not[ x1, .. , xn ]  = [ !x1, .. , !xn ] for boolean.
-class HVecNot FINAL : public HVecUnaryOperation {
+class HVecNot final : public HVecUnaryOperation {
  public:
   HVecNot(ArenaAllocator* allocator,
           HInstruction* input,
@@ -492,7 +492,7 @@
     DCHECK(input->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecNot);
 
@@ -506,7 +506,7 @@
 
 // Adds every component in the two vectors,
 // viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ].
-class HVecAdd FINAL : public HVecBinaryOperation {
+class HVecAdd final : public HVecBinaryOperation {
  public:
   HVecAdd(ArenaAllocator* allocator,
           HInstruction* left,
@@ -519,7 +519,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecAdd);
 
@@ -530,7 +530,7 @@
 // Adds every component in the two vectors using saturation arithmetic,
 // viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 +_sat y1, .. , xn +_sat yn ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecSaturationAdd FINAL : public HVecBinaryOperation {
+class HVecSaturationAdd final : public HVecBinaryOperation {
  public:
   HVecSaturationAdd(ArenaAllocator* allocator,
                     HInstruction* left,
@@ -544,7 +544,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecSaturationAdd);
 
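Per lane, '+_sat' clamps to the packed type's range instead of wrapping. A
scalar sketch for a signed 16-bit lane, written against C++14 like the rest of
ART (illustrative helper, not ART code):

    #include <algorithm>
    #include <cstdint>

    int16_t SaturationAdd(int16_t x, int16_t y) {
      // Widen first so the addition itself cannot overflow, then clamp.
      int32_t wide = static_cast<int32_t>(x) + static_cast<int32_t>(y);
      return static_cast<int16_t>(
          std::max<int32_t>(INT16_MIN, std::min<int32_t>(wide, INT16_MAX)));
    }

    // SaturationAdd(30000, 10000) == 32767 (INT16_MAX), not the wrapped value.
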
@@ -556,7 +556,7 @@
 // rounded   [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
 // truncated [ x1, .. , xn ] hadd  [ y1, .. , yn ] = [ (x1 + y1)     >> 1, .. , (xn + yn )    >> 1 ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecHalvingAdd FINAL : public HVecBinaryOperation {
+class HVecHalvingAdd final : public HVecBinaryOperation {
  public:
   HVecHalvingAdd(ArenaAllocator* allocator,
                  HInstruction* left,
@@ -574,9 +574,9 @@
 
   bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecHalvingAdd());
     const HVecHalvingAdd* o = other->AsVecHalvingAdd();
     return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded();
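
The rounded/truncated distinction from the comment, per lane; widening first
keeps x + y + 1 from overflowing the lane type (illustrative helper, not ART
code):

    #include <cstdint>

    int16_t HalvingAdd(int16_t x, int16_t y, bool rounded) {
      int32_t sum = static_cast<int32_t>(x) + static_cast<int32_t>(y);
      return static_cast<int16_t>(rounded ? (sum + 1) >> 1 : sum >> 1);
    }

    // HalvingAdd(1, 2, /*rounded=*/ true) == 2; the truncated form gives 1.
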
@@ -596,7 +596,7 @@
 
 // Subtracts every component in the two vectors,
 // viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ].
-class HVecSub FINAL : public HVecBinaryOperation {
+class HVecSub final : public HVecBinaryOperation {
  public:
   HVecSub(ArenaAllocator* allocator,
           HInstruction* left,
@@ -609,7 +609,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecSub);
 
@@ -620,7 +620,7 @@
 // Subtracts every component in the two vectors using saturation arithmetic,
 // viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 -_sat y1, .. , xn -_sat yn ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecSaturationSub FINAL : public HVecBinaryOperation {
+class HVecSaturationSub final : public HVecBinaryOperation {
  public:
   HVecSaturationSub(ArenaAllocator* allocator,
                     HInstruction* left,
@@ -634,7 +634,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecSaturationSub);
 
@@ -644,7 +644,7 @@
 
 // Multiplies every component in the two vectors,
 // viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ].
-class HVecMul FINAL : public HVecBinaryOperation {
+class HVecMul final : public HVecBinaryOperation {
  public:
   HVecMul(ArenaAllocator* allocator,
           HInstruction* left,
@@ -657,7 +657,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecMul);
 
@@ -667,7 +667,7 @@
 
 // Divides every component in the two vectors,
 // viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ].
-class HVecDiv FINAL : public HVecBinaryOperation {
+class HVecDiv final : public HVecBinaryOperation {
  public:
   HVecDiv(ArenaAllocator* allocator,
           HInstruction* left,
@@ -680,7 +680,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecDiv);
 
@@ -691,7 +691,7 @@
 // Takes minimum of every component in the two vectors,
 // viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecMin FINAL : public HVecBinaryOperation {
+class HVecMin final : public HVecBinaryOperation {
  public:
   HVecMin(ArenaAllocator* allocator,
           HInstruction* left,
@@ -704,7 +704,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecMin);
 
@@ -715,7 +715,7 @@
 // Takes maximum of every component in the two vectors,
 // viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
 // for either both signed or both unsigned operands x, y (reflected in packed_type).
-class HVecMax FINAL : public HVecBinaryOperation {
+class HVecMax final : public HVecBinaryOperation {
  public:
   HVecMax(ArenaAllocator* allocator,
           HInstruction* left,
@@ -728,7 +728,7 @@
     DCHECK(HasConsistentPackedTypes(right, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecMax);
 
@@ -738,7 +738,7 @@
 
 // Bitwise-ands every component in the two vectors,
 // viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ].
-class HVecAnd FINAL : public HVecBinaryOperation {
+class HVecAnd final : public HVecBinaryOperation {
  public:
   HVecAnd(ArenaAllocator* allocator,
           HInstruction* left,
@@ -750,7 +750,7 @@
     DCHECK(left->IsVecOperation() && right->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecAnd);
 
@@ -760,7 +760,7 @@
 
 // Bitwise-and-nots every component in the two vectors,
 // viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ].
-class HVecAndNot FINAL : public HVecBinaryOperation {
+class HVecAndNot final : public HVecBinaryOperation {
  public:
   HVecAndNot(ArenaAllocator* allocator,
              HInstruction* left,
@@ -773,7 +773,7 @@
     DCHECK(left->IsVecOperation() && right->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecAndNot);
 
@@ -783,7 +783,7 @@
 
 // Bitwise-ors every component in the two vectors,
 // viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ].
-class HVecOr FINAL : public HVecBinaryOperation {
+class HVecOr final : public HVecBinaryOperation {
  public:
   HVecOr(ArenaAllocator* allocator,
          HInstruction* left,
@@ -795,7 +795,7 @@
     DCHECK(left->IsVecOperation() && right->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecOr);
 
@@ -805,7 +805,7 @@
 
 // Bitwise-xors every component in the two vectors,
 // viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ].
-class HVecXor FINAL : public HVecBinaryOperation {
+class HVecXor final : public HVecBinaryOperation {
  public:
   HVecXor(ArenaAllocator* allocator,
           HInstruction* left,
@@ -817,7 +817,7 @@
     DCHECK(left->IsVecOperation() && right->IsVecOperation());
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecXor);
 
@@ -827,7 +827,7 @@
 
 // Logically shifts every component in the vector left by the given distance,
 // viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ].
-class HVecShl FINAL : public HVecBinaryOperation {
+class HVecShl final : public HVecBinaryOperation {
  public:
   HVecShl(ArenaAllocator* allocator,
           HInstruction* left,
@@ -839,7 +839,7 @@
     DCHECK(HasConsistentPackedTypes(left, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecShl);
 
@@ -849,7 +849,7 @@
 
 // Arithmetically shifts every component in the vector right by the given distance,
 // viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ].
-class HVecShr FINAL : public HVecBinaryOperation {
+class HVecShr final : public HVecBinaryOperation {
  public:
   HVecShr(ArenaAllocator* allocator,
           HInstruction* left,
@@ -861,7 +861,7 @@
     DCHECK(HasConsistentPackedTypes(left, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecShr);
 
@@ -871,7 +871,7 @@
 
 // Logically shifts every component in the vector right by the given distance,
 // viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ].
-class HVecUShr FINAL : public HVecBinaryOperation {
+class HVecUShr final : public HVecBinaryOperation {
  public:
   HVecUShr(ArenaAllocator* allocator,
            HInstruction* left,
@@ -883,7 +883,7 @@
     DCHECK(HasConsistentPackedTypes(left, packed_type));
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(VecUShr);
 
@@ -898,7 +898,7 @@
 // Assigns the given scalar elements to a vector,
 // viz. set( array(x1, .. , xn) ) = [ x1, .. ,            xn ] if n == m,
 //      set( array(x1, .. , xm) ) = [ x1, .. , xm, 0, .. , 0 ] if m <  n.
-class HVecSetScalars FINAL : public HVecOperation {
+class HVecSetScalars final : public HVecOperation {
  public:
   HVecSetScalars(ArenaAllocator* allocator,
                  HInstruction* scalars[],
@@ -921,7 +921,7 @@
 
   // Setting scalars needs to stay in place, since SIMD registers are not
   // kept alive across vector loop boundaries (yet).
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(VecSetScalars);
 
@@ -934,7 +934,7 @@
 // For floating point types, Java rounding behavior must be preserved; the products are rounded to
 // the proper precision before being added. "Fused" multiply-add operations available on several
 // architectures are not usable since they would violate Java language rules.
-class HVecMultiplyAccumulate FINAL : public HVecOperation {
+class HVecMultiplyAccumulate final : public HVecOperation {
  public:
   HVecMultiplyAccumulate(ArenaAllocator* allocator,
                          InstructionKind op,
@@ -964,9 +964,9 @@
     SetRawInputAt(2, mul_right);
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecMultiplyAccumulate());
     const HVecMultiplyAccumulate* o = other->AsVecMultiplyAccumulate();
     return HVecOperation::InstructionDataEquals(o) && GetOpKind() == o->GetOpKind();
@@ -989,7 +989,7 @@
 // viz. SAD([ a1, .. , am ], [ x1, .. , xn ], [ y1, .. , yn ]) =
 //          [ a1 + sum abs(xi-yi), .. , am + sum abs(xj-yj) ],
 //      for m <= n, non-overlapping sums, and signed operands x, y.
-class HVecSADAccumulate FINAL : public HVecOperation {
+class HVecSADAccumulate final : public HVecOperation {
  public:
   HVecSADAccumulate(ArenaAllocator* allocator,
                     HInstruction* accumulator,
@@ -1023,7 +1023,7 @@
 
 // Loads a vector from memory, viz. load(mem, 1)
 // yields the vector [ mem(1), .. , mem(n) ].
-class HVecLoad FINAL : public HVecMemoryOperation {
+class HVecLoad final : public HVecMemoryOperation {
  public:
   HVecLoad(ArenaAllocator* allocator,
            HInstruction* base,
@@ -1047,9 +1047,9 @@
 
   bool IsStringCharAt() const { return GetPackedFlag<kFieldIsStringCharAt>(); }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
-  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
+  bool InstructionDataEquals(const HInstruction* other) const override {
     DCHECK(other->IsVecLoad());
     const HVecLoad* o = other->AsVecLoad();
     return HVecMemoryOperation::InstructionDataEquals(o) && IsStringCharAt() == o->IsStringCharAt();
@@ -1069,7 +1069,7 @@
 
 // Stores a vector to memory, viz. store(mem, 1, [ x1, .. , xn ])
 // sets mem(1) = x1, .. , mem(n) = xn.
-class HVecStore FINAL : public HVecMemoryOperation {
+class HVecStore final : public HVecMemoryOperation {
  public:
   HVecStore(ArenaAllocator* allocator,
             HInstruction* base,
@@ -1093,7 +1093,7 @@
   }
 
   // A store needs to stay in place.
-  bool CanBeMoved() const OVERRIDE { return false; }
+  bool CanBeMoved() const override { return false; }
 
   DECLARE_INSTRUCTION(VecStore);
 
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index d1e7f68..a551104 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -20,7 +20,7 @@
 namespace art {
 
 // Compute the address of the method for X86 Constant area support.
-class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
+class HX86ComputeBaseMethodAddress final : public HExpression<0> {
  public:
   // Treat the value as an int32_t, but it is really a 32-bit native pointer.
   HX86ComputeBaseMethodAddress()
@@ -30,7 +30,7 @@
                     kNoDexPc) {
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const override { return true; }
 
   DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
 
@@ -39,7 +39,7 @@
 };
 
 // Load a constant value from the constant table.
-class HX86LoadFromConstantTable FINAL : public HExpression<2> {
+class HX86LoadFromConstantTable final : public HExpression<2> {
  public:
   HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base,
                             HConstant* constant)
@@ -66,7 +66,7 @@
 };
 
 // Version of HNeg with access to the constant table for FP types.
-class HX86FPNeg FINAL : public HExpression<2> {
+class HX86FPNeg final : public HExpression<2> {
  public:
   HX86FPNeg(DataType::Type result_type,
             HInstruction* input,
@@ -89,7 +89,7 @@
 };
 
 // X86 version of HPackedSwitch that holds a pointer to the base method address.
-class HX86PackedSwitch FINAL : public HExpression<2> {
+class HX86PackedSwitch final : public HExpression<2> {
  public:
   HX86PackedSwitch(int32_t start_value,
                    int32_t num_entries,
@@ -103,7 +103,7 @@
     SetRawInputAt(1, method_base);
   }
 
-  bool IsControlFlow() const OVERRIDE { return true; }
+  bool IsControlFlow() const override { return true; }
 
   int32_t GetStartValue() const { return start_value_; }
 
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 04301f5..be1f7ea 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -133,7 +133,7 @@
       return memory_.data();
     }
 
-    ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+    ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
 
    private:
     std::vector<uint8_t> memory_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f52b96d..0a74705 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -74,7 +74,7 @@
 /**
  * Used by the code generator to allocate the code in a vector.
  */
-class CodeVectorAllocator FINAL : public CodeAllocator {
+class CodeVectorAllocator final : public CodeAllocator {
  public:
   explicit CodeVectorAllocator(ArenaAllocator* allocator)
       : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
@@ -84,7 +84,7 @@
     return &memory_[0];
   }
 
-  ArrayRef<const uint8_t> GetMemory() const OVERRIDE { return ArrayRef<const uint8_t>(memory_); }
+  ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
   uint8_t* GetData() { return memory_.data(); }
 
  private:
@@ -264,12 +264,12 @@
   PassObserver* const pass_observer_;
 };
 
-class OptimizingCompiler FINAL : public Compiler {
+class OptimizingCompiler final : public Compiler {
  public:
   explicit OptimizingCompiler(CompilerDriver* driver);
-  ~OptimizingCompiler() OVERRIDE;
+  ~OptimizingCompiler() override;
 
-  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE;
+  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
 
   CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                           uint32_t access_flags,
@@ -278,29 +278,29 @@
                           uint32_t method_idx,
                           Handle<mirror::ClassLoader> class_loader,
                           const DexFile& dex_file,
-                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;
+                          Handle<mirror::DexCache> dex_cache) const override;
 
   CompiledMethod* JniCompile(uint32_t access_flags,
                              uint32_t method_idx,
                              const DexFile& dex_file,
-                             Handle<mirror::DexCache> dex_cache) const OVERRIDE;
+                             Handle<mirror::DexCache> dex_cache) const override;
 
-  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
+  uintptr_t GetEntryPointOf(ArtMethod* method) const override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
         InstructionSetPointerSize(GetCompilerDriver()->GetCompilerOptions().GetInstructionSet())));
   }
 
-  void Init() OVERRIDE;
+  void Init() override;
 
-  void UnInit() const OVERRIDE;
+  void UnInit() const override;
 
   bool JitCompile(Thread* self,
                   jit::JitCodeCache* code_cache,
                   ArtMethod* method,
                   bool osr,
                   jit::JitLogger* jit_logger)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
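
Note '~OptimizingCompiler() override;' above: destructors take part in
overriding too, so the specifier doubles as a compile-time check that the
base class destructor is virtual. A sketch under that assumption, with
stand-in names:

    struct CompilerLike {
      virtual ~CompilerLike() {}  // virtual, so deletion via a base pointer is safe
    };

    struct OptimizingCompilerLike final : public CompilerLike {
      // Compiles only because ~CompilerLike() is virtual.
      ~OptimizingCompilerLike() override {}
    };
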
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index e6e069f..5fadcab 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -58,7 +58,7 @@
   virtual ~ParallelMoveResolverWithSwap() {}
 
   // Resolve a set of parallel moves, emitting assembler instructions.
-  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
+  void EmitNativeCode(HParallelMove* parallel_move) override;
 
  protected:
   class ScratchRegisterScope : public ValueObject {
@@ -133,7 +133,7 @@
   virtual ~ParallelMoveResolverNoSwap() {}
 
   // Resolve a set of parallel moves, emitting assembler instructions.
-  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;
+  void EmitNativeCode(HParallelMove* parallel_move) override;
 
  protected:
   // Called at the beginning of EmitNativeCode(). A subclass may put some architecture dependent
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index be35201..399a6d8 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -56,7 +56,7 @@
   explicit TestParallelMoveResolverWithSwap(ArenaAllocator* allocator)
       : ParallelMoveResolverWithSwap(allocator) {}
 
-  void EmitMove(size_t index) OVERRIDE {
+  void EmitMove(size_t index) override {
     MoveOperands* move = moves_[index];
     if (!message_.str().empty()) {
       message_ << " ";
@@ -68,7 +68,7 @@
     message_ << ")";
   }
 
-  void EmitSwap(size_t index) OVERRIDE {
+  void EmitSwap(size_t index) override {
     MoveOperands* move = moves_[index];
     if (!message_.str().empty()) {
       message_ << " ";
@@ -80,8 +80,8 @@
     message_ << ")";
   }
 
-  void SpillScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
-  void RestoreScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
+  void SpillScratch(int reg ATTRIBUTE_UNUSED) override {}
+  void RestoreScratch(int reg ATTRIBUTE_UNUSED) override {}
 
   std::string GetMessage() const {
     return  message_.str();
@@ -99,13 +99,13 @@
   explicit TestParallelMoveResolverNoSwap(ArenaAllocator* allocator)
       : ParallelMoveResolverNoSwap(allocator), scratch_index_(kScratchRegisterStartIndexForTest) {}
 
-  void PrepareForEmitNativeCode() OVERRIDE {
+  void PrepareForEmitNativeCode() override {
     scratch_index_ = kScratchRegisterStartIndexForTest;
   }
 
-  void FinishEmitNativeCode() OVERRIDE {}
+  void FinishEmitNativeCode() override {}
 
-  Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE {
+  Location AllocateScratchLocationFor(Location::Kind kind) override {
     if (kind == Location::kStackSlot || kind == Location::kFpuRegister ||
         kind == Location::kRegister) {
       kind = Location::kRegister;
@@ -125,9 +125,9 @@
     return scratch;
   }
 
-  void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) OVERRIDE {}
+  void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) override {}
 
-  void EmitMove(size_t index) OVERRIDE {
+  void EmitMove(size_t index) override {
     MoveOperands* move = moves_[index];
     if (!message_.str().empty()) {
       message_ << " ";
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index a7e97a1..05208ff 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -58,7 +58,7 @@
     DCHECK(base_ != nullptr);
   }
 
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
     // If this is an invoke with a PC-relative load kind,
     // we need to add the base as the special input.
     if (invoke->HasPcRelativeMethodLoadKind() &&
@@ -70,7 +70,7 @@
     }
   }
 
-  void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+  void VisitLoadClass(HLoadClass* load_class) override {
     HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
     switch (load_kind) {
       case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
@@ -86,7 +86,7 @@
     }
   }
 
-  void VisitLoadString(HLoadString* load_string) OVERRIDE {
+  void VisitLoadString(HLoadString* load_string) override {
     HLoadString::LoadKind load_kind = load_string->GetLoadKind();
     switch (load_kind) {
       case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
@@ -102,7 +102,7 @@
     }
   }
 
-  void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+  void VisitPackedSwitch(HPackedSwitch* switch_insn) override {
     if (switch_insn->GetNumEntries() <=
         InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
       return;
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index 6dd1ee0..872370b 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -34,7 +34,7 @@
 
   static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
  private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 41f2f77..4b07d5b 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -42,53 +42,53 @@
   }
 
  private:
-  void VisitAdd(HAdd* add) OVERRIDE {
+  void VisitAdd(HAdd* add) override {
     BinaryFP(add);
   }
 
-  void VisitSub(HSub* sub) OVERRIDE {
+  void VisitSub(HSub* sub) override {
     BinaryFP(sub);
   }
 
-  void VisitMul(HMul* mul) OVERRIDE {
+  void VisitMul(HMul* mul) override {
     BinaryFP(mul);
   }
 
-  void VisitDiv(HDiv* div) OVERRIDE {
+  void VisitDiv(HDiv* div) override {
     BinaryFP(div);
   }
 
-  void VisitCompare(HCompare* compare) OVERRIDE {
+  void VisitCompare(HCompare* compare) override {
     BinaryFP(compare);
   }
 
-  void VisitReturn(HReturn* ret) OVERRIDE {
+  void VisitReturn(HReturn* ret) override {
     HConstant* value = ret->InputAt(0)->AsConstant();
     if ((value != nullptr && DataType::IsFloatingPointType(value->GetType()))) {
       ReplaceInput(ret, value, 0, true);
     }
   }
 
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+  void VisitInvokeInterface(HInvokeInterface* invoke) override {
     HandleInvoke(invoke);
   }
 
-  void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+  void VisitLoadClass(HLoadClass* load_class) override {
     if (load_class->HasPcRelativeLoadKind()) {
       HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_class);
       load_class->AddSpecialInput(method_address);
     }
   }
 
-  void VisitLoadString(HLoadString* load_string) OVERRIDE {
+  void VisitLoadString(HLoadString* load_string) override {
     if (load_string->HasPcRelativeLoadKind()) {
       HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_string);
       load_string->AddSpecialInput(method_address);
@@ -102,31 +102,31 @@
     }
   }
 
-  void VisitEqual(HEqual* cond) OVERRIDE {
+  void VisitEqual(HEqual* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitNotEqual(HNotEqual* cond) OVERRIDE {
+  void VisitNotEqual(HNotEqual* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitLessThan(HLessThan* cond) OVERRIDE {
+  void VisitLessThan(HLessThan* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitLessThanOrEqual(HLessThanOrEqual* cond) OVERRIDE {
+  void VisitLessThanOrEqual(HLessThanOrEqual* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitGreaterThan(HGreaterThan* cond) OVERRIDE {
+  void VisitGreaterThan(HGreaterThan* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) OVERRIDE {
+  void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) override {
     BinaryFP(cond);
   }
 
-  void VisitNeg(HNeg* neg) OVERRIDE {
+  void VisitNeg(HNeg* neg) override {
     if (DataType::IsFloatingPointType(neg->GetType())) {
       // We need to replace the HNeg with an HX86FPNeg in order to address the constant area.
       HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg);
@@ -141,7 +141,7 @@
     }
   }
 
-  void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+  void VisitPackedSwitch(HPackedSwitch* switch_insn) override {
     if (switch_insn->GetNumEntries() <=
         InstructionCodeGeneratorX86::kPackedSwitchJumpTableThreshold) {
       return;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index db56b7f..3b470a6 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -34,7 +34,7 @@
 
   static constexpr const char* kPcRelativeFixupsX86PassName  = "pc_relative_fixups_x86";
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
  private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index 2978add..a8ab256 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -43,18 +43,18 @@
       "prepare_for_register_allocation";
 
  private:
-  void VisitCheckCast(HCheckCast* check_cast) OVERRIDE;
-  void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE;
-  void VisitNullCheck(HNullCheck* check) OVERRIDE;
-  void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
-  void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
-  void VisitBoundType(HBoundType* bound_type) OVERRIDE;
-  void VisitArraySet(HArraySet* instruction) OVERRIDE;
-  void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
-  void VisitCondition(HCondition* condition) OVERRIDE;
-  void VisitConstructorFence(HConstructorFence* constructor_fence) OVERRIDE;
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
-  void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
+  void VisitCheckCast(HCheckCast* check_cast) override;
+  void VisitInstanceOf(HInstanceOf* instance_of) override;
+  void VisitNullCheck(HNullCheck* check) override;
+  void VisitDivZeroCheck(HDivZeroCheck* check) override;
+  void VisitBoundsCheck(HBoundsCheck* check) override;
+  void VisitBoundType(HBoundType* bound_type) override;
+  void VisitArraySet(HArraySet* instruction) override;
+  void VisitClinitCheck(HClinitCheck* check) override;
+  void VisitCondition(HCondition* condition) override;
+  void VisitConstructorFence(HConstructorFence* constructor_fence) override;
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
+  void VisitDeoptimize(HDeoptimize* deoptimize) override;
 
   bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
   bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index c6579dc..8ef9ce4 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -33,7 +33,7 @@
     PrintString(": ");
   }
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     PrintPreInstruction(instruction);
     PrintString(instruction->DebugName());
     PrintPostInstruction(instruction);
@@ -70,7 +70,7 @@
     PrintNewLine();
   }
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     PrintString("BasicBlock ");
     PrintInt(block->GetBlockId());
     const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
@@ -108,15 +108,15 @@
   explicit StringPrettyPrinter(HGraph* graph)
       : HPrettyPrinter(graph), str_(""), current_block_(nullptr) { }
 
-  void PrintInt(int value) OVERRIDE {
+  void PrintInt(int value) override {
     str_ += android::base::StringPrintf("%d", value);
   }
 
-  void PrintString(const char* value) OVERRIDE {
+  void PrintString(const char* value) override {
     str_ += value;
   }
 
-  void PrintNewLine() OVERRIDE {
+  void PrintNewLine() override {
     str_ += '\n';
   }
 
@@ -124,12 +124,12 @@
 
   std::string str() const { return str_; }
 
-  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+  void VisitBasicBlock(HBasicBlock* block) override {
     current_block_ = block;
     HPrettyPrinter::VisitBasicBlock(block);
   }
 
-  void VisitGoto(HGoto* gota) OVERRIDE {
+  void VisitGoto(HGoto* gota) override {
     PrintString("  ");
     PrintInt(gota->GetId());
     PrintString(": Goto ");
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 0d62248..a9d5902 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -94,26 +94,26 @@
     worklist_.reserve(kDefaultWorklistSize);
   }
 
-  void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE;
-  void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
-  void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
-  void VisitInstanceOf(HInstanceOf* load_class) OVERRIDE;
-  void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
-  void VisitLoadMethodHandle(HLoadMethodHandle* instr) OVERRIDE;
-  void VisitLoadMethodType(HLoadMethodType* instr) OVERRIDE;
-  void VisitLoadString(HLoadString* instr) OVERRIDE;
-  void VisitLoadException(HLoadException* instr) OVERRIDE;
-  void VisitNewArray(HNewArray* instr) OVERRIDE;
-  void VisitParameterValue(HParameterValue* instr) OVERRIDE;
-  void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
-  void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
-  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
-  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE;
-  void VisitInvoke(HInvoke* instr) OVERRIDE;
-  void VisitArrayGet(HArrayGet* instr) OVERRIDE;
-  void VisitCheckCast(HCheckCast* instr) OVERRIDE;
-  void VisitBoundType(HBoundType* instr) OVERRIDE;
-  void VisitNullCheck(HNullCheck* instr) OVERRIDE;
+  void VisitDeoptimize(HDeoptimize* deopt) override;
+  void VisitNewInstance(HNewInstance* new_instance) override;
+  void VisitLoadClass(HLoadClass* load_class) override;
+  void VisitInstanceOf(HInstanceOf* load_class) override;
+  void VisitClinitCheck(HClinitCheck* clinit_check) override;
+  void VisitLoadMethodHandle(HLoadMethodHandle* instr) override;
+  void VisitLoadMethodType(HLoadMethodType* instr) override;
+  void VisitLoadString(HLoadString* instr) override;
+  void VisitLoadException(HLoadException* instr) override;
+  void VisitNewArray(HNewArray* instr) override;
+  void VisitParameterValue(HParameterValue* instr) override;
+  void VisitInstanceFieldGet(HInstanceFieldGet* instr) override;
+  void VisitStaticFieldGet(HStaticFieldGet* instr) override;
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) override;
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) override;
+  void VisitInvoke(HInvoke* instr) override;
+  void VisitArrayGet(HArrayGet* instr) override;
+  void VisitCheckCast(HCheckCast* instr) override;
+  void VisitBoundType(HBoundType* instr) override;
+  void VisitNullCheck(HNullCheck* instr) override;
   void VisitPhi(HPhi* phi);
 
   void VisitBasicBlock(HBasicBlock* block);
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index d36d592..7c6a048 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -40,7 +40,7 @@
   // Visit a single instruction.
   void Visit(HInstruction* instruction);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   // Returns true if klass is admissible to the propagation: non-null and resolved.
   // For an array type, we also check if the component type is admissible.
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 3072c92..16131e1 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -90,9 +90,9 @@
                               CodeGenerator* codegen,
                               const SsaLivenessAnalysis& analysis,
                               bool iterative_move_coalescing = true);
-  ~RegisterAllocatorGraphColor() OVERRIDE;
+  ~RegisterAllocatorGraphColor() override;
 
-  void AllocateRegisters() OVERRIDE;
+  void AllocateRegisters() override;
 
   bool Validate(bool log_fatal_on_failure);
 
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index 36788b7..4d445c7 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -42,11 +42,11 @@
   RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
                               CodeGenerator* codegen,
                               const SsaLivenessAnalysis& analysis);
-  ~RegisterAllocatorLinearScan() OVERRIDE;
+  ~RegisterAllocatorLinearScan() override;
 
-  void AllocateRegisters() OVERRIDE;
+  void AllocateRegisters() override;
 
-  bool Validate(bool log_fatal_on_failure) OVERRIDE {
+  bool Validate(bool log_fatal_on_failure) override {
     processing_core_registers_ = true;
     if (!ValidateInternal(log_fatal_on_failure)) {
       return false;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 7144775..db6a760 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -40,7 +40,7 @@
 
 class RegisterAllocatorTest : public OptimizingUnitTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     // This test is using the x86 ISA.
     OverrideInstructionSetFeatures(InstructionSet::kX86, "default");
     OptimizingUnitTest::SetUp();
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index fd48d84..48e80f5 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -339,7 +339,7 @@
         last_visited_latency_(0),
         last_visited_internal_latency_(0) {}
 
-  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+  void VisitInstruction(HInstruction* instruction) override {
     LOG(FATAL) << "Error visiting " << instruction->DebugName() << ". "
         "Architecture-specific scheduling latency visitors must handle all instructions"
         " (potentially by overriding the generic `VisitInstruction()`.";
@@ -392,7 +392,7 @@
   }
 
   SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
-                                         const SchedulingGraph& graph) OVERRIDE {
+                                         const SchedulingGraph& graph) override {
     UNUSED(graph);
     DCHECK(!nodes->empty());
     size_t select = rand_r(&seed_) % nodes->size();
@@ -412,9 +412,9 @@
  public:
   CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
 
-  void Reset() OVERRIDE { prev_select_ = nullptr; }
+  void Reset() override { prev_select_ = nullptr; }
   SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
-                                         const SchedulingGraph& graph) OVERRIDE;
+                                         const SchedulingGraph& graph) override;
 
  protected:
   SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate,
@@ -492,7 +492,7 @@
         codegen_(cg),
         instruction_set_(instruction_set) {}
 
-  bool Run() OVERRIDE {
+  bool Run() override {
     return Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
   }
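
The LOG(FATAL) fallback in the first hunk above is the standard trick for
keeping a visitor hierarchy exhaustive: the generic VisitInstruction() aborts,
so any instruction a latency visitor forgets to handle fails loudly instead of
being silently mis-scheduled. A minimal sketch of that backstop pattern
(illustrative names):

    #include <cstdio>
    #include <cstdlib>

    struct SchedulingLatencyVisitorBase {
      virtual ~SchedulingLatencyVisitorBase() {}
      // Exhaustiveness backstop: reaching this means a subclass missed a case.
      virtual void VisitInstruction(const char* debug_name) {
        std::fprintf(stderr, "Error visiting %s: unhandled instruction\n", debug_name);
        std::abort();
      }
    };

    struct DemoLatencyVisitor final : public SchedulingLatencyVisitorBase {
      // Opt into a benign default for everything instead of aborting.
      void VisitInstruction(const char* /* debug_name */) override {}
    };
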
 
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 2f36948..875593b 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -100,7 +100,7 @@
   M(DataProcWithShifterOp, unused)
 
 #define DECLARE_VISIT_INSTRUCTION(type, unused)  \
-  void Visit##type(H##type* instruction) OVERRIDE;
+  void Visit##type(H##type* instruction) override;
 
   FOR_EACH_SCHEDULED_ARM_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -140,9 +140,9 @@
   HSchedulerARM(SchedulingNodeSelector* selector,
                 SchedulingLatencyVisitorARM* arm_latency_visitor)
       : HScheduler(arm_latency_visitor, selector) {}
-  ~HSchedulerARM() OVERRIDE {}
+  ~HSchedulerARM() override {}
 
-  bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
+  bool IsSchedulable(const HInstruction* instruction) const override {
 #define CASE_INSTRUCTION_KIND(type, unused) case \
   HInstruction::InstructionKind::k##type:
     switch (instruction->GetKind()) {
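
DECLARE_VISIT_INSTRUCTION above is an X-macro: each FOR_EACH_* list stamps out
one Visit##type declaration per instruction kind, so switching the macro body
from OVERRIDE to 'override' updated every declaration at once. A
declaration-only sketch of the expansion (demo names):

    struct HAdd;
    struct HMul;

    struct LatencyVisitorBase {
      virtual ~LatencyVisitorBase() {}
      virtual void VisitAdd(HAdd* instruction) {}
      virtual void VisitMul(HMul* instruction) {}
    };

    #define FOR_EACH_DEMO_INSTRUCTION(M) \
      M(Add, unused)                     \
      M(Mul, unused)

    #define DECLARE_VISIT_INSTRUCTION(type, unused) \
      void Visit##type(H##type* instruction) override;

    struct DemoSchedulingVisitor final : public LatencyVisitorBase {
      // Expands to: void VisitAdd(HAdd*) override; void VisitMul(HMul*) override;
      FOR_EACH_DEMO_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
    };
    #undef DECLARE_VISIT_INSTRUCTION
    #undef FOR_EACH_DEMO_INSTRUCTION
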
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 0d2f8d9..7f6549d 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -118,7 +118,7 @@
   M(DataProcWithShifterOp, unused)
 
 #define DECLARE_VISIT_INSTRUCTION(type, unused)  \
-  void Visit##type(H##type* instruction) OVERRIDE;
+  void Visit##type(H##type* instruction) override;
 
   FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
   FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -136,9 +136,9 @@
  public:
   explicit HSchedulerARM64(SchedulingNodeSelector* selector)
       : HScheduler(&arm64_latency_visitor_, selector) {}
-  ~HSchedulerARM64() OVERRIDE {}
+  ~HSchedulerARM64() override {}
 
-  bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
+  bool IsSchedulable(const HInstruction* instruction) const override {
 #define CASE_INSTRUCTION_KIND(type, unused) case \
   HInstruction::InstructionKind::k##type:
     switch (instruction->GetKind()) {
@@ -160,7 +160,7 @@
   // SIMD&FP registers are callee saved) so don't reorder such vector instructions.
   //
   // TODO: remove this when a proper support of SIMD registers is introduced to the compiler.
-  bool IsSchedulingBarrier(const HInstruction* instr) const OVERRIDE {
+  bool IsSchedulingBarrier(const HInstruction* instr) const override {
     return HScheduler::IsSchedulingBarrier(instr) ||
            instr->IsVecReduce() ||
            instr->IsVecExtractScalar() ||
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index d24d226..2889166 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -68,7 +68,7 @@
                    OptimizingCompilerStats* stats,
                    const char* name = kSelectGeneratorPassName);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kSelectGeneratorPassName = "select_generator";
 
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index cbac361..dc55eea 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -37,7 +37,7 @@
       : HOptimization(graph, name),
         codegen_(codegen) { }
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kSharpeningPassName = "sharpening";
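
HSharpening follows the same shape as every pass touched in these headers:
derive from HOptimization, expose a static pass-name constant, and override a
single Run() hook that reports whether the graph changed. Sketched with a
stand-in base class:

    struct HOptimizationLike {
      virtual ~HOptimizationLike() {}
      virtual bool Run() = 0;  // true if the pass changed the graph
    };

    struct DemoPass final : public HOptimizationLike {
      static constexpr const char* kDemoPassName = "demo_pass";
      bool Run() override {
        return false;  // a no-op pass reports "nothing changed"
      }
    };
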
 
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index cebd4ad..97c00c9 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -60,7 +60,7 @@
  * A live range contains the start and end of a range where an instruction or a temporary
  * is live.
  */
-class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> {
+class LiveRange final : public ArenaObject<kArenaAllocSsaLiveness> {
  public:
   LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
     DCHECK_LT(start, end);
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index a683c69..4b52553 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -29,7 +29,7 @@
 
 class SsaLivenessAnalysisTest : public OptimizingUnitTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     OptimizingUnitTest::SetUp();
     graph_ = CreateGraph();
     codegen_ = CodeGenerator::Create(graph_, *compiler_options_);
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index ee859e8..c5cc752 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -31,7 +31,7 @@
   explicit SsaDeadPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaDeadPhiEliminationPassName) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   void MarkDeadPhis();
   void EliminateDeadPhis();
@@ -53,7 +53,7 @@
   explicit SsaRedundantPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaRedundantPhiEliminationPassName) {}
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
 
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 85ed06e..e679893 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -38,15 +38,15 @@
  public:
   explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
 
-  void PrintInt(int value) OVERRIDE {
+  void PrintInt(int value) override {
     str_ += android::base::StringPrintf("%d", value);
   }
 
-  void PrintString(const char* value) OVERRIDE {
+  void PrintString(const char* value) override {
     str_ += value;
   }
 
-  void PrintNewLine() OVERRIDE {
+  void PrintNewLine() override {
     str_ += '\n';
   }
 
@@ -54,7 +54,7 @@
 
   std::string str() const { return str_; }
 
-  void VisitIntConstant(HIntConstant* constant) OVERRIDE {
+  void VisitIntConstant(HIntConstant* constant) override {
     PrintPreInstruction(constant);
     str_ += constant->DebugName();
     str_ += " ";
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index f0069c0..b1abcf6 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -31,7 +31,7 @@
         do_implicit_null_checks_(do_implicit_null_checks) {}
 
  private:
-  void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE {
+  void VisitBoundsCheck(HBoundsCheck* check) override {
     // Replace the length with the array itself, so that we can do compares to memory.
     HArrayLength* array_len = check->InputAt(1)->AsArrayLength();
 
diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h
index b254000..3f4178d 100644
--- a/compiler/optimizing/x86_memory_gen.h
+++ b/compiler/optimizing/x86_memory_gen.h
@@ -31,7 +31,7 @@
                              CodeGenerator* codegen,
                              OptimizingCompilerStats* stats);
 
-  bool Run() OVERRIDE;
+  bool Run() override;
 
   static constexpr const char* kX86MemoryOperandGenerationPassName =
           "x86_memory_operand_generation";
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index b0310f2..98c0191 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -39,7 +39,7 @@
 namespace art {
 namespace arm {
 
-class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
+class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
  public:
   // Most methods fit in a 1KB code buffer, which results in cheaper alloc/realloc and
   // fewer system calls than a larger default capacity.
@@ -149,7 +149,7 @@
   using MacroAssembler::Vmov;
 };
 
-class ArmVIXLAssembler FINAL : public Assembler {
+class ArmVIXLAssembler final : public Assembler {
  private:
   class ArmException;
  public:
@@ -161,19 +161,19 @@
 
   virtual ~ArmVIXLAssembler() {}
   ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Size of generated code.
-  size_t CodeSize() const OVERRIDE;
-  const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+  size_t CodeSize() const override;
+  const uint8_t* CodeBufferBaseAddress() const override;
 
   // Copy instructions out of the assembly buffer into the given region of memory.
-  void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;
+  void FinalizeInstructions(const MemoryRegion& region) override;
 
-  void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Bind(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
   }
-  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
   }
 
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 4bc5d69..674bf12 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -32,7 +32,7 @@
 namespace art {
 namespace arm {
 
-class ArmVIXLJNIMacroAssembler FINAL
+class ArmVIXLJNIMacroAssembler final
     : public JNIMacroAssemblerFwd<ArmVIXLAssembler, PointerSize::k32> {
  private:
   class ArmException;
@@ -42,7 +42,7 @@
         exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
 
   virtual ~ArmVIXLJNIMacroAssembler() {}
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   //
   // Overridden common assembler high-level functionality
@@ -52,109 +52,109 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack.
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines.
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister src) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
+                                ManagedRegister scratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest,
                      ManagedRegister src,
                      FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
+                     ManagedRegister scratch) override;
 
   // Load routines.
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
 
   void LoadFromThread(ManagedRegister dest,
                       ThreadOffset32 src,
-                      size_t size) OVERRIDE;
+                      size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
 
   void LoadRef(ManagedRegister dest,
                ManagedRegister base,
                MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override;
 
   // Copying routines.
-  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset32 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
+                            ManagedRegister scratch) override;
 
   void CopyRawPtrToThread(ThreadOffset32 thr_offs,
                           FrameOffset fr_offs,
-                          ManagedRegister scratch) OVERRIDE;
+                          ManagedRegister scratch) override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
 
   void Copy(FrameOffset dest,
             ManagedRegister src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base,
             Offset dest_offset,
             FrameOffset src,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             FrameOffset src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest,
             Offset dest_offset,
             ManagedRegister src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             Offset dest_offset,
             FrameOffset src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister mtr) OVERRIDE;
+  void GetCurrentThread(ManagedRegister mtr) override;
   void GetCurrentThread(FrameOffset dest_offset,
-                        ManagedRegister scratch) OVERRIDE;
+                        ManagedRegister scratch) override;
 
   // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
@@ -163,43 +163,43 @@
   void CreateHandleScopeEntry(ManagedRegister out_reg,
                               FrameOffset handlescope_offset,
                               ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off,
                               FrameOffset handlescope_offset,
                               ManagedRegister scratch,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // src holds a handle scope entry (Object**); load this into dst.
   void LoadReferenceFromHandleScope(ManagedRegister dst,
-                                    ManagedRegister src) OVERRIDE;
+                                    ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+  void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to an ExceptionSlowPath if it is.
   void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override;
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label) OVERRIDE;
+  void Jump(JNIMacroLabel* label) override;
   // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label) OVERRIDE;
+  void Bind(JNIMacroLabel* label) override;
 
-  void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+  void MemoryBarrier(ManagedRegister scratch) override;
 
   void EmitExceptionPoll(ArmVIXLJNIMacroAssembler::ArmException *exception);
   void Load(ArmManagedRegister dest, vixl32::Register base, int32_t offset, size_t size);
@@ -231,7 +231,7 @@
   friend class ArmVIXLAssemblerTest_VixlStoreToOffset_Test;
 };
 
-class ArmVIXLJNIMacroLabel FINAL
+class ArmVIXLJNIMacroLabel final
     : public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel,
                                  vixl32::Label,
                                  InstructionSet::kArm> {
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 8983af2..74537dd 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -61,7 +61,7 @@
   kStoreDWord
 };
 
-class Arm64Assembler FINAL : public Assembler {
+class Arm64Assembler final : public Assembler {
  public:
   explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
 
@@ -70,11 +70,11 @@
   vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
 
   // Finalize the code.
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Size of generated code.
-  size_t CodeSize() const OVERRIDE;
-  const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+  size_t CodeSize() const override;
+  const uint8_t* CodeBufferBaseAddress() const override;
 
   // Copy instructions out of the assembly buffer into the given region of memory.
   void FinalizeInstructions(const MemoryRegion& region);
@@ -109,10 +109,10 @@
   // MaybeGenerateMarkingRegisterCheck and is passed to the BRK instruction.
   void GenerateMarkingRegisterCheck(vixl::aarch64::Register temp, int code = 0);
 
-  void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Bind(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM64";
   }
-  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
   }
 
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index f531b2a..45316ed 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -40,7 +40,7 @@
 namespace art {
 namespace arm64 {
 
-class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
+class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
  public:
   explicit Arm64JNIMacroAssembler(ArenaAllocator* allocator)
       : JNIMacroAssemblerFwd(allocator),
@@ -49,94 +49,94 @@
   ~Arm64JNIMacroAssembler();
 
   // Finalize the code.
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Emit code that will create an activation on the stack.
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack.
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines.
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister src) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
   void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+                                ManagedRegister scratch) override;
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
   void StoreSpanning(FrameOffset dest,
                      ManagedRegister src,
                      FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
+                     ManagedRegister scratch) override;
 
   // Load routines.
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
+  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
   void LoadRef(ManagedRegister dest,
                ManagedRegister base,
                MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+               bool unpoison_reference) override;
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
 
   // Copying routines.
-  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset64 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
+                            ManagedRegister scratch) override;
   void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      OVERRIDE;
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+      override;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
   void Copy(FrameOffset dest,
             ManagedRegister src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
   void Copy(ManagedRegister dest_base,
             Offset dest_offset,
             FrameOffset src,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
   void Copy(FrameOffset dest,
             FrameOffset src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
   void Copy(ManagedRegister dest,
             Offset dest_offset,
             ManagedRegister src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
   void Copy(FrameOffset dest,
             Offset dest_offset,
             FrameOffset src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
-  void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+            size_t size) override;
+  void MemoryBarrier(ManagedRegister scratch) override;
 
   // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
 
   // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
@@ -145,40 +145,40 @@
   void CreateHandleScopeEntry(ManagedRegister out_reg,
                               FrameOffset handlescope_offset,
                               ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off,
                               FrameOffset handlescope_offset,
                               ManagedRegister scratch,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // src holds a handle scope entry (Object**); load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to an ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override;
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label) OVERRIDE;
+  void Jump(JNIMacroLabel* label) override;
   // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label) OVERRIDE;
+  void Bind(JNIMacroLabel* label) override;
 
  private:
   class Arm64Exception {
@@ -234,7 +234,7 @@
   ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
 };
 
-class Arm64JNIMacroLabel FINAL
+class Arm64JNIMacroLabel final
     : public JNIMacroLabelCommon<Arm64JNIMacroLabel,
                                  vixl::aarch64::Label,
                                  InstructionSet::kArm64> {
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 379a639..251b82c 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -283,7 +283,7 @@
 
 // The purpose of this class is to ensure that we do not have to explicitly
 // call the AdvancePC method (which is good for convenience and correctness).
-class DebugFrameOpCodeWriterForAssembler FINAL
+class DebugFrameOpCodeWriterForAssembler final
     : public dwarf::DebugFrameOpCodeWriter<> {
  public:
   struct DelayedAdvancePC {
@@ -292,7 +292,7 @@
   };
 
   // This method is called by the opcode writers.
-  virtual void ImplicitlyAdvancePC() FINAL;
+  virtual void ImplicitlyAdvancePC() final;
 
   explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
       : dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
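
ImplicitlyAdvancePC() above keeps its 'virtual' keyword but is now spelled
with the 'final' specifier: it overrides the hook inherited from
dwarf::DebugFrameOpCodeWriter<> while forbidding any further override below
this class. A minimal sketch of a sealed virtual (stand-in names):

    struct OpCodeWriterBase {
      virtual ~OpCodeWriterBase() {}
      virtual void ImplicitlyAdvancePC() {}
    };

    struct AssemblerOpCodeWriter : public OpCodeWriterBase {
      // Overrides the base hook and seals it for all derived classes.
      void ImplicitlyAdvancePC() final {}
    };

    // struct Evil : public AssemblerOpCodeWriter {
    //   void ImplicitlyAdvancePC() override {}  // error: overrides a 'final' function
    // };
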
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 7c800b3..9e23d11 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -737,7 +737,7 @@
  protected:
   AssemblerTest() {}
 
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     allocator_.reset(new ArenaAllocator(&pool_));
     assembler_.reset(CreateAssembler(allocator_.get()));
     test_helper_.reset(
@@ -753,7 +753,7 @@
     SetUpHelpers();
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     test_helper_.reset();  // Clean up the helper.
     assembler_.reset();
     allocator_.reset();
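
SetUp() and TearDown() are virtual hooks on ::testing::Test, so 'override'
here also guards against the classic 'Setup()' misspelling, which would
otherwise compile as a brand-new function that gtest never calls. A hedged
gtest sketch:

    #include <gtest/gtest.h>

    class DemoAssemblerTest : public ::testing::Test {
     protected:
      void SetUp() override { /* allocate per-test state */ }
      void TearDown() override { /* release it in reverse order */ }
      // void Setup() override {}  // error: marked 'override' but does not override
    };
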
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index f5df926..e0c2992 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -259,19 +259,19 @@
 template <typename T, PointerSize kPointerSize>
 class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
  public:
-  void FinalizeCode() OVERRIDE {
+  void FinalizeCode() override {
     asm_.FinalizeCode();
   }
 
-  size_t CodeSize() const OVERRIDE {
+  size_t CodeSize() const override {
     return asm_.CodeSize();
   }
 
-  void FinalizeInstructions(const MemoryRegion& region) OVERRIDE {
+  void FinalizeInstructions(const MemoryRegion& region) override {
     asm_.FinalizeInstructions(region);
   }
 
-  DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE {
+  DebugFrameOpCodeWriterForAssembler& cfi() override {
     return asm_.cfi();
   }
 
@@ -299,7 +299,7 @@
   JNIMacroLabelCommon() : JNIMacroLabel(kIsa) {
   }
 
-  virtual ~JNIMacroLabelCommon() OVERRIDE {}
+  virtual ~JNIMacroLabelCommon() override {}
 
  private:
   PlatformLabel label_;
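
JNIMacroAssemblerFwd is a forwarding template: it implements the abstract
JNIMacroAssembler interface once by delegating each override to the wrapped
concrete assembler T, so every backend only has to supply T. A reduced sketch
with a hypothetical interface:

    #include <cstddef>

    struct MacroAssemblerInterface {
      virtual ~MacroAssemblerInterface() {}
      virtual void FinalizeCode() = 0;
      virtual std::size_t CodeSize() const = 0;
    };

    template <typename T>
    struct MacroAssemblerFwd : public MacroAssemblerInterface {
      void FinalizeCode() override { asm_.FinalizeCode(); }
      std::size_t CodeSize() const override { return asm_.CodeSize(); }
     protected:
      T asm_;  // the concrete backend assembler being wrapped
    };
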
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index b70c18b..067a595 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -58,7 +58,7 @@
  protected:
   JNIMacroAssemblerTest() {}
 
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     allocator_.reset(new ArenaAllocator(&pool_));
     assembler_.reset(CreateAssembler(allocator_.get()));
     test_helper_.reset(
@@ -74,7 +74,7 @@
     SetUpHelpers();
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     test_helper_.reset();  // Clean up the helper.
     assembler_.reset();
     allocator_.reset();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index af3d7a0..8a1e1df 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -263,7 +263,7 @@
   DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
 };
 
-class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
+class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
  public:
   using JNIBase = JNIMacroAssembler<PointerSize::k32>;
 
@@ -285,8 +285,8 @@
     cfi().DelayEmittingAdvancePCs();
   }
 
-  size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
-  size_t CodePosition() OVERRIDE;
+  size_t CodeSize() const override { return Assembler::CodeSize(); }
+  size_t CodePosition() override;
   DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
 
   virtual ~MipsAssembler() {
@@ -1143,10 +1143,10 @@
     }
   }
 
-  void Bind(Label* label) OVERRIDE {
+  void Bind(Label* label) override {
     Bind(down_cast<MipsLabel*>(label));
   }
-  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
   }
 
@@ -1155,25 +1155,25 @@
   using JNIBase::Jump;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override {
     LOG(FATAL) << "Not implemented on MIPS32";
     UNREACHABLE();
   }
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS32";
     UNREACHABLE();
   }
   // Emit a conditional jump to the label by applying a unary condition test to the register.
   void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
             JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
-            ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+            ManagedRegister test ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS32";
     UNREACHABLE();
   }
 
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS32";
     UNREACHABLE();
   }
@@ -1232,108 +1232,108 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack.
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines.
-  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister mscratch) OVERRIDE;
+                                ManagedRegister mscratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest,
                      ManagedRegister msrc,
                      FrameOffset in_off,
-                     ManagedRegister mscratch) OVERRIDE;
+                     ManagedRegister mscratch) override;
 
   // Load routines.
-  void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
 
-  void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
+  void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
 
   void LoadRef(ManagedRegister mdest,
                ManagedRegister base,
                MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) override;
 
   // Copying routines.
-  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
+  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset32 thr_offs,
-                            ManagedRegister mscratch) OVERRIDE;
+                            ManagedRegister mscratch) override;
 
   void CopyRawPtrToThread(ThreadOffset32 thr_offs,
                           FrameOffset fr_offs,
-                          ManagedRegister mscratch) OVERRIDE;
+                          ManagedRegister mscratch) override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
 
   void Copy(FrameOffset dest,
             ManagedRegister src_base,
             Offset src_offset,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base,
             Offset dest_offset,
             FrameOffset src,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             FrameOffset src_base,
             Offset src_offset,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest,
             Offset dest_offset,
             ManagedRegister src,
             Offset src_offset,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             Offset dest_offset,
             FrameOffset src,
             Offset src_offset,
             ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
-  void MemoryBarrier(ManagedRegister) OVERRIDE;
+  void MemoryBarrier(ManagedRegister) override;
 
   // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
 
   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
@@ -1342,34 +1342,34 @@
   void CreateHandleScopeEntry(ManagedRegister out_reg,
                               FrameOffset handlescope_offset,
                               ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off,
                               FrameOffset handlescope_offset,
                               ManagedRegister mscratch,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
+  void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
 
   // Emit slow paths queued during assembly and promote short branches to long if needed.
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Emit branches and finalize all instructions.
   void FinalizeInstructions(const MemoryRegion& region);
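
Class-level 'final', as on MipsAssembler above, forbids deriving from the class at all; besides documenting intent, it lets the compiler devirtualize calls made through a MipsAssembler pointer, since the dynamic type is then known exactly. A minimal sketch with hypothetical names:

    class AssemblerBase {
     public:
      virtual ~AssemblerBase() = default;
      virtual size_t CodeSize() const = 0;
    };

    class SealedAssembler final : public AssemblerBase {
     public:
      size_t CodeSize() const override { return 0; }
    };

    // error: cannot derive from 'final' base 'SealedAssembler'
    // class Patched : public SealedAssembler {};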
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index 0f85892..f9919f5 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -61,15 +61,15 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "mips";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     return " --no-warn -32 -march=mips32r5 -mmsa";
   }
 
-  void Pad(std::vector<uint8_t>& data) OVERRIDE {
+  void Pad(std::vector<uint8_t>& data) override {
     // The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
     // of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
     // pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -78,15 +78,15 @@
     data.insert(data.end(), pad_size, 0);
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mmips:isa32r5";
   }
 
-  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
     return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (registers_.size() == 0) {
       registers_.push_back(new mips::Register(mips::ZERO));
       registers_.push_back(new mips::Register(mips::AT));
@@ -222,7 +222,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -234,23 +234,23 @@
     UNREACHABLE();
   }
 
-  std::vector<mips::Register*> GetRegisters() OVERRIDE {
+  std::vector<mips::Register*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<mips::FRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  std::vector<mips::VectorRegister*> GetVectorRegisters() OVERRIDE {
+  std::vector<mips::VectorRegister*> GetVectorRegisters() override {
     return vec_registers_;
   }
 
-  uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+  uint32_t CreateImmediate(int64_t imm_value) override {
     return imm_value;
   }
 
-  std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const mips::Register& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
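
All of the Pad() overrides in these test files implement the same rounding: grow the buffer with zero bytes until its size is a multiple of 16, matching the linker's NOP padding (on MIPS a NOP encodes as all zero bits, so zero-filling is equivalent). The pad_size computation itself is elided in the hunks above, so the following is a minimal sketch of its likely shape, not the exact source:

    #include <cstdint>
    #include <vector>

    // Pad 'data' with zero bytes up to the next multiple of 16.
    void PadTo16(std::vector<uint8_t>& data) {
      size_t pad_size = (16 - (data.size() & 15u)) & 15u;  // 0..15 bytes
      data.insert(data.end(), pad_size, 0);
    }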
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 3d876ca..1ec7a6a 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -61,16 +61,16 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "mips";
   }
 
-  std::string GetAssemblerCmdName() OVERRIDE {
+  std::string GetAssemblerCmdName() override {
     // We assemble and link for MIPS32R6. See GetAssemblerParameters() for details.
     return "gcc";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     // We assemble and link for MIPS32R6. The reason is that object files produced for MIPS32R6
     // (and MIPS64R6) with the GNU assembler don't have correct final offsets in PC-relative
     // branches in the .text section and so they require a relocation pass (there's a relocation
@@ -82,7 +82,7 @@
         " -Wl,-Ttext=0x1000000 -Wl,-e0x1000000 -nostdlib";
   }
 
-  void Pad(std::vector<uint8_t>& data) OVERRIDE {
+  void Pad(std::vector<uint8_t>& data) override {
     // The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
     // of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
     // pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -91,15 +91,15 @@
     data.insert(data.end(), pad_size, 0);
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mmips:isa32r6";
   }
 
-  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
     return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (registers_.size() == 0) {
       registers_.push_back(new mips::Register(mips::ZERO));
       registers_.push_back(new mips::Register(mips::AT));
@@ -235,7 +235,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -247,23 +247,23 @@
     UNREACHABLE();
   }
 
-  std::vector<mips::Register*> GetRegisters() OVERRIDE {
+  std::vector<mips::Register*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<mips::FRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  std::vector<mips::VectorRegister*> GetVectorRegisters() OVERRIDE {
+  std::vector<mips::VectorRegister*> GetVectorRegisters() override {
     return vec_registers_;
   }
 
-  uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+  uint32_t CreateImmediate(int64_t imm_value) override {
     return imm_value;
   }
 
-  std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const mips::Register& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index f94d074..9527fa6 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -55,19 +55,19 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "mips";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     return " --no-warn -32 -march=mips32r2";
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mmips:isa32r2";
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (registers_.size() == 0) {
       registers_.push_back(new mips::Register(mips::ZERO));
       registers_.push_back(new mips::Register(mips::AT));
@@ -170,7 +170,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -181,19 +181,19 @@
     UNREACHABLE();
   }
 
-  std::vector<mips::Register*> GetRegisters() OVERRIDE {
+  std::vector<mips::Register*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<mips::FRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+  uint32_t CreateImmediate(int64_t imm_value) override {
     return imm_value;
   }
 
-  std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const mips::Register& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 19f23b7..ce447db 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -414,7 +414,7 @@
   DISALLOW_COPY_AND_ASSIGN(Mips64ExceptionSlowPath);
 };
 
-class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
+class Mips64Assembler final : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
  public:
   using JNIBase = JNIMacroAssembler<PointerSize::k64>;
 
@@ -439,7 +439,7 @@
     }
   }
 
-  size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
+  size_t CodeSize() const override { return Assembler::CodeSize(); }
   DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
 
   // Emit Machine Instructions.
@@ -920,10 +920,10 @@
     }
   }
 
-  void Bind(Label* label) OVERRIDE {
+  void Bind(Label* label) override {
     Bind(down_cast<Mips64Label*>(label));
   }
-  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(Label* label ATTRIBUTE_UNUSED) override {
     UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS64";
   }
 
@@ -934,25 +934,25 @@
   using JNIBase::Jump;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override {
     LOG(FATAL) << "Not implemented on MIPS64";
     UNREACHABLE();
   }
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS64";
     UNREACHABLE();
   }
   // Emit a conditional jump to the label by applying a unary condition test to the register.
   void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
             JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
-            ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+            ManagedRegister test ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS64";
     UNREACHABLE();
   }
 
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
     LOG(FATAL) << "Not implemented on MIPS64";
     UNREACHABLE();
   }
@@ -1322,119 +1322,119 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack.
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines.
-  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister mscratch) OVERRIDE;
+                                ManagedRegister mscratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
-                     ManagedRegister mscratch) OVERRIDE;
+                     ManagedRegister mscratch) override;
 
   // Load routines.
-  void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
 
-  void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
+  void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
 
   void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) override;
 
   // Copying routines.
-  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
+  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset64 thr_offs,
-                            ManagedRegister mscratch) OVERRIDE;
+                            ManagedRegister mscratch) override;
 
   void CopyRawPtrToThread(ThreadOffset64 thr_offs,
                           FrameOffset fr_offs,
-                          ManagedRegister mscratch) OVERRIDE;
+                          ManagedRegister mscratch) override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
 
   void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
-            ManagedRegister mscratch, size_t size) OVERRIDE;
+            ManagedRegister mscratch, size_t size) override;
 
   void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister mscratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
-            ManagedRegister mscratch, size_t size) OVERRIDE;
+            ManagedRegister mscratch, size_t size) override;
 
   void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-            ManagedRegister mscratch, size_t size) OVERRIDE;
+            ManagedRegister mscratch, size_t size) override;
 
-  void MemoryBarrier(ManagedRegister) OVERRIDE;
+  void MemoryBarrier(ManagedRegister) override;
 
   // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
 
   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
   // null.
   void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
-                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+                              ManagedRegister in_reg, bool null_allowed) override;
 
   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
-                              mscratch, bool null_allowed) OVERRIDE;
+                              mscratch, bool null_allowed) override;
 
   // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
+  void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
 
   // Emit slow paths queued during assembly and promote short branches to long if needed.
-  void FinalizeCode() OVERRIDE;
+  void FinalizeCode() override;
 
   // Emit branches and finalize all instructions.
   void FinalizeInstructions(const MemoryRegion& region);
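
The 'using JNIBase::Jump;' and 'using JNIBase::Bind;' declarations in both MIPS assemblers keep the base-class overloads visible: C++ name lookup stops at the first class that declares a name, so overriding Jump(Label*) in the derived class would otherwise hide every base-class Jump overload with a different parameter list. A minimal sketch with hypothetical names:

    struct Label {};
    struct MacroLabel {};

    struct Base {
      virtual ~Base() = default;
      virtual void Jump(Label*) {}
      virtual void Jump(MacroLabel*) {}
    };

    struct Derived : Base {
      using Base::Jump;              // re-exposes Jump(MacroLabel*)
      void Jump(Label*) override {}  // without the using, this would hide it
    };

    // MacroLabel m; Derived d; d.Jump(&m);  // compiles thanks to the using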
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index a53ff7c..4ceb356 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -63,16 +63,16 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "mips64";
   }
 
-  std::string GetAssemblerCmdName() OVERRIDE {
+  std::string GetAssemblerCmdName() override {
     // We assemble and link for MIPS64R6. See GetAssemblerParameters() for details.
     return "gcc";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     // We assemble and link for MIPS64R6. The reason is that object files produced for MIPS64R6
     // (and MIPS32R6) with the GNU assembler don't have correct final offsets in PC-relative
     // branches in the .text section and so they require a relocation pass (there's a relocation
@@ -80,7 +80,7 @@
     return " -march=mips64r6 -mmsa -Wa,--no-warn -Wl,-Ttext=0 -Wl,-e0 -nostdlib";
   }
 
-  void Pad(std::vector<uint8_t>& data) OVERRIDE {
+  void Pad(std::vector<uint8_t>& data) override {
     // The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
     // of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
     // pad, so, in order for two assembler outputs to match, we need to match the padding as well.
@@ -89,15 +89,15 @@
     data.insert(data.end(), pad_size, 0);
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mmips:isa64r6";
   }
 
-  mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+  mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) override {
     return new (allocator) mips64::Mips64Assembler(allocator, instruction_set_features_.get());
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (registers_.size() == 0) {
       registers_.push_back(new mips64::GpuRegister(mips64::ZERO));
       registers_.push_back(new mips64::GpuRegister(mips64::AT));
@@ -233,7 +233,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -245,23 +245,23 @@
     UNREACHABLE();
   }
 
-  std::vector<mips64::GpuRegister*> GetRegisters() OVERRIDE {
+  std::vector<mips64::GpuRegister*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<mips64::FpuRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<mips64::FpuRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  std::vector<mips64::VectorRegister*> GetVectorRegisters() OVERRIDE {
+  std::vector<mips64::VectorRegister*> GetVectorRegisters() override {
     return vec_registers_;
   }
 
-  uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+  uint32_t CreateImmediate(int64_t imm_value) override {
     return imm_value;
   }
 
-  std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index e42c4c9..5ac9236 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -306,7 +306,7 @@
   ArenaVector<int32_t> buffer_;
 };
 
-class X86Assembler FINAL : public Assembler {
+class X86Assembler final : public Assembler {
  public:
   explicit X86Assembler(ArenaAllocator* allocator)
       : Assembler(allocator), constant_area_(allocator) {}
@@ -758,8 +758,8 @@
   //
   int PreferredLoopAlignment() { return 16; }
   void Align(int alignment, int offset);
-  void Bind(Label* label) OVERRIDE;
-  void Jump(Label* label) OVERRIDE {
+  void Bind(Label* label) override;
+  void Jump(Label* label) override {
     jmp(label);
   }
   void Bind(NearLabel* label);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index cd007b3..b03c40a 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -51,19 +51,19 @@
                         x86::Immediate> Base;
 
  protected:
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "x86";
   }
 
-  std::string GetAssemblerParameters() OVERRIDE {
+  std::string GetAssemblerParameters() override {
     return " --32";
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mi386 --no-show-raw-insn";
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (addresses_singleton_.size() == 0) {
       // One addressing mode to test the repeat drivers.
       addresses_singleton_.push_back(x86::Address(x86::EAX, x86::EBX, x86::TIMES_1, 2));
@@ -118,25 +118,25 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
   }
 
-  std::vector<x86::Address> GetAddresses() OVERRIDE {
+  std::vector<x86::Address> GetAddresses() override {
     return addresses_;
   }
 
-  std::vector<x86::Register*> GetRegisters() OVERRIDE {
+  std::vector<x86::Register*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<x86::XmmRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<x86::XmmRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  x86::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+  x86::Immediate CreateImmediate(int64_t imm_value) override {
     return x86::Immediate(imm_value);
   }
 
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index dd99f03..df946bd 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -25,10 +25,10 @@
 namespace x86 {
 
 // Slowpath entered when Thread::Current()->_exception is non-null
-class X86ExceptionSlowPath FINAL : public SlowPath {
+class X86ExceptionSlowPath final : public SlowPath {
  public:
   explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
-  virtual void Emit(Assembler *sp_asm) OVERRIDE;
+  virtual void Emit(Assembler *sp_asm) override;
  private:
   const size_t stack_adjust_;
 };
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 99219d8..a701080 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -32,7 +32,7 @@
 
 class X86JNIMacroLabel;
 
-class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
+class X86JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
  public:
   explicit X86JNIMacroAssembler(ArenaAllocator* allocator) : JNIMacroAssemblerFwd(allocator) {}
   virtual ~X86JNIMacroAssembler() {}
@@ -45,130 +45,130 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister src) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
+                                ManagedRegister scratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
+                     ManagedRegister scratch) override;
 
   // Load routines
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
 
-  void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+  void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset src) override;
 
   void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override;
 
   // Copying routines
-  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset32 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
+                            ManagedRegister scratch) override;
 
   void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      OVERRIDE;
+      override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
 
   void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
+            ManagedRegister scratch, size_t size) override;
 
   void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
+            ManagedRegister scratch, size_t size) override;
 
-  void MemoryBarrier(ManagedRegister) OVERRIDE;
+  void MemoryBarrier(ManagedRegister) override;
 
   // Sign extension
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current()
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
 
   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
   // null.
   void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
-                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+                              ManagedRegister in_reg, bool null_allowed) override;
 
   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
-                              ManagedRegister scratch, bool null_allowed) OVERRIDE;
+                              ManagedRegister scratch, bool null_allowed) override;
 
   // src holds a handle scope entry (Object**) load this into dst
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset]
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+  void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override;
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label) OVERRIDE;
+  void Jump(JNIMacroLabel* label) override;
   // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label) OVERRIDE;
+  void Bind(JNIMacroLabel* label) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler);
 };
 
-class X86JNIMacroLabel FINAL
+class X86JNIMacroLabel final
     : public JNIMacroLabelCommon<X86JNIMacroLabel,
                                  art::Label,
                                  InstructionSet::kX86> {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index e4d72a7..e696635 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -351,7 +351,7 @@
 };
 
 
-class X86_64Assembler FINAL : public Assembler {
+class X86_64Assembler final : public Assembler {
  public:
   explicit X86_64Assembler(ArenaAllocator* allocator)
       : Assembler(allocator), constant_area_(allocator) {}
@@ -844,8 +844,8 @@
   //
   int PreferredLoopAlignment() { return 16; }
   void Align(int alignment, int offset);
-  void Bind(Label* label) OVERRIDE;
-  void Jump(Label* label) OVERRIDE {
+  void Bind(Label* label) override;
+  void Jump(Label* label) override {
     jmp(label);
   }
   void Bind(NearLabel* label);
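
'override' also guards cv-qualifiers, which matter in signatures like the 'size_t CodeSize() const override' seen in the assembler headers above: dropping the 'const' would silently declare a brand-new function rather than an override, and the specifier turns that into a hard error. A minimal sketch with hypothetical names:

    struct AsmBase {
      virtual ~AsmBase() = default;
      virtual size_t CodeSize() const { return 0; }
    };

    struct AsmImpl : AsmBase {
      // size_t CodeSize() override { return 16; }  // error: does not override
      //                                            // (missing 'const')
      size_t CodeSize() const override { return 16; }
    };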
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 0589df5..e1de1f1 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -145,15 +145,15 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "x86_64";
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
   }
 
-  void SetUpHelpers() OVERRIDE {
+  void SetUpHelpers() override {
     if (addresses_singleton_.size() == 0) {
       // One addressing mode to test the repeat drivers.
       addresses_singleton_.push_back(
@@ -291,7 +291,7 @@
     }
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     AssemblerTest::TearDown();
     STLDeleteElements(&registers_);
     STLDeleteElements(&fp_registers_);
@@ -301,29 +301,29 @@
     return addresses_;
   }
 
-  std::vector<x86_64::CpuRegister*> GetRegisters() OVERRIDE {
+  std::vector<x86_64::CpuRegister*> GetRegisters() override {
     return registers_;
   }
 
-  std::vector<x86_64::XmmRegister*> GetFPRegisters() OVERRIDE {
+  std::vector<x86_64::XmmRegister*> GetFPRegisters() override {
     return fp_registers_;
   }
 
-  x86_64::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+  x86_64::Immediate CreateImmediate(int64_t imm_value) override {
     return x86_64::Immediate(imm_value);
   }
 
-  std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+  std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) override {
     CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
     return secondary_register_names_[reg];
   }
 
-  std::string GetTertiaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+  std::string GetTertiaryRegisterName(const x86_64::CpuRegister& reg) override {
     CHECK(tertiary_register_names_.find(reg) != tertiary_register_names_.end());
     return tertiary_register_names_[reg];
   }
 
-  std::string GetQuaternaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+  std::string GetQuaternaryRegisterName(const x86_64::CpuRegister& reg) override {
     CHECK(quaternary_register_names_.find(reg) != quaternary_register_names_.end());
     return quaternary_register_names_[reg];
   }
@@ -2002,11 +2002,11 @@
 
  protected:
   // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() OVERRIDE {
+  std::string GetArchitectureString() override {
     return "x86_64";
   }
 
-  std::string GetDisassembleParameters() OVERRIDE {
+  std::string GetDisassembleParameters() override {
     return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
   }
 
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index f6b2f9d..d5c0878 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -574,10 +574,10 @@
 }
 
 // Slowpath entered when Thread::Current()->_exception is non-null
-class X86_64ExceptionSlowPath FINAL : public SlowPath {
+class X86_64ExceptionSlowPath final : public SlowPath {
  public:
   explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
-  virtual void Emit(Assembler *sp_asm) OVERRIDE;
+  virtual void Emit(Assembler *sp_asm) override;
  private:
   const size_t stack_adjust_;
 };
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index d766ad4..4411558 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -31,7 +31,7 @@
 namespace art {
 namespace x86_64 {
 
-class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
+class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assembler,
                                                                   PointerSize::k64> {
  public:
   explicit X86_64JNIMacroAssembler(ArenaAllocator* allocator)
@@ -46,107 +46,107 @@
   void BuildFrame(size_t frame_size,
                   ManagedRegister method_reg,
                   ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+                  const ManagedRegisterEntrySpills& entry_spills) override;
 
   // Emit code that will remove an activation from the stack
   void RemoveFrame(size_t frame_size,
                    ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) OVERRIDE;
+                   bool may_suspend) override;
 
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+  void IncreaseFrameSize(size_t adjust) override;
+  void DecreaseFrameSize(size_t adjust) override;
 
   // Store routines
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
+  void StoreRef(FrameOffset dest, ManagedRegister src) override;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
 
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
 
   void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                 FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
+                                ManagedRegister scratch) override;
 
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest,
                      ManagedRegister src,
                      FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
+                     ManagedRegister scratch) override;
 
   // Load routines
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
 
-  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
 
-  void LoadRef(ManagedRegister dest, FrameOffset  src) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset  src) override;
 
   void LoadRef(ManagedRegister dest,
                ManagedRegister base,
                MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
+               bool unpoison_reference) override;
 
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
 
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
 
   // Copying routines
   void Move(ManagedRegister dest, ManagedRegister src, size_t size);
 
   void CopyRawPtrFromThread(FrameOffset fr_offs,
                             ThreadOffset64 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
+                            ManagedRegister scratch) override;
 
   void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      OVERRIDE;
+      override;
 
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
 
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
 
   void Copy(FrameOffset dest,
             ManagedRegister src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest_base,
             Offset dest_offset,
             FrameOffset src,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             FrameOffset src_base,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(ManagedRegister dest,
             Offset dest_offset,
             ManagedRegister src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
   void Copy(FrameOffset dest,
             Offset dest_offset,
             FrameOffset src,
             Offset src_offset,
             ManagedRegister scratch,
-            size_t size) OVERRIDE;
+            size_t size) override;
 
-  void MemoryBarrier(ManagedRegister) OVERRIDE;
+  void MemoryBarrier(ManagedRegister) override;
 
   // Sign extension
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void SignExtend(ManagedRegister mreg, size_t size) override;
 
   // Zero extension
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+  void ZeroExtend(ManagedRegister mreg, size_t size) override;
 
   // Exploit fast access in managed code to Thread::Current()
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+  void GetCurrentThread(ManagedRegister tr) override;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) override;
 
   // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
@@ -155,46 +155,46 @@
   void CreateHandleScopeEntry(ManagedRegister out_reg,
                               FrameOffset handlescope_offset,
                               ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off,
                               FrameOffset handlescope_offset,
                               ManagedRegister scratch,
-                              bool null_allowed) OVERRIDE;
+                              bool null_allowed) override;
 
   // src holds a handle scope entry (Object**); load this into dst
-  virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+  virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+  void VerifyObject(ManagedRegister src, bool could_be_null) override;
+  void VerifyObject(FrameOffset src, bool could_be_null) override;
 
   // Call to address held at [base+offset]
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
+  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) override;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to an ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
 
   // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  std::unique_ptr<JNIMacroLabel> CreateLabel() override;
   // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label) OVERRIDE;
+  void Jump(JNIMacroLabel* label) override;
   // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) override;
   // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label) OVERRIDE;
+  void Bind(JNIMacroLabel* label) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
 };
 
-class X86_64JNIMacroLabel FINAL
+class X86_64JNIMacroLabel final
     : public JNIMacroLabelCommon<X86_64JNIMacroLabel,
                                  art::Label,
                                  InstructionSet::kX86_64> {
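
(Aside, illustration only -- hypothetical types, not ART code: the 'override'
specifier makes the compiler reject a declaration that does not actually
override a base-class virtual, so a signature mismatch fails to build instead
of silently introducing a new virtual.)

  struct AssemblerBase {
    virtual void Bind(int* label) = 0;
    virtual ~AssemblerBase() {}
  };

  struct AssemblerImpl final : AssemblerBase {
    void Bind(int* label) override {}     // OK: matches a base-class virtual.
    // void Bind(long* label) override;   // Would not compile: overrides nothing.
  };

(Note that 'override' already implies the function is virtual, so spellings
like 'virtual void LoadReferenceFromHandleScope(...) override' above are
legal but carry a redundant 'virtual'.)
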
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index c223549..136066d 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -47,11 +47,11 @@
       : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp),
         deps_(nullptr) {}
 
-  void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {}
-  void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
-  bool IsRelocationPossible() OVERRIDE { return false; }
+  void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {}
+  void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
+  bool IsRelocationPossible() override { return false; }
 
-  verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return deps_; }
+  verifier::VerifierDeps* GetVerifierDeps() const override { return deps_; }
   void SetVerifierDeps(verifier::VerifierDeps* deps) { deps_ = deps; }
 
  private:
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 29df067..5655b3c 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -613,7 +613,7 @@
   bool shutting_down_;
 };
 
-class Dex2Oat FINAL {
+class Dex2Oat final {
  public:
   explicit Dex2Oat(TimingLogger* timings) :
       compiler_kind_(Compiler::kOptimizing),
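
(Aside, illustration only -- hypothetical names, not ART code: on a class,
'final' simply forbids derivation, documenting that a type such as Dex2Oat is
not meant to be subclassed; for types that do have virtual functions it can
also let the compiler devirtualize calls.)

  class Driver final {
   public:
    int Run() { return 0; }
  };

  // class MyDriver : public Driver {};  // Would not compile: Driver is final.
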
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 4247e17..60dba99 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -52,7 +52,7 @@
 
 class Dex2oatImageTest : public CommonRuntimeTest {
  public:
-  virtual void TearDown() OVERRIDE {}
+  virtual void TearDown() override {}
 
  protected:
   // Visitors take method and type references
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 2b96684..5fc3da4 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -55,7 +55,7 @@
 
 class Dex2oatTest : public Dex2oatEnvironmentTest {
  public:
-  virtual void TearDown() OVERRIDE {
+  virtual void TearDown() override {
     Dex2oatEnvironmentTest::TearDown();
 
     output_ = "";
@@ -349,7 +349,7 @@
 
 class Dex2oatSwapUseTest : public Dex2oatSwapTest {
  protected:
-  void CheckHostResult(bool expect_use) OVERRIDE {
+  void CheckHostResult(bool expect_use) override {
     if (!kIsTargetBuild) {
       if (expect_use) {
         EXPECT_NE(output_.find("Large app, accepted running with swap."), std::string::npos)
@@ -361,7 +361,7 @@
     }
   }
 
-  std::string GetTestDexFileName() OVERRIDE {
+  std::string GetTestDexFileName() override {
     // Use Statics as it has a handful of functions.
     return CommonRuntimeTest::GetTestDexFileName("Statics");
   }
@@ -474,7 +474,7 @@
 class Dex2oatVeryLargeTest : public Dex2oatTest {
  protected:
   void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
-                   CompilerFilter::Filter result ATTRIBUTE_UNUSED) OVERRIDE {
+                   CompilerFilter::Filter result ATTRIBUTE_UNUSED) override {
     // Ignore, we'll do our own checks.
   }
 
@@ -627,7 +627,7 @@
 class Dex2oatLayoutTest : public Dex2oatTest {
  protected:
   void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
-                   CompilerFilter::Filter result ATTRIBUTE_UNUSED) OVERRIDE {
+                   CompilerFilter::Filter result ATTRIBUTE_UNUSED) override {
     // Ignore, we'll do our own checks.
   }
 
diff --git a/dex2oat/linker/arm/relative_patcher_arm_base.h b/dex2oat/linker/arm/relative_patcher_arm_base.h
index f5a1395..0eb4417 100644
--- a/dex2oat/linker/arm/relative_patcher_arm_base.h
+++ b/dex2oat/linker/arm/relative_patcher_arm_base.h
@@ -31,10 +31,10 @@
  public:
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
+  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
 
  protected:
   ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider,
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2.h b/dex2oat/linker/arm/relative_patcher_thumb2.h
index 3a42928..dbf64a1 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2.h
+++ b/dex2oat/linker/arm/relative_patcher_thumb2.h
@@ -29,7 +29,7 @@
 
 namespace linker {
 
-class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
+class Thumb2RelativePatcher final : public ArmBaseRelativePatcher {
  public:
   explicit Thumb2RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
                                  RelativePatcherTargetProvider* target_provider);
@@ -37,18 +37,18 @@
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
+                 uint32_t target_offset) override;
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
 
  protected:
-  uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
-  uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
+  uint32_t MaxPositiveDisplacement(const ThunkKey& key) override;
+  uint32_t MaxNegativeDisplacement(const ThunkKey& key) override;
 
  private:
   void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.h b/dex2oat/linker/arm64/relative_patcher_arm64.h
index f7f673c..e95d0fe 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.h
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.h
@@ -28,7 +28,7 @@
 
 namespace linker {
 
-class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
+class Arm64RelativePatcher final : public ArmBaseRelativePatcher {
  public:
   Arm64RelativePatcher(RelativePatcherThunkProvider* thunk_provider,
                        RelativePatcherTargetProvider* target_provider,
@@ -36,24 +36,24 @@
 
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
+                 uint32_t target_offset) override;
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
 
  protected:
-  uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
-  uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;
+  uint32_t MaxPositiveDisplacement(const ThunkKey& key) override;
+  uint32_t MaxNegativeDisplacement(const ThunkKey& key) override;
 
  private:
   static uint32_t PatchAdrp(uint32_t adrp, uint32_t disp);
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 4e7d636..194a0e1 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -94,35 +94,35 @@
 };
 
 template <typename ElfTypes>
-class ElfWriterQuick FINAL : public ElfWriter {
+class ElfWriterQuick final : public ElfWriter {
  public:
   ElfWriterQuick(const CompilerOptions& compiler_options,
                  File* elf_file);
   ~ElfWriterQuick();
 
-  void Start() OVERRIDE;
+  void Start() override;
   void PrepareDynamicSection(size_t rodata_size,
                              size_t text_size,
                              size_t data_bimg_rel_ro_size,
                              size_t bss_size,
                              size_t bss_methods_offset,
                              size_t bss_roots_offset,
-                             size_t dex_section_size) OVERRIDE;
-  void PrepareDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
-  OutputStream* StartRoData() OVERRIDE;
-  void EndRoData(OutputStream* rodata) OVERRIDE;
-  OutputStream* StartText() OVERRIDE;
-  void EndText(OutputStream* text) OVERRIDE;
-  OutputStream* StartDataBimgRelRo() OVERRIDE;
-  void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) OVERRIDE;
-  void WriteDynamicSection() OVERRIDE;
-  void WriteDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
-  bool StripDebugInfo() OVERRIDE;
-  bool End() OVERRIDE;
+                             size_t dex_section_size) override;
+  void PrepareDebugInfo(const debug::DebugInfo& debug_info) override;
+  OutputStream* StartRoData() override;
+  void EndRoData(OutputStream* rodata) override;
+  OutputStream* StartText() override;
+  void EndText(OutputStream* text) override;
+  OutputStream* StartDataBimgRelRo() override;
+  void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) override;
+  void WriteDynamicSection() override;
+  void WriteDebugInfo(const debug::DebugInfo& debug_info) override;
+  bool StripDebugInfo() override;
+  bool End() override;
 
-  virtual OutputStream* GetStream() OVERRIDE;
+  virtual OutputStream* GetStream() override;
 
-  size_t GetLoadedSize() OVERRIDE;
+  size_t GetLoadedSize() override;
 
   static void EncodeOatPatches(const std::vector<uintptr_t>& locations,
                                std::vector<uint8_t>* buffer);
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 440b3a4..d575420 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -83,7 +83,7 @@
                const std::string& extra_dex = "",
                const std::initializer_list<std::string>& image_classes = {});
 
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     CommonCompilerTest::SetUpRuntimeOptions(options);
     QuickCompilerCallbacks* new_callbacks =
         new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileBootImage);
@@ -92,7 +92,7 @@
     options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
   }
 
-  std::unique_ptr<HashSet<std::string>> GetImageClasses() OVERRIDE {
+  std::unique_ptr<HashSet<std::string>> GetImageClasses() override {
     return std::make_unique<HashSet<std::string>>(image_classes_);
   }
 
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 67ded32..97d82d9 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -755,7 +755,7 @@
 
 class ImageWriter::ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
  public:
-  bool operator()(ObjPtr<Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(Thread::Current());
     mirror::Class::ComputeName(hs.NewHandle(c));
     return true;
@@ -987,7 +987,7 @@
         classes_to_prune_(),
         defined_class_count_(0u) { }
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!image_writer_->KeepClass(klass.Ptr())) {
       classes_to_prune_.insert(klass.Ptr());
       if (klass->GetClassLoader() == class_loader_) {
@@ -1022,7 +1022,7 @@
   explicit PruneClassLoaderClassesVisitor(ImageWriter* image_writer)
       : image_writer_(image_writer), removed_class_count_(0) {}
 
-  virtual void Visit(ObjPtr<mirror::ClassLoader> class_loader) OVERRIDE
+  virtual void Visit(ObjPtr<mirror::ClassLoader> class_loader) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     PruneClassesVisitor classes_visitor(image_writer_, class_loader);
     ClassTable* class_table =
@@ -1677,7 +1677,7 @@
 
   void VisitRoots(mirror::Object*** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       roots_->push_back(*roots[i]);
@@ -1686,7 +1686,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       roots_->push_back(roots[i]->AsMirrorPtr());
@@ -2104,14 +2104,14 @@
   void VisitRoots(mirror::Object*** roots ATTRIBUTE_UNUSED,
                   size_t count ATTRIBUTE_UNUSED,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     LOG(FATAL) << "Unsupported";
   }
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       // Copy the reference. Since we do not have the address for recording the relocation,
       // it needs to be recorded explicitly by the user of FixupRootVisitor.
@@ -2401,7 +2401,7 @@
   size_t oat_index_;
 };
 
-class ImageWriter::FixupClassVisitor FINAL : public FixupVisitor {
+class ImageWriter::FixupClassVisitor final : public FixupVisitor {
  public:
   FixupClassVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
       : FixupVisitor(image_writer, copy, oat_index) {}
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 7cf555b..e45023e 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -73,7 +73,7 @@
 namespace linker {
 
 // Write a Space built during compilation for use during execution.
-class ImageWriter FINAL {
+class ImageWriter final {
  public:
   ImageWriter(const CompilerOptions& compiler_options,
               uintptr_t image_begin,
diff --git a/dex2oat/linker/mips/relative_patcher_mips.h b/dex2oat/linker/mips/relative_patcher_mips.h
index d3a4c5a..4c385a3 100644
--- a/dex2oat/linker/mips/relative_patcher_mips.h
+++ b/dex2oat/linker/mips/relative_patcher_mips.h
@@ -23,28 +23,28 @@
 namespace art {
 namespace linker {
 
-class MipsRelativePatcher FINAL : public RelativePatcher {
+class MipsRelativePatcher final : public RelativePatcher {
  public:
   explicit MipsRelativePatcher(const MipsInstructionSetFeatures* features)
       : is_r6(features->IsR6()) {}
 
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
+                 uint32_t target_offset) override;
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
+  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
 
  private:
   bool is_r6;
diff --git a/dex2oat/linker/mips64/relative_patcher_mips64.h b/dex2oat/linker/mips64/relative_patcher_mips64.h
index 9f5a125..7b7c2cc 100644
--- a/dex2oat/linker/mips64/relative_patcher_mips64.h
+++ b/dex2oat/linker/mips64/relative_patcher_mips64.h
@@ -22,27 +22,27 @@
 namespace art {
 namespace linker {
 
-class Mips64RelativePatcher FINAL : public RelativePatcher {
+class Mips64RelativePatcher final : public RelativePatcher {
  public:
   Mips64RelativePatcher() {}
 
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
+                 uint32_t target_offset) override;
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
+  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Mips64RelativePatcher);
diff --git a/dex2oat/linker/multi_oat_relative_patcher.h b/dex2oat/linker/multi_oat_relative_patcher.h
index 60fcfe8..9b47a0d 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.h
+++ b/dex2oat/linker/multi_oat_relative_patcher.h
@@ -35,7 +35,7 @@
 // any number of oat files. It provides storage for method code offsets
 // and wraps RelativePatcher calls, adjusting relative offsets according
 // to the value set by SetAdjustment().
-class MultiOatRelativePatcher FINAL {
+class MultiOatRelativePatcher final {
  public:
   using const_iterator = SafeMap<MethodReference, uint32_t>::const_iterator;
 
@@ -139,7 +139,7 @@
 
     void GetThunkCode(const LinkerPatch& patch,
                       /*out*/ ArrayRef<const uint8_t>* code,
-                      /*out*/ std::string* debug_name) OVERRIDE;
+                      /*out*/ std::string* debug_name) override;
 
    private:
     CompiledMethodStorage* storage_;
@@ -149,7 +149,7 @@
   // Wrap the map in a class implementing RelativePatcherTargetProvider.
   class MethodOffsetMap : public RelativePatcherTargetProvider {
    public:
-    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE;
+    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) override;
     SafeMap<MethodReference, uint32_t> map;
   };
 
diff --git a/dex2oat/linker/multi_oat_relative_patcher_test.cc b/dex2oat/linker/multi_oat_relative_patcher_test.cc
index 05fe36a..a5831b6 100644
--- a/dex2oat/linker/multi_oat_relative_patcher_test.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher_test.cc
@@ -35,7 +35,7 @@
 
     uint32_t ReserveSpace(uint32_t offset,
                           const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
-                          MethodReference method_ref) OVERRIDE {
+                          MethodReference method_ref) override {
       last_reserve_offset_ = offset;
       last_reserve_method_ = method_ref;
       offset += next_reserve_adjustment_;
@@ -43,7 +43,7 @@
       return offset;
     }
 
-    uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE {
+    uint32_t ReserveSpaceEnd(uint32_t offset) override {
       last_reserve_offset_ = offset;
       last_reserve_method_ = kNullMethodRef;
       offset += next_reserve_adjustment_;
@@ -51,7 +51,7 @@
       return offset;
     }
 
-    uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+    uint32_t WriteThunks(OutputStream* out, uint32_t offset) override {
       last_write_offset_ = offset;
       if (next_write_alignment_ != 0u) {
         offset += next_write_alignment_;
@@ -79,7 +79,7 @@
     void PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                    uint32_t literal_offset,
                    uint32_t patch_offset,
-                   uint32_t target_offset) OVERRIDE {
+                   uint32_t target_offset) override {
       last_literal_offset_ = literal_offset;
       last_patch_offset_ = patch_offset;
       last_target_offset_ = target_offset;
@@ -88,7 +88,7 @@
     void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                   const LinkerPatch& patch,
                                   uint32_t patch_offset,
-                                  uint32_t target_offset) OVERRIDE {
+                                  uint32_t target_offset) override {
       last_literal_offset_ = patch.LiteralOffset();
       last_patch_offset_ = patch_offset;
       last_target_offset_ = target_offset;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 9045c43..4748c15 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -103,16 +103,16 @@
   ChecksumUpdatingOutputStream(OutputStream* out, OatHeader* oat_header)
       : OutputStream(out->GetLocation()), out_(out), oat_header_(oat_header) { }
 
-  bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+  bool WriteFully(const void* buffer, size_t byte_count) override {
     oat_header_->UpdateChecksum(buffer, byte_count);
     return out_->WriteFully(buffer, byte_count);
   }
 
-  off_t Seek(off_t offset, Whence whence) OVERRIDE {
+  off_t Seek(off_t offset, Whence whence) override {
     return out_->Seek(offset, whence);
   }
 
-  bool Flush() OVERRIDE {
+  bool Flush() override {
     return out_->Flush();
   }
 
@@ -826,7 +826,7 @@
         oat_class_index_(0u),
         method_offsets_index_(0u) {}
 
-  bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
+  bool StartClass(const DexFile* dex_file, size_t class_def_index) override {
     DexMethodVisitor::StartClass(dex_file, class_def_index);
     if (kIsDebugBuild && writer_->MayHaveCompiledMethods()) {
       // There are no oat classes if there aren't any compiled methods.
@@ -836,7 +836,7 @@
     return true;
   }
 
-  bool EndClass() OVERRIDE {
+  bool EndClass() override {
     ++oat_class_index_;
     return DexMethodVisitor::EndClass();
   }
@@ -862,7 +862,7 @@
       : DexMethodVisitor(writer, /* offset */ 0u) {}
 
   bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
-                   const ClassAccessor::Method& method) OVERRIDE {
+                   const ClassAccessor::Method& method) override {
     // Look for patches with .bss references and prepare maps with placeholders for their offsets.
     CompiledMethod* compiled_method = writer_->compiler_driver_->GetCompiledMethod(
         MethodReference(dex_file_, method.GetIndex()));
@@ -936,7 +936,7 @@
     DCHECK(num_classes == 0u || IsAligned<4u>(offset));
   }
 
-  bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE {
+  bool StartClass(const DexFile* dex_file, size_t class_def_index) override {
     DexMethodVisitor::StartClass(dex_file, class_def_index);
     compiled_methods_.clear();
     compiled_methods_with_code_ = 0u;
@@ -944,7 +944,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
-                   const ClassAccessor::Method& method) OVERRIDE {
+                   const ClassAccessor::Method& method) override {
     // Fill in the compiled_methods_ array for methods that have a
     // CompiledMethod. We track the number of non-null entries in
     // compiled_methods_with_code_ since we only want to allocate
@@ -959,7 +959,7 @@
     return true;
   }
 
-  bool EndClass() OVERRIDE {
+  bool EndClass() override {
     ClassReference class_ref(dex_file_, class_def_index_);
     ClassStatus status;
     bool found = writer_->compiler_driver_->GetCompiledClass(class_ref, &status);
@@ -1145,14 +1145,14 @@
       : OatDexMethodVisitor(writer, offset) {
   }
 
-  bool EndClass() OVERRIDE {
+  bool EndClass() override {
     OatDexMethodVisitor::EndClass();
     return true;
   }
 
   bool VisitMethod(size_t class_def_method_index,
                    const ClassAccessor::Method& method)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_)  {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
 
@@ -1248,7 +1248,7 @@
                                              std::move(ordered_methods)) {
   }
 
-  virtual bool VisitComplete() OVERRIDE {
+  virtual bool VisitComplete() override {
     offset_ = writer_->relative_patcher_->ReserveSpaceEnd(offset_);
     if (generate_debug_info_) {
       std::vector<debug::MethodDebugInfo> thunk_infos =
@@ -1261,7 +1261,7 @@
   }
 
   virtual bool VisitMethod(const OrderedMethodData& method_data)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     OatClass* oat_class = method_data.oat_class;
     CompiledMethod* compiled_method = method_data.compiled_method;
@@ -1445,7 +1445,7 @@
 
   bool VisitMethod(size_t class_def_method_index,
                    const ClassAccessor::Method& method ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
     CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -1495,7 +1495,7 @@
   // in the same oat file. If the origin and the copied methods are
   // in different oat files, don't touch the copied method.
   // References to other oat files are not supported yet.
-  bool StartClass(const DexFile* dex_file, size_t class_def_index) OVERRIDE
+  bool StartClass(const DexFile* dex_file, size_t class_def_index) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     OatDexMethodVisitor::StartClass(dex_file, class_def_index);
     // Skip classes that are not in the image.
@@ -1533,7 +1533,7 @@
     return true;
   }
 
-  bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) OVERRIDE
+  bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Skip methods that are not in the image.
     if (!IsImageClass()) {
@@ -1652,7 +1652,7 @@
     }
   }
 
-  virtual bool VisitStart() OVERRIDE {
+  virtual bool VisitStart() override {
     return true;
   }
 
@@ -1681,7 +1681,7 @@
     return true;
   }
 
-  virtual bool VisitMethod(const OrderedMethodData& method_data) OVERRIDE
+  virtual bool VisitMethod(const OrderedMethodData& method_data) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     const MethodReference& method_ref = method_data.method_reference;
     UpdateDexFileAndDexCache(method_ref.dex_file);
diff --git a/dex2oat/linker/relative_patcher.cc b/dex2oat/linker/relative_patcher.cc
index b6135c9..564cf30 100644
--- a/dex2oat/linker/relative_patcher.cc
+++ b/dex2oat/linker/relative_patcher.cc
@@ -45,35 +45,35 @@
     const InstructionSetFeatures* features,
     RelativePatcherThunkProvider* thunk_provider,
     RelativePatcherTargetProvider* target_provider) {
-  class RelativePatcherNone FINAL : public RelativePatcher {
+  class RelativePatcherNone final : public RelativePatcher {
    public:
     RelativePatcherNone() { }
 
     uint32_t ReserveSpace(uint32_t offset,
                           const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
-                          MethodReference method_ref ATTRIBUTE_UNUSED) OVERRIDE {
+                          MethodReference method_ref ATTRIBUTE_UNUSED) override {
       return offset;  // No space reserved; no patches expected.
     }
 
-    uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE {
+    uint32_t ReserveSpaceEnd(uint32_t offset) override {
       return offset;  // No space reserved; no patches expected.
     }
 
-    uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
+    uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) override {
       return offset;  // No thunks added; no patches expected.
     }
 
     void PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                    uint32_t literal_offset ATTRIBUTE_UNUSED,
                    uint32_t patch_offset ATTRIBUTE_UNUSED,
-                   uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
+                   uint32_t target_offset ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "Unexpected relative call patch.";
     }
 
     void PatchPcRelativeReference(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                   const LinkerPatch& patch ATTRIBUTE_UNUSED,
                                   uint32_t patch_offset ATTRIBUTE_UNUSED,
-                                  uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
+                                  uint32_t target_offset ATTRIBUTE_UNUSED) override {
       LOG(FATAL) << "Unexpected relative dex cache array patch.";
     }
 
@@ -84,7 +84,7 @@
     }
 
     std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(
-        uint32_t executable_offset ATTRIBUTE_UNUSED) OVERRIDE {
+        uint32_t executable_offset ATTRIBUTE_UNUSED) override {
       return std::vector<debug::MethodDebugInfo>();  // No thunks added.
     }
 
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index 9556c5f..9725570 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -57,7 +57,7 @@
     patched_code_.reserve(16 * KB);
   }
 
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     OverrideInstructionSetFeatures(instruction_set_, variant_);
     CommonCompilerTest::SetUp();
 
@@ -67,7 +67,7 @@
                                        &method_offset_map_);
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     compiled_methods_.clear();
     patcher_.reset();
     CommonCompilerTest::TearDown();
@@ -260,7 +260,7 @@
 
     void GetThunkCode(const LinkerPatch& patch,
                       /*out*/ ArrayRef<const uint8_t>* code,
-                      /*out*/ std::string* debug_name) OVERRIDE {
+                      /*out*/ std::string* debug_name) override {
       auto it = thunk_map_.find(ThunkKey(patch));
       CHECK(it != thunk_map_.end());
       const ThunkValue& value = it->second;
@@ -316,9 +316,9 @@
 
   // Map method reference to assigned offset.
   // Wrap the map in a class implementing RelativePatcherTargetProvider.
-  class MethodOffsetMap FINAL : public RelativePatcherTargetProvider {
+  class MethodOffsetMap final : public RelativePatcherTargetProvider {
    public:
-    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) OVERRIDE {
+    std::pair<bool, uint32_t> FindMethodOffset(MethodReference ref) override {
       auto it = map.find(ref);
       if (it == map.end()) {
         return std::pair<bool, uint32_t>(false, 0u);
diff --git a/dex2oat/linker/x86/relative_patcher_x86.h b/dex2oat/linker/x86/relative_patcher_x86.h
index e723580..3da62fb 100644
--- a/dex2oat/linker/x86/relative_patcher_x86.h
+++ b/dex2oat/linker/x86/relative_patcher_x86.h
@@ -22,17 +22,17 @@
 namespace art {
 namespace linker {
 
-class X86RelativePatcher FINAL : public X86BaseRelativePatcher {
+class X86RelativePatcher final : public X86BaseRelativePatcher {
  public:
   X86RelativePatcher() { }
 
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
 };
 
 }  // namespace linker
diff --git a/dex2oat/linker/x86/relative_patcher_x86_base.h b/dex2oat/linker/x86/relative_patcher_x86_base.h
index 4cc7b07..a1925e0 100644
--- a/dex2oat/linker/x86/relative_patcher_x86_base.h
+++ b/dex2oat/linker/x86/relative_patcher_x86_base.h
@@ -26,14 +26,14 @@
  public:
   uint32_t ReserveSpace(uint32_t offset,
                         const CompiledMethod* compiled_method,
-                        MethodReference method_ref) OVERRIDE;
-  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+                        MethodReference method_ref) override;
+  uint32_t ReserveSpaceEnd(uint32_t offset) override;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
   void PatchCall(std::vector<uint8_t>* code,
                  uint32_t literal_offset,
                  uint32_t patch_offset,
-                 uint32_t target_offset) OVERRIDE;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) OVERRIDE;
+                 uint32_t target_offset) override;
+  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
 
  protected:
   X86BaseRelativePatcher() { }
diff --git a/dex2oat/linker/x86_64/relative_patcher_x86_64.h b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
index a31e1eb..a82fef3 100644
--- a/dex2oat/linker/x86_64/relative_patcher_x86_64.h
+++ b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
@@ -22,17 +22,17 @@
 namespace art {
 namespace linker {
 
-class X86_64RelativePatcher FINAL : public X86BaseRelativePatcher {
+class X86_64RelativePatcher final : public X86BaseRelativePatcher {
  public:
   X86_64RelativePatcher() { }
 
   void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
-                                uint32_t target_offset) OVERRIDE;
+                                uint32_t target_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
-                                   uint32_t patch_offset) OVERRIDE;
+                                   uint32_t patch_offset) override;
 };
 
 }  // namespace linker
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index e7d5ed9..c81d0c7 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -112,15 +112,15 @@
  public:
   class Container : public DexContainer {
    public:
-    Section* GetMainSection() OVERRIDE {
+    Section* GetMainSection() override {
       return &main_section_;
     }
 
-    Section* GetDataSection() OVERRIDE {
+    Section* GetDataSection() override {
       return &data_section_;
     }
 
-    bool IsCompactDexContainer() const OVERRIDE {
+    bool IsCompactDexContainer() const override {
       return true;
     }
 
@@ -139,21 +139,21 @@
   // Return true if we can generate compact dex for the IR.
   bool CanGenerateCompactDex(std::string* error_msg);
 
-  bool Write(DexContainer* output, std::string* error_msg) OVERRIDE;
+  bool Write(DexContainer* output, std::string* error_msg) override;
 
-  std::unique_ptr<DexContainer> CreateDexContainer() const OVERRIDE;
+  std::unique_ptr<DexContainer> CreateDexContainer() const override;
 
-  void WriteHeader(Stream* stream) OVERRIDE;
+  void WriteHeader(Stream* stream) override;
 
-  size_t GetHeaderSize() const OVERRIDE;
+  size_t GetHeaderSize() const override;
 
   uint32_t WriteDebugInfoOffsetTable(Stream* stream);
 
-  void WriteCodeItem(Stream* stream, dex_ir::CodeItem* code_item, bool reserve_only) OVERRIDE;
+  void WriteCodeItem(Stream* stream, dex_ir::CodeItem* code_item, bool reserve_only) override;
 
-  void WriteStringData(Stream* stream, dex_ir::StringData* string_data) OVERRIDE;
+  void WriteStringData(Stream* stream, dex_ir::StringData* string_data) override;
 
-  void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) OVERRIDE;
+  void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) override;
 
   void SortDebugInfosByMethodIndex();
 
diff --git a/dexlayout/dex_container.h b/dexlayout/dex_container.h
index 2b9a5f9..2d742b0 100644
--- a/dexlayout/dex_container.h
+++ b/dexlayout/dex_container.h
@@ -57,19 +57,19 @@
    public:
     virtual ~VectorSection() {}
 
-    uint8_t* Begin() OVERRIDE {
+    uint8_t* Begin() override {
       return &data_[0];
     }
 
-    size_t Size() const OVERRIDE {
+    size_t Size() const override {
       return data_.size();
     }
 
-    void Resize(size_t size) OVERRIDE {
+    void Resize(size_t size) override {
       data_.resize(size, 0u);
     }
 
-    void Clear() OVERRIDE {
+    void Clear() override {
       data_.clear();
     }
 
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 8f853ea..b02ae50 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -233,7 +233,7 @@
     // Preallocate so that assignment does not invalidate pointers into the vector.
     collection_.reserve(size);
   }
-  virtual ~CollectionVector() OVERRIDE { }
+  virtual ~CollectionVector() override { }
 
   template<class... Args>
   T* CreateAndAddItem(Args&&... args) {
@@ -242,7 +242,7 @@
     return object;
   }
 
-  virtual uint32_t Size() const OVERRIDE { return collection_.size(); }
+  virtual uint32_t Size() const override { return collection_.size(); }
 
   Iterator<ElementType> begin() const { return Iterator<ElementType>(collection_, 0U, Size()); }
   Iterator<ElementType> end() const { return Iterator<ElementType>(collection_, Size(), Size()); }
@@ -406,7 +406,7 @@
                       data_size,
                       data_offset);
   }
-  ~Header() OVERRIDE { }
+  ~Header() override { }
 
   static size_t ItemSize() { return kHeaderItemSize; }
 
@@ -590,7 +590,7 @@
   explicit StringId(StringData* string_data) : string_data_(string_data) {
     size_ = kStringIdItemSize;
   }
-  ~StringId() OVERRIDE { }
+  ~StringId() override { }
 
   static size_t ItemSize() { return kStringIdItemSize; }
 
@@ -608,7 +608,7 @@
 class TypeId : public IndexedItem {
  public:
   explicit TypeId(StringId* string_id) : string_id_(string_id) { size_ = kTypeIdItemSize; }
-  ~TypeId() OVERRIDE { }
+  ~TypeId() override { }
 
   static size_t ItemSize() { return kTypeIdItemSize; }
 
@@ -629,7 +629,7 @@
   explicit TypeList(TypeIdVector* type_list) : type_list_(type_list) {
     size_ = sizeof(uint32_t) + (type_list->size() * sizeof(uint16_t));
   }
-  ~TypeList() OVERRIDE { }
+  ~TypeList() override { }
 
   const TypeIdVector* GetTypeList() const { return type_list_.get(); }
 
@@ -644,7 +644,7 @@
   ProtoId(const StringId* shorty, const TypeId* return_type, TypeList* parameters)
       : shorty_(shorty), return_type_(return_type), parameters_(parameters)
       { size_ = kProtoIdItemSize; }
-  ~ProtoId() OVERRIDE { }
+  ~ProtoId() override { }
 
   static size_t ItemSize() { return kProtoIdItemSize; }
 
@@ -666,7 +666,7 @@
  public:
   FieldId(const TypeId* klass, const TypeId* type, const StringId* name)
       : class_(klass), type_(type), name_(name) { size_ = kFieldIdItemSize; }
-  ~FieldId() OVERRIDE { }
+  ~FieldId() override { }
 
   static size_t ItemSize() { return kFieldIdItemSize; }
 
@@ -688,7 +688,7 @@
  public:
   MethodId(const TypeId* klass, const ProtoId* proto, const StringId* name)
       : class_(klass), proto_(proto), name_(name) { size_ = kMethodIdItemSize; }
-  ~MethodId() OVERRIDE { }
+  ~MethodId() override { }
 
   static size_t ItemSize() { return kMethodIdItemSize; }
 
@@ -710,7 +710,7 @@
  public:
   FieldItem(uint32_t access_flags, const FieldId* field_id)
       : access_flags_(access_flags), field_id_(field_id) { }
-  ~FieldItem() OVERRIDE { }
+  ~FieldItem() override { }
 
   FieldItem(FieldItem&&) = default;
 
@@ -732,7 +732,7 @@
  public:
   MethodItem(uint32_t access_flags, const MethodId* method_id, CodeItem* code)
       : access_flags_(access_flags), method_id_(method_id), code_(code) { }
-  ~MethodItem() OVERRIDE { }
+  ~MethodItem() override { }
 
   MethodItem(MethodItem&&) = default;
 
@@ -876,7 +876,7 @@
         direct_methods_(direct_methods),
         virtual_methods_(virtual_methods) { }
 
-  ~ClassData() OVERRIDE = default;
+  ~ClassData() override = default;
   FieldItemVector* StaticFields() { return static_fields_.get(); }
   FieldItemVector* InstanceFields() { return instance_fields_.get(); }
   MethodItemVector* DirectMethods() { return direct_methods_.get(); }
@@ -912,7 +912,7 @@
         class_data_(class_data),
         static_values_(static_values) { size_ = kClassDefItemSize; }
 
-  ~ClassDef() OVERRIDE { }
+  ~ClassDef() override { }
 
   static size_t ItemSize() { return kClassDefItemSize; }
 
@@ -980,7 +980,7 @@
  public:
   TryItem(uint32_t start_addr, uint16_t insn_count, const CatchHandler* handlers)
       : start_addr_(start_addr), insn_count_(insn_count), handlers_(handlers) { }
-  ~TryItem() OVERRIDE { }
+  ~TryItem() override { }
 
   uint32_t StartAddr() const { return start_addr_; }
   uint16_t InsnCount() const { return insn_count_; }
@@ -1042,7 +1042,7 @@
         tries_(tries),
         handlers_(handlers) { }
 
-  ~CodeItem() OVERRIDE { }
+  ~CodeItem() override { }
 
   uint16_t RegistersSize() const { return registers_size_; }
   uint16_t InsSize() const { return ins_size_; }
@@ -1115,7 +1115,7 @@
   explicit AnnotationSetItem(std::vector<AnnotationItem*>* items) : items_(items) {
     size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
   }
-  ~AnnotationSetItem() OVERRIDE { }
+  ~AnnotationSetItem() override { }
 
   std::vector<AnnotationItem*>* GetItems() { return items_.get(); }
 
@@ -1132,7 +1132,7 @@
   explicit AnnotationSetRefList(std::vector<AnnotationSetItem*>* items) : items_(items) {
     size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
   }
-  ~AnnotationSetRefList() OVERRIDE { }
+  ~AnnotationSetRefList() override { }
 
   std::vector<AnnotationSetItem*>* GetItems() { return items_.get(); }
 
@@ -1227,7 +1227,7 @@
   explicit CallSiteId(EncodedArrayItem* call_site_item) : call_site_item_(call_site_item) {
     size_ = kCallSiteIdItemSize;
   }
-  ~CallSiteId() OVERRIDE { }
+  ~CallSiteId() override { }
 
   static size_t ItemSize() { return kCallSiteIdItemSize; }
 
@@ -1248,7 +1248,7 @@
         field_or_method_id_(field_or_method_id) {
     size_ = kMethodHandleItemSize;
   }
-  ~MethodHandleItem() OVERRIDE { }
+  ~MethodHandleItem() override { }
 
   static size_t ItemSize() { return kMethodHandleItemSize; }
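
(Aside, illustration only -- hypothetical types, not ART code: destructors
participate in overriding, so the '~Foo() override' pattern used throughout
dex_ir.h is well-formed and additionally checks that the base class really
has a virtual destructor.)

  struct Item {
    virtual ~Item() {}
  };

  struct StringIdLike : Item {
    ~StringIdLike() override {}  // OK: overrides Item's virtual destructor.
  };

  struct PlainBase {};
  // struct Bad : PlainBase {
  //   ~Bad() override {}  // Would not compile: no virtual destructor to override.
  // };
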
 
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index a83a46b..601d783 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -92,7 +92,7 @@
 template<class T> class CollectionMap : public CollectionBase {
  public:
   CollectionMap() = default;
-  virtual ~CollectionMap() OVERRIDE { }
+  virtual ~CollectionMap() override { }
 
   template <class... Args>
   T* CreateAndAddItem(CollectionVector<T>& vector,
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index db1898b..dd2ebad 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -192,15 +192,15 @@
 
   class Container : public DexContainer {
    public:
-    Section* GetMainSection() OVERRIDE {
+    Section* GetMainSection() override {
       return &main_section_;
     }
 
-    Section* GetDataSection() OVERRIDE {
+    Section* GetDataSection() override {
       return &data_section_;
     }
 
-    bool IsCompactDexContainer() const OVERRIDE {
+    bool IsCompactDexContainer() const override {
       return false;
     }
 
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 871cd08..00b8ef2 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -132,7 +132,7 @@
   exit(kErrorInvalidArguments);
 }
 
-class DexoptAnalyzer FINAL {
+class DexoptAnalyzer final {
  public:
   DexoptAnalyzer() :
       assume_profile_changed_(false),
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 49f9249..c1a6f59 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -39,15 +39,15 @@
 
 static const vixl::aarch32::Register tr(TR);
 
-class DisassemblerArm::CustomDisassembler FINAL : public PrintDisassembler {
-  class CustomDisassemblerStream FINAL : public DisassemblerStream {
+class DisassemblerArm::CustomDisassembler final : public PrintDisassembler {
+  class CustomDisassemblerStream final : public DisassemblerStream {
    public:
     CustomDisassemblerStream(std::ostream& os,
                              const CustomDisassembler* disasm,
                              const DisassemblerOptions* options)
         : DisassemblerStream(os), disasm_(disasm), options_(options) {}
 
-    DisassemblerStream& operator<<(const PrintLabel& label) OVERRIDE {
+    DisassemblerStream& operator<<(const PrintLabel& label) override {
       const LocationType type = label.GetLocationType();
 
       switch (type) {
@@ -73,7 +73,7 @@
       }
     }
 
-    DisassemblerStream& operator<<(vixl::aarch32::Register reg) OVERRIDE {
+    DisassemblerStream& operator<<(vixl::aarch32::Register reg) override {
       if (reg.Is(tr)) {
         os() << "tr";
         return *this;
@@ -82,7 +82,7 @@
       }
     }
 
-    DisassemblerStream& operator<<(const MemOperand& operand) OVERRIDE {
+    DisassemblerStream& operator<<(const MemOperand& operand) override {
       // VIXL must use a PrintLabel object whenever the base register is PC;
       // the following check verifies this invariant, and guards against bugs.
       DCHECK(!operand.GetBaseRegister().Is(pc));
@@ -96,7 +96,7 @@
       return *this;
     }
 
-    DisassemblerStream& operator<<(const vixl::aarch32::AlignedMemOperand& operand) OVERRIDE {
+    DisassemblerStream& operator<<(const vixl::aarch32::AlignedMemOperand& operand) override {
       // VIXL must use a PrintLabel object whenever the base register is PC;
       // the following check verifies this invariant, and guards against bugs.
       DCHECK(!operand.GetBaseRegister().Is(pc));
@@ -116,7 +116,7 @@
         disassembler_stream_(os, this, options),
         is_t32_(true) {}
 
-  void PrintCodeAddress(uint32_t prog_ctr) OVERRIDE {
+  void PrintCodeAddress(uint32_t prog_ctr) override {
     os() << "0x" << std::hex << std::setw(8) << std::setfill('0') << prog_ctr << ": ";
   }
 
diff --git a/disassembler/disassembler_arm.h b/disassembler/disassembler_arm.h
index 237b577..dd6621d 100644
--- a/disassembler/disassembler_arm.h
+++ b/disassembler/disassembler_arm.h
@@ -26,14 +26,14 @@
 namespace art {
 namespace arm {
 
-class DisassemblerArm FINAL : public Disassembler {
+class DisassemblerArm final : public Disassembler {
   class CustomDisassembler;
 
  public:
   explicit DisassemblerArm(DisassemblerOptions* options);
 
-  size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
-  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+  size_t Dump(std::ostream& os, const uint8_t* begin) override;
+  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
 
  private:
   uintptr_t GetPc(uintptr_t instr_ptr) const {
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 19e4dfb..89beaa9 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -29,7 +29,7 @@
 namespace art {
 namespace arm64 {
 
-class CustomDisassembler FINAL : public vixl::aarch64::Disassembler {
+class CustomDisassembler final : public vixl::aarch64::Disassembler {
  public:
   explicit CustomDisassembler(DisassemblerOptions* options)
       : vixl::aarch64::Disassembler(),
@@ -45,13 +45,13 @@
 
   // Use register aliases in the disassembly.
   void AppendRegisterNameToOutput(const vixl::aarch64::Instruction* instr,
-                                  const vixl::aarch64::CPURegister& reg) OVERRIDE;
+                                  const vixl::aarch64::CPURegister& reg) override;
 
   // Improve the disassembly of literal load instructions.
-  void VisitLoadLiteral(const vixl::aarch64::Instruction* instr) OVERRIDE;
+  void VisitLoadLiteral(const vixl::aarch64::Instruction* instr) override;
 
   // Improve the disassembly of thread offset.
-  void VisitLoadStoreUnsignedOffset(const vixl::aarch64::Instruction* instr) OVERRIDE;
+  void VisitLoadStoreUnsignedOffset(const vixl::aarch64::Instruction* instr) override;
 
  private:
   // Indicate if the disassembler should read data loaded from literal pools.
@@ -69,15 +69,15 @@
   DisassemblerOptions* options_;
 };
 
-class DisassemblerArm64 FINAL : public Disassembler {
+class DisassemblerArm64 final : public Disassembler {
  public:
   explicit DisassemblerArm64(DisassemblerOptions* options) :
       Disassembler(options), disasm(options) {
     decoder.AppendVisitor(&disasm);
   }
 
-  size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
-  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+  size_t Dump(std::ostream& os, const uint8_t* begin) override;
+  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
 
  private:
   vixl::aarch64::Decoder decoder;
diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h
index afa6af3..bc74b43 100644
--- a/disassembler/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -24,7 +24,7 @@
 namespace art {
 namespace mips {
 
-class DisassemblerMips FINAL : public Disassembler {
+class DisassemblerMips final : public Disassembler {
  public:
   explicit DisassemblerMips(DisassemblerOptions* options, bool is_o32_abi)
       : Disassembler(options),
@@ -33,8 +33,8 @@
         is_o32_abi_(is_o32_abi) {}
 
   const char* RegName(uint32_t reg);
-  size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
-  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+  size_t Dump(std::ostream& os, const uint8_t* begin) override;
+  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
 
  private:
   // Address and encoding of the last disassembled instruction.
diff --git a/disassembler/disassembler_x86.h b/disassembler/disassembler_x86.h
index 31b62bc..a329280 100644
--- a/disassembler/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -24,13 +24,13 @@
 
 enum RegFile { GPR, MMX, SSE };
 
-class DisassemblerX86 FINAL : public Disassembler {
+class DisassemblerX86 final : public Disassembler {
  public:
   DisassemblerX86(DisassemblerOptions* options, bool supports_rex)
       : Disassembler(options), supports_rex_(supports_rex) {}
 
-  size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
-  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
+  size_t Dump(std::ostream& os, const uint8_t* begin) override;
+  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
 
  private:
   size_t DumpNops(std::ostream& os, const uint8_t* instr);
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index f54c551..420cd12 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -347,9 +347,9 @@
     begin_image_ptr_(begin_image_ptr),
     dirty_pages_(dirty_pages) { }
 
-  virtual ~ImgObjectVisitor() OVERRIDE { }
+  virtual ~ImgObjectVisitor() override { }
 
-  virtual void Visit(mirror::Object* object) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  virtual void Visit(mirror::Object* object) override REQUIRES_SHARED(Locks::mutator_lock_) {
     // Sanity check that we are reading a real mirror::Object
     CHECK(object->GetClass() != nullptr) << "Image object at address "
                                          << object
@@ -658,8 +658,8 @@
     dirty_func_(std::move(dirty_func)),
     begin_image_ptr_(begin_image_ptr),
     dirty_pages_(dirty_pages) { }
-  virtual ~ImgArtMethodVisitor() OVERRIDE { }
-  virtual void Visit(ArtMethod* method) OVERRIDE {
+  virtual ~ImgArtMethodVisitor() override { }
+  virtual void Visit(ArtMethod* method) override {
     dirty_func_(method, begin_image_ptr_, dirty_pages_);
   }
 
@@ -1672,7 +1672,7 @@
   using Base = CmdlineArgs;
 
   virtual ParseStatus ParseCustom(const StringPiece& option,
-                                  std::string* error_msg) OVERRIDE {
+                                  std::string* error_msg) override {
     {
       ParseStatus base_parse = Base::ParseCustom(option, error_msg);
       if (base_parse != kParseUnknownArgument) {
@@ -1703,7 +1703,7 @@
     return kParseOk;
   }
 
-  virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+  virtual ParseStatus ParseChecks(std::string* error_msg) override {
     // Perform the parent checks.
     ParseStatus parent_checks = Base::ParseChecks(error_msg);
     if (parent_checks != kParseOk) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 52096f0..c46aaf4 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -57,7 +57,7 @@
     boot_image_location_ = image_spaces[0]->GetImageLocation();
   }
 
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
     // Needs to live until CommonRuntimeTest::SetUp finishes, since we pass it a cstring.
     runtime_args_image_ = android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str());
     options->push_back(std::make_pair(runtime_args_image_, nullptr));
diff --git a/libartbase/base/allocator.cc b/libartbase/base/allocator.cc
index c4ac180..1bcfe87 100644
--- a/libartbase/base/allocator.cc
+++ b/libartbase/base/allocator.cc
@@ -25,7 +25,7 @@
 
 namespace art {
 
-class MallocAllocator FINAL : public Allocator {
+class MallocAllocator final : public Allocator {
  public:
   MallocAllocator() {}
   ~MallocAllocator() {}
@@ -44,7 +44,7 @@
 
 MallocAllocator g_malloc_allocator;
 
-class NoopAllocator FINAL : public Allocator {
+class NoopAllocator final : public Allocator {
  public:
   NoopAllocator() {}
   ~NoopAllocator() {}
diff --git a/libartbase/base/arena_bit_vector.cc b/libartbase/base/arena_bit_vector.cc
index 01f9013..c6d8993 100644
--- a/libartbase/base/arena_bit_vector.cc
+++ b/libartbase/base/arena_bit_vector.cc
@@ -50,7 +50,7 @@
     ArenaBitVectorAllocatorKindImpl<kArenaAllocatorCountAllocations>;
 
 template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator, private ArenaBitVectorAllocatorKind {
+class ArenaBitVectorAllocator final : public Allocator, private ArenaBitVectorAllocatorKind {
  public:
   static ArenaBitVectorAllocator* Create(ArenaAlloc* allocator, ArenaAllocKind kind) {
     void* storage = allocator->template Alloc<ArenaBitVectorAllocator>(kind);
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index 5668b6c..76f57da 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -26,7 +26,7 @@
 
 // A bit memory region is a bit-offset subregion of a normal memory region. This is useful for
 // abstracting away the bit start offset to avoid needing to pass it as an argument everywhere.
-class BitMemoryRegion FINAL : public ValueObject {
+class BitMemoryRegion final : public ValueObject {
  public:
   struct Less {
     bool operator()(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) const {
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 62834c7..95f2cbb 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -209,11 +209,11 @@
   virtual ~CommonArtTestBase() {}
 
  protected:
-  virtual void SetUp() OVERRIDE {
+  virtual void SetUp() override {
     CommonArtTestImpl::SetUp();
   }
 
-  virtual void TearDown() OVERRIDE {
+  virtual void TearDown() override {
     CommonArtTestImpl::TearDown();
   }
 };
diff --git a/libartbase/base/dumpable.h b/libartbase/base/dumpable.h
index 0c00505..bd8622f 100644
--- a/libartbase/base/dumpable.h
+++ b/libartbase/base/dumpable.h
@@ -29,7 +29,7 @@
 //   os << Dumpable<MyType>(my_type_instance);
 //
 template<typename T>
-class Dumpable FINAL {
+class Dumpable final {
  public:
   explicit Dumpable(const T& value) : value_(value) {
   }
diff --git a/libartbase/base/indenter.h b/libartbase/base/indenter.h
index a479b7d..81d55fc 100644
--- a/libartbase/base/indenter.h
+++ b/libartbase/base/indenter.h
@@ -37,7 +37,7 @@
         count_(count) {}
 
  private:
-  std::streamsize xsputn(const char* s, std::streamsize n) OVERRIDE {
+  std::streamsize xsputn(const char* s, std::streamsize n) override {
     std::streamsize result = n;  // Aborts on failure.
     const char* eol = static_cast<const char*>(memchr(s, '\n', n));
     while (eol != nullptr) {
@@ -54,7 +54,7 @@
     return result;
   }
 
-  int_type overflow(int_type c) OVERRIDE {
+  int_type overflow(int_type c) override {
     if (UNLIKELY(c == std::char_traits<char>::eof())) {
       out_sbuf_->pubsync();
       return c;
diff --git a/libartbase/base/leb128.h b/libartbase/base/leb128.h
index d5847fd..b866d37 100644
--- a/libartbase/base/leb128.h
+++ b/libartbase/base/leb128.h
@@ -357,7 +357,7 @@
 
 // An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
 template <typename Vector = std::vector<uint8_t>>
-class Leb128EncodingVector FINAL : private Vector,
+class Leb128EncodingVector final : private Vector,
                                    public Leb128Encoder<Vector> {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
 
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index f26cf07..33866bb 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -23,9 +23,6 @@
 #include "android-base/macros.h"
 #include "android-base/thread_annotations.h"
 
-#define OVERRIDE override
-#define FINAL final
-
 // Declare a friend relationship in a class with a test. Used rather than FRIEND_TEST to avoid
 // globally importing gtest/gtest.h into the main ART header files.
 #define ART_FRIEND_TEST(test_set_name, individual_test)\
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 15a5d71..02e29f1 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -28,7 +28,7 @@
 
 namespace art {
 
-class MallocArena FINAL : public Arena {
+class MallocArena final : public Arena {
  public:
   explicit MallocArena(size_t size = arena_allocator::kArenaDefaultSize);
   virtual ~MallocArena();
diff --git a/libartbase/base/malloc_arena_pool.h b/libartbase/base/malloc_arena_pool.h
index c48be59..9216c03 100644
--- a/libartbase/base/malloc_arena_pool.h
+++ b/libartbase/base/malloc_arena_pool.h
@@ -23,17 +23,17 @@
 
 namespace art {
 
-class MallocArenaPool FINAL : public ArenaPool {
+class MallocArenaPool final : public ArenaPool {
  public:
   MallocArenaPool();
   ~MallocArenaPool();
-  Arena* AllocArena(size_t size) OVERRIDE;
-  void FreeArenaChain(Arena* first) OVERRIDE;
-  size_t GetBytesAllocated() const OVERRIDE;
-  void ReclaimMemory() OVERRIDE;
-  void LockReclaimMemory() OVERRIDE;
+  Arena* AllocArena(size_t size) override;
+  void FreeArenaChain(Arena* first) override;
+  size_t GetBytesAllocated() const override;
+  void ReclaimMemory() override;
+  void LockReclaimMemory() override;
   // Is a nop for malloc pools.
-  void TrimMaps() OVERRIDE;
+  void TrimMaps() override;
 
  private:
   Arena* free_arenas_;
diff --git a/libartbase/base/memory_region.h b/libartbase/base/memory_region.h
index 2060329..9c9ff92 100644
--- a/libartbase/base/memory_region.h
+++ b/libartbase/base/memory_region.h
@@ -34,7 +34,7 @@
 // Memory regions are useful for accessing memory with bounds checks in
 // debug mode. They can be safely passed by value and do not assume ownership
 // of the region.
-class MemoryRegion FINAL : public ValueObject {
+class MemoryRegion final : public ValueObject {
  public:
   struct ContentEquals {
     constexpr bool operator()(const MemoryRegion& lhs, const MemoryRegion& rhs) const {
diff --git a/libartbase/base/unix_file/fd_file.h b/libartbase/base/unix_file/fd_file.h
index d61dab6..19be3ef 100644
--- a/libartbase/base/unix_file/fd_file.h
+++ b/libartbase/base/unix_file/fd_file.h
@@ -89,13 +89,13 @@
   virtual ~FdFile();
 
   // RandomAccessFile API.
-  int Close() OVERRIDE WARN_UNUSED;
-  int64_t Read(char* buf, int64_t byte_count, int64_t offset) const OVERRIDE WARN_UNUSED;
-  int SetLength(int64_t new_length) OVERRIDE WARN_UNUSED;
-  int64_t GetLength() const OVERRIDE;
-  int64_t Write(const char* buf, int64_t byte_count, int64_t offset) OVERRIDE WARN_UNUSED;
+  int Close() override WARN_UNUSED;
+  int64_t Read(char* buf, int64_t byte_count, int64_t offset) const override WARN_UNUSED;
+  int SetLength(int64_t new_length) override WARN_UNUSED;
+  int64_t GetLength() const override;
+  int64_t Write(const char* buf, int64_t byte_count, int64_t offset) override WARN_UNUSED;
 
-  int Flush() OVERRIDE WARN_UNUSED;
+  int Flush() override WARN_UNUSED;
 
   // Short for SetLength(0); Flush(); Close();
   // If the file was opened with a path name and unlink = true, also calls Unlink() on the path.
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index 1846a13..f50bf1c 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -40,9 +40,9 @@
 class MemMapContainer : public DexFileContainer {
  public:
   explicit MemMapContainer(MemMap&& mem_map) : mem_map_(std::move(mem_map)) { }
-  virtual ~MemMapContainer() OVERRIDE { }
+  virtual ~MemMapContainer() override { }
 
-  int GetPermissions() OVERRIDE {
+  int GetPermissions() override {
     if (!mem_map_.IsValid()) {
       return 0;
     } else {
@@ -50,11 +50,11 @@
     }
   }
 
-  bool IsReadOnly() OVERRIDE {
+  bool IsReadOnly() override {
     return GetPermissions() == PROT_READ;
   }
 
-  bool EnableWrite() OVERRIDE {
+  bool EnableWrite() override {
     CHECK(IsReadOnly());
     if (!mem_map_.IsValid()) {
       return false;
@@ -63,7 +63,7 @@
     }
   }
 
-  bool DisableWrite() OVERRIDE {
+  bool DisableWrite() override {
     CHECK(!IsReadOnly());
     if (!mem_map_.IsValid()) {
       return false;
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index 420b347..40d4673 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -51,7 +51,7 @@
                             std::vector<uint32_t>* checksums,
                             std::string* error_msg,
                             int zip_fd = -1,
-                            bool* only_contains_uncompressed_dex = nullptr) const OVERRIDE;
+                            bool* only_contains_uncompressed_dex = nullptr) const override;
 
   // Opens .dex file, backed by existing memory
   std::unique_ptr<const DexFile> Open(const uint8_t* base,
@@ -61,7 +61,7 @@
                                       const OatDexFile* oat_dex_file,
                                       bool verify,
                                       bool verify_checksum,
-                                      std::string* error_msg) const OVERRIDE;
+                                      std::string* error_msg) const override;
 
   // Opens .dex file that has been memory-mapped by the caller.
   std::unique_ptr<const DexFile> Open(const std::string& location,
diff --git a/libdexfile/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
index 3f311b7..a7d0363 100644
--- a/libdexfile/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -44,7 +44,7 @@
 }
 
 class ArtDexFileLoaderTest : public CommonArtTest {
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonArtTest::SetUp();
     // Open a jar file from the boot classpath for use in basic tests of dex accessors.
     std::vector<std::string> lib_core_dex_file_names = GetLibCoreDexFileNames();
diff --git a/libdexfile/dex/compact_dex_file.h b/libdexfile/dex/compact_dex_file.h
index affc9a2..6db68c0 100644
--- a/libdexfile/dex/compact_dex_file.h
+++ b/libdexfile/dex/compact_dex_file.h
@@ -253,15 +253,15 @@
 
   // Returns true if the byte string points to the magic value.
   static bool IsMagicValid(const uint8_t* magic);
-  virtual bool IsMagicValid() const OVERRIDE;
+  virtual bool IsMagicValid() const override;
 
   // Returns true if the byte string after the magic is the correct value.
   static bool IsVersionValid(const uint8_t* magic);
-  virtual bool IsVersionValid() const OVERRIDE;
+  virtual bool IsVersionValid() const override;
 
   // TODO This is completely a guess. We really need to do better. b/72402467
   // We ask for 64 megabytes which should be big enough for any realistic dex file.
-  virtual size_t GetDequickenedSize() const OVERRIDE {
+  virtual size_t GetDequickenedSize() const override {
     return 64 * MB;
   }
 
@@ -269,9 +269,9 @@
     return down_cast<const Header&>(DexFile::GetHeader());
   }
 
-  virtual bool SupportsDefaultMethods() const OVERRIDE;
+  virtual bool SupportsDefaultMethods() const override;
 
-  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
 
   uint32_t GetDebugInfoOffset(uint32_t dex_method_index) const {
     return debug_info_offsets_.GetOffset(dex_method_index);
@@ -281,7 +281,7 @@
                                     size_t base_size,
                                     const uint8_t* data_begin,
                                     size_t data_size);
-  virtual uint32_t CalculateChecksum() const OVERRIDE;
+  virtual uint32_t CalculateChecksum() const override;
 
  private:
   CompactDexFile(const uint8_t* base,
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 6d9ca4a..d9e483d 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -36,21 +36,21 @@
 class VectorContainer : public DexFileContainer {
  public:
   explicit VectorContainer(std::vector<uint8_t>&& vector) : vector_(std::move(vector)) { }
-  virtual ~VectorContainer() OVERRIDE { }
+  virtual ~VectorContainer() override { }
 
-  int GetPermissions() OVERRIDE {
+  int GetPermissions() override {
     return 0;
   }
 
-  bool IsReadOnly() OVERRIDE {
+  bool IsReadOnly() override {
     return true;
   }
 
-  bool EnableWrite() OVERRIDE {
+  bool EnableWrite() override {
     return false;
   }
 
-  bool DisableWrite() OVERRIDE {
+  bool DisableWrite() override {
     return false;
   }
 
diff --git a/libdexfile/dex/dex_instruction.h b/libdexfile/dex/dex_instruction.h
index 6807025..ad8a184 100644
--- a/libdexfile/dex/dex_instruction.h
+++ b/libdexfile/dex/dex_instruction.h
@@ -708,12 +708,12 @@
 
 // Class for accessing operands for instructions with a range format
 // (e.g. 3rc and 4rcc).
-class RangeInstructionOperands FINAL : public InstructionOperands {
+class RangeInstructionOperands final : public InstructionOperands {
  public:
   RangeInstructionOperands(uint32_t first_operand, size_t num_operands)
       : InstructionOperands(num_operands), first_operand_(first_operand) {}
   ~RangeInstructionOperands() {}
-  uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+  uint32_t GetOperand(size_t operand_index) const override;
 
  private:
   const uint32_t first_operand_;
@@ -723,13 +723,13 @@
 
 // Class for accessing operands for instructions with a variable
 // number of arguments format (e.g. 35c and 45cc).
-class VarArgsInstructionOperands FINAL : public InstructionOperands {
+class VarArgsInstructionOperands final : public InstructionOperands {
  public:
   VarArgsInstructionOperands(const uint32_t (&operands)[Instruction::kMaxVarArgRegs],
                              size_t num_operands)
       : InstructionOperands(num_operands), operands_(operands) {}
   ~VarArgsInstructionOperands() {}
-  uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+  uint32_t GetOperand(size_t operand_index) const override;
 
  private:
   const uint32_t (&operands_)[Instruction::kMaxVarArgRegs];
@@ -739,12 +739,12 @@
 
 // Class for accessing operands without the receiver by wrapping an
 // existing InstructionOperands instance.
-class NoReceiverInstructionOperands FINAL : public InstructionOperands {
+class NoReceiverInstructionOperands final : public InstructionOperands {
  public:
   explicit NoReceiverInstructionOperands(const InstructionOperands* const inner)
       : InstructionOperands(inner->GetNumberOfOperands() - 1), inner_(inner) {}
   ~NoReceiverInstructionOperands() {}
-  uint32_t GetOperand(size_t operand_index) const OVERRIDE;
+  uint32_t GetOperand(size_t operand_index) const override;
 
  private:
   const InstructionOperands* const inner_;
diff --git a/libdexfile/dex/standard_dex_file.h b/libdexfile/dex/standard_dex_file.h
index 999e5b9..a289635 100644
--- a/libdexfile/dex/standard_dex_file.h
+++ b/libdexfile/dex/standard_dex_file.h
@@ -73,17 +73,17 @@
 
   // Returns true if the byte string points to the magic value.
   static bool IsMagicValid(const uint8_t* magic);
-  virtual bool IsMagicValid() const OVERRIDE;
+  virtual bool IsMagicValid() const override;
 
   // Returns true if the byte string after the magic is the correct value.
   static bool IsVersionValid(const uint8_t* magic);
-  virtual bool IsVersionValid() const OVERRIDE;
+  virtual bool IsVersionValid() const override;
 
-  virtual bool SupportsDefaultMethods() const OVERRIDE;
+  virtual bool SupportsDefaultMethods() const override;
 
-  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+  uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
 
-  virtual size_t GetDequickenedSize() const OVERRIDE {
+  virtual size_t GetDequickenedSize() const override {
     return Size();
   }
 
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index 42c3320..417abaa 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -35,7 +35,7 @@
 
 class ProfileCompilationInfoTest : public CommonArtTest {
  public:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonArtTest::SetUp();
     allocator_.reset(new ArenaAllocator(&pool_));
   }
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index a5cc38b..5e56c9a 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -129,7 +129,7 @@
 }
 
 template <typename ElfTypes>
-class OatSymbolizer FINAL {
+class OatSymbolizer final {
  public:
   OatSymbolizer(const OatFile* oat_file, const std::string& output_name, bool no_bits) :
       oat_file_(oat_file),
@@ -1980,7 +1980,7 @@
    public:
     explicit DumpArtMethodVisitor(ImageDumper* image_dumper) : image_dumper_(image_dumper) {}
 
-    virtual void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    virtual void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::ostream& indent_os = image_dumper_->vios_.Stream();
       indent_os << method << " " << " ArtMethod: " << ArtMethod::PrettyMethod(method) << "\n";
       image_dumper_->DumpMethod(method, indent_os);
@@ -3351,7 +3351,7 @@
   using Base = CmdlineArgs;
 
   virtual ParseStatus ParseCustom(const StringPiece& option,
-                                  std::string* error_msg) OVERRIDE {
+                                  std::string* error_msg) override {
     {
       ParseStatus base_parse = Base::ParseCustom(option, error_msg);
       if (base_parse != kParseUnknownArgument) {
@@ -3408,7 +3408,7 @@
     return kParseOk;
   }
 
-  virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+  virtual ParseStatus ParseChecks(std::string* error_msg) override {
     // Infer boot image location from the image location if possible.
     if (boot_image_location_ == nullptr) {
       boot_image_location_ = image_location_;
@@ -3536,7 +3536,7 @@
 };
 
 struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
-  virtual bool NeedsRuntime() OVERRIDE {
+  virtual bool NeedsRuntime() override {
     CHECK(args_ != nullptr);
 
     // If we are only doing the oat file, disable absolute_addresses. Keep them for image dumping.
@@ -3563,7 +3563,7 @@
           !args_->symbolize_;
   }
 
-  virtual bool ExecuteWithoutRuntime() OVERRIDE {
+  virtual bool ExecuteWithoutRuntime() override {
     CHECK(args_ != nullptr);
     CHECK(args_->oat_filename_ != nullptr);
 
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index 6e991de..d9f34a5 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -58,13 +58,13 @@
   explicit JvmtiMethodInspectionCallback(DeoptManager* manager) : manager_(manager) {}
 
   bool IsMethodBeingInspected(art::ArtMethod* method)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+      override REQUIRES_SHARED(art::Locks::mutator_lock_);
 
   bool IsMethodSafeToJit(art::ArtMethod* method)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+      override REQUIRES_SHARED(art::Locks::mutator_lock_);
 
   bool MethodNeedsDebugVersion(art::ArtMethod* method)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+      override REQUIRES_SHARED(art::Locks::mutator_lock_);
 
  private:
   DeoptManager* manager_;
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index 6a8ba48..e98517f 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -50,7 +50,7 @@
 // pending exceptions since they can cause new ones to be thrown. In accordance with the JVMTI
 // specification we allow exceptions originating from events to overwrite the current exception,
 // including exceptions originating from earlier events.
-class ScopedEventDispatchEnvironment FINAL : public art::ValueObject {
+class ScopedEventDispatchEnvironment final : public art::ValueObject {
  public:
   ScopedEventDispatchEnvironment() : env_(nullptr), throw_(nullptr, nullptr) {
     DCHECK_EQ(art::Thread::Current()->GetState(), art::ThreadState::kNative);
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index f71a5dc..43d0b10 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -265,7 +265,7 @@
   explicit JvmtiDdmChunkListener(EventHandler* handler) : handler_(handler) {}
 
   void DdmPublishChunk(uint32_t type, const art::ArrayRef<const uint8_t>& data)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kDdmPublishChunk)) {
       art::Thread* self = art::Thread::Current();
       handler_->DispatchEvent<ArtJvmtiEvent::kDdmPublishChunk>(
@@ -288,7 +288,7 @@
   explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
 
   void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     DCHECK_EQ(self, art::Thread::Current());
 
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kVmObjectAlloc)) {
@@ -337,7 +337,7 @@
   explicit JvmtiMonitorListener(EventHandler* handler) : handler_(handler) {}
 
   void MonitorContendedLocking(art::Monitor* m)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEnter)) {
       art::Thread* self = art::Thread::Current();
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -351,7 +351,7 @@
   }
 
   void MonitorContendedLocked(art::Monitor* m)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEntered)) {
       art::Thread* self = art::Thread::Current();
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -365,7 +365,7 @@
   }
 
   void ObjectWaitStart(art::Handle<art::mirror::Object> obj, int64_t timeout)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWait)) {
       art::Thread* self = art::Thread::Current();
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -392,7 +392,7 @@
   //
   // See b/65558434 for more discussion.
   void MonitorWaitFinished(art::Monitor* m, bool timeout)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWaited)) {
       art::Thread* self = art::Thread::Current();
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -429,11 +429,11 @@
         start_enabled_(false),
         finish_enabled_(false) {}
 
-  void StartPause() OVERRIDE {
+  void StartPause() override {
     handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(art::Thread::Current());
   }
 
-  void EndPause() OVERRIDE {
+  void EndPause() override {
     handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(art::Thread::Current());
   }
 
@@ -475,7 +475,7 @@
   }
 }
 
-class JvmtiMethodTraceListener FINAL : public art::instrumentation::InstrumentationListener {
+class JvmtiMethodTraceListener final : public art::instrumentation::InstrumentationListener {
  public:
   explicit JvmtiMethodTraceListener(EventHandler* handler) : event_handler_(handler) {}
 
@@ -484,7 +484,7 @@
                      art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                      art::ArtMethod* method,
                      uint32_t dex_pc ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (!method->IsRuntimeMethod() &&
         event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodEntry)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
@@ -501,7 +501,7 @@
                     art::ArtMethod* method,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> return_value)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (!method->IsRuntimeMethod() &&
         event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
       DCHECK_EQ(
@@ -528,7 +528,7 @@
                     art::ArtMethod* method,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     const art::JValue& return_value)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (!method->IsRuntimeMethod() &&
         event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
       DCHECK_NE(
@@ -556,7 +556,7 @@
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method,
                     uint32_t dex_pc ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (!method->IsRuntimeMethod() &&
         event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
       jvalue val;
@@ -586,7 +586,7 @@
                   art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                   art::ArtMethod* method,
                   uint32_t new_dex_pc)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     DCHECK(!method->IsRuntimeMethod());
     // Default methods might be copied to multiple classes. We need to get the canonical version of
     // this method so that we can check for breakpoints correctly.
@@ -613,7 +613,7 @@
                  art::ArtMethod* method,
                  uint32_t dex_pc,
                  art::ArtField* field)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldAccess)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
       // DCHECK(!self->IsExceptionPending());
@@ -638,7 +638,7 @@
                     uint32_t dex_pc,
                     art::ArtField* field,
                     art::Handle<art::mirror::Object> new_val)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
       // DCHECK(!self->IsExceptionPending());
@@ -670,7 +670,7 @@
                     uint32_t dex_pc,
                     art::ArtField* field,
                     const art::JValue& field_value)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
       DCHECK(!self->IsExceptionPending());
@@ -700,7 +700,7 @@
   }
 
   void WatchedFramePop(art::Thread* self, const art::ShadowFrame& frame)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
     jboolean is_exception_pending = self->IsExceptionPending();
     RunEventCallback<ArtJvmtiEvent::kFramePop>(
@@ -720,7 +720,7 @@
     // Finds the location where this exception will most likely be caught. We ignore intervening
     // native frames (which could catch the exception) and return the closest java frame with a
     // compatible catch statement.
-    class CatchLocationFinder FINAL : public art::StackVisitor {
+    class CatchLocationFinder final : public art::StackVisitor {
      public:
       CatchLocationFinder(art::Thread* target,
                           art::Handle<art::mirror::Class> exception_class,
@@ -733,7 +733,7 @@
           catch_method_ptr_(out_catch_method),
           catch_dex_pc_ptr_(out_catch_pc) {}
 
-      bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
         art::ArtMethod* method = GetMethod();
         DCHECK(method != nullptr);
         if (method->IsRuntimeMethod()) {
@@ -782,7 +782,7 @@
 
   // Call-back when an exception is thrown.
   void ExceptionThrown(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     DCHECK(self->IsExceptionThrownByCurrentMethod(exception_object.Get()));
     // The instrumentation events get rid of this for us.
     DCHECK(!self->IsExceptionPending());
@@ -812,7 +812,7 @@
 
   // Call-back when an exception is handled.
   void ExceptionHandled(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     // Since the exception has already been handled there shouldn't be one pending.
     DCHECK(!self->IsExceptionPending());
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kExceptionCatch)) {
@@ -839,7 +839,7 @@
               art::ArtMethod* method ATTRIBUTE_UNUSED,
               uint32_t dex_pc ATTRIBUTE_UNUSED,
               int32_t dex_pc_offset ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     return;
   }
 
@@ -849,7 +849,7 @@
                                 art::ArtMethod* caller ATTRIBUTE_UNUSED,
                                 uint32_t dex_pc ATTRIBUTE_UNUSED,
                                 art::ArtMethod* callee ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     return;
   }
 
@@ -959,7 +959,7 @@
         : runtime_(runtime) {}
 
     bool operator()(art::ObjPtr<art::mirror::Class> klass)
-        OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+        override REQUIRES(art::Locks::mutator_lock_) {
       if (!klass->IsLoaded()) {
         // Skip classes that aren't loaded since they might not have fully allocated and initialized
         // their methods. Furthermore, since the jvmti-plugin must have been loaded by this point
diff --git a/openjdkjvmti/object_tagging.h b/openjdkjvmti/object_tagging.h
index 1b8366a..4181302 100644
--- a/openjdkjvmti/object_tagging.h
+++ b/openjdkjvmti/object_tagging.h
@@ -45,15 +45,15 @@
 struct ArtJvmTiEnv;
 class EventHandler;
 
-class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
+class ObjectTagTable final : public JvmtiWeakTable<jlong> {
  public:
   ObjectTagTable(EventHandler* event_handler, ArtJvmTiEnv* env)
       : event_handler_(event_handler), jvmti_env_(env) {}
 
-  bool Set(art::mirror::Object* obj, jlong tag) OVERRIDE
+  bool Set(art::mirror::Object* obj, jlong tag) override
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_);
-  bool SetLocked(art::mirror::Object* obj, jlong tag) OVERRIDE
+  bool SetLocked(art::mirror::Object* obj, jlong tag) override
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(allow_disallow_lock_);
 
@@ -73,8 +73,8 @@
   }
 
  protected:
-  bool DoesHandleNullOnSweep() OVERRIDE;
-  void HandleNullSweep(jlong tag) OVERRIDE;
+  bool DoesHandleNullOnSweep() override;
+  void HandleNullSweep(jlong tag) override;
 
  private:
   EventHandler* event_handler_;
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 209add3..f1d6fb0 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -163,7 +163,7 @@
                       const art::DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
                       /*out*/art::DexFile const** final_dex_file,
                       /*out*/art::DexFile::ClassDef const** final_class_def)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     bool is_enabled =
         event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookRetransformable) ||
         event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookNonRetransformable);
@@ -381,7 +381,7 @@
     void VisitRoots(art::mirror::Object*** roots,
                     size_t count,
                     const art::RootInfo& info ATTRIBUTE_UNUSED)
-        OVERRIDE {
+        override {
       for (size_t i = 0; i != count; ++i) {
         if (*roots[i] == input_) {
           *roots[i] = output_;
@@ -392,7 +392,7 @@
     void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
                     size_t count,
                     const art::RootInfo& info ATTRIBUTE_UNUSED)
-        OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+        override REQUIRES_SHARED(art::Locks::mutator_lock_) {
       for (size_t i = 0; i != count; ++i) {
         if (roots[i]->AsMirrorPtr() == input_) {
           roots[i]->Assign(output_);
@@ -418,7 +418,7 @@
       WeakGlobalUpdate(art::mirror::Class* root_input, art::mirror::Class* root_output)
           : input_(root_input), output_(root_output) {}
 
-      art::mirror::Object* IsMarked(art::mirror::Object* obj) OVERRIDE {
+      art::mirror::Object* IsMarked(art::mirror::Object* obj) override {
         if (obj == input_) {
           return output_;
         }
diff --git a/openjdkjvmti/ti_dump.cc b/openjdkjvmti/ti_dump.cc
index 253580e..c9abb71 100644
--- a/openjdkjvmti/ti_dump.cc
+++ b/openjdkjvmti/ti_dump.cc
@@ -44,7 +44,7 @@
 namespace openjdkjvmti {
 
 struct DumpCallback : public art::RuntimeSigQuitCallback {
-  void SigQuit() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void SigQuit() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::Thread* thread = art::Thread::Current();
     art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
     event_handler->DispatchEvent<ArtJvmtiEvent::kDataDumpRequest>(art::Thread::Current());
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index d23370b..85aa946 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -760,7 +760,7 @@
                               user_data);
 }
 
-class FollowReferencesHelper FINAL {
+class FollowReferencesHelper final {
  public:
   FollowReferencesHelper(HeapUtil* h,
                          jvmtiEnv* jvmti_env,
@@ -828,7 +828,7 @@
   }
 
  private:
-  class CollectAndReportRootsVisitor FINAL : public art::RootVisitor {
+  class CollectAndReportRootsVisitor final : public art::RootVisitor {
    public:
     CollectAndReportRootsVisitor(FollowReferencesHelper* helper,
                                  ObjectTagTable* tag_table,
@@ -841,7 +841,7 @@
           stop_reports_(false) {}
 
     void VisitRoots(art::mirror::Object*** roots, size_t count, const art::RootInfo& info)
-        OVERRIDE
+        override
         REQUIRES_SHARED(art::Locks::mutator_lock_)
         REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
       for (size_t i = 0; i != count; ++i) {
@@ -852,7 +852,7 @@
     void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
                     size_t count,
                     const art::RootInfo& info)
-        OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_)
+        override REQUIRES_SHARED(art::Locks::mutator_lock_)
         REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
       for (size_t i = 0; i != count; ++i) {
         AddRoot(roots[i]->AsMirrorPtr(), info);
@@ -1347,7 +1347,7 @@
     explicit ReportClassVisitor(art::Thread* self) : self_(self) {}
 
     bool operator()(art::ObjPtr<art::mirror::Class> klass)
-        OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+        override REQUIRES_SHARED(art::Locks::mutator_lock_) {
       if (klass->IsLoaded() || klass->IsErroneous()) {
         classes_.push_back(self_->GetJniEnv()->AddLocalReference<jclass>(klass));
       }
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 87d832c..1588df4 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -66,7 +66,7 @@
   void RegisterNativeMethod(art::ArtMethod* method,
                             const void* cur_method,
                             /*out*/void** new_method)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kNativeMethodBind)) {
       art::Thread* thread = art::Thread::Current();
       art::JNIEnvExt* jnienv = thread->GetJniEnv();
@@ -550,7 +550,7 @@
   CommonLocalVariableClosure(jint depth, jint slot)
       : result_(ERR(INTERNAL)), depth_(depth), slot_(slot) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     art::ScopedAssertNoThreadSuspension sants("CommonLocalVariableClosure::Run");
     std::unique_ptr<art::Context> context(art::Context::Create());
@@ -702,7 +702,7 @@
   jvmtiError GetTypeError(art::ArtMethod* method ATTRIBUTE_UNUSED,
                           art::Primitive::Type slot_type,
                           const std::string& descriptor ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (slot_type) {
       case art::Primitive::kPrimByte:
       case art::Primitive::kPrimChar:
@@ -722,7 +722,7 @@
   }
 
   jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (type_) {
       case art::Primitive::kPrimNot: {
         uint32_t ptr_val;
@@ -816,7 +816,7 @@
   jvmtiError GetTypeError(art::ArtMethod* method,
                           art::Primitive::Type slot_type,
                           const std::string& descriptor)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (slot_type) {
       case art::Primitive::kPrimNot: {
         if (type_ != art::Primitive::kPrimNot) {
@@ -852,7 +852,7 @@
   }
 
   jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (type_) {
       case art::Primitive::kPrimNot: {
         uint32_t ptr_val;
@@ -941,7 +941,7 @@
         depth_(depth),
         val_(nullptr) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES(art::Locks::mutator_lock_) {
     art::ScopedAssertNoThreadSuspension sants("GetLocalInstanceClosure::Run");
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     std::unique_ptr<art::Context> context(art::Context::Create());
diff --git a/openjdkjvmti/ti_phase.cc b/openjdkjvmti/ti_phase.cc
index 7157974..4fa97f1 100644
--- a/openjdkjvmti/ti_phase.cc
+++ b/openjdkjvmti/ti_phase.cc
@@ -56,7 +56,7 @@
     return soa.AddLocalReference<jthread>(soa.Self()->GetPeer());
   }
 
-  void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
+  void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     art::Thread* self = art::Thread::Current();
     switch (phase) {
       case RuntimePhase::kInitialAgents:
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 8707e27..2ec2f04 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -158,7 +158,7 @@
           obsoleted_methods_(obsoleted_methods),
           obsolete_maps_(obsolete_maps) { }
 
-  ~ObsoleteMethodStackVisitor() OVERRIDE {}
+  ~ObsoleteMethodStackVisitor() override {}
 
  public:
   // Returns true if we successfully installed obsolete methods on this thread, filling
@@ -177,7 +177,7 @@
     visitor.WalkStack();
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES(art::Locks::mutator_lock_) {
     art::ScopedAssertNoThreadSuspension snts("Fixing up the stack for obsolete methods.");
     art::ArtMethod* old_method = GetMethod();
     if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index bcbab14..1189b1d 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -186,7 +186,7 @@
 }
 
 struct SearchCallback : public art::RuntimePhaseCallback {
-  void NextRuntimePhase(RuntimePhase phase) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void NextRuntimePhase(RuntimePhase phase) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (phase == RuntimePhase::kStart) {
       // It's time to update the system properties.
       Update();
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 318d98d..b6969af 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -128,7 +128,7 @@
         start_result(0),
         stop_result(0) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     auto frames_fn = [&](jvmtiFrameInfo info) {
       frames.push_back(info);
     };
@@ -195,7 +195,7 @@
     DCHECK_GE(start_input, 0u);
   }
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     auto frames_fn = [&](jvmtiFrameInfo info) {
       frame_buffer[index] = info;
       ++index;
@@ -287,7 +287,7 @@
   GetAllStackTracesVectorClosure(size_t stop, Data* data_)
       : barrier(0), stop_input(stop), data(data_) {}
 
-  void Run(art::Thread* thread) OVERRIDE
+  void Run(art::Thread* thread) override
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!data->mutex) {
     art::Thread* self = art::Thread::Current();
@@ -678,7 +678,7 @@
  public:
   GetFrameCountClosure() : count(0) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     GetFrameCountVisitor visitor(self);
     visitor.WalkStack(false);
 
@@ -759,7 +759,7 @@
  public:
   explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
 
-  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     GetLocationVisitor visitor(self, n);
     visitor.WalkStack(false);
 
@@ -842,7 +842,7 @@
     delete context_;
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     if (!GetMethod()->IsRuntimeMethod()) {
       art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
@@ -867,7 +867,7 @@
   }
 
   void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     for (const art::Handle<art::mirror::Object>& m : monitors) {
       if (m.Get() == obj) {
         return;
@@ -889,7 +889,7 @@
   explicit MonitorInfoClosure(Fn handle_results)
       : err_(OK), handle_results_(handle_results) {}
 
-  void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     // Find the monitors on the stack.
     MonitorVisitor visitor(target);
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 949b566..e533094 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -82,7 +82,7 @@
                                          thread.get());
   }
 
-  void ThreadStart(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void ThreadStart(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (!started) {
       // Runtime isn't started. We only expect at most the signal handler or JIT threads to be
       // started here.
@@ -101,7 +101,7 @@
     Post<ArtJvmtiEvent::kThreadStart>(self);
   }
 
-  void ThreadDeath(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  void ThreadDeath(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     Post<ArtJvmtiEvent::kThreadEnd>(self);
   }
 
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index 8797553..d87ca56 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -68,7 +68,7 @@
 namespace openjdkjvmti {
 
 // A FaultHandler that will deal with initializing ClassDefinitions when they are actually needed.
-class TransformationFaultHandler FINAL : public art::FaultHandler {
+class TransformationFaultHandler final : public art::FaultHandler {
  public:
   explicit TransformationFaultHandler(art::FaultManager* manager)
       : art::FaultHandler(manager),
@@ -84,7 +84,7 @@
     uninitialized_class_definitions_.clear();
   }
 
-  bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+  bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) override {
     DCHECK_EQ(sig, SIGSEGV);
     art::Thread* self = art::Thread::Current();
     if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 8169979..02fc925 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -815,7 +815,7 @@
  public:
   explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
 
-  void Visit(ArtField* field) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
     dest->SetDeclaringClass(
         patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass().Ptr()));
@@ -834,7 +834,7 @@
  public:
   explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
 
-  void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method);
     patch_oat_->FixupMethod(method, dest);
   }
@@ -877,7 +877,7 @@
   }
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
     }
@@ -885,7 +885,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
     }
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 370f59d..286b686 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -40,7 +40,7 @@
 
 class ProfileAssistantTest : public CommonRuntimeTest {
  public:
-  void PostRuntimeCreate() OVERRIDE {
+  void PostRuntimeCreate() override {
     allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
   }
 
diff --git a/profman/profman.cc b/profman/profman.cc
index 9b47097..cecd3c2 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -185,7 +185,7 @@
 
 // TODO(calin): This class has grown too much from its initial design. Split the functionality
 // into smaller, more contained pieces.
-class ProfMan FINAL {
+class ProfMan final {
  public:
   ProfMan() :
       reference_profile_file_fd_(kInvalidFd),
diff --git a/runtime/aot_class_linker.h b/runtime/aot_class_linker.h
index 927b533..6a8133e 100644
--- a/runtime/aot_class_linker.h
+++ b/runtime/aot_class_linker.h
@@ -34,14 +34,14 @@
                                                  Handle<mirror::Class> klass,
                                                  verifier::HardFailLogMode log_level,
                                                  std::string* error_msg)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool InitializeClass(Thread *self,
                        Handle<mirror::Class> klass,
                        bool can_run_clinit,
                        bool can_init_parents)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 };
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index d4ceede..d4dbbf9 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -46,7 +46,7 @@
 
 class ArchTest : public CommonRuntimeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     // Use 64-bit ISA for runtime setup to make method size potentially larger
     // than necessary (rather than smaller) during CreateCalleeSaveMethod
     options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -55,7 +55,7 @@
   // Do not do any of the finalization. We don't want to run any code and we don't need the heap
   // prepared; it would actually be a problem, since SetUpRuntimeOptions sets the instruction
   // set to x86_64.
-  void FinalizeSetup() OVERRIDE {
+  void FinalizeSetup() override {
     ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
   }
 };
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index b980296..845cdaa 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -26,7 +26,7 @@
 namespace art {
 namespace arm {
 
-class ArmContext FINAL : public Context {
+class ArmContext final : public Context {
  public:
   ArmContext() {
     Reset();
@@ -34,55 +34,55 @@
 
   virtual ~ArmContext() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(SP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     SetGPR(PC, new_pc);
   }
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(R0, new_arg0_value);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfSRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
  private:
   // Pointers to register locations, initialized to null or the specific registers below.
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index f82534b..d964148 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -25,7 +25,7 @@
 using ArmFeaturesUniquePtr = std::unique_ptr<const ArmInstructionSetFeatures>;
 
 // Instruction set features relevant to the ARM architecture.
-class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class ArmInstructionSetFeatures final : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
   static ArmFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,18 +47,18 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static ArmFeaturesUniquePtr FromAssembly();
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+  bool HasAtLeast(const InstructionSetFeatures* other) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kArm;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
   // Return a string of the form "div,lpae" or "none".
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   // Is the divide instruction feature enabled?
   bool HasDivideInstruction() const {
@@ -82,7 +82,7 @@
   // Parse a vector of the form "div", "lpae" adding these to a new ArmInstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE;
+                                 std::string* error_msg) const override;
 
  private:
   ArmInstructionSetFeatures(bool has_div,
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index e64cfb8..95dac90 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -26,7 +26,7 @@
 namespace art {
 namespace arm64 {
 
-class Arm64Context FINAL : public Context {
+class Arm64Context final : public Context {
  public:
   Arm64Context() {
     Reset();
@@ -34,56 +34,56 @@
 
   ~Arm64Context() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(SP, new_sp);
   }
 
-  void SetPC(uintptr_t new_lr) OVERRIDE {
+  void SetPC(uintptr_t new_lr) override {
     SetGPR(kPC, new_lr);
   }
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(X0, new_arg0_value);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, arraysize(gprs_));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, arraysize(gprs_));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     // Note: PC isn't an available GPR (outside of internals), so don't allow retrieving the value.
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfXRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfDRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
   static constexpr size_t kPC = kNumberOfXRegisters;
 
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index af2d4c7..163a2d8 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -25,7 +25,7 @@
 using Arm64FeaturesUniquePtr = std::unique_ptr<const Arm64InstructionSetFeatures>;
 
 // Instruction set features relevant to the ARM64 architecture.
-class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Arm64InstructionSetFeatures final : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
   static Arm64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -47,16 +47,16 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static Arm64FeaturesUniquePtr FromAssembly();
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kArm64;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
   // Return a string of the form "a53" or "none".
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   // Generate code addressing Cortex-A53 erratum 835769?
   bool NeedFixCortexA53_835769() const {
@@ -74,7 +74,7 @@
   // Parse a vector of the form "a53" adding these to a new ArmInstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE;
+                                 std::string* error_msg) const override;
 
  private:
   Arm64InstructionSetFeatures(bool needs_a53_835769_fix, bool needs_a53_843419_fix)
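
A placement note for hunks like GetFeatureString() above: 'override' and 'final' are virt-specifiers that follow the complete declarator, cv-qualifiers included, hence 'const override' and never 'override const'. They are also identifiers with special meaning rather than reserved words, so the switch away from macros cannot collide with ordinary uses of the names. Sketch with hypothetical types:

    struct Features {
      virtual ~Features() {}
      virtual unsigned AsBitmap() const { return 0u; }
    };

    struct Arm64LikeFeatures final : Features {
      unsigned AsBitmap() const override { return 1u; }  // cv-qualifier, then override
      // unsigned AsBitmap() override const;             // ill-formed: wrong order
    };

    int main() {
      int final = 42;    // still legal: 'final' only has special meaning in
      int override = 7;  // declarator position, and likewise 'override'
      return final + override - 49;  // 0
    }
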
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index 7e073b2..960aea1 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -33,53 +33,53 @@
   }
   virtual ~MipsContext() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(SP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     SetGPR(T9, new_pc);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(A0, new_arg0_value);
   }
 
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 76bc639..ab5bb3c 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -28,7 +28,7 @@
 using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
 
 // Instruction set features relevant to the MIPS architecture.
-class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
+class MipsInstructionSetFeatures final : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
   static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
@@ -50,15 +50,15 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static MipsFeaturesUniquePtr FromAssembly();
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kMips;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   // Is this an ISA revision greater than 2 opening up new opcodes.
   bool IsMipsIsaRevGreaterThanEqual2() const {
@@ -87,7 +87,7 @@
   // Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE;
+                                 std::string* error_msg) const override;
 
  private:
   MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6, bool msa)
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index b2a6138..857abfd 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -33,53 +33,53 @@
   }
   virtual ~Mips64Context() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(SP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     SetGPR(T9, new_pc);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(A0, new_arg0_value);
   }
 
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index 27e544e..e204d9d 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -25,7 +25,7 @@
 using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
 
 // Instruction set features relevant to the MIPS64 architecture.
-class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+class Mips64InstructionSetFeatures final : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
   static Mips64FeaturesUniquePtr FromVariant(const std::string& variant,
@@ -48,15 +48,15 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static Mips64FeaturesUniquePtr FromAssembly();
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kMips64;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   // Does it have MSA (MIPS SIMD Architecture) support.
   bool HasMsa() const {
@@ -69,7 +69,7 @@
   // Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE;
+                                 std::string* error_msg) const override;
 
  private:
   explicit Mips64InstructionSetFeatures(bool msa) : InstructionSetFeatures(), msa_(msa) {
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index b0c0e43..e8df90e 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -37,7 +37,7 @@
 class StubTest : public CommonRuntimeTest {
  protected:
   // We need callee-save methods set up in the Runtime for exceptions.
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     // Do the normal setup.
     CommonRuntimeTest::SetUp();
 
@@ -54,7 +54,7 @@
     }
   }
 
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     // Use a smaller heap
     for (std::pair<std::string, const void*>& pair : *options) {
       if (pair.first.find("-Xmx") == 0) {
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index 0ebb22b..5b438c3 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -26,62 +26,62 @@
 namespace art {
 namespace x86 {
 
-class X86Context FINAL : public Context {
+class X86Context final : public Context {
  public:
   X86Context() {
     Reset();
   }
   virtual ~X86Context() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(ESP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     eip_ = new_pc;
   }
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(EAX, new_arg0_value);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
  private:
   // Pretend XMM registers are made of uin32_t pieces, because they are manipulated
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 57cf4b2..acf13c4 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -49,17 +49,17 @@
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
   static X86FeaturesUniquePtr FromAssembly(bool x86_64 = false);
 
-  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+  bool Equals(const InstructionSetFeatures* other) const override;
 
-  bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+  bool HasAtLeast(const InstructionSetFeatures* other) const override;
 
-  virtual InstructionSet GetInstructionSet() const OVERRIDE {
+  virtual InstructionSet GetInstructionSet() const override {
     return InstructionSet::kX86;
   }
 
-  uint32_t AsBitmap() const OVERRIDE;
+  uint32_t AsBitmap() const override;
 
-  std::string GetFeatureString() const OVERRIDE;
+  std::string GetFeatureString() const override;
 
   virtual ~X86InstructionSetFeatures() {}
 
@@ -71,7 +71,7 @@
   // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
   virtual std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE {
+                                 std::string* error_msg) const override {
     return AddFeaturesFromSplitString(features, false, error_msg);
   }
 
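
A few of the x86 hunks (e.g. 'virtual InstructionSet GetInstructionSet() const override' above) keep 'virtual' next to the new keyword. That is legal but redundant: 'override' may only appear on a function that overrides a virtual, so the explicit 'virtual' adds no information. The spellings below declare the same overrider; types are hypothetical:

    struct Base {
      virtual ~Base() {}
      virtual int Get() const { return 0; }
    };

    struct Derived : Base {
      virtual int Get() const override { return 1; }  // legal, 'virtual' redundant
      // int Get() const override { return 1; }       // equivalent and minimal
      // virtual int Get() const { return 1; }        // also overrides, but unchecked
    };
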
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index d242693..ab38614 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -26,62 +26,62 @@
 namespace art {
 namespace x86_64 {
 
-class X86_64Context FINAL : public Context {
+class X86_64Context final : public Context {
  public:
   X86_64Context() {
     Reset();
   }
   virtual ~X86_64Context() {}
 
-  void Reset() OVERRIDE;
+  void Reset() override;
 
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
+  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
 
-  void SetSP(uintptr_t new_sp) OVERRIDE {
+  void SetSP(uintptr_t new_sp) override {
     SetGPR(RSP, new_sp);
   }
 
-  void SetPC(uintptr_t new_pc) OVERRIDE {
+  void SetPC(uintptr_t new_pc) override {
     rip_ = new_pc;
   }
 
-  void SetArg0(uintptr_t new_arg0_value) OVERRIDE {
+  void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(RDI, new_arg0_value);
   }
 
-  bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     return gprs_[reg] != nullptr;
   }
 
-  uintptr_t* GetGPRAddress(uint32_t reg) OVERRIDE {
+  uintptr_t* GetGPRAddress(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     return gprs_[reg];
   }
 
-  uintptr_t GetGPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetGPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
     DCHECK(IsAccessibleGPR(reg));
     return *gprs_[reg];
   }
 
-  void SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetGPR(uint32_t reg, uintptr_t value) override;
 
-  bool IsAccessibleFPR(uint32_t reg) OVERRIDE {
+  bool IsAccessibleFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
     return fprs_[reg] != nullptr;
   }
 
-  uintptr_t GetFPR(uint32_t reg) OVERRIDE {
+  uintptr_t GetFPR(uint32_t reg) override {
     DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
     DCHECK(IsAccessibleFPR(reg));
     return *fprs_[reg];
   }
 
-  void SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
+  void SetFPR(uint32_t reg, uintptr_t value) override;
 
-  void SmashCallerSaves() OVERRIDE;
-  NO_RETURN void DoLongJump() OVERRIDE;
+  void SmashCallerSaves() override;
+  NO_RETURN void DoLongJump() override;
 
  private:
   // Pointers to register locations. Values are initialized to null or the special registers below.
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index e76490b..76258fa 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -25,7 +25,7 @@
 using X86_64FeaturesUniquePtr = std::unique_ptr<const X86_64InstructionSetFeatures>;
 
 // Instruction set features relevant to the X86_64 architecture.
-class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
+class X86_64InstructionSetFeatures final : public X86InstructionSetFeatures {
  public:
   // Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
   static X86_64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg) {
@@ -59,7 +59,7 @@
     return Convert(X86InstructionSetFeatures::FromAssembly(true));
   }
 
-  InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const override {
     return InstructionSet::kX86_64;
   }
 
@@ -69,7 +69,7 @@
   // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const OVERRIDE {
+                                 std::string* error_msg) const override {
     return X86InstructionSetFeatures::AddFeaturesFromSplitString(features, true, error_msg);
   }
 
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 123595c..5afd000 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -40,7 +40,7 @@
 class String;
 }  // namespace mirror
 
-class ArtField FINAL {
+class ArtField final {
  public:
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ce08cb0..48ddc69 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -66,7 +66,7 @@
 using MethodDexCacheType = std::atomic<MethodDexCachePair>;
 }  // namespace mirror
 
-class ArtMethod FINAL {
+class ArtMethod final {
  public:
   // Should the class state be checked on sensitive operations?
   DECLARE_RUNTIME_DEBUG_FLAG(kCheckDeclaringClassState);
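
Note that 'final' is also being applied to classes like ArtField and ArtMethod that declare no virtual functions at all. There it affects no dispatch; it simply enforces that the type is never subclassed, which matters for classes whose object layout and size are managed by hand. Minimal sketch with a hypothetical type:

    class PackedField final {  // no vtable; 'final' only forbids derivation
     public:
      explicit PackedField(unsigned bits) : bits_(bits) {}
      unsigned Bits() const { return bits_; }

     private:
      unsigned bits_;
    };

    // struct TaggedField : PackedField {};  // error: base 'PackedField' is final
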
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index a9fbafe..851c23f 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -31,11 +31,11 @@
 
 namespace art {
 
-class MemMapArena FINAL : public Arena {
+class MemMapArena final : public Arena {
  public:
   MemMapArena(size_t size, bool low_4gb, const char* name);
   virtual ~MemMapArena();
-  void Release() OVERRIDE;
+  void Release() override;
 
  private:
   static MemMap Allocate(size_t size, bool low_4gb, const char* name);
diff --git a/runtime/base/mem_map_arena_pool.h b/runtime/base/mem_map_arena_pool.h
index 24e150e..e98ef07 100644
--- a/runtime/base/mem_map_arena_pool.h
+++ b/runtime/base/mem_map_arena_pool.h
@@ -21,17 +21,17 @@
 
 namespace art {
 
-class MemMapArenaPool FINAL : public ArenaPool {
+class MemMapArenaPool final : public ArenaPool {
  public:
   explicit MemMapArenaPool(bool low_4gb = false, const char* name = "LinearAlloc");
   virtual ~MemMapArenaPool();
-  Arena* AllocArena(size_t size) OVERRIDE;
-  void FreeArenaChain(Arena* first) OVERRIDE;
-  size_t GetBytesAllocated() const OVERRIDE;
-  void ReclaimMemory() OVERRIDE;
-  void LockReclaimMemory() OVERRIDE;
+  Arena* AllocArena(size_t size) override;
+  void FreeArenaChain(Arena* first) override;
+  size_t GetBytesAllocated() const override;
+  void ReclaimMemory() override;
+  void LockReclaimMemory() override;
   // Trim the maps in arenas by madvising, used by JIT to reduce memory usage.
-  void TrimMaps() OVERRIDE;
+  void TrimMaps() override;
 
  private:
   const bool low_4gb_;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 044c4c2..28b2912 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -125,7 +125,7 @@
   }
 }
 
-class ScopedAllMutexesLock FINAL {
+class ScopedAllMutexesLock final {
  public:
   explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
     for (uint32_t i = 0;
@@ -144,7 +144,7 @@
   const BaseMutex* const mutex_;
 };
 
-class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
  public:
   explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
     for (uint32_t i = 0;
@@ -166,7 +166,7 @@
 };
 
 // Scoped class that generates events at the beginning and end of lock contention.
-class ScopedContentionRecorder FINAL : public ValueObject {
+class ScopedContentionRecorder final : public ValueObject {
  public:
   ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
       : mutex_(kLogLockContentions ? mutex : nullptr),
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index fba209a..d127d0f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -297,7 +297,7 @@
   // For negative capabilities in clang annotations.
   const Mutex& operator!() const { return *this; }
 
-  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+  void WakeupToRespondToEmptyCheckpoint() override;
 
  private:
 #if ART_USE_FUTEXES
@@ -418,7 +418,7 @@
   // For negative capabilities in clang annotations.
   const ReaderWriterMutex& operator!() const { return *this; }
 
-  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;
+  void WakeupToRespondToEmptyCheckpoint() override;
 
  private:
 #if ART_USE_FUTEXES
diff --git a/runtime/cha.cc b/runtime/cha.cc
index ce84e8c..3ea920d 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -181,7 +181,7 @@
 // headers, sets the should_deoptimize flag on stack to 1.
 // TODO: also set the register value to 1 when should_deoptimize is allocated in
 // a register.
-class CHAStackVisitor FINAL  : public StackVisitor {
+class CHAStackVisitor final : public StackVisitor {
  public:
   CHAStackVisitor(Thread* thread_in,
                   Context* context,
@@ -190,7 +190,7 @@
         method_headers_(method_headers) {
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     // Avoid types of methods that do not have an oat quick method header.
     if (method == nullptr ||
@@ -245,13 +245,13 @@
   DISALLOW_COPY_AND_ASSIGN(CHAStackVisitor);
 };
 
-class CHACheckpoint FINAL : public Closure {
+class CHACheckpoint final : public Closure {
  public:
   explicit CHACheckpoint(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
       : barrier_(0),
         method_headers_(method_headers) {}
 
-  void Run(Thread* thread) OVERRIDE {
+  void Run(Thread* thread) override {
     // Note thread and self may not be equal if thread was already suspended at
     // the point of the request.
     Thread* self = Thread::Current();
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f80d34c..65f05d9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -884,7 +884,7 @@
   explicit SetInterpreterEntrypointArtMethodVisitor(PointerSize image_pointer_size)
     : image_pointer_size_(image_pointer_size) {}
 
-  void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kIsDebugBuild && !method->IsRuntimeMethod()) {
       CHECK(method->GetDeclaringClass() != nullptr);
     }
@@ -1390,7 +1390,7 @@
 
 // Helper class for ArtMethod checks when adding an image. Keeps all required functionality
 // together and caches some intermediate results.
-class ImageSanityChecks FINAL {
+class ImageSanityChecks final {
  public:
   static void CheckObjects(gc::Heap* heap, ClassLinker* class_linker)
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1951,7 +1951,7 @@
         done_(false) {}
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     ClassTable* const class_table = class_loader->GetClassTable();
     if (!done_ && class_table != nullptr) {
       DefiningClassLoaderFilterVisitor visitor(class_loader, visitor_);
@@ -1972,7 +1972,7 @@
                                      ClassVisitor* visitor)
         : defining_class_loader_(defining_class_loader), visitor_(visitor) { }
 
-    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       if (klass->GetClassLoader() != defining_class_loader_) {
         return true;
       }
@@ -2009,7 +2009,7 @@
 
 class GetClassesInToVector : public ClassVisitor {
  public:
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE {
+  bool operator()(ObjPtr<mirror::Class> klass) override {
     classes_.push_back(klass);
     return true;
   }
@@ -2021,7 +2021,7 @@
   explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
       : arr_(arr), index_(0) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     ++index_;
     if (index_ <= arr_->GetLength()) {
       arr_->Set(index_ - 1, klass);
@@ -3845,7 +3845,7 @@
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
       REQUIRES(Locks::classlinker_classes_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::mutator_lock_) override {
     ClassTable* const class_table = class_loader->GetClassTable();
     if (class_table != nullptr) {
       class_table->FreezeSnapshot();
@@ -3871,7 +3871,7 @@
        result_(result) {}
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     ClassTable* const class_table = class_loader->GetClassTable();
     ObjPtr<mirror::Class> klass = class_table->Lookup(descriptor_, hash_);
     // Add `klass` only if `class_loader` is its defining (not just initiating) class loader.
@@ -5563,7 +5563,7 @@
 // Comparator for name and signature of a method, used in finding overriding methods. Implementation
 // avoids the use of handles, if it didn't then rather than compare dex files we could compare dex
 // caches in the implementation below.
-class MethodNameAndSignatureComparator FINAL : public ValueObject {
+class MethodNameAndSignatureComparator final : public ValueObject {
  public:
   explicit MethodNameAndSignatureComparator(ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_) :
@@ -8555,7 +8555,7 @@
   CountClassesVisitor() : num_zygote_classes(0), num_non_zygote_classes(0) {}
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     ClassTable* const class_table = class_loader->GetClassTable();
     if (class_table != nullptr) {
       num_zygote_classes += class_table->NumZygoteClasses(class_loader);
@@ -8825,7 +8825,7 @@
         extra_stats_(),
         last_extra_stats_(extra_stats_.end()) { }
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!klass->IsProxyClass() &&
         !klass->IsArrayClass() &&
         klass->IsResolved() &&
@@ -8913,7 +8913,7 @@
       : method_(method),
         pointer_size_(pointer_size) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+  bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) override {
     if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
       holder_ = klass;
     }
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e40f1db..52ddd13 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -442,7 +442,7 @@
 
   class TestRootVisitor : public SingleRootVisitor {
    public:
-    void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE {
+    void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) override {
       EXPECT_TRUE(root != nullptr);
     }
   };
@@ -450,7 +450,7 @@
 
 class ClassLinkerMethodHandlesTest : public ClassLinkerTest {
  protected:
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
     CommonRuntimeTest::SetUpRuntimeOptions(options);
   }
 };
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 234b66a..bf17e64 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -157,11 +157,11 @@
   virtual ~CommonRuntimeTestBase() {}
 
  protected:
-  virtual void SetUp() OVERRIDE {
+  virtual void SetUp() override {
     CommonRuntimeTestImpl::SetUp();
   }
 
-  virtual void TearDown() OVERRIDE {
+  virtual void TearDown() override {
     CommonRuntimeTestImpl::TearDown();
   }
 };
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 60975b0..012ebcb 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -25,7 +25,7 @@
 
 namespace art {
 
-class CompilerFilter FINAL {
+class CompilerFilter final {
  public:
   // Note: Order here matters. Later filter choices are considered "as good
   // as" earlier filter choices.
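
For context on the shims this change retires: compatibility macros of this kind are usually written so the same headers build on pre-C++11 toolchains, roughly as sketched below. This is a hypothetical reconstruction of the pattern, not the exact removed definitions; with the tree pinned at C++14 the keywords are always available, and the indirection only hides them from grep and editor tooling.

    // Hypothetical pre-C++11 compatibility shim of the kind being retired.
    #if __cplusplus >= 201103L
    #define OVERRIDE override
    #define FINAL final
    #else
    #define OVERRIDE  // expands to nothing on old toolchains
    #define FINAL
    #endif
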
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e607b31..366b5ec 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -138,7 +138,7 @@
   return os;
 }
 
-class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class DebugInstrumentationListener final : public instrumentation::InstrumentationListener {
  public:
   DebugInstrumentationListener() {}
   virtual ~DebugInstrumentationListener() {}
@@ -147,7 +147,7 @@
                      Handle<mirror::Object> this_object,
                      ArtMethod* method,
                      uint32_t dex_pc)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (method->IsNative()) {
       // TODO: post location events is a suspension point and native method entry stubs aren't.
       return;
@@ -176,7 +176,7 @@
                     ArtMethod* method,
                     uint32_t dex_pc,
                     const JValue& return_value)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (method->IsNative()) {
       // TODO: post location events is a suspension point and native method entry stubs aren't.
       return;
@@ -195,7 +195,7 @@
                     Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                     ArtMethod* method,
                     uint32_t dex_pc)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     // We're not recorded to listen to this kind of event, so complain.
     LOG(ERROR) << "Unexpected method unwind event in debugger " << ArtMethod::PrettyMethod(method)
                << " " << dex_pc;
@@ -205,7 +205,7 @@
                   Handle<mirror::Object> this_object,
                   ArtMethod* method,
                   uint32_t new_dex_pc)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
       // We also listen to kMethodExited instrumentation event and the current instruction is a
       // RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -229,7 +229,7 @@
                  ArtMethod* method,
                  uint32_t dex_pc,
                  ArtField* field)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     Dbg::PostFieldAccessEvent(method, dex_pc, this_object.Get(), field);
   }
 
@@ -239,19 +239,19 @@
                     uint32_t dex_pc,
                     ArtField* field,
                     const JValue& field_value)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     Dbg::PostFieldModificationEvent(method, dex_pc, this_object.Get(), field, &field_value);
   }
 
   void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
                        Handle<mirror::Throwable> exception_object)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     Dbg::PostException(exception_object.Get());
   }
 
   // We only care about branches in the Jit.
   void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     LOG(ERROR) << "Unexpected branch event in debugger " << ArtMethod::PrettyMethod(method)
                << " " << dex_pc << ", " << dex_pc_offset;
   }
@@ -262,20 +262,20 @@
                                 ArtMethod* method,
                                 uint32_t dex_pc,
                                 ArtMethod* target ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
                << " " << dex_pc;
   }
 
   // TODO Might be worth it to post ExceptionCatch event.
   void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
-                        Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) OVERRIDE {
+                        Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) override {
     LOG(ERROR) << "Unexpected exception handled event in debugger";
   }
 
   // TODO Might be worth it to implement this.
   void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
-                       const ShadowFrame& frame ATTRIBUTE_UNUSED) OVERRIDE {
+                       const ShadowFrame& frame ATTRIBUTE_UNUSED) override {
     LOG(ERROR) << "Unexpected WatchedFramePop event in debugger";
   }
 
@@ -1087,7 +1087,7 @@
  public:
   explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
 
-  bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!c->IsPrimitive()) {
       classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
     }
@@ -2450,7 +2450,7 @@
       expandBufAdd4BE(buf_, frame_count_);
     }
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       if (GetMethod()->IsRuntimeMethod()) {
         return true;  // The debugger can't do anything useful with a frame that has no Method*.
       }
@@ -2608,7 +2608,7 @@
 }
 
 // Walks the stack until we find the frame with the given FrameId.
-class FindFrameVisitor FINAL : public StackVisitor {
+class FindFrameVisitor final : public StackVisitor {
  public:
   FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -3040,7 +3040,7 @@
       throw_dex_pc_(dex::kDexNoIndex) {
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     DCHECK(method != nullptr);
     if (method->IsRuntimeMethod()) {
@@ -3693,7 +3693,7 @@
     : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
       needs_deoptimization_(false) {}
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     // The visitor is meant to be used when handling exception from compiled code only.
     CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: "
                             << ArtMethod::PrettyMethod(GetMethod());
diff --git a/runtime/debugger.h b/runtime/debugger.h
index e1de991..33444f8 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -54,20 +54,20 @@
 class Thread;
 
 struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
-  bool IsMethodBeingInspected(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsMethodSafeToJit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool MethodNeedsDebugVersion(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsMethodBeingInspected(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsMethodSafeToJit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool MethodNeedsDebugVersion(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 struct DebuggerDdmCallback : public DdmCallback {
   void DdmPublishChunk(uint32_t type, const ArrayRef<const uint8_t>& data)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+      override REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 struct InternalDebuggerControlCallback : public DebuggerControlCallback {
-  void StartDebugger() OVERRIDE;
-  void StopDebugger() OVERRIDE;
-  bool IsDebuggerConfigured() OVERRIDE;
+  void StartDebugger() override;
+  void StopDebugger() override;
+  bool IsDebuggerConfigured() override;
 };
 
 /*
@@ -831,15 +831,15 @@
 
   class DbgThreadLifecycleCallback : public ThreadLifecycleCallback {
    public:
-    void ThreadStart(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-    void ThreadDeath(Thread* self) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+    void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
+    void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
   };
 
   class DbgClassLoadCallback : public ClassLoadCallback {
    public:
-    void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+    void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
     void ClassPrepare(Handle<mirror::Class> temp_klass,
-                      Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+                      Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
   };
 
   static DbgThreadLifecycleCallback thread_lifecycle_callback_;
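
Callback interfaces like the ones above are where 'override' pays off over time: if the abstract base later changes a signature, every implementer fails to compile at its declaration, rather than silently keeping a stale overload while the pure virtual goes unimplemented. Sketch with hypothetical names:

    struct ThreadLifecycleCallbackBase {
      virtual ~ThreadLifecycleCallbackBase() {}
      virtual void ThreadStart(int tid) = 0;
    };

    struct LoggingCallback : ThreadLifecycleCallbackBase {
      void ThreadStart(int tid) override {}  // verified against the base
    };

    // If the base later becomes ThreadStart(int tid, bool attached), this
    // class stops compiling right here, not at some distant instantiation.
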
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 00a95cc..0b99722 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -42,7 +42,7 @@
 // Test class that provides some helpers to set a test up for compilation using dex2oat.
 class Dex2oatEnvironmentTest : public CommonRuntimeTest {
  public:
-  virtual void SetUp() OVERRIDE {
+  virtual void SetUp() override {
     CommonRuntimeTest::SetUp();
     const ArtDexFileLoader dex_file_loader;
 
@@ -106,7 +106,7 @@
     ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
   }
 
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
     // options->push_back(std::make_pair("-verbose:oat", nullptr));
 
     // Set up the image location.
@@ -117,7 +117,7 @@
     callbacks_.reset();
   }
 
-  virtual void TearDown() OVERRIDE {
+  virtual void TearDown() override {
     ClearDirectory(odex_dir_.c_str());
     ASSERT_EQ(0, rmdir(odex_dir_.c_str()));
 
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 3203ee5..b4e52ac 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -26,11 +26,11 @@
 
 class DexoptTest : public Dex2oatEnvironmentTest {
  public:
-  virtual void SetUp() OVERRIDE;
+  virtual void SetUp() override;
 
   virtual void PreRuntimeCreate();
 
-  virtual void PostRuntimeCreate() OVERRIDE;
+  virtual void PostRuntimeCreate() override;
 
   // Generate an oat file for the purposes of test.
   // The oat file will be generated for dex_location in the given oat_location
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index aca169b..fccfce4 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -615,13 +615,13 @@
 }
 
 // Visits arguments on the stack placing them into the shadow frame.
-class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor {
  public:
   BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                                uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
 
  private:
   ShadowFrame* const sf_;
@@ -707,7 +707,7 @@
       explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
           : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-      bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
         // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
         // logic. Just always say we want to continue.
         return true;
@@ -824,13 +824,13 @@
 
 // Visits arguments on the stack placing them into the args vector, Object* arguments are converted
 // to jobjects.
-class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
+class BuildQuickArgumentVisitor final : public QuickArgumentVisitor {
  public:
   BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
                             ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
 
  private:
   ScopedObjectAccessUnchecked* const soa_;
@@ -959,7 +959,7 @@
 
 // Visitor returning a reference argument at a given position in a Quick stack frame.
 // NOTE: Only used for testing purposes.
-class GetQuickReferenceArgumentAtVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
  public:
   GetQuickReferenceArgumentAtVisitor(ArtMethod** sp,
                                      const char* shorty,
@@ -972,7 +972,7 @@
           CHECK_LT(arg_pos, shorty_len) << "Argument position greater than the number arguments";
         }
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
     if (cur_pos_ == arg_pos_) {
       Primitive::Type type = GetParamPrimitiveType();
       CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
@@ -1014,7 +1014,7 @@
 }
 
 // Visitor returning all the reference arguments in a Quick stack frame.
-class GetQuickReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
+class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor {
  public:
   GetQuickReferenceArgumentsVisitor(ArtMethod** sp,
                                     bool is_static,
@@ -1022,7 +1022,7 @@
                                     uint32_t shorty_len)
       : QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {}
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
     Primitive::Type type = GetParamPrimitiveType();
     if (type == Primitive::kPrimNot) {
       StackReference<mirror::Object>* ref_arg =
@@ -1059,13 +1059,13 @@
 
 // Read object references held in arguments from quick frames and place in a JNI local references,
 // so they don't get garbage collected.
-class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
+class RememberForGcArgumentVisitor final : public QuickArgumentVisitor {
  public:
   RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                                uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
 
   void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1957,7 +1957,7 @@
   uint32_t num_stack_entries_;
 };
 
-class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
+class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
  public:
   explicit ComputeGenericJniFrameSize(bool critical_native)
     : num_handle_scope_references_(0), critical_native_(critical_native) {}
@@ -2038,10 +2038,10 @@
     return sp8;
   }
 
-  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
+  uintptr_t PushHandle(mirror::Object* /* ptr */) override;
 
   // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
-  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
+  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
@@ -2117,7 +2117,7 @@
 
 // Visits arguments on the stack placing them into a region lower down the stack for the benefit
 // of transitioning into native code.
-class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
+class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
  public:
   BuildGenericJniFrameVisitor(Thread* self,
                               bool is_static,
@@ -2150,7 +2150,7 @@
     }
   }
 
-  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
 
   void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -2168,7 +2168,7 @@
 
  private:
   // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
-  class FillJniCall FINAL : public FillNativeCall {
+  class FillJniCall final : public FillNativeCall {
    public:
     FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
                 HandleScope* handle_scope, bool critical_native)
@@ -2177,7 +2177,7 @@
         cur_entry_(0),
         critical_native_(critical_native) {}
 
-    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+    uintptr_t PushHandle(mirror::Object* ref) override REQUIRES_SHARED(Locks::mutator_lock_);
 
     void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
       FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 89694e3..0f0fb69 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -26,7 +26,7 @@
 
 class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     // Use 64-bit ISA for runtime setup to make method size potentially larger
     // than necessary (rather than smaller) during CreateCalleeSaveMethod
     options->push_back(std::make_pair("imageinstructionset", "x86_64"));
@@ -35,7 +35,7 @@
   // Do not do any of the finalization. We don't want to run any code, we don't need the heap
   // prepared, it actually will be a problem with setting the instruction set to x86_64 in
   // SetUpRuntimeOptions.
-  void FinalizeSetup() OVERRIDE {
+  void FinalizeSetup() override {
     ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
   }
 
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 3e2664c..02eeefe 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -90,11 +90,11 @@
   DISALLOW_COPY_AND_ASSIGN(FaultHandler);
 };
 
-class NullPointerHandler FINAL : public FaultHandler {
+class NullPointerHandler final : public FaultHandler {
  public:
   explicit NullPointerHandler(FaultManager* manager);
 
-  bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+  bool Action(int sig, siginfo_t* siginfo, void* context) override;
 
   static bool IsValidImplicitCheck(siginfo_t* siginfo) {
     // Our implicit NPE checks always limit the range to a page.
@@ -108,31 +108,31 @@
   DISALLOW_COPY_AND_ASSIGN(NullPointerHandler);
 };
 
-class SuspensionHandler FINAL : public FaultHandler {
+class SuspensionHandler final : public FaultHandler {
  public:
   explicit SuspensionHandler(FaultManager* manager);
 
-  bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+  bool Action(int sig, siginfo_t* siginfo, void* context) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(SuspensionHandler);
 };
 
-class StackOverflowHandler FINAL : public FaultHandler {
+class StackOverflowHandler final : public FaultHandler {
  public:
   explicit StackOverflowHandler(FaultManager* manager);
 
-  bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+  bool Action(int sig, siginfo_t* siginfo, void* context) override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(StackOverflowHandler);
 };
 
-class JavaStackTraceHandler FINAL : public FaultHandler {
+class JavaStackTraceHandler final : public FaultHandler {
  public:
   explicit JavaStackTraceHandler(FaultManager* manager);
 
-  bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+  bool Action(int sig, siginfo_t* siginfo, void* context) override NO_THREAD_SAFETY_ANALYSIS;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(JavaStackTraceHandler);
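
A brief sketch of the other half of the change (simplified, invented names): 'final' on
a class such as these handlers forbids further derivation, which also allows the
compiler to devirtualize calls made through the exact type.

    struct Handler {
      virtual bool Action(int sig) = 0;
      virtual ~Handler() {}
    };

    struct SegvHandler final : Handler {
      bool Action(int sig) override { return sig == 11; }  // 11 == SIGSEGV on Linux
    };

    // struct MoreDerived : SegvHandler {};  // error: cannot derive from a 'final' class
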
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index 3a09634..f0a82e0 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -33,7 +33,7 @@
                                            space::ContinuousSpace* space)
       : ModUnionTableReferenceCache(name, heap, space) {}
 
-  bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE ALWAYS_INLINE {
+  bool ShouldAddReference(const mirror::Object* ref) const override ALWAYS_INLINE {
     return !space_->HasAddress(ref);
   }
 };
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 0dd05cd..40dc6e1 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -329,8 +329,8 @@
 
 class EmptyMarkObjectVisitor : public MarkObjectVisitor {
  public:
-  mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {return obj;}
-  void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {}
+  mirror::Object* MarkObject(mirror::Object* obj) override {return obj;}
+  void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {}
 };
 
 void ModUnionTable::FilterCards() {
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 7a3c06a..ec6f144 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -125,33 +125,33 @@
   virtual ~ModUnionTableReferenceCache() {}
 
   // Clear and store cards for a space.
-  void ProcessCards() OVERRIDE;
+  void ProcessCards() override;
 
   // Update table based on cleared cards and mark all references to the other spaces.
-  void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+  void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_);
 
-  virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+  virtual void VisitObjects(ObjectCallback callback, void* arg) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
   // VisitMarkedRange can't know if the callback will modify the bitmap or not.
-  void Verify() OVERRIDE
+  void Verify() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_);
 
   // Function that tells whether or not to add a reference to the table.
   virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
 
-  virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+  virtual bool ContainsCardFor(uintptr_t addr) override;
 
-  virtual void Dump(std::ostream& os) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  virtual void Dump(std::ostream& os) override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void SetCards() OVERRIDE;
+  virtual void SetCards() override;
 
-  virtual void ClearTable() OVERRIDE;
+  virtual void ClearTable() override;
 
  protected:
   // Cleared card array, used to update the mod-union table.
@@ -172,27 +172,27 @@
   virtual ~ModUnionTableCardCache() {}
 
   // Clear and store cards for a space.
-  virtual void ProcessCards() OVERRIDE;
+  virtual void ProcessCards() override;
 
   // Mark all references to the alloc space(s).
-  virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
+  virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
+  virtual void VisitObjects(ObjectCallback callback, void* arg) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Nothing to verify.
-  virtual void Verify() OVERRIDE {}
+  virtual void Verify() override {}
 
-  virtual void Dump(std::ostream& os) OVERRIDE;
+  virtual void Dump(std::ostream& os) override;
 
-  virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+  virtual bool ContainsCardFor(uintptr_t addr) override;
 
-  virtual void SetCards() OVERRIDE;
+  virtual void SetCards() override;
 
-  virtual void ClearTable() OVERRIDE;
+  virtual void ClearTable() override;
 
  protected:
   // Cleared card bitmap, used to update the mod-union table.
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index d59ff71..5aa5550 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -98,12 +98,12 @@
  public:
   explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                 bool do_atomic_update ATTRIBUTE_UNUSED) OVERRIDE
+                                 bool do_atomic_update ATTRIBUTE_UNUSED) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(ref != nullptr);
     MarkObject(ref->AsMirrorPtr());
   }
-  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+  virtual mirror::Object* MarkObject(mirror::Object* obj) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
     out_->insert(obj);
@@ -122,7 +122,7 @@
       space::ContinuousSpace* target_space)
       : ModUnionTableReferenceCache(name, heap, space), target_space_(target_space) {}
 
-  bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE {
+  bool ShouldAddReference(const mirror::Object* ref) const override {
     return target_space_->HasAddress(ref);
   }
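
As an aside, a simplified sketch (invented types standing in for the mirror:: classes)
of the collect-and-identity-mark visitor pattern the test above uses:

    #include <set>

    struct Obj {};  // stand-in for mirror::Object

    struct MarkVisitor {
      virtual Obj* MarkObject(Obj* obj) = 0;
      virtual ~MarkVisitor() {}
    };

    class CollectVisitor final : public MarkVisitor {
     public:
      explicit CollectVisitor(std::set<Obj*>* out) : out_(out) {}
      Obj* MarkObject(Obj* obj) override {
        out_->insert(obj);  // record the visit
        return obj;         // identity marking: the object is not moved
      }
     private:
      std::set<Obj*>* out_;
    };
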
 
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index a1d1986..b9c1dc6 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -196,7 +196,7 @@
 
   // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
   // annotalysis.
-  bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
     if (trace_->GetDepth() >= max_depth_) {
       return false;
     }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c7a5f79..f73ecf1 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -229,7 +229,7 @@
   explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
       : concurrent_copying_(concurrent_copying) {}
 
-  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
     DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -250,7 +250,7 @@
   explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
       : concurrent_copying_(concurrent_copying) {}
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
     // to avoid a race with ThreadList::Register().
     CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
@@ -393,7 +393,7 @@
       : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
   }
 
-  virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  virtual void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -467,7 +467,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  virtual void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
     ConcurrentCopying* cc = concurrent_copying_;
     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
     // Note: self is not necessarily equal to thread since thread may be suspended.
@@ -1072,7 +1072,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
     DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1096,7 +1096,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
     // to avoid a race with ThreadList::Register().
     CHECK(concurrent_copying_->is_marking_);
@@ -1291,7 +1291,7 @@
   }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(root != nullptr);
     operator()(root);
   }
@@ -1457,7 +1457,7 @@
         disable_weak_ref_access_(disable_weak_ref_access) {
   }
 
-  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -1727,7 +1727,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
     // to avoid a deadlock (b/31500969).
     CHECK(concurrent_copying_->weak_ref_access_enabled_);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 0ebe6f0..a956d38 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -71,7 +71,7 @@
                              bool measure_read_barrier_slow_path = false);
   ~ConcurrentCopying();
 
-  virtual void RunPhases() OVERRIDE
+  virtual void RunPhases() override
       REQUIRES(!immune_gray_stack_lock_,
                !mark_stack_lock_,
                !rb_slow_path_histogram_lock_,
@@ -87,15 +87,15 @@
 
   void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::heap_bitmap_lock_);
-  virtual GcType GetGcType() const OVERRIDE {
+  virtual GcType GetGcType() const override {
     return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
         ? kGcTypeSticky
         : kGcTypePartial;
   }
-  virtual CollectorType GetCollectorType() const OVERRIDE {
+  virtual CollectorType GetCollectorType() const override {
     return kCollectorTypeCC;
   }
-  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
+  virtual void RevokeAllThreadLocalBuffers() override;
   void SetRegionSpace(space::RegionSpace* region_space) {
     DCHECK(region_space != nullptr);
     region_space_ = region_space;
@@ -144,7 +144,7 @@
   void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
 
-  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
+  virtual mirror::Object* IsMarked(mirror::Object* from_ref) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
@@ -167,7 +167,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
   virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+      override REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   template<bool kGrayImmuneObject>
   void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
@@ -175,12 +175,12 @@
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+      override REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
   accounting::ObjectStack* GetAllocationStack();
   accounting::ObjectStack* GetLiveStack();
-  virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+  virtual void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
   bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -205,20 +205,20 @@
       REQUIRES(!mark_stack_lock_);
   void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
   virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
-                                      ObjPtr<mirror::Reference> reference) OVERRIDE
+                                      ObjPtr<mirror::Reference> reference) override
       REQUIRES_SHARED(Locks::mutator_lock_);
   void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
-  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
+  virtual mirror::Object* MarkObject(mirror::Object* from_ref) override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
-                                 bool do_atomic_update) OVERRIDE
+                                 bool do_atomic_update) override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_);
   virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
-                                           bool do_atomic_update) OVERRIDE
+                                           bool do_atomic_update) override
       REQUIRES_SHARED(Locks::mutator_lock_);
   void SweepSystemWeaks(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
@@ -293,7 +293,7 @@
                                                       mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
-  void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
+  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
   // Set the read barrier mark entrypoints to non-null.
   void ActivateReadBarrierEntrypoints();
 
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 145bd02..677e3f3 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -167,19 +167,19 @@
                         end,
                         /*limit*/end) {}
 
-  space::SpaceType GetType() const OVERRIDE {
+  space::SpaceType GetType() const override {
     return space::kSpaceTypeMallocSpace;
   }
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return false;
   }
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return nullptr;
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     return nullptr;
   }
 };
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 58a75ee..840a4b0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -578,7 +578,7 @@
  public:
   explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
 
-  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+  void VisitRoot(mirror::Object* root, const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
   }
@@ -607,7 +607,7 @@
  public:
   explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}
 
-  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+  void VisitRoot(mirror::Object* root, const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // See if the root is on any space bitmap.
     auto* heap = Runtime::Current()->GetHeap();
@@ -1110,7 +1110,7 @@
   explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
 
   virtual mirror::Object* IsMarked(mirror::Object* obj)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     mark_sweep_->VerifyIsLive(obj);
     return obj;
@@ -1144,7 +1144,7 @@
   }
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+      override REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
@@ -1154,14 +1154,14 @@
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
+      override REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
     }
   }
 
-  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     ScopedTrace trace("Marking thread roots");
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* const self = Thread::Current();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index af2bb97..012e179 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -57,7 +57,7 @@
 
   ~MarkSweep() {}
 
-  virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
+  virtual void RunPhases() override REQUIRES(!mark_stack_lock_);
   void InitializePhase();
   void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
   void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
@@ -72,11 +72,11 @@
     return is_concurrent_;
   }
 
-  virtual GcType GetGcType() const OVERRIDE {
+  virtual GcType GetGcType() const override {
     return kGcTypeFull;
   }
 
-  virtual CollectorType GetCollectorType() const OVERRIDE {
+  virtual CollectorType GetCollectorType() const override {
     return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
   }
 
@@ -188,24 +188,24 @@
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                           bool do_atomic_update) OVERRIDE
+                                           bool do_atomic_update) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                           size_t count,
-                          const RootInfo& info) OVERRIDE
+                          const RootInfo& info) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Marks an object.
-  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+  virtual mirror::Object* MarkObject(mirror::Object* obj) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -216,7 +216,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                 bool do_atomic_update) OVERRIDE
+                                 bool do_atomic_update) override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -231,7 +231,7 @@
 
  protected:
   // Returns object if the object is marked in the heap bitmap, otherwise null.
-  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+  virtual mirror::Object* IsMarked(mirror::Object* object) override
       REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   void MarkObjectNonNull(mirror::Object* obj,
@@ -279,7 +279,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   virtual void ProcessMarkStack()
-      OVERRIDE
+      override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 8b0d3dd..308699b 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -26,7 +26,7 @@
 class PartialMarkSweep : public MarkSweep {
  public:
   // Virtual as overridden by StickyMarkSweep.
-  virtual GcType GetGcType() const OVERRIDE {
+  virtual GcType GetGcType() const override {
     return kGcTypePartial;
   }
 
@@ -37,7 +37,7 @@
   // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
   // collections, i.e. the Zygote space. Also mark this space as immune. Virtual as overridden by
   // StickyMarkSweep.
-  virtual void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  virtual void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index d1d45c8..49cd02e 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -63,7 +63,7 @@
 
   ~SemiSpace() {}
 
-  virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+  virtual void RunPhases() override NO_THREAD_SAFETY_ANALYSIS;
   virtual void InitializePhase();
   virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_)
       REQUIRES(!Locks::heap_bitmap_lock_);
@@ -72,10 +72,10 @@
   virtual void FinishPhase() REQUIRES(Locks::mutator_lock_);
   void MarkReachableObjects()
       REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-  virtual GcType GetGcType() const OVERRIDE {
+  virtual GcType GetGcType() const override {
     return kGcTypePartial;
   }
-  virtual CollectorType GetCollectorType() const OVERRIDE {
+  virtual CollectorType GetCollectorType() const override {
     return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
   }
 
@@ -106,11 +106,11 @@
   void MarkObjectIfNotInToSpace(CompressedReferenceType* obj_ptr)
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
+  virtual mirror::Object* MarkObject(mirror::Object* root) override
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
-                                 bool do_atomic_update) OVERRIDE
+                                 bool do_atomic_update) override
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   void ScanObject(mirror::Object* obj)
@@ -145,11 +145,11 @@
   void SweepSystemWeaks()
       REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
       REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                          const RootInfo& info) OVERRIDE
+                          const RootInfo& info) override
       REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
@@ -162,12 +162,12 @@
  protected:
   // Returns null if the object is not marked, otherwise returns the forwarding address (same as
   // object for non movable things).
-  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+  virtual mirror::Object* IsMarked(mirror::Object* object) override
       REQUIRES(Locks::mutator_lock_)
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
   virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
-                                           bool do_atomic_update) OVERRIDE
+                                           bool do_atomic_update) override
       REQUIRES(Locks::mutator_lock_)
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 45f912f..f92a103 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -24,9 +24,9 @@
 namespace gc {
 namespace collector {
 
-class StickyMarkSweep FINAL : public PartialMarkSweep {
+class StickyMarkSweep final : public PartialMarkSweep {
  public:
-  GcType GetGcType() const OVERRIDE {
+  GcType GetGcType() const override {
     return kGcTypeSticky;
   }
 
@@ -34,7 +34,7 @@
   ~StickyMarkSweep() {}
 
   virtual void MarkConcurrentRoots(VisitRootFlags flags)
-      OVERRIDE
+      override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -42,15 +42,15 @@
  protected:
   // Bind the live bits to the mark bits of bitmaps for all spaces; all spaces other than the
   // alloc space will be marked as immune.
-  void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  void BindBitmaps() override REQUIRES_SHARED(Locks::mutator_lock_);
 
   void MarkReachableObjects()
-      OVERRIDE
+      override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void Sweep(bool swap_bitmaps)
-      OVERRIDE
+      override
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
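
For orientation, a compressed sketch (shortened names) of how the mark-sweep family
refines GetGcType() at each level, with 'final' closing the chain at the sticky
collector:

    enum GcType { kGcTypeFull, kGcTypePartial, kGcTypeSticky };

    struct FullCollector {                             // plays the role of MarkSweep
      virtual GcType GetGcType() const { return kGcTypeFull; }
      virtual ~FullCollector() {}
    };

    struct PartialCollector : FullCollector {          // cf. PartialMarkSweep
      GcType GetGcType() const override { return kGcTypePartial; }
    };

    struct StickyCollector final : PartialCollector {  // cf. StickyMarkSweep
      GcType GetGcType() const override { return kGcTypeSticky; }
    };
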
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf06cf9..16fd786 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1327,7 +1327,7 @@
  public:
   explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
   }
-  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+  virtual void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
     thread->GetJniEnv()->TrimLocals();
     // If thread is a running mutator, then act on behalf of the trim thread.
     // See the code in ThreadList::RunCheckpoint.
@@ -2213,7 +2213,7 @@
 }
 
 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
-class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
+class ZygoteCompactingCollector final : public collector::SemiSpace {
  public:
   ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
       : SemiSpace(heap, false, "zygote collector"),
@@ -2769,7 +2769,7 @@
   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (root == obj_) {
       LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
     }
@@ -2826,7 +2826,7 @@
         root->AsMirrorPtr(), RootInfo(kRootVMInternal));
   }
 
-  virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
+  virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (root == nullptr) {
       LOG(ERROR) << "Root is null with info " << root_info.GetType();
@@ -3259,10 +3259,10 @@
 }
 
 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
-  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
+  virtual mirror::Object* MarkObject(mirror::Object* obj) override {
     return obj;
   }
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
   }
 };
 
@@ -3633,7 +3633,7 @@
  public:
   ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
       : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
-  virtual void Run(Thread* self) OVERRIDE {
+  virtual void Run(Thread* self) override {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     heap->ConcurrentGC(self, cause_, force_full_);
     heap->ClearConcurrentGCRequest();
@@ -3691,7 +3691,7 @@
  public:
   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
 
-  virtual void Run(Thread* self) OVERRIDE {
+  virtual void Run(Thread* self) override {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     heap->DoPendingCollectorTransition();
     heap->ClearPendingCollectorTransition(self);
@@ -3733,7 +3733,7 @@
 class Heap::HeapTrimTask : public HeapTask {
  public:
   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
-  virtual void Run(Thread* self) OVERRIDE {
+  virtual void Run(Thread* self) override {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     heap->Trim(self);
     heap->ClearPendingTrim(self);
@@ -4176,7 +4176,7 @@
 class Heap::TriggerPostForkCCGcTask : public HeapTask {
  public:
   explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     // Trigger a GC, if not already done. The first GC after fork, whenever it
     // takes place, will adjust the thresholds to normal levels.
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 8720a3e..7cbad3b 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -29,7 +29,7 @@
 
 class HeapTest : public CommonRuntimeTest {
  public:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     MemMap::Init();
     std::string error_msg;
     // Reserve the preferred address to force the heap to use another one for testing.
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 9b31558..02e84b5 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -35,11 +35,11 @@
 
 // A bump pointer space allocates by incrementing a pointer; it doesn't provide a free
 // implementation as it's intended to be evacuated.
-class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
+class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
  public:
   typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
 
-  SpaceType GetType() const OVERRIDE {
+  SpaceType GetType() const override {
     return kSpaceTypeBumpPointerSpace;
   }
 
@@ -51,27 +51,27 @@
 
   // Allocate num_bytes, returns null if the space is full.
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
   // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(Locks::mutator_lock_);
+      override REQUIRES(Locks::mutator_lock_);
 
   mirror::Object* AllocNonvirtual(size_t num_bytes);
   mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
 
   // Return the storage space required by obj.
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
 
   // No-ops unless we support free lists.
-  size_t Free(Thread*, mirror::Object*) OVERRIDE {
+  size_t Free(Thread*, mirror::Object*) override {
     return 0;
   }
 
-  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+  size_t FreeList(Thread*, size_t, mirror::Object**) override {
     return 0;
   }
 
@@ -94,16 +94,16 @@
     return GetMemMap()->Size();
   }
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return nullptr;
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     return nullptr;
   }
 
   // Reset the space to empty.
-  void Clear() OVERRIDE REQUIRES(!block_lock_);
+  void Clear() override REQUIRES(!block_lock_);
 
   void Dump(std::ostream& os) const;
 
@@ -122,7 +122,7 @@
     return Begin() == End();
   }
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return true;
   }
 
@@ -141,7 +141,7 @@
   // Allocate a new TLAB, returns false if the allocation failed.
   bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
 
-  BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
+  BumpPointerSpace* AsBumpPointerSpace() override {
     return this;
   }
 
@@ -151,7 +151,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!block_lock_);
 
-  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
+  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
 
   // Record objects / bytes freed.
   void RecordFree(int32_t objects, int32_t bytes) {
@@ -159,7 +159,7 @@
     bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst);
   }
 
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Object alignment within the space.
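
A hedged sketch of the allocation scheme described above (single-threaded, invented
names, 8-byte alignment assumed):

    #include <cstddef>
    #include <stdint.h>

    class BumpAllocator {
     public:
      BumpAllocator(uint8_t* begin, uint8_t* end) : pos_(begin), end_(end) {}

      void* Alloc(size_t num_bytes) {
        num_bytes = (num_bytes + 7u) & ~size_t{7};  // round up to 8-byte alignment
        if (num_bytes > static_cast<size_t>(end_ - pos_)) {
          return nullptr;  // space full: no free list to fall back on
        }
        void* result = pos_;
        pos_ += num_bytes;  // "bump" the cursor; reclamation happens by evacuation
        return result;
      }

     private:
      uint8_t* pos_;
      uint8_t* end_;
    };
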
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 66537d5..09f3970 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -53,36 +53,36 @@
   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!lock_);
+      override REQUIRES(!lock_);
   // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!lock_) {
+      override REQUIRES(!lock_) {
     return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                            bytes_tl_bulk_allocated);
   }
   // Virtual to allow MemoryToolMallocSpace to intercept.
-  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
   // Virtual to allow MemoryToolMallocSpace to intercept.
-  virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+  virtual size_t Free(Thread* self, mirror::Object* ptr) override
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
   // Virtual to allow MemoryToolMallocSpace to intercept.
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
     return num_bytes;
   }
 
   // DlMallocSpaces don't have thread local state.
-  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+  size_t RevokeThreadLocalBuffers(art::Thread*) override {
     return 0U;
   }
-  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+  size_t RevokeAllThreadLocalBuffers() override {
     return 0U;
   }
 
@@ -103,23 +103,23 @@
     return mspace_;
   }
 
-  size_t Trim() OVERRIDE;
+  size_t Trim() override;
 
   // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
   // in use, indicated by num_bytes equaling zero.
-  void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+  void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
 
   // Returns the number of bytes that the space has currently obtained from the system. This is
   // greater or equal to the amount of live data in the space.
-  size_t GetFootprint() OVERRIDE;
+  size_t GetFootprint() override;
 
   // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
-  size_t GetFootprintLimit() OVERRIDE;
+  size_t GetFootprintLimit() override;
 
   // Set the maximum number of bytes that the heap is allowed to obtain from the system via
   // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
   // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
-  void SetFootprintLimit(size_t limit) OVERRIDE;
+  void SetFootprintLimit(size_t limit) override;
 
   MallocSpace* CreateInstance(MemMap&& mem_map,
                               const std::string& name,
@@ -128,22 +128,22 @@
                               uint8_t* end,
                               uint8_t* limit,
                               size_t growth_limit,
-                              bool can_move_objects) OVERRIDE;
+                              bool can_move_objects) override;
 
-  uint64_t GetBytesAllocated() OVERRIDE;
-  uint64_t GetObjectsAllocated() OVERRIDE;
+  uint64_t GetBytesAllocated() override;
+  uint64_t GetObjectsAllocated() override;
 
-  virtual void Clear() OVERRIDE;
+  virtual void Clear() override;
 
-  bool IsDlMallocSpace() const OVERRIDE {
+  bool IsDlMallocSpace() const override {
     return true;
   }
 
-  DlMallocSpace* AsDlMallocSpace() OVERRIDE {
+  DlMallocSpace* AsDlMallocSpace() override {
     return this;
   }
 
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
@@ -165,7 +165,7 @@
       REQUIRES(lock_);
 
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
-                        size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
+                        size_t /*maximum_size*/, bool /*low_memory_mode*/) override {
     return CreateMspace(base, morecore_start, initial_size);
   }
   static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 20bce66..93cf947 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -86,11 +86,11 @@
     return image_location_;
   }
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return live_bitmap_.get();
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
     // special cases to test against.
     return live_bitmap_.get();
@@ -102,7 +102,7 @@
   void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) {
   }
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return false;
   }
 
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index a1ffa06..d93385d 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -113,7 +113,7 @@
 template <bool kImage, bool kRelocate, bool kPatchoat, bool kImageDex2oat>
 class ImageSpaceLoadingTest : public CommonRuntimeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     if (kImage) {
       options->emplace_back(android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str()),
                             nullptr);
@@ -152,7 +152,7 @@
 
 class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false, true> {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     const char* android_data = getenv("ANDROID_DATA");
     CHECK(android_data != nullptr);
     old_android_data_ = android_data;
@@ -172,7 +172,7 @@
     ImageSpaceLoadingTest<false, true, false, true>::SetUpRuntimeOptions(options);
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     int result = unlink(bad_dalvik_cache_.c_str());
     CHECK_EQ(result, 0) << strerror(errno);
     result = rmdir(bad_android_data_.c_str());
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 76ea9fd..09d0251 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -39,12 +39,12 @@
 namespace gc {
 namespace space {
 
-class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+class MemoryToolLargeObjectMapSpace final : public LargeObjectMapSpace {
  public:
   explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
   }
 
-  ~MemoryToolLargeObjectMapSpace() OVERRIDE {
+  ~MemoryToolLargeObjectMapSpace() override {
     // Historical note: We were deleting large objects to keep Valgrind happy if there were
     // any large objects such as Dex cache arrays which aren't freed since they are held live
     // by the class linker.
@@ -52,7 +52,7 @@
 
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE {
+      override {
     mirror::Object* obj =
         LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                    usable_size, bytes_tl_bulk_allocated);
@@ -68,21 +68,21 @@
     return object_without_rdz;
   }
 
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
     return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
   }
 
-  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override {
     return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
   }
 
-  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+  size_t Free(Thread* self, mirror::Object* obj) override {
     mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
     MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
     return LargeObjectMapSpace::Free(self, object_with_rdz);
   }
 
-  bool Contains(const mirror::Object* obj) const OVERRIDE {
+  bool Contains(const mirror::Object* obj) const override {
     return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
   }
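
A rough sketch (invented constant and names) of the red-zone bookkeeping used by the
memory-tool space above: each allocation reserves padding on both sides of the payload,
and callers only ever see the interior pointer.

    #include <cstddef>
    #include <stdint.h>

    constexpr size_t kRedZoneBytes = 8;  // hypothetical red-zone width

    // Raw block layout: [red zone][object payload][red zone]
    inline uint8_t* ObjectFromRaw(uint8_t* raw) { return raw + kRedZoneBytes; }
    inline uint8_t* RawFromObject(uint8_t* obj) { return obj - kRedZoneBytes; }
    inline size_t RawSizeFor(size_t payload_bytes) {
      return payload_bytes + 2 * kRedZoneBytes;  // cf. num_bytes + kMemoryToolRedZoneBytes * 2
    }
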
 
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index b69bd91..39ff2c3 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -41,7 +41,7 @@
 // Abstraction implemented by all large object spaces.
 class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
  public:
-  SpaceType GetType() const OVERRIDE {
+  SpaceType GetType() const override {
     return kSpaceTypeLargeObjectSpace;
   }
   void SwapBitmaps();
@@ -49,10 +49,10 @@
   virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
   virtual ~LargeObjectSpace() {}
 
-  uint64_t GetBytesAllocated() OVERRIDE {
+  uint64_t GetBytesAllocated() override {
     return num_bytes_allocated_;
   }
-  uint64_t GetObjectsAllocated() OVERRIDE {
+  uint64_t GetObjectsAllocated() override {
     return num_objects_allocated_;
   }
   uint64_t GetTotalBytesAllocated() const {
@@ -61,22 +61,22 @@
   uint64_t GetTotalObjectsAllocated() const {
     return total_objects_allocated_;
   }
-  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
   // LargeObjectSpaces don't have thread local state.
-  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+  size_t RevokeThreadLocalBuffers(art::Thread*) override {
     return 0U;
   }
-  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+  size_t RevokeAllThreadLocalBuffers() override {
     return 0U;
   }
-  bool IsAllocSpace() const OVERRIDE {
+  bool IsAllocSpace() const override {
     return true;
   }
-  AllocSpace* AsAllocSpace() OVERRIDE {
+  AllocSpace* AsAllocSpace() override {
     return this;
   }
   collector::ObjectBytePair Sweep(bool swap_bitmaps);
-  virtual bool CanMoveObjects() const OVERRIDE {
+  virtual bool CanMoveObjects() const override {
     return false;
   }
   // Current address at which the space begins, which may vary as the space is filled.
@@ -96,7 +96,7 @@
     const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
     return Begin() <= byte_obj && byte_obj < End();
   }
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return true if the large object is a zygote large object. Potentially slow.
@@ -140,11 +140,11 @@
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
       REQUIRES(!lock_);
   size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
-  void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
+  void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
-  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
 
  protected:
   struct LargeObject {
@@ -154,8 +154,8 @@
   explicit LargeObjectMapSpace(const std::string& name);
   virtual ~LargeObjectMapSpace() {}
 
-  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
-  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
 
   // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -164,22 +164,22 @@
 };
 
 // A continuous large object space with a free-list to handle holes.
-class FreeListSpace FINAL : public LargeObjectSpace {
+class FreeListSpace final : public LargeObjectSpace {
  public:
   static constexpr size_t kAlignment = kPageSize;
 
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
       REQUIRES(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!lock_);
-  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
-  void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+      override REQUIRES(!lock_);
+  size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
+  void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
   void Dump(std::ostream& os) const REQUIRES(!lock_);
 
-  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
 
  protected:
   FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
@@ -198,8 +198,8 @@
   }
   // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
   void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
-  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
-  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
 
   class SortByPrevFree {
    public:
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index e4a6f15..6bf2d71 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -133,7 +133,7 @@
   // Returns the class of a recently freed object.
   mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return can_move_objects_;
   }
 
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index 32bd204..33bddfa 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -29,28 +29,28 @@
           size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
+class MemoryToolMallocSpace final : public BaseMallocSpaceType {
  public:
   mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE;
+      override;
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(Locks::mutator_lock_);
+      override REQUIRES(Locks::mutator_lock_);
 
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
 
-  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+  size_t Free(Thread* self, mirror::Object* ptr) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
+  void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) override {}
 
-  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
+  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override;
 
   template <typename... Params>
   MemoryToolMallocSpace(MemMap&& mem_map, size_t initial_size, Params... params);
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8ad26ba..0bf4f38 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -39,7 +39,7 @@
 static constexpr bool kCyclicRegionAllocation = true;
 
 // A space that consists of equal-sized regions.
-class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
+class RegionSpace final : public ContinuousMemMapAllocSpace {
  public:
   typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
 
@@ -49,7 +49,7 @@
     kEvacModeForceAll,
   };
 
-  SpaceType GetType() const OVERRIDE {
+  SpaceType GetType() const override {
     return kSpaceTypeRegionSpace;
   }
 
@@ -65,14 +65,14 @@
                         /* out */ size_t* bytes_allocated,
                         /* out */ size_t* usable_size,
                         /* out */ size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!region_lock_);
+      override REQUIRES(!region_lock_);
   // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
   mirror::Object* AllocThreadUnsafe(Thread* self,
                                     size_t num_bytes,
                                     /* out */ size_t* bytes_allocated,
                                     /* out */ size_t* usable_size,
                                     /* out */ size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+      override REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
   // The main allocation routine.
   template<bool kForEvac>
   ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
@@ -90,29 +90,29 @@
   void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
 
   // Return the storage space required by obj.
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
 
-  size_t Free(Thread*, mirror::Object*) OVERRIDE {
+  size_t Free(Thread*, mirror::Object*) override {
     UNIMPLEMENTED(FATAL);
     return 0;
   }
-  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+  size_t FreeList(Thread*, size_t, mirror::Object**) override {
     UNIMPLEMENTED(FATAL);
     return 0;
   }
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return mark_bitmap_.get();
   }
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     return mark_bitmap_.get();
   }
 
-  void Clear() OVERRIDE REQUIRES(!region_lock_);
+  void Clear() override REQUIRES(!region_lock_);
 
   // Remove read and write memory protection from the whole region space,
   // i.e. make memory pages backing the region area not readable and not
@@ -188,7 +188,7 @@
     return num_regions_;
   }
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return true;
   }
 
@@ -197,7 +197,7 @@
     return byte_obj >= Begin() && byte_obj < Limit();
   }
 
-  RegionSpace* AsRegionSpace() OVERRIDE {
+  RegionSpace* AsRegionSpace() override {
     return this;
   }
 
@@ -212,10 +212,10 @@
     WalkInternal<true /* kToSpaceOnly */>(visitor);
   }
 
-  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
+  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
     return nullptr;
   }
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
 
   // Object alignment within the space.
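The 'final' applied to the class above forbids further derivation entirely; a minimal sketch under hypothetical names:

  class Space {
   public:
    virtual bool CanMoveObjects() const = 0;
    virtual ~Space() = default;
  };

  class Region final : public Space {
   public:
    bool CanMoveObjects() const override { return true; }
  };

  // class SubRegion : public Region {};  // ill-formed: 'Region' is final

Besides documenting intent, sealing the class lets the compiler devirtualize calls made through a 'Region*', since no subclass can exist.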
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index c630826..5162a06 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -52,24 +52,24 @@
 
   mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(!lock_);
+      override REQUIRES(!lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override {
     return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                            bytes_tl_bulk_allocated);
   }
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE REQUIRES(Locks::mutator_lock_) {
+      override REQUIRES(Locks::mutator_lock_) {
     return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
                                        bytes_tl_bulk_allocated);
   }
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
     return AllocationSizeNonvirtual<true>(obj, usable_size);
   }
-  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+  size_t Free(Thread* self, mirror::Object* ptr) override
       REQUIRES_SHARED(Locks::mutator_lock_);
-  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -93,7 +93,7 @@
   // run without allocating a new run.
   ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
                                                  size_t* bytes_allocated);
-  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
     return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
   }
   ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
@@ -107,13 +107,13 @@
     return rosalloc_;
   }
 
-  size_t Trim() OVERRIDE;
-  void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
-  size_t GetFootprint() OVERRIDE;
-  size_t GetFootprintLimit() OVERRIDE;
-  void SetFootprintLimit(size_t limit) OVERRIDE;
+  size_t Trim() override;
+  void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);
+  size_t GetFootprint() override;
+  size_t GetFootprintLimit() override;
+  void SetFootprintLimit(size_t limit) override;
 
-  void Clear() OVERRIDE;
+  void Clear() override;
 
   MallocSpace* CreateInstance(MemMap&& mem_map,
                               const std::string& name,
@@ -122,10 +122,10 @@
                               uint8_t* end,
                               uint8_t* limit,
                               size_t growth_limit,
-                              bool can_move_objects) OVERRIDE;
+                              bool can_move_objects) override;
 
-  uint64_t GetBytesAllocated() OVERRIDE;
-  uint64_t GetObjectsAllocated() OVERRIDE;
+  uint64_t GetBytesAllocated() override;
+  uint64_t GetObjectsAllocated() override;
 
   size_t RevokeThreadLocalBuffers(Thread* thread);
   size_t RevokeAllThreadLocalBuffers();
@@ -135,11 +135,11 @@
   // Returns the class of a recently freed object.
   mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
 
-  bool IsRosAllocSpace() const OVERRIDE {
+  bool IsRosAllocSpace() const override {
     return true;
   }
 
-  RosAllocSpace* AsRosAllocSpace() OVERRIDE {
+  RosAllocSpace* AsRosAllocSpace() override {
     return this;
   }
 
@@ -149,7 +149,7 @@
 
   virtual ~RosAllocSpace();
 
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE {
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override {
     rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
   }
 
@@ -174,7 +174,7 @@
                               size_t* usable_size, size_t* bytes_tl_bulk_allocated);
 
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
-                        size_t maximum_size, bool low_memory_mode) OVERRIDE {
+                        size_t maximum_size, bool low_memory_mode) override {
     return CreateRosAlloc(
         base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
   }
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 4e173a8..2fe1f82 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -352,7 +352,7 @@
     return mark_bitmap_.get();
   }
 
-  virtual bool IsDiscontinuousSpace() const OVERRIDE {
+  virtual bool IsDiscontinuousSpace() const override {
     return true;
   }
 
@@ -409,14 +409,14 @@
 // Used by the heap compaction interface to enable copying from one type of alloc space to another.
 class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
  public:
-  bool IsAllocSpace() const OVERRIDE {
+  bool IsAllocSpace() const override {
     return true;
   }
-  AllocSpace* AsAllocSpace() OVERRIDE {
+  AllocSpace* AsAllocSpace() override {
     return this;
   }
 
-  bool IsContinuousMemMapAllocSpace() const OVERRIDE {
+  bool IsContinuousMemMapAllocSpace() const override {
     return true;
   }
   ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
@@ -435,11 +435,11 @@
   // Clear the space back to an empty space.
   virtual void Clear() = 0;
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
     return live_bitmap_.get();
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
     return mark_bitmap_.get();
   }
 
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 200c79f..1f73577 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -27,7 +27,7 @@
 namespace space {
 
 // A zygote space is a space which you cannot allocate into or free from.
-class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
+class ZygoteSpace final : public ContinuousMemMapAllocSpace {
  public:
   // Returns the remaining storage in the out_map field.
   static ZygoteSpace* Create(const std::string& name,
@@ -38,28 +38,28 @@
 
   void Dump(std::ostream& os) const;
 
-  SpaceType GetType() const OVERRIDE {
+  SpaceType GetType() const override {
     return kSpaceTypeZygoteSpace;
   }
 
-  ZygoteSpace* AsZygoteSpace() OVERRIDE {
+  ZygoteSpace* AsZygoteSpace() override {
     return this;
   }
 
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
 
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override;
 
-  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE;
+  size_t Free(Thread* self, mirror::Object* ptr) override;
 
-  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
 
   // ZygoteSpaces don't have thread local state.
-  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+  size_t RevokeThreadLocalBuffers(art::Thread*) override {
     return 0U;
   }
-  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+  size_t RevokeAllThreadLocalBuffers() override {
     return 0U;
   }
 
@@ -71,13 +71,13 @@
     return objects_allocated_.load(std::memory_order_seq_cst);
   }
 
-  void Clear() OVERRIDE;
+  void Clear() override;
 
-  bool CanMoveObjects() const OVERRIDE {
+  bool CanMoveObjects() const override {
     return false;
   }
 
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 60105f4..ef85b39 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -45,7 +45,7 @@
   }
   virtual ~SystemWeakHolder() {}
 
-  void Allow() OVERRIDE
+  void Allow() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     CHECK(!kUseReadBarrier);
@@ -54,7 +54,7 @@
     new_weak_condition_.Broadcast(Thread::Current());
   }
 
-  void Disallow() OVERRIDE
+  void Disallow() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     CHECK(!kUseReadBarrier);
@@ -62,7 +62,7 @@
     allow_new_system_weak_ = false;
   }
 
-  void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
+  void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) override
       REQUIRES(!allow_disallow_lock_) {
     MutexLock mu(Thread::Current(), allow_disallow_lock_);
     new_weak_condition_.Broadcast(Thread::Current());
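For context on what is being deleted alongside these call sites: pre-C++11 toolchains could not parse the specifiers, so codebases of this vintage typically routed them through compatibility macros along these lines (a sketch of the common pattern only, offered as an assumption; the exact ART definitions are not reproduced here):

  #if __cplusplus >= 201103L
  #define OVERRIDE override
  #define FINAL final
  #else
  #define OVERRIDE
  #define FINAL
  #endif

With the tree now built as C++14 everywhere, the indirection carries no value, so both the macro definitions and their uses can go.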
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 897ab01..07725b9 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -44,7 +44,7 @@
         disallow_count_(0),
         sweep_count_(0) {}
 
-  void Allow() OVERRIDE
+  void Allow() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     SystemWeakHolder::Allow();
@@ -52,7 +52,7 @@
     allow_count_++;
   }
 
-  void Disallow() OVERRIDE
+  void Disallow() override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     SystemWeakHolder::Disallow();
@@ -60,7 +60,7 @@
     disallow_count_++;
   }
 
-  void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
+  void Broadcast(bool broadcast_for_checkpoint) override
       REQUIRES(!allow_disallow_lock_) {
     SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
 
@@ -70,7 +70,7 @@
     }
   }
 
-  void Sweep(IsMarkedVisitor* visitor) OVERRIDE
+  void Sweep(IsMarkedVisitor* visitor) override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
     MutexLock mu(Thread::Current(), allow_disallow_lock_);
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 38581ce..caa8802 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -33,7 +33,7 @@
      : HeapTask(NanoTime() + MsToNs(10)), task_processor_(task_processor), counter_(counter),
        max_recursion_(max_recursion) {
   }
-  virtual void Run(Thread* self) OVERRIDE {
+  virtual void Run(Thread* self) override {
     if (max_recursion_ > 0) {
       task_processor_->AddTask(self,
                                new RecursiveTask(task_processor_, counter_, max_recursion_ - 1));
@@ -52,7 +52,7 @@
   WorkUntilDoneTask(TaskProcessor* task_processor, Atomic<bool>* done_running)
       : task_processor_(task_processor), done_running_(done_running) {
   }
-  virtual void Run(Thread* self) OVERRIDE {
+  virtual void Run(Thread* self) override {
     task_processor_->RunAllTasks(self);
     done_running_->store(true, std::memory_order_seq_cst);
   }
@@ -105,7 +105,7 @@
   TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
      : HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
   }
-  virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
+  virtual void Run(Thread* thread ATTRIBUTE_UNUSED) override {
     ASSERT_EQ(*counter_, expected_counter_);
     ++*counter_;
   }
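Note that the declarations above keep 'virtual' alongside 'override'. Since C++11, 'override' may only appear on a function that overrides a base-class virtual, so the function is implicitly virtual and the leading keyword is redundant, though harmless. A sketch with hypothetical names:

  struct HeapTask {
    virtual void Run() = 0;
    virtual ~HeapTask() = default;
  };

  struct CountTask : HeapTask {
    virtual void Run() override {}  // 'virtual' adds nothing here
    // void Run() override {}       // equivalent, and the tidier spelling
  };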
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index d6a2fa0..5d234ea 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -198,7 +198,7 @@
   CollectRootVisitor(ObjectSet* visited, WorkQueue* work) : visited_(visited), work_(work) {}
 
   void VisitRoot(mirror::Object* obj, const RootInfo& info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (obj != nullptr && visited_->insert(obj).second) {
       std::ostringstream oss;
       oss << info.ToString() << " = " << obj << "(" << obj->PrettyTypeOf() << ")";
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 986e28e..0bd43f9 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -133,7 +133,7 @@
 // critical.
 class SingleRootVisitor : public RootVisitor {
  private:
-  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       VisitRoot(*roots[i], info);
@@ -141,7 +141,7 @@
   }
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                          const RootInfo& info) OVERRIDE
+                          const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       VisitRoot(roots[i]->AsMirrorPtr(), info);
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 28a2302..9eaf1ec 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -259,7 +259,7 @@
 
 // Scoped handle storage of a fixed size that is stack allocated.
 template<size_t kNumReferences>
-class PACKED(4) StackHandleScope FINAL : public FixedSizeHandleScope<kNumReferences> {
+class PACKED(4) StackHandleScope final : public FixedSizeHandleScope<kNumReferences> {
  public:
   explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
   ALWAYS_INLINE ~StackHandleScope();
diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc
index a41d284..4c7efe6 100644
--- a/runtime/hidden_api_test.cc
+++ b/runtime/hidden_api_test.cc
@@ -27,7 +27,7 @@
 
 class HiddenApiTest : public CommonRuntimeTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     // Do the normal setup.
     CommonRuntimeTest::SetUp();
     self_ = Thread::Current();
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 3f44928..e8a47d1 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -303,7 +303,7 @@
   }
   virtual ~EndianOutputBuffered() {}
 
-  void UpdateU4(size_t offset, uint32_t new_value) OVERRIDE {
+  void UpdateU4(size_t offset, uint32_t new_value) override {
     DCHECK_LE(offset, length_ - 4);
     buffer_[offset + 0] = static_cast<uint8_t>((new_value >> 24) & 0xFF);
     buffer_[offset + 1] = static_cast<uint8_t>((new_value >> 16) & 0xFF);
@@ -312,12 +312,12 @@
   }
 
  protected:
-  void HandleU1List(const uint8_t* values, size_t count) OVERRIDE {
+  void HandleU1List(const uint8_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     buffer_.insert(buffer_.end(), values, values + count);
   }
 
-  void HandleU1AsU2List(const uint8_t* values, size_t count) OVERRIDE {
+  void HandleU1AsU2List(const uint8_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     // All 8-bits are grouped in 2 to make 16-bit block like Java Char
     if (count & 1) {
@@ -330,7 +330,7 @@
     }
   }
 
-  void HandleU2List(const uint16_t* values, size_t count) OVERRIDE {
+  void HandleU2List(const uint16_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     for (size_t i = 0; i < count; ++i) {
       uint16_t value = *values;
@@ -340,7 +340,7 @@
     }
   }
 
-  void HandleU4List(const uint32_t* values, size_t count) OVERRIDE {
+  void HandleU4List(const uint32_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     for (size_t i = 0; i < count; ++i) {
       uint32_t value = *values;
@@ -352,7 +352,7 @@
     }
   }
 
-  void HandleU8List(const uint64_t* values, size_t count) OVERRIDE {
+  void HandleU8List(const uint64_t* values, size_t count) override {
     DCHECK_EQ(length_, buffer_.size());
     for (size_t i = 0; i < count; ++i) {
       uint64_t value = *values;
@@ -368,7 +368,7 @@
     }
   }
 
-  void HandleEndRecord() OVERRIDE {
+  void HandleEndRecord() override {
     DCHECK_EQ(buffer_.size(), length_);
     if (kIsDebugBuild && started_) {
       uint32_t stored_length =
@@ -388,7 +388,7 @@
   std::vector<uint8_t> buffer_;
 };
 
-class FileEndianOutput FINAL : public EndianOutputBuffered {
+class FileEndianOutput final : public EndianOutputBuffered {
  public:
   FileEndianOutput(File* fp, size_t reserved_size)
       : EndianOutputBuffered(reserved_size), fp_(fp), errors_(false) {
@@ -402,7 +402,7 @@
   }
 
  protected:
-  void HandleFlush(const uint8_t* buffer, size_t length) OVERRIDE {
+  void HandleFlush(const uint8_t* buffer, size_t length) override {
     if (!errors_) {
       errors_ = !fp_->WriteFully(buffer, length);
     }
@@ -413,14 +413,14 @@
   bool errors_;
 };
 
-class VectorEndianOuputput FINAL : public EndianOutputBuffered {
+class VectorEndianOuputput final : public EndianOutputBuffered {
  public:
   VectorEndianOuputput(std::vector<uint8_t>& data, size_t reserved_size)
       : EndianOutputBuffered(reserved_size), full_data_(data) {}
   ~VectorEndianOuputput() {}
 
  protected:
-  void HandleFlush(const uint8_t* buf, size_t length) OVERRIDE {
+  void HandleFlush(const uint8_t* buf, size_t length) override {
     size_t old_size = full_data_.size();
     full_data_.resize(old_size + length);
     memcpy(full_data_.data() + old_size, buf, length);
@@ -604,7 +604,7 @@
   }
 
   void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+      override REQUIRES_SHARED(Locks::mutator_lock_);
   void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
                       uint32_t thread_serial);
 
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4196e19..b42433c 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -85,7 +85,7 @@
   explicit InstallStubsClassVisitor(Instrumentation* instrumentation)
       : instrumentation_(instrumentation) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
     instrumentation_->InstallStubsForClass(klass.Ptr());
     return true;  // we visit all classes.
   }
@@ -264,7 +264,7 @@
 // existing instrumentation frames.
 static void InstrumentationInstallStack(Thread* thread, void* arg)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  struct InstallStackVisitor FINAL : public StackVisitor {
+  struct InstallStackVisitor final : public StackVisitor {
     InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
         : StackVisitor(thread_in, context, kInstrumentationStackWalk),
           instrumentation_stack_(thread_in->GetInstrumentationStack()),
@@ -273,7 +273,7 @@
           last_return_pc_(0) {
     }
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       ArtMethod* m = GetMethod();
       if (m == nullptr) {
         if (kVerboseInstrumentation) {
@@ -429,7 +429,7 @@
     REQUIRES(Locks::mutator_lock_) {
   Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
 
-  struct RestoreStackVisitor FINAL : public StackVisitor {
+  struct RestoreStackVisitor final : public StackVisitor {
     RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
                         Instrumentation* instrumentation)
         : StackVisitor(thread_in, nullptr, kInstrumentationStackWalk),
@@ -439,7 +439,7 @@
           instrumentation_stack_(thread_in->GetInstrumentationStack()),
           frames_removed_(0) {}
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       if (instrumentation_stack_->size() == 0) {
         return false;  // Stop.
       }
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 8ac26af..9146245 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -36,7 +36,7 @@
 namespace art {
 namespace instrumentation {
 
-class TestInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+class TestInstrumentationListener final : public instrumentation::InstrumentationListener {
  public:
   TestInstrumentationListener()
     : received_method_enter_event(false),
@@ -59,7 +59,7 @@
                      Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                      ArtMethod* method ATTRIBUTE_UNUSED,
                      uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_enter_event = true;
   }
 
@@ -68,7 +68,7 @@
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     Handle<mirror::Object> return_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_exit_object_event = true;
   }
 
@@ -77,7 +77,7 @@
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     const JValue& return_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_exit_event = true;
   }
 
@@ -85,7 +85,7 @@
                     Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_unwind_event = true;
   }
 
@@ -93,7 +93,7 @@
                   Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                   ArtMethod* method ATTRIBUTE_UNUSED,
                   uint32_t new_dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_dex_pc_moved_event = true;
   }
 
@@ -102,7 +102,7 @@
                  ArtMethod* method ATTRIBUTE_UNUSED,
                  uint32_t dex_pc ATTRIBUTE_UNUSED,
                  ArtField* field ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_field_read_event = true;
   }
 
@@ -112,7 +112,7 @@
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     ArtField* field ATTRIBUTE_UNUSED,
                     Handle<mirror::Object> field_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_field_written_object_event = true;
   }
 
@@ -122,19 +122,19 @@
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     ArtField* field ATTRIBUTE_UNUSED,
                     const JValue& field_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_field_written_event = true;
   }
 
   void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
                        Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_exception_thrown_event = true;
   }
 
   void ExceptionHandled(Thread* self ATTRIBUTE_UNUSED,
                         Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_exception_handled_event = true;
   }
 
@@ -142,7 +142,7 @@
               ArtMethod* method ATTRIBUTE_UNUSED,
               uint32_t dex_pc ATTRIBUTE_UNUSED,
               int32_t dex_pc_offset ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_branch_event = true;
   }
 
@@ -151,12 +151,12 @@
                                 ArtMethod* caller ATTRIBUTE_UNUSED,
                                 uint32_t dex_pc ATTRIBUTE_UNUSED,
                                 ArtMethod* callee ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_invoke_virtual_or_interface_event = true;
   }
 
   void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED, const ShadowFrame& frame ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_watched_frame_pop  = true;
   }
 
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index b56c48d..8b4fe44 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -86,7 +86,7 @@
 
 class TestPredicate : public IsMarkedVisitor {
  public:
-  mirror::Object* IsMarked(mirror::Object* s) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  mirror::Object* IsMarked(mirror::Object* s) override REQUIRES_SHARED(Locks::mutator_lock_) {
     bool erased = false;
     for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
       if (*it == s) {
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index 25ac6e2..452a76b 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -27,12 +27,12 @@
 
 class StackVisitor;
 
-class JavaFrameRootInfo FINAL : public RootInfo {
+class JavaFrameRootInfo final : public RootInfo {
  public:
   JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
      : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
   }
-  void Describe(std::ostream& os) const OVERRIDE
+  void Describe(std::ostream& os) const override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   size_t GetVReg() const {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ed449b5..a6bc029 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -399,7 +399,7 @@
 
 void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
   struct CollectClasses : public ClassVisitor {
-    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       classes_.push_back(klass.Ptr());
       return true;
     }
@@ -576,7 +576,7 @@
   memory_use_.AddValue(bytes);
 }
 
-class JitCompileTask FINAL : public Task {
+class JitCompileTask final : public Task {
  public:
   enum TaskKind {
     kAllocateProfile,
@@ -596,7 +596,7 @@
     soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
   }
 
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
     if (kind_ == kCompile) {
       Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
@@ -611,7 +611,7 @@
     ProfileSaver::NotifyJitActivity();
   }
 
-  void Finalize() OVERRIDE {
+  void Finalize() override {
     delete this;
   }
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index d9c7900..33adc18 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1088,14 +1088,14 @@
   }
 }
 
-class MarkCodeVisitor FINAL : public StackVisitor {
+class MarkCodeVisitor final : public StackVisitor {
  public:
   MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
       : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
         code_cache_(code_cache_in),
         bitmap_(code_cache_->GetLiveBitmap()) {}
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
     if (method_header == nullptr) {
       return true;
@@ -1113,12 +1113,12 @@
   CodeCacheBitmap* const bitmap_;
 };
 
-class MarkCodeClosure FINAL : public Closure {
+class MarkCodeClosure final : public Closure {
  public:
   MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
       : code_cache_(code_cache), barrier_(barrier) {}
 
-  void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
     ScopedTrace trace(__PRETTY_FUNCTION__);
     DCHECK(thread == Thread::Current() || thread->IsSuspended());
     MarkCodeVisitor visitor(thread, code_cache_);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 6ccda8b..d9ef922 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -255,7 +255,7 @@
         class_loaders_(class_loaders) {}
 
   void Visit(ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     class_loaders_->push_back(hs_->NewHandle(class_loader));
   }
 
diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc
index 8424610..f695c8f 100644
--- a/runtime/jit/profiling_info_test.cc
+++ b/runtime/jit/profiling_info_test.cc
@@ -40,7 +40,7 @@
 
 class ProfileCompilationInfoTest : public CommonRuntimeTest {
  public:
-  void PostRuntimeCreate() OVERRIDE {
+  void PostRuntimeCreate() override {
     allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
   }
 
diff --git a/runtime/jni/java_vm_ext_test.cc b/runtime/jni/java_vm_ext_test.cc
index 74e4a30..fe1c168 100644
--- a/runtime/jni/java_vm_ext_test.cc
+++ b/runtime/jni/java_vm_ext_test.cc
@@ -34,7 +34,7 @@
   }
 
 
-  virtual void TearDown() OVERRIDE {
+  virtual void TearDown() override {
     CommonRuntimeTest::TearDown();
   }
 
@@ -137,7 +137,7 @@
 
 class JavaVmExtStackTraceTest : public JavaVmExtTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
     options->emplace_back("-XX:GlobalRefAllocStackTraceLimit=50000", nullptr);
   }
 };
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index a25049e..a4b151a 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -84,7 +84,7 @@
     }
   }
 
-  virtual void TearDown() OVERRIDE {
+  virtual void TearDown() override {
     CleanUpJniEnv();
     CommonCompilerTest::TearDown();
   }
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index c3e167c..811ee51 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -63,7 +63,7 @@
 using StringDexCacheType = std::atomic<StringDexCachePair>;
 
 // C++ mirror of java.lang.Class
-class MANAGED Class FINAL : public Object {
+class MANAGED Class final : public Object {
  public:
   // A magic value for reference_instance_offsets_. Ignore the bits and walk the super chain when
   // this is the value.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 87f4f0a..ba91e4f 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -141,7 +141,7 @@
 using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
 
 // C++ mirror of java.lang.DexCache.
-class MANAGED DexCache FINAL : public Object {
+class MANAGED DexCache final : public Object {
  public:
   // Size of java.lang.DexCache.class.
   static uint32_t ClassSize(PointerSize pointer_size);
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 7a70cae..528740b 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,7 +34,7 @@
 
 class DexCacheMethodHandlesTest : public DexCacheTest {
  protected:
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+  virtual void SetUpRuntimeOptions(RuntimeOptions* options) override {
     CommonRuntimeTest::SetUpRuntimeOptions(options);
   }
 };
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index d72c786..9e3c9af 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -23,7 +23,7 @@
 namespace art {
 namespace mirror {
 
-class MANAGED IfTable FINAL : public ObjectArray<Object> {
+class MANAGED IfTable final : public ObjectArray<Object> {
  public:
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
diff --git a/runtime/mirror/proxy.h b/runtime/mirror/proxy.h
index db511d6..7775de3 100644
--- a/runtime/mirror/proxy.h
+++ b/runtime/mirror/proxy.h
@@ -26,7 +26,7 @@
 namespace mirror {
 
 // C++ mirror of java.lang.reflect.Proxy.
-class MANAGED Proxy FINAL : public Object {
+class MANAGED Proxy final : public Object {
  private:
   HeapReference<Object> h_;
 
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 55a2ef0..37ac575 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -27,7 +27,7 @@
 namespace mirror {
 
 // C++ mirror of java.lang.StackTraceElement
-class MANAGED StackTraceElement FINAL : public Object {
+class MANAGED StackTraceElement final : public Object {
  public:
   String* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 0e2fc90..d08717c 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -40,7 +40,7 @@
 };
 
 // C++ mirror of java.lang.String
-class MANAGED String FINAL : public Object {
+class MANAGED String final : public Object {
  public:
   // Size of java.lang.String.class.
   static uint32_t ClassSize(PointerSize pointer_size);
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 56c953b..864e1ea 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -353,7 +353,7 @@
 //
 
 template <typename T>
-class JValueByteSwapper FINAL {
+class JValueByteSwapper final {
  public:
   static void ByteSwap(JValue* value);
   static void MaybeByteSwap(bool byte_swap, JValue* value) {
@@ -392,7 +392,7 @@
  public:
   explicit AtomicGetAccessor(JValue* result) : result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     StoreResult(atom->load(MO), result_);
   }
@@ -406,7 +406,7 @@
  public:
   explicit AtomicSetAccessor(T new_value) : new_value_(new_value) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     atom->store(new_value_, MO);
   }
@@ -431,7 +431,7 @@
   AtomicStrongCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
       : expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     bool success = atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
     StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -453,7 +453,7 @@
   AtomicStrongCompareAndExchangeAccessor(T expected_value, T desired_value, JValue* result)
       : expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     atom->compare_exchange_strong(expected_value_, desired_value_, MOS, MOF);
     StoreResult(expected_value_, result_);
@@ -475,7 +475,7 @@
   AtomicWeakCompareAndSetAccessor(T expected_value, T desired_value, JValue* result)
       : expected_value_(expected_value), desired_value_(desired_value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     bool success = atom->compare_exchange_weak(expected_value_, desired_value_, MOS, MOF);
     StoreResult(success ? JNI_TRUE : JNI_FALSE, result_);
@@ -496,7 +496,7 @@
  public:
   AtomicGetAndSetAccessor(T new_value, JValue* result) : new_value_(new_value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     T old_value = atom->exchange(new_value_, MO);
     StoreResult(old_value, result_);
@@ -540,7 +540,7 @@
  public:
   AtomicGetAndAddAccessor(T addend, JValue* result) : addend_(addend), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     constexpr bool kIsFloatingPoint = std::is_floating_point<T>::value;
     T old_value = AtomicGetAndAddOperator<T, kIsFloatingPoint, MO>::Apply(addr, addend_);
     StoreResult(old_value, result_);
@@ -562,7 +562,7 @@
  public:
   AtomicGetAndAddWithByteSwapAccessor(T value, JValue* result) : value_(value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* const atom = reinterpret_cast<std::atomic<T>*>(addr);
     T current_value = atom->load(std::memory_order_relaxed);
     T sum;
@@ -591,7 +591,7 @@
  public:
   AtomicGetAndBitwiseOrAccessor(T value, JValue* result) : value_(value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     T old_value = atom->fetch_or(value_, MO);
     StoreResult(old_value, result_);
@@ -610,7 +610,7 @@
  public:
   AtomicGetAndBitwiseAndAccessor(T value, JValue* result) : value_(value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     T old_value = atom->fetch_and(value_, MO);
     StoreResult(old_value, result_);
@@ -630,7 +630,7 @@
  public:
   AtomicGetAndBitwiseXorAccessor(T value, JValue* result) : value_(value), result_(result) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     std::atomic<T>* atom = reinterpret_cast<std::atomic<T>*>(addr);
     T old_value = atom->fetch_xor(value_, MO);
     StoreResult(old_value, result_);
@@ -679,7 +679,7 @@
   explicit TypeAdaptorAccessor(Object::Accessor<U>* inner_accessor)
       : inner_accessor_(inner_accessor) {}
 
-  void Access(T* addr) OVERRIDE {
+  void Access(T* addr) override {
     static_assert(sizeof(T) == sizeof(U), "bad conversion");
     inner_accessor_->Access(reinterpret_cast<U*>(addr));
   }
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index d47bc0d..6e5786a 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -184,7 +184,7 @@
     if (locking_method_ != nullptr && UNLIKELY(locking_method_->IsProxyMethod())) {
       // Grab another frame. Proxy methods are not helpful for lock profiling. This should be rare
       // enough that it's OK to walk the stack twice.
-      struct NextMethodVisitor FINAL : public StackVisitor {
+      struct NextMethodVisitor final : public StackVisitor {
         explicit NextMethodVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
             : StackVisitor(thread,
                            nullptr,
@@ -193,7 +193,7 @@
               count_(0),
               method_(nullptr),
               dex_pc_(0) {}
-        bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+        bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
           ArtMethod* m = GetMethod();
           if (m->IsRuntimeMethod()) {
             // Continue if this is a runtime method.
@@ -271,7 +271,7 @@
 
 // Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
 
-struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
+struct NthCallerWithDexPcVisitor final : public StackVisitor {
   explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -279,7 +279,7 @@
         dex_pc_(0),
         current_frame_number_(0),
         wanted_frame_number_(frame) {}
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m == nullptr || m->IsRuntimeMethod()) {
       // Runtime method, upcall, or resolution issue. Skip.
@@ -514,7 +514,7 @@
                 if (should_dump_stacks) {
                   // Very long contention. Dump stacks.
                   struct CollectStackTrace : public Closure {
-                    void Run(art::Thread* thread) OVERRIDE
+                    void Run(art::Thread* thread) override
                         REQUIRES_SHARED(art::Locks::mutator_lock_) {
                       thread->DumpJavaStack(oss);
                     }
@@ -1574,7 +1574,7 @@
  public:
   MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
 
-  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
+  virtual mirror::Object* IsMarked(mirror::Object* object) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Monitor::Deflate(self_, object)) {
       DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
diff --git a/runtime/monitor_objects_stack_visitor.h b/runtime/monitor_objects_stack_visitor.h
index 5c962c3..c943402 100644
--- a/runtime/monitor_objects_stack_visitor.h
+++ b/runtime/monitor_objects_stack_visitor.h
@@ -54,7 +54,7 @@
     kEndStackWalk,
   };
 
-  bool VisitFrame() FINAL REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() final REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
       return true;
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index bff8d76..c88748f 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -34,7 +34,7 @@
 
 class MonitorTest : public CommonRuntimeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     // Use a smaller heap
     SetUpRuntimeOptionsForFillHeap(options);
 
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 7ac4086..6f98a6d 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -207,7 +207,7 @@
    public:
     explicit DumpClassVisitor(int dump_flags) : flags_(dump_flags) {}
 
-    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       klass->DumpClass(LOG_STREAM(ERROR), flags_);
       return true;
     }
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 9b3fd16..0e61940 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -332,7 +332,7 @@
   explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     ObjPtr<mirror::String> string = root->AsString();
     table_->operator[](string->ToModifiedUtf8()) = string;
   }
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 5b47eac..72dae47 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -120,9 +120,9 @@
         : StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
           class_set_(class_set) {}
 
-    ~NonDebuggableStacksVisitor() OVERRIDE {}
+    ~NonDebuggableStacksVisitor() override {}
 
-    bool VisitFrame() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES(Locks::mutator_lock_) {
       if (GetMethod()->IsRuntimeMethod()) {
         return true;
       }
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 9c777cc..496a6f3 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -21,22 +21,22 @@
 
 namespace art {
 
-class NoopCompilerCallbacks FINAL : public CompilerCallbacks {
+class NoopCompilerCallbacks final : public CompilerCallbacks {
  public:
   NoopCompilerCallbacks() : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp) {}
   ~NoopCompilerCallbacks() {}
 
-  void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
+  void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {
   }
 
-  void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
+  void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
 
   // This is only used by compilers which need to be able to run without relocation even when it
   // would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
   // to disable the relocation since both deal with writing out the images directly.
-  bool IsRelocationPossible() OVERRIDE { return false; }
+  bool IsRelocationPossible() override { return false; }
 
-  verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return nullptr; }
+  verifier::VerifierDeps* GetVerifierDeps() const override { return nullptr; }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(NoopCompilerCallbacks);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index c7daef8..4780aea 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -889,7 +889,7 @@
 // OatFile via dlopen //
 ////////////////////////
 
-class DlOpenOatFile FINAL : public OatFileBase {
+class DlOpenOatFile final : public OatFileBase {
  public:
   DlOpenOatFile(const std::string& filename, bool executable)
       : OatFileBase(filename, executable),
@@ -911,7 +911,7 @@
 
  protected:
   const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
-                                          std::string* error_msg) const OVERRIDE {
+                                          std::string* error_msg) const override {
     const uint8_t* ptr =
         reinterpret_cast<const uint8_t*>(dlsym(dlopen_handle_, symbol_name.c_str()));
     if (ptr == nullptr) {
@@ -920,21 +920,21 @@
     return ptr;
   }
 
-  void PreLoad() OVERRIDE;
+  void PreLoad() override;
 
   bool Load(const std::string& elf_filename,
             uint8_t* oat_file_begin,
             bool writable,
             bool executable,
             bool low_4gb,
-            std::string* error_msg) OVERRIDE;
+            std::string* error_msg) override;
 
   bool Load(int, uint8_t*, bool, bool, bool, std::string*) {
     return false;
   }
 
   // Ask the linker where it mmaped the file and notify our mmap wrapper of the regions.
-  void PreSetup(const std::string& elf_filename) OVERRIDE;
+  void PreSetup(const std::string& elf_filename) override;
 
  private:
   bool Dlopen(const std::string& elf_filename,
@@ -1156,7 +1156,7 @@
 // OatFile via our own ElfFile implementation //
 ////////////////////////////////////////////////
 
-class ElfOatFile FINAL : public OatFileBase {
+class ElfOatFile final : public OatFileBase {
  public:
   ElfOatFile(const std::string& filename, bool executable) : OatFileBase(filename, executable) {}
 
@@ -1179,7 +1179,7 @@
 
  protected:
   const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name,
-                                          std::string* error_msg) const OVERRIDE {
+                                          std::string* error_msg) const override {
     const uint8_t* ptr = elf_file_->FindDynamicSymbolAddress(symbol_name);
     if (ptr == nullptr) {
       *error_msg = "(Internal implementation could not find symbol)";
@@ -1187,7 +1187,7 @@
     return ptr;
   }
 
-  void PreLoad() OVERRIDE {
+  void PreLoad() override {
   }
 
   bool Load(const std::string& elf_filename,
@@ -1195,16 +1195,16 @@
             bool writable,
             bool executable,
             bool low_4gb,
-            std::string* error_msg) OVERRIDE;
+            std::string* error_msg) override;
 
   bool Load(int oat_fd,
             uint8_t* oat_file_begin,  // Override where the file is loaded to if not null
             bool writable,
             bool executable,
             bool low_4gb,
-            std::string* error_msg) OVERRIDE;
+            std::string* error_msg) override;
 
-  void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) OVERRIDE {
+  void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) override {
   }
 
  private:
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 4ed26fa..21e2144 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -146,7 +146,7 @@
 
   const OatHeader& GetOatHeader() const;
 
-  class OatMethod FINAL {
+  class OatMethod final {
    public:
     void LinkMethod(ArtMethod* method) const;
 
@@ -201,7 +201,7 @@
     friend class OatClass;
   };
 
-  class OatClass FINAL {
+  class OatClass final {
    public:
     ClassStatus GetStatus() const {
       return status_;
@@ -444,7 +444,7 @@
 // support forward declarations of inner classes, and we want to
 // forward-declare OatDexFile so that we can store an opaque pointer to an
 // OatDexFile in DexFile.
-class OatDexFile FINAL {
+class OatDexFile final {
  public:
   // Opens the DexFile referred to by this OatDexFile from within the containing OatFile.
   std::unique_ptr<const DexFile> OpenDexFile(std::string* error_msg) const;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 36dea60..f1e485b 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -30,7 +30,7 @@
 
 class ProxyTest : public CommonRuntimeTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
     // The creation of a Proxy class uses WellKnownClasses. These are not normally initialized by
     // CommonRuntimeTest so we need to do that now.
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 7f5717f..7b92151 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -58,7 +58,7 @@
       full_fragment_done_(false) {}
 
 // Finds catch handler.
-class CatchBlockStackVisitor FINAL : public StackVisitor {
+class CatchBlockStackVisitor final : public StackVisitor {
  public:
   CatchBlockStackVisitor(Thread* self,
                          Context* context,
@@ -72,7 +72,7 @@
         skip_frames_(skip_frames) {
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
     if (method == nullptr) {
@@ -350,7 +350,7 @@
 }
 
 // Prepares deoptimization.
-class DeoptimizeStackVisitor FINAL : public StackVisitor {
+class DeoptimizeStackVisitor final : public StackVisitor {
  public:
   DeoptimizeStackVisitor(Thread* self,
                          Context* context,
@@ -399,7 +399,7 @@
     }
   }
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
     ArtMethod* method = GetMethod();
     if (method == nullptr || single_frame_done_) {
@@ -667,14 +667,14 @@
 }
 
 // Prints out methods with their type of frame.
-class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor {
+class DumpFramesWithTypeStackVisitor final : public StackVisitor {
  public:
   explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         show_details_(show_details) {}
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     if (show_details_) {
       LOG(INFO) << "|> pc   = " << std::hex << GetCurrentQuickFramePc();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 30d4587..2431507 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2638,7 +2638,7 @@
   explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
       : instrumentation_(instrumentation) {}
 
-  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
     auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
     for (auto& m : klass->GetMethods(pointer_size)) {
       const void* code = m.GetEntryPointFromQuickCompiledCode();
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index ed0472f..e1e0e23 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -50,7 +50,7 @@
 
 class RuntimeCallbacksTest : public CommonRuntimeTest {
  protected:
-  void SetUp() OVERRIDE {
+  void SetUp() override {
     CommonRuntimeTest::SetUp();
 
     Thread* self = Thread::Current();
@@ -60,7 +60,7 @@
     AddListener();
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     {
       Thread* self = Thread::Current();
       ScopedObjectAccess soa(self);
@@ -101,10 +101,10 @@
   }
 
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddThreadLifecycleCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveThreadLifecycleCallback(&cb_);
   }
 
@@ -117,7 +117,7 @@
   };
 
   struct Callback : public ThreadLifecycleCallback {
-    void ThreadStart(Thread* self) OVERRIDE {
+    void ThreadStart(Thread* self) override {
       if (state == CallbackState::kBase) {
         state = CallbackState::kStarted;
         stored_self = self;
@@ -126,7 +126,7 @@
       }
     }
 
-    void ThreadDeath(Thread* self) OVERRIDE {
+    void ThreadDeath(Thread* self) override {
       if (state == CallbackState::kStarted && self == stored_self) {
         state = CallbackState::kDied;
       } else {
@@ -219,10 +219,10 @@
 
 class ClassLoadCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddClassLoadCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveClassLoadCallback(&cb_);
   }
 
@@ -259,7 +259,7 @@
                                 const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
                                 /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
                                 /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
-        OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+        override REQUIRES_SHARED(Locks::mutator_lock_) {
       const std::string& location = initial_dex_file.GetLocation();
       std::string event =
           std::string("PreDefine:") + descriptor + " <" +
@@ -267,14 +267,14 @@
       data.push_back(event);
     }
 
-    void ClassLoad(Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string tmp;
       std::string event = std::string("Load:") + klass->GetDescriptor(&tmp);
       data.push_back(event);
     }
 
     void ClassPrepare(Handle<mirror::Class> temp_klass,
-                      Handle<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+                      Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string tmp, tmp2;
       std::string event = std::string("Prepare:") + klass->GetDescriptor(&tmp)
           + "[" + temp_klass->GetDescriptor(&tmp2) + "]";
@@ -319,15 +319,15 @@
 
 class RuntimeSigQuitCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddRuntimeSigQuitCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimeSigQuitCallback(&cb_);
   }
 
   struct Callback : public RuntimeSigQuitCallback {
-    void SigQuit() OVERRIDE {
+    void SigQuit() override {
       ++sigquit_count;
     }
 
@@ -362,20 +362,20 @@
 
 class RuntimePhaseCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&cb_);
   }
 
-  void TearDown() OVERRIDE {
+  void TearDown() override {
     // Bypass RuntimeCallbacksTest::TearDown, as the runtime is already gone.
     CommonRuntimeTest::TearDown();
   }
 
   struct Callback : public RuntimePhaseCallback {
-    void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) OVERRIDE {
+    void NextRuntimePhase(RuntimePhaseCallback::RuntimePhase p) override {
       if (p == RuntimePhaseCallback::RuntimePhase::kInitialAgents) {
         if (start_seen > 0 || init_seen > 0 || death_seen > 0) {
           LOG(FATAL) << "Unexpected order";
@@ -434,10 +434,10 @@
 
 class MonitorWaitCallbacksTest : public RuntimeCallbacksTest {
  protected:
-  void AddListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void AddListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(&cb_);
   }
-  void RemoveListener() OVERRIDE REQUIRES(Locks::mutator_lock_) {
+  void RemoveListener() override REQUIRES(Locks::mutator_lock_) {
     Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(&cb_);
   }
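
The same guarantee covers the GoogleTest fixtures in this file: SetUp() and TearDown() are virtuals inherited, ultimately, from ::testing::Test, so 'override' catches capitalization slips such as Setup() at compile time. A short sketch against plain gtest:

#include <gtest/gtest.h>

class MyRuntimeTest : public ::testing::Test {
 protected:
  void SetUp() override {}     // checked against ::testing::Test::SetUp
  void TearDown() override {}  // likewise for TearDown
  // void Setup() override;    // error: does not override -- typo caught
};
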
 
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ce99fb9..eb9c661 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -461,7 +461,7 @@
     NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
         : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}
 
-    bool VisitFrame() OVERRIDE {
+    bool VisitFrame() override {
       frames++;
       return true;
     }
@@ -487,7 +487,7 @@
           next_dex_pc_(0) {
     }
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       if (found_frame_) {
         ArtMethod* method = GetMethod();
         if (method != nullptr && !method->IsRuntimeMethod()) {
@@ -520,7 +520,7 @@
     explicit DescribeStackVisitor(Thread* thread_in)
         : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
       return true;
     }
diff --git a/runtime/thread.cc b/runtime/thread.cc
index df7f19d..8a637a2 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1486,7 +1486,7 @@
  public:
   explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
 
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     wrapped_->Run(self);
     barrier_.Pass(self);
   }
@@ -1844,7 +1844,7 @@
   static constexpr size_t kMaxRepetition = 3u;
 
   VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
     ObjPtr<mirror::Class> c = m->GetDeclaringClass();
@@ -1883,24 +1883,24 @@
     return VisitMethodResult::kContinueMethod;
   }
 
-  VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+  VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
     return VisitMethodResult::kContinueMethod;
   }
 
   void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     PrintObject(obj, "  - waiting on ", ThreadList::kInvalidThreadId);
   }
   void VisitSleepingObject(mirror::Object* obj)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     PrintObject(obj, "  - sleeping on ", ThreadList::kInvalidThreadId);
   }
   void VisitBlockedOnObject(mirror::Object* obj,
                             ThreadState state,
                             uint32_t owner_tid)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     const char* msg;
     switch (state) {
@@ -1919,7 +1919,7 @@
     PrintObject(obj, msg, owner_tid);
   }
   void VisitLockedObject(mirror::Object* obj)
-      OVERRIDE
+      override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     PrintObject(obj, "  - locked ", ThreadList::kInvalidThreadId);
   }
@@ -2216,7 +2216,7 @@
 
   // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
   void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+      override NO_THREAD_SAFETY_ANALYSIS {
     if (self_->HoldsLock(entered_monitor)) {
       LOG(WARNING) << "Calling MonitorExit on object "
                    << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
@@ -2845,7 +2845,7 @@
 
    protected:
     VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
           soaa_, m, GetDexPc(/* abort on error */ false));
@@ -2856,7 +2856,7 @@
       return VisitMethodResult::kContinueMethod;
     }
 
-    VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+    VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
       lock_objects_.push_back({});
       lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
 
@@ -2866,24 +2866,24 @@
     }
 
     void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
     }
     void VisitSleepingObject(mirror::Object* obj)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
     }
     void VisitBlockedOnObject(mirror::Object* obj,
                               ThreadState state ATTRIBUTE_UNUSED,
                               uint32_t owner_tid ATTRIBUTE_UNUSED)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
     }
     void VisitLockedObject(mirror::Object* obj)
-        OVERRIDE
+        override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
     }
@@ -3450,7 +3450,7 @@
 
 // Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
 //       so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
-struct CurrentMethodVisitor FINAL : public StackVisitor {
+struct CurrentMethodVisitor final : public StackVisitor {
   CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : StackVisitor(thread,
@@ -3461,7 +3461,7 @@
         method_(nullptr),
         dex_pc_(0),
         abort_on_error_(abort_on_error) {}
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
       // Continue if this is a runtime method.
@@ -3857,7 +3857,7 @@
 class VerifyRootVisitor : public SingleRootVisitor {
  public:
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+      override REQUIRES_SHARED(Locks::mutator_lock_) {
     VerifyObject(root);
   }
 };
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 9222024..cddc275 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -199,7 +199,7 @@
 static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;
 
 // A closure used by Thread::Dump.
-class DumpCheckpoint FINAL : public Closure {
+class DumpCheckpoint final : public Closure {
  public:
   DumpCheckpoint(std::ostream* os, bool dump_native_stack)
       : os_(os),
@@ -211,7 +211,7 @@
     }
   }
 
-  void Run(Thread* thread) OVERRIDE {
+  void Run(Thread* thread) override {
     // Note thread and self may not be equal if thread was already suspended at the point of the
     // request.
     Thread* self = Thread::Current();
diff --git a/runtime/trace.h b/runtime/trace.h
index 1fae250..5d96493 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -102,7 +102,7 @@
 // Class for recording event traces. Trace data is either collected
 // synchronously during execution (TracingMode::kMethodTracingActive),
 // or by a separate sampling thread (TracingMode::kSampleProfilingActive).
-class Trace FINAL : public instrumentation::InstrumentationListener {
+class Trace final : public instrumentation::InstrumentationListener {
  public:
   enum TraceFlag {
     kTraceCountAllocs = 1,
@@ -181,57 +181,57 @@
                      ArtMethod* method,
                      uint32_t dex_pc)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
-      OVERRIDE;
+      override;
   void MethodExited(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     const JValue& return_value)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
-      OVERRIDE;
+      override;
   void MethodUnwind(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
-      OVERRIDE;
+      override;
   void DexPcMoved(Thread* thread,
                   Handle<mirror::Object> this_object,
                   ArtMethod* method,
                   uint32_t new_dex_pc)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
-      OVERRIDE;
+      override;
   void FieldRead(Thread* thread,
                  Handle<mirror::Object> this_object,
                  ArtMethod* method,
                  uint32_t dex_pc,
                  ArtField* field)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void FieldWritten(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     ArtField* field,
                     const JValue& field_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void ExceptionThrown(Thread* thread,
                        Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void Branch(Thread* thread,
               ArtMethod* method,
               uint32_t dex_pc,
               int32_t dex_pc_offset)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void InvokeVirtualOrInterface(Thread* thread,
                                 Handle<mirror::Object> this_object,
                                 ArtMethod* caller,
                                 uint32_t dex_pc,
                                 ArtMethod* callee)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
   void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
-      REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+      REQUIRES_SHARED(Locks::mutator_lock_) override;
   // Reuse an old stack trace if it exists, otherwise allocate a new one.
   static std::vector<ArtMethod*>* AllocStackTrace();
   // Clear and store an old stack trace for later use.
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 7adf140..de6edd2 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -39,7 +39,7 @@
 }  // namespace mirror
 class InternTable;
 
-class Transaction FINAL {
+class Transaction final {
  public:
   static constexpr const char* kAbortExceptionDescriptor = "dalvik.system.TransactionAbortError";
   static constexpr const char* kAbortExceptionSignature = "Ldalvik/system/TransactionAbortError;";
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index e67067c..e5e71a4 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -25,7 +25,7 @@
 namespace art {
 namespace verifier {
 
-class InstructionFlags FINAL {
+class InstructionFlags final {
  public:
   InstructionFlags() : flags_(0) {}
 
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 29da376..04a7dfb 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -378,11 +378,11 @@
 };
 
 // Bottom type.
-class ConflictType FINAL : public RegType {
+class ConflictType final : public RegType {
  public:
-  bool IsConflict() const OVERRIDE { return true; }
+  bool IsConflict() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the singleton Conflict instance.
   static const ConflictType* GetInstance() PURE;
@@ -396,7 +396,7 @@
   // Destroy the singleton instance.
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kConflict;
   }
 
@@ -414,11 +414,11 @@
 // A variant of the bottom type used to specify an undefined value in the
 // incoming registers.
 // Merging with UndefinedType yields ConflictType which is the true bottom.
-class UndefinedType FINAL : public RegType {
+class UndefinedType final : public RegType {
  public:
-  bool IsUndefined() const OVERRIDE { return true; }
+  bool IsUndefined() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the singleton Undefined instance.
   static const UndefinedType* GetInstance() PURE;
@@ -432,7 +432,7 @@
   // Destroy the singleton instance.
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 
@@ -453,7 +453,7 @@
                 const StringPiece& descriptor,
                 uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 };
 
 class Cat1Type : public PrimitiveType {
@@ -462,10 +462,10 @@
            uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
-class IntegerType FINAL : public Cat1Type {
+class IntegerType final : public Cat1Type {
  public:
-  bool IsInteger() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsInteger() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const IntegerType* CreateInstance(ObjPtr<mirror::Class> klass,
                                            const StringPiece& descriptor,
                                            uint16_t cache_id)
@@ -473,7 +473,7 @@
   static const IntegerType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kInteger;
   }
 
@@ -487,10 +487,10 @@
   static const IntegerType* instance_;
 };
 
-class BooleanType FINAL : public Cat1Type {
+class BooleanType final : public Cat1Type {
  public:
-  bool IsBoolean() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsBoolean() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const BooleanType* CreateInstance(ObjPtr<mirror::Class> klass,
                                            const StringPiece& descriptor,
                                            uint16_t cache_id)
@@ -498,7 +498,7 @@
   static const BooleanType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kBoolean;
   }
 
@@ -513,10 +513,10 @@
   static const BooleanType* instance_;
 };
 
-class ByteType FINAL : public Cat1Type {
+class ByteType final : public Cat1Type {
  public:
-  bool IsByte() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsByte() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const ByteType* CreateInstance(ObjPtr<mirror::Class> klass,
                                         const StringPiece& descriptor,
                                         uint16_t cache_id)
@@ -524,7 +524,7 @@
   static const ByteType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kByte;
   }
 
@@ -538,10 +538,10 @@
   static const ByteType* instance_;
 };
 
-class ShortType FINAL : public Cat1Type {
+class ShortType final : public Cat1Type {
  public:
-  bool IsShort() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsShort() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const ShortType* CreateInstance(ObjPtr<mirror::Class> klass,
                                          const StringPiece& descriptor,
                                          uint16_t cache_id)
@@ -549,7 +549,7 @@
   static const ShortType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kShort;
   }
 
@@ -562,10 +562,10 @@
   static const ShortType* instance_;
 };
 
-class CharType FINAL : public Cat1Type {
+class CharType final : public Cat1Type {
  public:
-  bool IsChar() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsChar() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const CharType* CreateInstance(ObjPtr<mirror::Class> klass,
                                         const StringPiece& descriptor,
                                         uint16_t cache_id)
@@ -573,7 +573,7 @@
   static const CharType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kChar;
   }
 
@@ -587,10 +587,10 @@
   static const CharType* instance_;
 };
 
-class FloatType FINAL : public Cat1Type {
+class FloatType final : public Cat1Type {
  public:
-  bool IsFloat() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsFloat() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
   static const FloatType* CreateInstance(ObjPtr<mirror::Class> klass,
                                          const StringPiece& descriptor,
                                          uint16_t cache_id)
@@ -598,7 +598,7 @@
   static const FloatType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kFloat;
   }
 
@@ -619,11 +619,11 @@
            uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
-class LongLoType FINAL : public Cat2Type {
+class LongLoType final : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsLongLo() const OVERRIDE { return true; }
-  bool IsLong() const OVERRIDE { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsLongLo() const override { return true; }
+  bool IsLong() const override { return true; }
   static const LongLoType* CreateInstance(ObjPtr<mirror::Class> klass,
                                           const StringPiece& descriptor,
                                           uint16_t cache_id)
@@ -631,7 +631,7 @@
   static const LongLoType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kLongLo;
   }
 
@@ -645,10 +645,10 @@
   static const LongLoType* instance_;
 };
 
-class LongHiType FINAL : public Cat2Type {
+class LongHiType final : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsLongHi() const OVERRIDE { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsLongHi() const override { return true; }
   static const LongHiType* CreateInstance(ObjPtr<mirror::Class> klass,
                                           const StringPiece& descriptor,
                                           uint16_t cache_id)
@@ -656,7 +656,7 @@
   static const LongHiType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 
@@ -670,11 +670,11 @@
   static const LongHiType* instance_;
 };
 
-class DoubleLoType FINAL : public Cat2Type {
+class DoubleLoType final : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsDoubleLo() const OVERRIDE { return true; }
-  bool IsDouble() const OVERRIDE { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsDoubleLo() const override { return true; }
+  bool IsDouble() const override { return true; }
   static const DoubleLoType* CreateInstance(ObjPtr<mirror::Class> klass,
                                             const StringPiece& descriptor,
                                             uint16_t cache_id)
@@ -682,7 +682,7 @@
   static const DoubleLoType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kDoubleLo;
   }
 
@@ -696,10 +696,10 @@
   static const DoubleLoType* instance_;
 };
 
-class DoubleHiType FINAL : public Cat2Type {
+class DoubleHiType final : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  virtual bool IsDoubleHi() const OVERRIDE { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  virtual bool IsDoubleHi() const override { return true; }
   static const DoubleHiType* CreateInstance(ObjPtr<mirror::Class> klass,
                                             const StringPiece& descriptor,
                                             uint16_t cache_id)
@@ -707,7 +707,7 @@
   static const DoubleHiType* GetInstance() PURE;
   static void Destroy();
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 
@@ -751,30 +751,30 @@
     }
   }
 
-  bool IsZero() const OVERRIDE {
+  bool IsZero() const override {
     return IsPreciseConstant() && ConstantValue() == 0;
   }
-  bool IsOne() const OVERRIDE {
+  bool IsOne() const override {
     return IsPreciseConstant() && ConstantValue() == 1;
   }
 
-  bool IsConstantChar() const OVERRIDE {
+  bool IsConstantChar() const override {
     return IsConstant() && ConstantValue() >= 0 &&
            ConstantValue() <= std::numeric_limits<uint16_t>::max();
   }
-  bool IsConstantByte() const OVERRIDE {
+  bool IsConstantByte() const override {
     return IsConstant() &&
            ConstantValue() >= std::numeric_limits<int8_t>::min() &&
            ConstantValue() <= std::numeric_limits<int8_t>::max();
   }
-  bool IsConstantShort() const OVERRIDE {
+  bool IsConstantShort() const override {
     return IsConstant() &&
            ConstantValue() >= std::numeric_limits<int16_t>::min() &&
            ConstantValue() <= std::numeric_limits<int16_t>::max();
   }
-  virtual bool IsConstantTypes() const OVERRIDE { return true; }
+  virtual bool IsConstantTypes() const override { return true; }
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 
@@ -782,7 +782,7 @@
   const uint32_t constant_;
 };
 
-class PreciseConstType FINAL : public ConstantType {
+class PreciseConstType final : public ConstantType {
  public:
   PreciseConstType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -790,94 +790,94 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsPreciseConstant() const OVERRIDE { return true; }
+  bool IsPreciseConstant() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class PreciseConstLoType FINAL : public ConstantType {
+class PreciseConstLoType final : public ConstantType {
  public:
   PreciseConstLoType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsPreciseConstantLo() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsPreciseConstantLo() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class PreciseConstHiType FINAL : public ConstantType {
+class PreciseConstHiType final : public ConstantType {
  public:
   PreciseConstHiType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsPreciseConstantHi() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsPreciseConstantHi() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class ImpreciseConstType FINAL : public ConstantType {
+class ImpreciseConstType final : public ConstantType {
  public:
   ImpreciseConstType(uint32_t constant, uint16_t cache_id)
        REQUIRES_SHARED(Locks::mutator_lock_)
        : ConstantType(constant, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsImpreciseConstant() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsImpreciseConstant() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class ImpreciseConstLoType FINAL : public ConstantType {
+class ImpreciseConstLoType final : public ConstantType {
  public:
   ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsImpreciseConstantLo() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsImpreciseConstantLo() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
-class ImpreciseConstHiType FINAL : public ConstantType {
+class ImpreciseConstHiType final : public ConstantType {
  public:
   ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {
     CheckConstructorInvariants(this);
   }
-  bool IsImpreciseConstantHi() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsImpreciseConstantHi() const override { return true; }
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kNotAssignable;
   }
 };
 
 // Special "null" type that captures the semantics of null / bottom.
-class NullType FINAL : public RegType {
+class NullType final : public RegType {
  public:
-  bool IsNull() const OVERRIDE {
+  bool IsNull() const override {
     return true;
   }
 
@@ -892,15 +892,15 @@
 
   static void Destroy();
 
-  std::string Dump() const OVERRIDE {
+  std::string Dump() const override {
     return "null";
   }
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 
-  bool IsConstantTypes() const OVERRIDE {
+  bool IsConstantTypes() const override {
     return true;
   }
 
@@ -925,15 +925,15 @@
                     uint16_t cache_id)
       : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
 
-  bool IsUninitializedTypes() const OVERRIDE;
-  bool IsNonZeroReferenceTypes() const OVERRIDE;
+  bool IsUninitializedTypes() const override;
+  bool IsNonZeroReferenceTypes() const override;
 
   uint32_t GetAllocationPc() const {
     DCHECK(IsUninitializedTypes());
     return allocation_pc_;
   }
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 
@@ -942,7 +942,7 @@
 };
 
 // Similar to ReferenceType but not yet having been passed to a constructor.
-class UninitializedReferenceType FINAL : public UninitializedType {
+class UninitializedReferenceType final : public UninitializedType {
  public:
   UninitializedReferenceType(ObjPtr<mirror::Class> klass,
                              const StringPiece& descriptor,
@@ -953,16 +953,16 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUninitializedReference() const OVERRIDE { return true; }
+  bool IsUninitializedReference() const override { return true; }
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 // Similar to UnresolvedReferenceType but not yet having been passed to a
 // constructor.
-class UnresolvedUninitializedRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedRefType final : public UninitializedType {
  public:
   UnresolvedUninitializedRefType(const StringPiece& descriptor,
                                  uint32_t allocation_pc, uint16_t cache_id)
@@ -971,19 +971,19 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUnresolvedAndUninitializedReference() const OVERRIDE { return true; }
+  bool IsUnresolvedAndUninitializedReference() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 };
 
 // Similar to UninitializedReferenceType but special case for the this argument
 // of a constructor.
-class UninitializedThisReferenceType FINAL : public UninitializedType {
+class UninitializedThisReferenceType final : public UninitializedType {
  public:
   UninitializedThisReferenceType(ObjPtr<mirror::Class> klass,
                                  const StringPiece& descriptor,
@@ -993,17 +993,17 @@
     CheckConstructorInvariants(this);
   }
 
-  virtual bool IsUninitializedThisReference() const OVERRIDE { return true; }
+  virtual bool IsUninitializedThisReference() const override { return true; }
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 };
 
-class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
+class UnresolvedUninitializedThisRefType final : public UninitializedType {
  public:
   UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
                                      uint16_t cache_id)
@@ -1012,19 +1012,19 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUnresolvedAndUninitializedThisReference() const OVERRIDE { return true; }
+  bool IsUnresolvedAndUninitializedThisReference() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 };
 
 // A type of register holding a reference to an Object of type GetClass or a
 // sub-class.
-class ReferenceType FINAL : public RegType {
+class ReferenceType final : public RegType {
  public:
   ReferenceType(ObjPtr<mirror::Class> klass,
                 const StringPiece& descriptor,
@@ -1033,15 +1033,15 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsReference() const OVERRIDE { return true; }
+  bool IsReference() const override { return true; }
 
-  bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+  bool IsNonZeroReferenceTypes() const override { return true; }
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 };
@@ -1049,22 +1049,22 @@
 // A type of register holding a reference to an Object of type GetClass and only
 // an object of that
 // type.
-class PreciseReferenceType FINAL : public RegType {
+class PreciseReferenceType final : public RegType {
  public:
   PreciseReferenceType(ObjPtr<mirror::Class> klass,
                        const StringPiece& descriptor,
                        uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool IsPreciseReference() const OVERRIDE { return true; }
+  bool IsPreciseReference() const override { return true; }
 
-  bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
+  bool IsNonZeroReferenceTypes() const override { return true; }
 
-  bool HasClassVirtual() const OVERRIDE { return true; }
+  bool HasClassVirtual() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 };
@@ -1076,9 +1076,9 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       : RegType(nullptr, descriptor, cache_id) {}
 
-  bool IsNonZeroReferenceTypes() const OVERRIDE;
+  bool IsNonZeroReferenceTypes() const override;
 
-  AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+  AssignmentType GetAssignmentTypeImpl() const override {
     return AssignmentType::kReference;
   }
 };
@@ -1086,7 +1086,7 @@
 // Similar to ReferenceType except the Class couldn't be loaded. Assignability
 // and other tests made
 // of this type must be conservative.
-class UnresolvedReferenceType FINAL : public UnresolvedType {
+class UnresolvedReferenceType final : public UnresolvedType {
  public:
   UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1094,18 +1094,18 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUnresolvedReference() const OVERRIDE { return true; }
+  bool IsUnresolvedReference() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 };
 
 // Type representing the super-class of an unresolved type.
-class UnresolvedSuperClass FINAL : public UnresolvedType {
+class UnresolvedSuperClass final : public UnresolvedType {
  public:
   UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
                        uint16_t cache_id)
@@ -1116,19 +1116,19 @@
     CheckConstructorInvariants(this);
   }
 
-  bool IsUnresolvedSuperClass() const OVERRIDE { return true; }
+  bool IsUnresolvedSuperClass() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
   uint16_t GetUnresolvedSuperClassChildId() const {
     DCHECK(IsUnresolvedSuperClass());
     return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
   }
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 
   const uint16_t unresolved_child_id_;
   const RegTypeCache* const reg_type_cache_;
@@ -1136,7 +1136,7 @@
 
 // A merge of unresolved (and resolved) types. If the types were resolved this may be
 // Conflict or another known ReferenceType.
-class UnresolvedMergedType FINAL : public UnresolvedType {
+class UnresolvedMergedType final : public UnresolvedType {
  public:
   // Note: the constructor will copy the unresolved BitVector, not use it directly.
   UnresolvedMergedType(const RegType& resolved,
@@ -1154,17 +1154,17 @@
     return unresolved_types_;
   }
 
-  bool IsUnresolvedMergedReference() const OVERRIDE { return true; }
+  bool IsUnresolvedMergedReference() const override { return true; }
 
-  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+  bool IsUnresolvedTypes() const override { return true; }
 
-  bool IsArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsObjectArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsObjectArrayTypes() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
-  std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+  std::string Dump() const override REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
+  void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) override;
 
   const RegTypeCache* const reg_type_cache_;
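
One detail worth noting in this header: a few of the rewritten declarations (for example 'virtual bool IsDoubleHi() const override') keep an explicit 'virtual' next to 'override'. That is legal but redundant, since 'override' already implies the function is virtual; the usual style guidance is to spell 'virtual' only on the first declaration and 'override' (or 'final') on redeclarations. A sketch of the two forms, with illustrative names:

struct RegTypeLike {
  virtual ~RegTypeLike() {}
  virtual bool IsDoubleHi() const { return false; }  // first declaration: 'virtual'
};

struct DoubleHiLike final : RegTypeLike {
  // Preferred: 'override' alone; adding 'virtual' here changes nothing.
  bool IsDoubleHi() const override { return true; }
};
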
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 15a38f3..0430d20 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -1042,7 +1042,7 @@
 
 class RegTypeOOMTest : public RegTypeTest {
  protected:
-  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
     SetUpRuntimeOptionsForFillHeap(options);
 
     // We must not appear to be a compiler, or we'll abort on the host.
diff --git a/simulator/code_simulator_arm64.h b/simulator/code_simulator_arm64.h
index 8b66529..e726500 100644
--- a/simulator/code_simulator_arm64.h
+++ b/simulator/code_simulator_arm64.h
@@ -36,11 +36,11 @@
   static CodeSimulatorArm64* CreateCodeSimulatorArm64();
   virtual ~CodeSimulatorArm64();
 
-  void RunFrom(intptr_t code_buffer) OVERRIDE;
+  void RunFrom(intptr_t code_buffer) override;
 
-  bool GetCReturnBool() const OVERRIDE;
-  int32_t GetCReturnInt32() const OVERRIDE;
-  int64_t GetCReturnInt64() const OVERRIDE;
+  bool GetCReturnBool() const override;
+  int32_t GetCReturnInt32() const override;
+  int64_t GetCReturnInt64() const override;
 
  private:
   CodeSimulatorArm64();
diff --git a/test/167-visit-locks/visit_locks.cc b/test/167-visit-locks/visit_locks.cc
index e79c880..8955f5a 100644
--- a/test/167-visit-locks/visit_locks.cc
+++ b/test/167-visit-locks/visit_locks.cc
@@ -42,7 +42,7 @@
         : StackVisitor(thread, context, StackWalkKind::kIncludeInlinedFrames) {
     }
 
-    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       ArtMethod* m = GetMethod();
 
       // Ignore runtime methods.
diff --git a/test/1945-proxy-method-arguments/get_args.cc b/test/1945-proxy-method-arguments/get_args.cc
index 211ae10..859e229 100644
--- a/test/1945-proxy-method-arguments/get_args.cc
+++ b/test/1945-proxy-method-arguments/get_args.cc
@@ -27,7 +27,7 @@
 namespace {
 
 // Visit a proxy method Quick frame at a given depth.
-class GetProxyQuickFrameVisitor FINAL : public StackVisitor {
+class GetProxyQuickFrameVisitor final : public StackVisitor {
  public:
   GetProxyQuickFrameVisitor(art::Thread* target, art::Context* ctx, size_t frame_depth)
       REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -36,7 +36,7 @@
         frame_depth_(frame_depth),
         quick_frame_(nullptr) {}
 
-  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (GetMethod()->IsRuntimeMethod()) {
       return true;
     }
diff --git a/test/203-multi-checkpoint/multi_checkpoint.cc b/test/203-multi-checkpoint/multi_checkpoint.cc
index 0799b6e..424e9f1 100644
--- a/test/203-multi-checkpoint/multi_checkpoint.cc
+++ b/test/203-multi-checkpoint/multi_checkpoint.cc
@@ -28,7 +28,7 @@
   bool second_run;
   bool second_run_interleaved;
 
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
     if (!first_run_start) {
       CHECK(!second_run);
@@ -62,7 +62,7 @@
 }
 
 struct SetupClosure : public Closure {
-  void Run(Thread* self) OVERRIDE {
+  void Run(Thread* self) override {
     CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
     ScopedObjectAccess soa(self);
     MutexLock tscl_mu(self, *Locks::thread_suspend_count_lock_);
diff --git a/test/305-other-fault-handler/fault_handler.cc b/test/305-other-fault-handler/fault_handler.cc
index 093a93f..93bb148 100644
--- a/test/305-other-fault-handler/fault_handler.cc
+++ b/test/305-other-fault-handler/fault_handler.cc
@@ -29,7 +29,7 @@
 
 namespace art {
 
-class TestFaultHandler FINAL : public FaultHandler {
+class TestFaultHandler final : public FaultHandler {
  public:
   explicit TestFaultHandler(FaultManager* manager)
       : FaultHandler(manager),
@@ -51,7 +51,7 @@
     manager_->RemoveHandler(this);
   }
 
-  bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+  bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) override {
     CHECK_EQ(sig, SIGSEGV);
     CHECK_EQ(reinterpret_cast<uint32_t*>(siginfo->si_addr),
              GetTargetPointer()) << "Segfault on unexpected address!";
diff --git a/test/616-cha-unloading/cha_unload.cc b/test/616-cha-unloading/cha_unload.cc
index b17be6b..b5166ce 100644
--- a/test/616-cha-unloading/cha_unload.cc
+++ b/test/616-cha-unloading/cha_unload.cc
@@ -35,7 +35,7 @@
   explicit FindPointerAllocatorVisitor(void* ptr) : is_found(false), ptr_(ptr) {}
 
   bool Visit(LinearAlloc* alloc)
-      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) override {
     is_found = alloc->Contains(ptr_);
     return !is_found;
   }
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index 57c0274..2a06a7b 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -87,7 +87,7 @@
     jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
                 jlong size ATTRIBUTE_UNUSED,
                 jlong* tag_ptr ATTRIBUTE_UNUSED,
-                jint length ATTRIBUTE_UNUSED) OVERRIDE {
+                jint length ATTRIBUTE_UNUSED) override {
       counter++;
       if (counter == stop_after) {
         return JVMTI_VISIT_ABORT;
@@ -120,7 +120,7 @@
     jintArray lengths) {
   class DataIterationConfig : public IterationConfig {
    public:
-    jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) OVERRIDE {
+    jint Handle(jlong class_tag, jlong size, jlong* tag_ptr, jint length) override {
       class_tags_.push_back(class_tag);
       sizes_.push_back(size);
       tags_.push_back(*tag_ptr);
@@ -164,7 +164,7 @@
     jint Handle(jlong class_tag ATTRIBUTE_UNUSED,
                 jlong size ATTRIBUTE_UNUSED,
                 jlong* tag_ptr,
-                jint length ATTRIBUTE_UNUSED) OVERRIDE {
+                jint length ATTRIBUTE_UNUSED) override {
       jlong current_tag = *tag_ptr;
       if (current_tag != 0) {
         *tag_ptr = current_tag + 10;
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index b07554c..b0e0f07 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -41,8 +41,6 @@
 
 using android::base::StringPrintf;
 
-#define FINAL final
-#define OVERRIDE override
 #define UNREACHABLE  __builtin_unreachable
 
 extern "C" JNIEXPORT void JNICALL Java_art_Test913_forceGarbageCollection(
@@ -144,7 +142,7 @@
     jint stop_after,
     jint follow_set,
     jobject jniRef) {
-  class PrintIterationConfig FINAL : public IterationConfig {
+  class PrintIterationConfig final : public IterationConfig {
    public:
     PrintIterationConfig(jint _stop_after, jint _follow_set)
         : counter_(0),
@@ -160,7 +158,7 @@
                 jlong* tag_ptr,
                 jlong* referrer_tag_ptr,
                 jint length,
-                void* user_data ATTRIBUTE_UNUSED) OVERRIDE {
+                void* user_data ATTRIBUTE_UNUSED) override {
       jlong tag = *tag_ptr;
 
       // Ignore any jni-global roots with untagged classes. These can be from the environment,
@@ -303,7 +301,7 @@
       }
 
      protected:
-      std::string PrintArrowType() const OVERRIDE {
+      std::string PrintArrowType() const override {
         char* name = nullptr;
         if (info_.jni_local.method != nullptr) {
           jvmti_env->GetMethodName(info_.jni_local.method, &name, nullptr, nullptr);
@@ -349,7 +347,7 @@
       }
 
      protected:
-      std::string PrintArrowType() const OVERRIDE {
+      std::string PrintArrowType() const override {
         char* name = nullptr;
         if (info_.stack_local.method != nullptr) {
           jvmti_env->GetMethodName(info_.stack_local.method, &name, nullptr, nullptr);
@@ -391,7 +389,7 @@
           : Elem(referrer, referree, size, length), string_(string) {}
 
      protected:
-      std::string PrintArrowType() const OVERRIDE {
+      std::string PrintArrowType() const override {
         return string_;
       }
 
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index 192274e..97a589f 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -77,7 +77,7 @@
         prev_was_runtime_(true),
         require_deoptable_(require_deoptable) {}
 
-  virtual bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+  virtual bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (goal_ == GetMethod()) {
       method_is_interpreted_ = (require_deoptable_ && prev_was_runtime_) || IsShadowFrame();
       method_found_ = true;
diff --git a/test/ti-agent/ti_macros.h b/test/ti-agent/ti_macros.h
index d913383..a871270 100644
--- a/test/ti-agent/ti_macros.h
+++ b/test/ti-agent/ti_macros.h
@@ -19,8 +19,6 @@
 
 #include "android-base/macros.h"
 
-#define FINAL final
-#define OVERRIDE override
 #define UNREACHABLE  __builtin_unreachable
 
 #endif  // ART_TEST_TI_AGENT_TI_MACROS_H_
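
Because the removed definitions were plain one-to-one aliases, the substitution is purely mechanical and the keywords behave identically wherever the macros appeared. Spelling the specifiers directly also sidesteps the usual macro hazards (include-order dependence, clashes with an unrelated FINAL or OVERRIDE elsewhere) and keeps the code visible to tooling such as clang-tidy's modernize-use-override check. A before/after sketch:

#define OVERRIDE override  // the removed alias, reproduced for illustration

struct Handler {
  virtual ~Handler() {}
  virtual void Run() {}
};

// Before this change: the keyword hidden behind a macro.
struct LoggingHandler : Handler {
  void Run() OVERRIDE {}
};

// After: the specifier spelled directly; no macro needs to be in scope.
struct TracingHandler : Handler {
  void Run() override {}
};
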
diff --git a/tools/art_verifier/art_verifier.cc b/tools/art_verifier/art_verifier.cc
index fc62410..500c1c5 100644
--- a/tools/art_verifier/art_verifier.cc
+++ b/tools/art_verifier/art_verifier.cc
@@ -93,7 +93,7 @@
   using Base = CmdlineArgs;
 
   virtual ParseStatus ParseCustom(const StringPiece& option,
-                                  std::string* error_msg) OVERRIDE {
+                                  std::string* error_msg) override {
     {
       ParseStatus base_parse = Base::ParseCustom(option, error_msg);
       if (base_parse != kParseUnknownArgument) {
@@ -119,7 +119,7 @@
     return kParseOk;
   }
 
-  virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+  virtual ParseStatus ParseChecks(std::string* error_msg) override {
     // Perform the parent checks.
     ParseStatus parent_checks = Base::ParseChecks(error_msg);
     if (parent_checks != kParseOk) {
@@ -166,16 +166,16 @@
 };
 
 struct MethodVerifierMain : public CmdlineMain<MethodVerifierArgs> {
-  bool NeedsRuntime() OVERRIDE {
+  bool NeedsRuntime() override {
     return true;
   }
 
-  bool ExecuteWithoutRuntime() OVERRIDE {
+  bool ExecuteWithoutRuntime() override {
     LOG(FATAL) << "Unreachable";
     UNREACHABLE();
   }
 
-  bool ExecuteWithRuntime(Runtime* runtime) OVERRIDE {
+  bool ExecuteWithRuntime(Runtime* runtime) override {
     CHECK(args_ != nullptr);
 
     const size_t dex_reps = args_->dex_file_verifier_
diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h
index 3542d95..55d2f44 100644
--- a/tools/dexanalyze/dexanalyze_experiments.h
+++ b/tools/dexanalyze/dexanalyze_experiments.h
@@ -65,8 +65,8 @@
 // Analyze debug info sizes.
 class AnalyzeDebugInfo  : public Experiment {
  public:
-  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
-  void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
+  void Dump(std::ostream& os, uint64_t total_size) const override;
 
  private:
   int64_t total_bytes_ = 0u;
@@ -91,8 +91,8 @@
 // Count numbers of dex indices.
 class CountDexIndices : public Experiment {
  public:
-  void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
-  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
+  void ProcessDexFile(const DexFile& dex_file) override;
+  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
 
   void Dump(std::ostream& os, uint64_t total_size) const;
 
@@ -162,9 +162,9 @@
 // Measure various code metrics including args per invoke-virtual, fill/spill move patterns.
 class CodeMetrics : public Experiment {
  public:
-  void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
+  void ProcessDexFile(const DexFile& dex_file) override;
 
-  void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+  void Dump(std::ostream& os, uint64_t total_size) const override;
 
  private:
   static constexpr size_t kMaxArgCount = 6;
diff --git a/tools/dexanalyze/dexanalyze_strings.h b/tools/dexanalyze/dexanalyze_strings.h
index a5c202e..3559afa 100644
--- a/tools/dexanalyze/dexanalyze_strings.h
+++ b/tools/dexanalyze/dexanalyze_strings.h
@@ -32,8 +32,8 @@
 // Analyze string data and strings accessed from code.
 class AnalyzeStrings : public Experiment {
  public:
-  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
-  void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
+  void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
+  void Dump(std::ostream& os, uint64_t total_size) const override;
 
  private:
   int64_t wide_string_bytes_ = 0u;
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index bf8a1b7..6d9b6fb 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -235,7 +235,7 @@
   const bool is_method_;
 };
 
-class ClassPath FINAL {
+class ClassPath final {
  public:
   ClassPath(const std::vector<std::string>& dex_paths, bool open_writable) {
     OpenDexFiles(dex_paths, open_writable);
@@ -316,7 +316,7 @@
   std::vector<std::unique_ptr<const DexFile>> dex_files_;
 };
 
-class HierarchyClass FINAL {
+class HierarchyClass final {
  public:
   HierarchyClass() {}
 
@@ -455,7 +455,7 @@
   std::vector<HierarchyClass*> extended_by_;
 };
 
-class Hierarchy FINAL {
+class Hierarchy final {
  public:
   explicit Hierarchy(ClassPath& classpath) : classpath_(classpath) {
     BuildClassHierarchy();
@@ -559,7 +559,7 @@
   std::map<std::string, HierarchyClass> classes_;
 };
 
-class HiddenApi FINAL {
+class HiddenApi final {
  public:
   HiddenApi() {}
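
ClassPath, HierarchyClass, Hierarchy, and HiddenApi are concrete classes with no virtual interface; 'final' simply documents and enforces that nothing derives from them. A minimal sketch with a hypothetical stand-in class:

    class DexIndex final {  // hypothetical stand-in for ClassPath et al.
     public:
      int Size() const { return 0; }
    };

    // Any attempted derivation is a compile-time error:
    //   class ExtendedIndex : public DexIndex {};  // error: base is 'final'
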
 
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
index ed6ac3d..4ea5b2d 100644
--- a/tools/tracefast-plugin/tracefast.cc
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -38,7 +38,7 @@
 static constexpr bool kNeedsInterpreter = false;
 #endif  // TRACEFAST_INITERPRETER
 
-class Tracer FINAL : public art::instrumentation::InstrumentationListener {
+class Tracer final : public art::instrumentation::InstrumentationListener {
  public:
   Tracer() {}
 
@@ -46,40 +46,40 @@
                      art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                      art::ArtMethod* method ATTRIBUTE_UNUSED,
                      uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> return_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     const art::JValue& return_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void MethodUnwind(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void DexPcMoved(art::Thread* thread ATTRIBUTE_UNUSED,
                   art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                   art::ArtMethod* method ATTRIBUTE_UNUSED,
                   uint32_t new_dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void FieldRead(art::Thread* thread ATTRIBUTE_UNUSED,
                  art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                  art::ArtMethod* method ATTRIBUTE_UNUSED,
                  uint32_t dex_pc ATTRIBUTE_UNUSED,
                  art::ArtField* field ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
@@ -87,7 +87,7 @@
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     art::ArtField* field ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> field_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
@@ -95,32 +95,32 @@
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     art::ArtField* field ATTRIBUTE_UNUSED,
                     const art::JValue& field_value ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void ExceptionThrown(art::Thread* thread ATTRIBUTE_UNUSED,
                        art::Handle<art::mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void ExceptionHandled(art::Thread* self ATTRIBUTE_UNUSED,
                         art::Handle<art::mirror::Throwable> throwable ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void Branch(art::Thread* thread ATTRIBUTE_UNUSED,
               art::ArtMethod* method ATTRIBUTE_UNUSED,
               uint32_t dex_pc ATTRIBUTE_UNUSED,
               int32_t dex_pc_offset ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void InvokeVirtualOrInterface(art::Thread* thread ATTRIBUTE_UNUSED,
                                 art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                                 art::ArtMethod* caller ATTRIBUTE_UNUSED,
                                 uint32_t dex_pc ATTRIBUTE_UNUSED,
                                 art::ArtMethod* callee ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void WatchedFramePop(art::Thread* thread ATTRIBUTE_UNUSED,
                        const art::ShadowFrame& frame ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Tracer);
@@ -149,7 +149,7 @@
   TraceFastPhaseCB() {}
 
   void NextRuntimePhase(art::RuntimePhaseCallback::RuntimePhase phase)
-      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     if (phase == art::RuntimePhaseCallback::RuntimePhase::kInit) {
       art::ScopedThreadSuspension sts(art::Thread::Current(),
                                       art::ThreadState::kWaitingForMethodTracingStart);
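
In listener classes like Tracer, the specifier shares the declaration with ART's thread-safety annotation macros, in the order parameter list, then 'override', then the attribute. A compilable sketch using stand-ins modeled on Clang's thread-safety attributes (not ART's real REQUIRES_SHARED definition):

    // Hypothetical stand-ins; ART's real macros live in libartbase.
    struct __attribute__((capability("mutex"))) FakeLock {};
    extern FakeLock gMutatorLock;
    #define REQUIRES_SHARED_SKETCH(x) \
      __attribute__((requires_shared_capability(x)))

    struct Listener {
      virtual ~Listener() = default;
      virtual void MethodEntered() REQUIRES_SHARED_SKETCH(gMutatorLock) {}
    };

    struct NullListener final : Listener {
      // Same shape as Tracer's methods: 'override' sits where the OVERRIDE
      // macro used to, followed by the lock annotation.
      void MethodEntered() override REQUIRES_SHARED_SKETCH(gMutatorLock) {}
    };
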
diff --git a/tools/veridex/flow_analysis.h b/tools/veridex/flow_analysis.h
index 9c86024..865b9df 100644
--- a/tools/veridex/flow_analysis.h
+++ b/tools/veridex/flow_analysis.h
@@ -192,8 +192,8 @@
     return uses_;
   }
 
-  RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
-  void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+  RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) override;
+  void AnalyzeFieldSet(const Instruction& instruction) override;
 
  private:
   // List of reflection uses found, concrete and abstract.
@@ -212,8 +212,8 @@
     return uses_;
   }
 
-  RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
-  void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+  RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) override;
+  void AnalyzeFieldSet(const Instruction& instruction) override;
 
  private:
   // List of reflection uses found, concrete and abstract.
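
Beyond diagnostics, the specifiers feed the optimizer: a virtual call made through a pointer to a 'final' class (or to a 'final' member function) cannot dispatch to any further override, so the compiler may devirtualize it. A hedged sketch with illustrative names:

    struct Analysis {
      virtual ~Analysis() = default;
      virtual int AnalyzeInvoke() { return 0; }
    };

    struct PreciseAnalysis final : Analysis {
      int AnalyzeInvoke() override { return 1; }
    };

    int Run(PreciseAnalysis* analysis) {
      // 'PreciseAnalysis' is final, so the dynamic type is known exactly and
      // the compiler may call PreciseAnalysis::AnalyzeInvoke directly,
      // skipping the vtable dispatch.
      return analysis->AnalyzeInvoke();
    }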