ART: Rename SHARED_REQUIRES to REQUIRES_SHARED

This matches the actual Clang thread-safety attribute name and upstream usage.
Preparation for deferring to the libbase definitions.
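
For context, a minimal sketch of the macros involved, modeled on the helper
definitions in the Clang thread-safety analysis documentation
(THREAD_ANNOTATION_ATTRIBUTE__ is the documentation's helper name; ART's
actual definitions live elsewhere and may differ in detail):

  // Sketch only; assumes Clang-doc-style helpers, not ART's verbatim source.
  #if defined(__clang__)
  #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
  #else
  #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op on other compilers
  #endif

  // Old spelling, with the words flipped relative to the attribute:
  #define SHARED_REQUIRES(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

  // New spelling, matching the attribute and the upstream macro name:
  #define REQUIRES_SHARED(...) \
      THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

  // Typical use (taken from this change): callers must hold the mutator
  // lock, shared or exclusive, when calling this method.
  void MakeExecutable(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

Both spellings expand to the same requires_shared_capability attribute, so
the rename is purely mechanical; under -Wthread-safety Clang keeps warning
at call sites that hold Locks::mutator_lock_ neither shared nor exclusive.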

Test: m
Test: m test-art-host
Change-Id: Ia8986b5dfd926ba772bf00b0a35eaf83596d8518
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index c942375..5239121 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -47,12 +47,12 @@
   // Create an OatMethod based on pointers (for unit tests).
   OatFile::OatMethod CreateOatMethod(const void* code);
 
-  void MakeExecutable(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+  void MakeExecutable(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
 
   static void MakeExecutable(const void* code_start, size_t code_length);
 
   void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
   virtual void SetUp();
@@ -81,17 +81,17 @@
   virtual void TearDown();
 
   void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void CompileMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+  void CompileMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
 
   void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                            const char* method_name, const char* signature)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                             const char* method_name, const char* signature)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa, size_t number_of_threads = 2U);
 
diff --git a/compiler/compiler.h b/compiler/compiler.h
index a955f3c..9e5fb83 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -69,12 +69,12 @@
                           jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
                           ArtMethod* method ATTRIBUTE_UNUSED,
                           bool osr ATTRIBUTE_UNUSED)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return false;
   }
 
   virtual uintptr_t GetEntryPointOf(ArtMethod* method) const
-     SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+     REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 
   uint64_t GetMaximumCompilationTimeBeforeWarning() const {
     return maximum_compilation_time_before_warning_;
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index e8e278d..0a4f094 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -275,7 +275,7 @@
     owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
   }
 
-  void Write(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void Write(const ArrayRef<mirror::Class*>& types) REQUIRES_SHARED(Locks::mutator_lock_) {
     using namespace dwarf;  // NOLINT. For easy access to DWARF constants.
 
     info_.StartTag(DW_TAG_compile_unit);
@@ -466,7 +466,7 @@
   // Linkage name uniquely identifies type.
   // It is used to determine the dynamic type of objects.
   // We use the methods_ field of the class since it is unique and it is not moved by the GC.
-  void WriteLinkageName(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void WriteLinkageName(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
     auto* methods_ptr = type->GetMethodsPtr();
     if (methods_ptr == nullptr) {
       // Some types might have no methods.  Allocate empty array instead.
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index b7e000a..5bfdd16 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -145,7 +145,7 @@
     InstructionSet isa,
     const InstructionSetFeatures* features,
     const ArrayRef<mirror::Class*>& types)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   std::vector<uint8_t> buffer;
   buffer.reserve(KB);
   VectorOutputStream out("Debug ELF file", &buffer);
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index 6f52249..b0542c7 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -56,7 +56,7 @@
     InstructionSet isa,
     const InstructionSetFeatures* features,
     const ArrayRef<mirror::Class*>& types)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
 std::vector<MethodDebugInfo> MakeTrampolineInfos(const OatHeader& oat_header);
 
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index dbdfa24..43fc687 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -64,7 +64,7 @@
      * @return true if the method is a candidate for inlining, false otherwise.
      */
     bool AnalyseMethodCode(verifier::MethodVerifier* verifier)
-        SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
 
     /**
      * Check whether a particular method index corresponds to an intrinsic or special function.
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 4f5ea76..1f69686 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,7 +38,7 @@
     ~QuickCompilerCallbacks() { }
 
     void MethodVerified(verifier::MethodVerifier* verifier)
-        SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+        REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
 
     void ClassRejected(ClassReference ref) OVERRIDE;
 
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 1af11a8..6afd1ab 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -43,7 +43,7 @@
     ~VerificationResults();
 
     void ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier)
-        SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES_SHARED(Locks::mutator_lock_)
         REQUIRES(!verified_methods_lock_);
 
     const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 495acf0..04331e5 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -44,7 +44,7 @@
   typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
 
   static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   ~VerifiedMethod() = default;
 
   const DevirtualizationMap& GetDevirtMap() const {
@@ -100,15 +100,15 @@
 
   // Generate devirtualization map into devirt_map_.
   void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Generate dequickening map into dequicken_map_. Returns false if there is an error.
   bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Generate safe cast set into safe_cast_set_.
   void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   DevirtualizationMap devirt_map_;
   // Dequicken map is required for compiling quickened byte codes. The quicken maps from
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 77ec4b7..39f01d8 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -492,7 +492,7 @@
 static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
     Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
     const DexFile& dex_file, const DexFile::ClassDef& class_def)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   auto* const runtime = Runtime::Current();
   if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) {
     // Verify at runtime shouldn't dex to dex since we didn't resolve or verify.
@@ -1026,7 +1026,7 @@
       std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
      : exceptions_to_resolve_(exceptions_to_resolve) {}
 
-  virtual bool operator()(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+  virtual bool operator()(mirror::Class* c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
     for (auto& m : c->GetMethods(pointer_size)) {
       ResolveExceptionsForMethod(&m, pointer_size);
@@ -1036,7 +1036,7 @@
 
  private:
   void ResolveExceptionsForMethod(ArtMethod* method_handle, PointerSize pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
     if (code_item == nullptr) {
       return;  // native or abstract method
@@ -1080,7 +1080,7 @@
   explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes)
       : image_classes_(image_classes) {}
 
-  bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     std::string temp;
     image_classes_->insert(klass->GetDescriptor(&temp));
     return true;
@@ -1161,7 +1161,7 @@
 
 static void MaybeAddToImageClasses(Handle<mirror::Class> c,
                                    std::unordered_set<std::string>* image_classes)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   Thread* self = Thread::Current();
   StackHandleScope<1> hs(self);
   // Make a copy of the handle so that we don't clobber it doing Assign.
@@ -1216,7 +1216,7 @@
 
   // Visitor for VisitReferences.
   void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
     if (ref != nullptr) {
       VisitClinitClassesObject(ref);
@@ -1232,7 +1232,7 @@
       const {}
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
 
-  void Walk() SHARED_REQUIRES(Locks::mutator_lock_) {
+  void Walk() REQUIRES_SHARED(Locks::mutator_lock_) {
     // Use the initial classes as roots for a search.
     for (mirror::Class* klass_root : image_classes_) {
       VisitClinitClassesObject(klass_root);
@@ -1244,7 +1244,7 @@
    public:
     explicit FindImageClassesVisitor(ClinitImageUpdate* data) : data_(data) {}
 
-    bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string temp;
       const char* name = klass->GetDescriptor(&temp);
       if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
@@ -1265,7 +1265,7 @@
 
   ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
                     ClassLinker* linker)
-      SHARED_REQUIRES(Locks::mutator_lock_) :
+      REQUIRES_SHARED(Locks::mutator_lock_) :
       image_class_descriptors_(image_class_descriptors), self_(self) {
     CHECK(linker != nullptr);
     CHECK(image_class_descriptors != nullptr);
@@ -1284,7 +1284,7 @@
   }
 
   void VisitClinitClassesObject(mirror::Object* object) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(object != nullptr);
     if (marked_objects_.find(object) != marked_objects_.end()) {
       // Already processed.
@@ -1983,7 +1983,7 @@
 // A fast version of SkipClass above if the class pointer is available
 // that avoids the expensive FindInClassPath search.
 static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(klass != nullptr);
   const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile();
   if (&dex_file != &original_dex_file) {
@@ -1998,7 +1998,7 @@
 }
 
 static void CheckAndClearResolveException(Thread* self)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   CHECK(self->IsExceptionPending());
   mirror::Throwable* exception = self->GetException();
   std::string temp;
@@ -2529,7 +2529,7 @@
 
 class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
  public:
-  virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+  virtual bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
       return true;
     }
@@ -2546,7 +2546,7 @@
   }
 
  private:
-  void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void FillIMTAndConflictTables(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!klass->ShouldHaveImt()) {
       return;
     }
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 2dd4651..fbc1edd 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -125,7 +125,7 @@
 
   // Compile a single Method.
   void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
-      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
 
   VerificationResults* GetVerificationResults() const {
@@ -199,7 +199,7 @@
 
   bool CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> dex_cache,
                                         uint32_t type_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
       REQUIRES(!Locks::mutator_lock_);
@@ -208,7 +208,7 @@
   bool CanAccessTypeWithoutChecks(uint32_t referrer_idx,
                                   Handle<mirror::DexCache> dex_cache,
                                   uint32_t type_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Are runtime access and instantiable checks necessary in the code?
   // out_is_finalizable is set to whether the type is finalizable.
@@ -216,7 +216,7 @@
                                               Handle<mirror::DexCache> dex_cache,
                                               uint32_t type_idx,
                                               bool* out_is_finalizable)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
                           bool* is_type_initialized, bool* use_direct_type_ptr,
@@ -230,23 +230,23 @@
 
   // Get the DexCache for the DexCompilationUnit.
   mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
                                       const DexCompilationUnit* mUnit)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Resolve compiling method's class. Returns null on failure.
   mirror::Class* ResolveCompilingMethodsClass(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   mirror::Class* ResolveClass(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, uint16_t type_index,
       const DexCompilationUnit* mUnit)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Resolve a field. Returns null on failure, including incompatible class change.
   // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
@@ -254,40 +254,40 @@
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
       uint32_t field_idx, bool is_static)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Resolve a field with a given dex file.
   ArtField* ResolveFieldWithDexFile(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
       uint32_t field_idx, bool is_static)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get declaration location of a resolved field.
   void GetResolvedFieldDexFileLocation(
       ArtField* resolved_field, const DexFile** declaring_dex_file,
       uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
-  MemberOffset GetFieldOffset(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsFieldVolatile(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
+  MemberOffset GetFieldOffset(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Find a dex cache for a dex file.
   inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
   std::pair<bool, bool> IsFastInstanceField(
       mirror::DexCache* dex_cache, mirror::Class* referrer_class,
       ArtField* resolved_field, uint16_t field_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
   // of the declaring class in the referrer's dex file.
   std::pair<bool, bool> IsFastStaticField(
       mirror::DexCache* dex_cache, mirror::Class* referrer_class,
       ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return whether the declaring class of `resolved_method` is
   // available to `referrer_class`. If this is true, compute the type
@@ -299,34 +299,34 @@
                                                 ArtMethod* resolved_method,
                                                 uint16_t method_idx,
                                                 uint32_t* storage_index)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Is the static field in the referrer's class?
   bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Is the static field's class initialized?
   bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
                                       ArtField* resolved_field)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Resolve a method. Returns null on failure, including incompatible class change.
   ArtMethod* ResolveMethod(
       ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
       uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get declaration location of a resolved method.
   void GetResolvedMethodDexFileLocation(
       ArtMethod* resolved_method, const DexFile** declaring_dex_file,
       uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the index in the vtable of the method.
   uint16_t GetResolvedMethodVTableIndex(
       ArtMethod* resolved_method, InvokeType type)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
   // for ProcessedInvoke() and computes the necessary lowering info.
@@ -336,13 +336,13 @@
       mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
       MethodReference* target_method, const MethodReference* devirt_target,
       uintptr_t* direct_code, uintptr_t* direct_method)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Is method's class initialized for an invoke?
   // For static invokes to determine whether we need to consider potential call to <clinit>().
   // For non-static invokes, assuming a non-null reference, the class is always initialized.
   bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the layout of dex cache arrays for a dex file. Returns invalid layout if the
   // dex cache arrays don't have a fixed layout.
@@ -357,7 +357,7 @@
                         ArtField** resolved_field,
                         mirror::Class** referrer_class,
                         mirror::DexCache** dex_cache)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Can we fast path instance field access? Computes field's offset and volatility.
   bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
@@ -368,7 +368,7 @@
                                              const DexCompilationUnit* mUnit,
                                              bool is_put,
                                              const ScopedObjectAccess& soa)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
 
   // Can we fast-path an interface, super class or virtual method call? Computes method's vtable
@@ -467,7 +467,7 @@
 
   // Can we assume that the klass is loaded?
   bool CanAssumeClassIsLoaded(mirror::Class* klass)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool MayInline(const DexFile* inlined_from, const DexFile* inlined_into) const {
     if (!kIsTargetBuild) {
@@ -497,7 +497,7 @@
                                                                  ArtMember* resolved_member,
                                                                  uint16_t member_idx,
                                                                  uint32_t* storage_index)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Can `referrer_class` access the resolved `member`?
   // Dispatch call to mirror::Class::CanAccessResolvedField or
@@ -509,13 +509,13 @@
                                       ArtMember* member,
                                       mirror::DexCache* dex_cache,
                                       uint32_t field_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Can we assume that the klass is initialized?
   bool CanAssumeClassIsInitialized(mirror::Class* klass)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
   // The only external contract is that unresolved method has flags 0 and resolved non-0.
@@ -546,7 +546,7 @@
                                      /*out*/int* stats_flags,
                                      MethodReference* target_method,
                                      uintptr_t* direct_code, uintptr_t* direct_method)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   void PreCompile(jobject class_loader,
@@ -605,7 +605,7 @@
 
   void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
   static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void Compile(jobject class_loader,
                const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index bb45999..7634510 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -111,7 +111,7 @@
 }
 
 static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   Class* klass = obj->GetClass();
   CHECK_NE(PrettyClass(klass), "com.android.dex.Dex");
 }
@@ -687,7 +687,7 @@
 
 class ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
  public:
-  bool operator()(Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool operator()(Class* c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(Thread::Current());
     mirror::Class::ComputeName(hs.NewHandle(c));
     return true;
@@ -700,7 +700,7 @@
   class_linker->VisitClassesWithoutClassesLock(&visitor);
 }
 
-static bool IsBootClassLoaderClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+static bool IsBootClassLoaderClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
   return klass->GetClassLoader() == nullptr;
 }
 
@@ -828,7 +828,7 @@
  public:
   explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
 
-  bool operator()(Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool operator()(Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!image_writer_->KeepClass(klass)) {
       classes_to_prune_.insert(klass);
     }
@@ -1603,7 +1603,7 @@
   }
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       *roots[i] = image_writer_->GetImageAddress(*roots[i]);
     }
@@ -1611,7 +1611,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr()));
     }
@@ -1864,7 +1864,7 @@
 
   // java.lang.ref.Reference visitor.
   void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
     copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
         mirror::Reference::ReferentOffset(),
         image_writer_->GetImageAddress(ref->GetReferent()));
@@ -1888,7 +1888,7 @@
 
   void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
                   mirror::Reference* ref ATTRIBUTE_UNUSED) const
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
     LOG(FATAL) << "Reference not expected here.";
   }
 };
@@ -1904,14 +1904,14 @@
 }
 
 template <typename T>
-std::string PrettyPrint(T* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+std::string PrettyPrint(T* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
   std::ostringstream oss;
   oss << ptr;
   return oss.str();
 }
 
 template <>
-std::string PrettyPrint(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+std::string PrettyPrint(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
   return PrettyMethod(method);
 }
 
@@ -1945,7 +1945,7 @@
   explicit NativeLocationVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
 
   template <typename T>
-  T* operator()(T* ptr) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
     return image_writer_->NativeLocationInImage(ptr);
   }
 
@@ -2023,7 +2023,7 @@
   explicit ImageAddressVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
 
   template <typename T>
-  T* operator()(T* ptr) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
     return image_writer_->GetImageAddress(ptr);
   }
 
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 7d13656..76749cf 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -77,7 +77,7 @@
   }
 
   template <typename T>
-  T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
     if (object == nullptr || IsInBootImage(object)) {
       return object;
     } else {
@@ -87,11 +87,11 @@
     }
   }
 
-  ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+  ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <typename PtrType>
   PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
-      const SHARED_REQUIRES(Locks::mutator_lock_) {
+      const REQUIRES_SHARED(Locks::mutator_lock_) {
     auto oat_it = dex_file_oat_index_map_.find(dex_file);
     DCHECK(oat_it != dex_file_oat_index_map_.end());
     const ImageInfo& image_info = GetImageInfo(oat_it->second);
@@ -132,7 +132,7 @@
 
   // Get the index of the oat file containing the dex file served by the dex cache.
   size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Update the oat layout for the given oat file.
   // This will make the oat_offset for the next oat file valid.
@@ -147,7 +147,7 @@
   bool AllocMemory();
 
   // Mark the objects defined in this space in the given live bitmap.
-  void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
+  void RecordImageAllocations() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Classify different kinds of bins that objects end up getting packed into during image writing.
   // Ordered from dirtiest to cleanest (until ArtMethods).
@@ -311,34 +311,34 @@
 
   // We use the lock word to store the offset of the object in the image.
   void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void SetImageOffset(mirror::Object* object, size_t offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsImageOffsetAssigned(mirror::Object* object) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
   void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
-  void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+  void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
+  void AssignImageBinSlot(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
   void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsImageBinSlotAssigned(mirror::Object* object) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
 
   void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  void AddMethodPointerArray(mirror::PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_);
 
   static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
   }
 
   mirror::Object* GetLocalAddress(mirror::Object* object) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     size_t offset = GetImageOffset(object);
     size_t oat_index = GetOatIndex(object);
     const ImageInfo& image_info = GetImageInfo(oat_index);
@@ -358,94 +358,94 @@
   }
 
   // Returns true if the class was in the original requested image classes list.
-  bool KeepClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool KeepClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Debug aid that lists the requested image classes.
   void DumpImageClasses();
 
   // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
   void ComputeLazyFieldsForImageClasses()
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Remove unwanted classes from various roots.
-  void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
+  void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Verify unwanted classes removed.
-  void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
+  void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
   static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Lays out where the image objects will be at runtime.
   void CalculateNewObjectOffsets()
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void CreateHeader(size_t oat_index)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void CalculateObjectBinSlots(mirror::Object* obj)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void UnbinObjectsIntoOffset(mirror::Object* obj)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void WalkFieldsInOrder(mirror::Object* obj)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   static void WalkFieldsCallback(mirror::Object* obj, void* arg)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Creates the contiguous image in memory and adjusts pointers.
-  void CopyAndFixupNativeData(size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
-  void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
+  void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
   static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  void CopyAndFixupImTable(ImTable* orig, ImTable* copy) REQUIRES_SHARED(Locks::mutator_lock_);
   void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupClass(mirror::Class* orig, mirror::Class* copy)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupObject(mirror::Object* orig, mirror::Object* copy)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupPointerArray(mirror::Object* dst,
                          mirror::PointerArray* arr,
                          mirror::Class* klass,
                          Bin array_type)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get quick code for non-resolution/imt_conflict/abstract method.
   const uint8_t* GetQuickCode(ArtMethod* method,
                               const ImageInfo& image_info,
                               bool* quick_is_interpreted)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
   size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
 
   // Return true if a method is likely to be dirtied at runtime.
-  bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
+  bool WillMethodBeDirty(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Assign the offset for an ArtMethod.
   void AssignMethodOffset(ArtMethod* method,
                           NativeObjectRelocationType type,
                           size_t oat_index)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
+  void TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
   // relocation.
   void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return true if klass is loaded by the boot class loader but not in the boot image.
-  bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return true if klass depends on a boot class loader non-image class. We want to prune these
   // classes since we do not want any boot class loader classes in the image. This means that
@@ -453,25 +453,25 @@
   // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
   // driver.
   bool PruneAppImageClass(mirror::Class* klass)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // early_exit is true if we had a cyclic dependency anywhere down the chain.
   bool PruneAppImageClassInternal(mirror::Class* klass,
                                   bool* early_exit,
                                   std::unordered_set<mirror::Class*>* visited)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
 
-  uintptr_t NativeOffsetInImage(void* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+  uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Location of where the object will be when the image is loaded at runtime.
   template <typename T>
-  T* NativeLocationInImage(T* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+  T* NativeLocationInImage(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Location of where the temporary copy of the object currently is.
   template <typename T>
-  T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
+  T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return true if obj is inside the boot image space. This may only return true if we are
   // compiling an app image.
@@ -481,7 +481,7 @@
   bool IsInBootOatFile(const void* ptr) const;
 
   // Get the index of the oat file associated with the object.
-  size_t GetOatIndex(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+  size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The oat index for shared data in multi-image and all data in single-image compilation.
   size_t GetDefaultOatIndex() const {
@@ -498,7 +498,7 @@
 
   // Find an already strongly interned string in the other images or in the boot image. Used to
   // remove duplicates in the multi-image and app image case.
-  mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return true if there already exists a native allocation for an object.
   bool NativeRelocationAssigned(void* ptr) const;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 6f6a8f5..7246ace 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -58,14 +58,14 @@
 
 extern "C" bool jit_compile_method(
     void* handle, ArtMethod* method, Thread* self, bool osr)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
   DCHECK(jit_compiler != nullptr);
   return jit_compiler->CompileMethod(self, method, osr);
 }
 
 extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
   DCHECK(jit_compiler != nullptr);
   if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) {
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 533dccf..18e3155 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,7 +37,7 @@
 
   // Compilation entrypoint. Returns whether the compilation succeeded.
   bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   CompilerOptions* GetCompilerOptions() const {
     return compiler_options_.get();
@@ -59,7 +59,7 @@
   // This is in the compiler since the runtime doesn't have access to the compiled method
   // structures.
   bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(JitCompiler);
 };
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index bf53bb2..b1e3811 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -60,7 +60,7 @@
   void CheckMethod(ArtMethod* method,
                    const OatFile::OatMethod& oat_method,
                    const DexFile& dex_file)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     const CompiledMethod* compiled_method =
         compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
                                                             method->GetDexMethodIndex()));
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 8a80982..2095608 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -684,7 +684,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
     CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -845,7 +845,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
     CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -887,7 +887,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     const DexFile::TypeId& type_id =
         dex_file_->GetTypeId(dex_file_->GetClassDef(class_def_index_).class_idx_);
     const char* class_descriptor = dex_file_->GetTypeDescriptor(type_id);
@@ -974,7 +974,7 @@
   }
 
   bool StartClass(const DexFile* dex_file, size_t class_def_index)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     OatDexMethodVisitor::StartClass(dex_file, class_def_index);
     if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
       dex_cache_ = class_linker_->FindDexCache(Thread::Current(), *dex_file);
@@ -983,7 +983,7 @@
     return true;
   }
 
-  bool EndClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool EndClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     bool result = OatDexMethodVisitor::EndClass();
     if (oat_class_index_ == writer_->oat_classes_.size()) {
       DCHECK(result);  // OatDexMethodVisitor::EndClass() never fails.
@@ -997,7 +997,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
     const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -1138,7 +1138,7 @@
   }
 
   ArtMethod* GetTargetMethod(const LinkerPatch& patch)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     MethodReference ref = patch.TargetMethod();
     mirror::DexCache* dex_cache =
         (dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(
@@ -1149,7 +1149,7 @@
     return method;
   }
 
-  uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t GetTargetOffset(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
     uint32_t target_offset = writer_->relative_patcher_->GetOffset(patch.TargetMethod());
     // If there's no new compiled code, either we're compiling an app and the target method
     // is in the boot image, or we need to point to the correct trampoline.
@@ -1175,20 +1175,20 @@
   }
 
   mirror::DexCache* GetDexCache(const DexFile* target_dex_file)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return (target_dex_file == dex_file_)
         ? dex_cache_
         : class_linker_->FindDexCache(Thread::Current(), *target_dex_file);
   }
 
-  mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+  mirror::Class* GetTargetType(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
     mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile());
     mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
     CHECK(type != nullptr);
     return type;
   }
 
-  mirror::String* GetTargetString(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+  mirror::String* GetTargetString(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
     ScopedObjectAccessUnchecked soa(Thread::Current());
     StackHandleScope<1> hs(soa.Self());
     ClassLinker* linker = Runtime::Current()->GetClassLinker();
@@ -1202,7 +1202,7 @@
     return string;
   }
 
-  uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t GetDexCacheOffset(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (writer_->HasBootImage()) {
       uintptr_t element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<uintptr_t>(
           patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
@@ -1215,7 +1215,7 @@
     }
   }
 
-  uint32_t GetTargetObjectOffset(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t GetTargetObjectOffset(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(writer_->HasBootImage());
     object = writer_->image_writer_->GetImageAddress(object);
     size_t oat_index = writer_->image_writer_->GetOatIndexForDexFile(dex_file_);
@@ -1225,7 +1225,7 @@
   }
 
   void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (writer_->HasBootImage()) {
       object = writer_->image_writer_->GetImageAddress(object);
     } else {
@@ -1245,7 +1245,7 @@
   }
 
   void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     if (writer_->HasBootImage()) {
       method = writer_->image_writer_->GetImageMethodAddress(method);
     } else if (kIsDebugBuild) {
@@ -1273,7 +1273,7 @@
   }
 
   void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     uint32_t address = target_offset;
     if (writer_->HasBootImage()) {
       size_t oat_index = writer_->image_writer_->GetOatIndexForDexCache(dex_cache_);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 451aa38..1e5f0b6 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -109,7 +109,7 @@
 }
 
 static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   return method->IsFinal() || method->GetDeclaringClass()->IsFinal();
 }
 
@@ -119,7 +119,7 @@
  * Return nullptr if the runtime target cannot be proven.
  */
 static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (IsMethodOrDeclaringClassFinal(resolved_method)) {
     // No need to lookup further, the resolved method will be the target.
     return resolved_method;
@@ -189,7 +189,7 @@
 static uint32_t FindMethodIndexIn(ArtMethod* method,
                                   const DexFile& dex_file,
                                   uint32_t name_and_signature_index)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (IsSameDexFile(*method->GetDexFile(), dex_file)) {
     return method->GetDexMethodIndex();
   } else {
@@ -200,7 +200,7 @@
 static uint32_t FindClassIndexIn(mirror::Class* cls,
                                  const DexFile& dex_file,
                                  Handle<mirror::DexCache> dex_cache)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   uint32_t index = DexFile::kDexNoIndex;
   if (cls->GetDexCache() == nullptr) {
     DCHECK(cls->IsArrayClass()) << PrettyClass(cls);
@@ -894,7 +894,7 @@
 
 static HInstruction* GetInvokeInputForArgVRegIndex(HInvoke* invoke_instruction,
                                                    size_t arg_vreg_index)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   size_t input_index = 0;
   for (size_t i = 0; i < arg_vreg_index; ++i, ++input_index) {
     DCHECK_LT(input_index, invoke_instruction->GetNumberOfArguments());
@@ -1030,7 +1030,7 @@
 HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
                                                     uint32_t field_index,
                                                     HInstruction* obj)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
   ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
   DCHECK(resolved_field != nullptr);
@@ -1058,7 +1058,7 @@
                                                     uint32_t field_index,
                                                     HInstruction* obj,
                                                     HInstruction* value)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
   ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
   DCHECK(resolved_field != nullptr);
@@ -1374,7 +1374,7 @@
 static bool IsReferenceTypeRefinement(ReferenceTypeInfo declared_rti,
                                       bool declared_can_be_null,
                                       HInstruction* actual_obj)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (declared_can_be_null && !actual_obj->CanBeNull()) {
     return true;
   }
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 02d3a5f..486626b 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -64,12 +64,12 @@
   // reference type propagation can run after the inlining. If the inlining is successful, this
   // method will replace and remove the `invoke_instruction`.
   bool TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* resolved_method, bool do_rtp)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool TryBuildAndInline(HInvoke* invoke_instruction,
                          ArtMethod* resolved_method,
                          HInstruction** return_replacement)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool TryBuildAndInlineHelper(HInvoke* invoke_instruction,
                                ArtMethod* resolved_method,
@@ -86,7 +86,7 @@
   bool TryPatternSubstitution(HInvoke* invoke_instruction,
                               ArtMethod* resolved_method,
                               HInstruction** return_replacement)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a new HInstanceFieldGet.
   HInstanceFieldGet* CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
@@ -105,38 +105,38 @@
   bool TryInlineMonomorphicCall(HInvoke* invoke_instruction,
                                 ArtMethod* resolved_method,
                                 const InlineCache& ic)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to inline targets of a polymorphic call.
   bool TryInlinePolymorphicCall(HInvoke* invoke_instruction,
                                 ArtMethod* resolved_method,
                                 const InlineCache& ic)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
                                             ArtMethod* resolved_method,
                                             const InlineCache& ic)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
 
   HInstanceFieldGet* BuildGetReceiverClass(ClassLinker* class_linker,
                                            HInstruction* receiver,
                                            uint32_t dex_pc) const
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   void FixUpReturnReferenceType(ArtMethod* resolved_method, HInstruction* return_replacement)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Creates an instance of ReferenceTypeInfo from `klass` if `klass` is
   // admissible (see ReferenceTypePropagation::IsAdmissible for details).
   // Otherwise returns inexact Object RTI.
-  ReferenceTypeInfo GetClassRTI(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  ReferenceTypeInfo GetClassRTI(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod* resolved_method)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool ReturnTypeMoreSpecific(HInvoke* invoke_instruction, HInstruction* return_replacement)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Add a type guard on the given `receiver`. This will add to the graph:
   // i0 = HFieldGet(receiver, klass)
@@ -154,7 +154,7 @@
                              bool is_referrer,
                              HInstruction* invoke_instruction,
                              bool with_deoptimization)
-    SHARED_REQUIRES(Locks::mutator_lock_);
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * Ad-hoc implementation of a diamond pattern in the graph for
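
Nearly every private helper in this header carries the annotation, and that is enough for the whole inlining pass: the requirement composes, so an annotated function may call other annotated helpers freely, and only the outermost caller actually takes the lock. A minimal sketch using the same illustrative macros as above, with the method names from this header but their signatures reduced to nothing for brevity:

#define CAPABILITY(x)        __attribute__((capability(x)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") ReaderWriterMutex {};
ReaderWriterMutex mutator_lock_;

bool TryBuildAndInline() REQUIRES_SHARED(mutator_lock_) { return false; }

// No warning on the nested call: our own contract already guarantees the
// shared capability on entry.
bool TryInlineAndReplace() REQUIRES_SHARED(mutator_lock_) {
  return TryBuildAndInline();
}
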
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e5dab56..453068b 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -957,7 +957,7 @@
 }
 
 static bool IsSubClass(mirror::Class* to_test, mirror::Class* super_class)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   return to_test != nullptr && !to_test->IsInterface() && to_test->IsSubClass(super_class);
 }
 
@@ -1607,7 +1607,7 @@
 }
 
 static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (cls.Get() == nullptr) {
     return TypeCheckKind::kUnresolvedCheck;
   } else if (cls->IsInterface()) {
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 517cf76..aa34ddd 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -103,7 +103,7 @@
   bool NeedsAccessCheck(uint32_t type_index,
                         Handle<mirror::DexCache> dex_cache,
                         /*out*/bool* finalizable) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool NeedsAccessCheck(uint32_t type_index, /*out*/bool* finalizable) const;
 
   template<typename T>
@@ -255,14 +255,14 @@
       ArtMethod* method,
       uint32_t method_idx,
       HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Build a HNewInstance instruction.
   bool BuildNewInstance(uint16_t type_index, uint32_t dex_pc);
 
   // Return whether the compiler can assume `cls` is initialized.
   bool IsInitialized(Handle<mirror::Class> cls) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to resolve a method using the class linker. Return null if a method could
   // not be resolved.
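
instruction_builder.h pairs an annotated NeedsAccessCheck overload with an unannotated one. One common shape for such pairs, assumed here purely for illustration (the real overload may be structured differently), is a convenience wrapper that takes the lock itself and forwards to the annotated implementation; NeedsAccessCheckImpl below is a hypothetical name:

#include <cstdint>

#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") ReaderWriterMutex {
  void ReaderLock() ACQUIRE_SHARED();
  void ReaderUnlock() RELEASE_SHARED();
};
ReaderWriterMutex mutator_lock_;

// Hypothetical annotated implementation: the caller must hold the lock.
bool NeedsAccessCheckImpl(uint32_t type_index) REQUIRES_SHARED(mutator_lock_) {
  return type_index != 0u;  // Placeholder logic, not ART's.
}

// Unannotated convenience overload: acquires the lock itself, so its
// callers need no capability of their own.
bool NeedsAccessCheck(uint32_t type_index) {
  mutator_lock_.ReaderLock();
  bool result = NeedsAccessCheckImpl(type_index);
  mutator_lock_.ReaderUnlock();
  return result;
}
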
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 2808e1b..8f37236 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2242,7 +2242,7 @@
 }
 
 static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (rti.IsValid()) {
     DCHECK(upper_bound_rti.IsSupertypeOf(rti))
         << " upper_bound_rti: " << upper_bound_rti
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index dfa8276..62e39f5 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -171,7 +171,7 @@
 
   static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact);
 
-  static ReferenceTypeInfo Create(TypeHandle type_handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+  static ReferenceTypeInfo Create(TypeHandle type_handle) REQUIRES_SHARED(Locks::mutator_lock_) {
     return Create(type_handle, type_handle->CannotBeAssignedFromOtherTypes());
   }
 
@@ -191,49 +191,49 @@
 
   bool IsExact() const { return is_exact_; }
 
-  bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsObjectClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsObjectClass();
   }
 
-  bool IsStringClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsStringClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsStringClass();
   }
 
-  bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsObjectArray() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
   }
 
-  bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsInterface() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsInterface();
   }
 
-  bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsArrayClass();
   }
 
-  bool IsPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsPrimitiveArray();
   }
 
-  bool IsNonPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsNonPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsArrayClass() && !GetTypeHandle()->IsPrimitiveArray();
   }
 
-  bool CanArrayHold(ReferenceTypeInfo rti)  const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool CanArrayHold(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     if (!IsExact()) return false;
     if (!IsArrayClass()) return false;
     return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
   }
 
-  bool CanArrayHoldValuesOf(ReferenceTypeInfo rti)  const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     if (!IsExact()) return false;
     if (!IsArrayClass()) return false;
@@ -244,13 +244,13 @@
 
   Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
 
-  bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     DCHECK(rti.IsValid());
     return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
   }
 
-  bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     DCHECK(rti.IsValid());
     return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
@@ -260,7 +260,7 @@
   // Returns true if the type information provides the same amount of detail.
   // Note that it does not mean that the instructions have the same actual type
   // (because the type can be the result of a merge).
-  bool IsEqual(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsEqual(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!IsValid() && !rti.IsValid()) {
       // Invalid types are equal.
       return true;
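
All of the ReferenceTypeInfo accessors touched above are const readers, which is exactly why the shared form of the annotation suffices for them. Against GUARDED_BY data, the analysis enforces the read/write split: a shared hold permits reads, while writes demand the exclusive REQUIRES. A sketch with the same illustrative macros:

#define CAPABILITY(x)        __attribute__((capability(x)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") ReaderWriterMutex {};
ReaderWriterMutex lock_;

int guarded_value GUARDED_BY(lock_) = 0;

int Read() REQUIRES_SHARED(lock_) { return guarded_value; }  // Reads: shared hold is enough.
void Write(int v) REQUIRES(lock_) { guarded_value = v; }     // Writes: exclusive hold required.

// void BadWrite(int v) REQUIRES_SHARED(lock_) { guarded_value = v; }
//   ^ would warn: writing guarded_value requires lock_ held exclusively.
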
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6e98b4d..c5d7611 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -294,7 +294,7 @@
   }
 
   uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
         InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
   }
@@ -311,7 +311,7 @@
 
   bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
       OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   void RunOptimizations(HGraph* graph,
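
Callers reach JitCompile and GetEntryPointOf with the mutator lock already held shared; in ART that hold usually comes from an RAII scope (ScopedObjectAccess). A simplified sketch of the idiom using clang's scoped_lockable support; ScopedSharedLock is an illustration of the pattern, not ART's actual class:

#define CAPABILITY(x)        __attribute__((capability(x)))
#define SCOPED_CAPABILITY    __attribute__((scoped_lockable))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))
#define RELEASE_GENERIC(...) __attribute__((release_generic_capability(__VA_ARGS__)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") ReaderWriterMutex {
  void ReaderLock() ACQUIRE_SHARED();
  void ReaderUnlock() RELEASE_SHARED();
};
ReaderWriterMutex mutator_lock_;

// RAII shared hold in the spirit of ScopedObjectAccess (sketch only).
struct SCOPED_CAPABILITY ScopedSharedLock {
  explicit ScopedSharedLock(ReaderWriterMutex& mu) ACQUIRE_SHARED(mu) : mu_(mu) {
    mu.ReaderLock();
  }
  // Generic release covers the shared hold taken by the constructor.
  ~ScopedSharedLock() RELEASE_GENERIC() { mu_.ReaderUnlock(); }
  ReaderWriterMutex& mu_;
};

bool CompileSomething() REQUIRES_SHARED(mutator_lock_) { return true; }

bool Driver() {
  ScopedSharedLock soa(mutator_lock_);  // Shared capability held for this scope.
  return CompileSomething();
}
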
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index e96ab19..4289cf7 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -27,7 +27,7 @@
 static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
                                                      const DexFile& dex_file,
                                                      Handle<mirror::DexCache> hint_dex_cache)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (LIKELY(hint_dex_cache->GetDexFile() == &dex_file)) {
     return hint_dex_cache.Get();
   } else {
@@ -85,7 +85,7 @@
   void VisitParameterValue(HParameterValue* instr) OVERRIDE;
   void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
   void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
   void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
   void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
@@ -194,7 +194,7 @@
                                   ReferenceTypeInfo upper_bound,
                                   HInstruction* dominator_instr,
                                   HBasicBlock* dominator_block)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   // If the position where we should insert the bound type is not already a
   // bound type then we need to create one.
   if (position == nullptr || !position->IsBoundType()) {
@@ -487,7 +487,7 @@
                                            const DexFile& dex_file,
                                            uint16_t type_idx,
                                            Handle<mirror::DexCache> hint_dex_cache)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   mirror::DexCache* dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
   // Get type from dex cache assuming it was populated by the verifier.
   return dex_cache->GetResolvedType(type_idx);
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index edd83bf..1fa6624 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -44,7 +44,7 @@
 
   // Returns true if klass is admissible to the propagation: non-null and resolved.
   // For an array type, we also check if the component type is admissible.
-  static bool IsAdmissible(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+  static bool IsAdmissible(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
     return klass != nullptr &&
            klass->IsResolved() &&
            (!klass->IsArrayClass() || IsAdmissible(klass->GetComponentType()));
@@ -58,7 +58,7 @@
     explicit HandleCache(StackHandleScopeCollection* handles) : handles_(handles) { }
 
     template <typename T>
-    MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+    MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
       return handles_->NewHandle(object);
     }
 
@@ -80,8 +80,8 @@
 
   void VisitPhi(HPhi* phi);
   void VisitBasicBlock(HBasicBlock* block);
-  void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_);
-  void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_);
+  void UpdateBoundType(HBoundType* bound_type) REQUIRES_SHARED(Locks::mutator_lock_);
+  void UpdatePhi(HPhi* phi) REQUIRES_SHARED(Locks::mutator_lock_);
   void BoundTypeForIfNotNull(HBasicBlock* block);
   void BoundTypeForIfInstanceOf(HBasicBlock* block);
   void ProcessWorklist();
@@ -92,10 +92,10 @@
   bool UpdateReferenceTypeInfo(HInstruction* instr);
 
   static void UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void ValidateTypes();
 
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 7649b50..75a4eac 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -46,7 +46,7 @@
 
   // Relay method to merge types in reference type propagation.
   ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a,
-                               const ReferenceTypeInfo& b) SHARED_REQUIRES(Locks::mutator_lock_) {
+                               const ReferenceTypeInfo& b) REQUIRES_SHARED(Locks::mutator_lock_) {
     return propagation_->MergeTypes(a, b);
   }
 
@@ -56,12 +56,12 @@
   }
 
   // Helper method to construct the Object type.
-  ReferenceTypeInfo ObjectType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+  ReferenceTypeInfo ObjectType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
     return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetObjectClassHandle(), is_exact);
   }
 
   // Helper method to construct the String type.
-  ReferenceTypeInfo StringType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+  ReferenceTypeInfo StringType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
     return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetStringClassHandle(), is_exact);
   }
 
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 5a574d9..f7dc112 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -303,7 +303,7 @@
 }
 
 static Primitive::Type GetPrimitiveArrayComponentType(HInstruction* array)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   ReferenceTypeInfo array_type = array->GetReferenceTypeInfo();
   DCHECK(array_type.IsPrimitiveArrayClass());
   return array_type.GetTypeHandle()->GetComponentType()->GetPrimitiveType();
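
Static helpers like GetPrimitiveArrayComponentType are also reached from paths the static analysis cannot follow. ART's mutexes pair the annotations with runtime checks (ReaderWriterMutex::AssertSharedHeld, sketched here without its Thread* parameter); clang models the same idea with assert_shared_capability, which documents the expectation and lets the analysis assume the capability from that point on. A last sketch, again with illustrative macro spellings:

#define CAPABILITY(x)               __attribute__((capability(x)))
#define ASSERT_SHARED_CAPABILITY(x) __attribute__((assert_shared_capability(x)))
#define REQUIRES_SHARED(...)        __attribute__((requires_shared_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") ReaderWriterMutex {
  // A real implementation would abort if the lock were not held shared;
  // the empty body here only demonstrates the static effect.
  void AssertSharedHeld() ASSERT_SHARED_CAPABILITY(this) {}
};
ReaderWriterMutex mutator_lock_;

int ComponentTypeLike() REQUIRES_SHARED(mutator_lock_) { return 0; }

void CalledFromOpaqueCode() {
  mutator_lock_.AssertSharedHeld();  // Analysis assumes the lock is held from here on.
  ComponentTypeLike();               // No -Wthread-safety warning after the assert.
}
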