Move to newer clang annotations

Also enable -Wthread-safety-negative.

Changes:
Switch to capabilities and negative capabilities (REQUIRES,
SHARED_REQUIRES, and REQUIRES(!...) in place of
EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, and LOCKS_EXCLUDED).
Replace the ParallelCompilationManager callback functions with
CompilationVisitor classes so that the per-item Visit() methods can
carry the new annotations.

Future work:
Use capabilities to implement uninterruptible annotations that work
with AssertNoThreadSuspension.
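
For illustration, a minimal sketch of what the new macros expand to and
what -Wthread-safety-negative enforces. The macro definitions and the
Mutex class here are assumptions for the example, not the real ART
declarations; they follow clang's documented thread safety attributes:

    // Assumed wrappers over clang's attributes (the real ART macros are
    // defined elsewhere in the tree and may differ in detail).
    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE();
      void Unlock() RELEASE();
    };

    Mutex lock_;

    // Old style: LOCKS_EXCLUDED(lock_) warned only when a caller visibly
    // held lock_. New style: REQUIRES(!lock_) is a negative capability;
    // with -Wthread-safety-negative each caller must prove lock_ is not
    // held, usually by stating REQUIRES(!lock_) itself.
    void AddInlineMethod() REQUIRES(!lock_);

    void Caller() REQUIRES(!lock_) {
      AddInlineMethod();  // OK: !lock_ is part of Caller's contract.
    }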

Bug: 20072211

Change-Id: I42fcbe0300d98a831c89d1eff3ecd5a7e99ebf33
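
The diff below also replaces ParallelCompilationManager's
function-pointer Callback with a CompilationVisitor interface. A hedged
sketch of the pattern and a likely motivation: thread-safety attributes
attach to function and method declarations, not to a function-pointer
typedef, so the old Callback could not carry
REQUIRES(!Locks::mutator_lock_); as a virtual method, each worker body
is checked against that contract.

    // Simplified from the diff below; the lock expression assumes ART's
    // Locks::mutator_lock_, and OVERRIDE is ART's macro for override.
    class CompilationVisitor {
     public:
      virtual ~CompilationVisitor() {}
      virtual void Visit(size_t index) = 0;
    };

    class VerifyClassVisitor : public CompilationVisitor {
     public:
      void Visit(size_t class_def_index) OVERRIDE
          REQUIRES(!Locks::mutator_lock_) {
        // Verifies one class. Taking the mutator lock inside (via
        // ScopedObjectAccess) is consistent with entering without it.
      }
    };
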
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 5d4feb8..2c552e6 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -132,7 +132,7 @@
   -DNVALGRIND
 
 # Warn about thread safety violations with clang.
-art_clang_cflags := -Wthread-safety
+art_clang_cflags := -Wthread-safety -Wthread-safety-negative
 
 # Warn if switch fallthroughs aren't annotated.
 art_clang_cflags += -Wimplicit-fallthrough
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index d215662..dc2bc5c 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -46,12 +46,12 @@
   // Create an OatMethod based on pointers (for unit tests).
   OatFile::OatMethod CreateOatMethod(const void* code);
 
-  void MakeExecutable(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MakeExecutable(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void MakeExecutable(const void* code_start, size_t code_length);
 
   void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  protected:
   virtual void SetUp();
@@ -76,17 +76,17 @@
   virtual void TearDown();
 
   void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void CompileMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CompileMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                            const char* method_name, const char* signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                             const char* method_name, const char* signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ReserveImageSpace();
 
diff --git a/compiler/compiler.h b/compiler/compiler.h
index e5d1aff..fcd3434 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -58,7 +58,7 @@
                                      const DexFile& dex_file) const = 0;
 
   virtual uintptr_t GetEntryPointOf(ArtMethod* method) const
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+     SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   uint64_t GetMaximumCompilationTimeBeforeWarning() const {
     return maximum_compilation_time_before_warning_;
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index e4570fd..04c58ac 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -135,7 +135,7 @@
   // with IGET/IPUT. For fast path fields, retrieve the field offset.
   static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
                       MirIFieldLoweringInfo* field_infos, size_t count)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   // Construct an unresolved instance field lowering info.
   explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened)
@@ -192,7 +192,7 @@
   // and the type index of the declaring class in the compiled method's dex file.
   static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
                       MirSFieldLoweringInfo* field_infos, size_t count)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   // Construct an unresolved static field lowering info.
   explicit MirSFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 946c74b..4512f35 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -99,7 +99,7 @@
   // path methods, retrieve the method's vtable index and direct code and method when applicable.
   static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
                       MirMethodLoweringInfo* method_infos, size_t count)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened)
       : MirMethodInfo(method_idx,
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 26b41bf..a8cb9f0 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -62,49 +62,49 @@
      * @return true if the method is a candidate for inlining, false otherwise.
      */
     bool AnalyseMethodCode(verifier::MethodVerifier* verifier)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
     /**
      * Check whether a particular method index corresponds to an intrinsic or special function.
      */
-    InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+    InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) REQUIRES(!lock_);
 
     /**
      * Check whether a particular method index corresponds to an intrinsic function.
      */
-    bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) LOCKS_EXCLUDED(lock_);
+    bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) REQUIRES(!lock_);
 
     /**
      * Generate code for an intrinsic function invocation.
      */
-    bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) LOCKS_EXCLUDED(lock_);
+    bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) REQUIRES(!lock_);
 
     /**
      * Check whether a particular method index corresponds to a special function.
      */
-    bool IsSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+    bool IsSpecial(uint32_t method_index) REQUIRES(!lock_);
 
     /**
      * Generate code for a special function.
      */
-    bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) LOCKS_EXCLUDED(lock_);
+    bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) REQUIRES(!lock_);
 
     /**
      * Try to inline an invoke.
      */
     bool GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, uint32_t method_idx)
-        LOCKS_EXCLUDED(lock_);
+        REQUIRES(!lock_);
 
     /**
      * Gets the thread pointer entrypoint offset for a string init method index and pointer size.
      */
     uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size)
-        LOCKS_EXCLUDED(lock_);
+        REQUIRES(!lock_);
 
     /**
      * Check whether a particular method index is a string init.
      */
-    bool IsStringInitMethodIndex(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+    bool IsStringInitMethodIndex(uint32_t method_index) REQUIRES(!lock_);
 
     /**
      * To avoid multiple lookups of a class by its descriptor, we cache its
@@ -351,11 +351,11 @@
      *
      * Only DexFileToMethodInlinerMap may call this function to initialize the inliner.
      */
-    void FindIntrinsics(const DexFile* dex_file) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+    void FindIntrinsics(const DexFile* dex_file) REQUIRES(lock_);
 
     friend class DexFileToMethodInlinerMap;
 
-    bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) LOCKS_EXCLUDED(lock_);
+    bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) REQUIRES(!lock_);
 
     static bool GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
                                MIR* move_result, const InlineMethod& method);
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
index 43dd578..4a39ab3 100644
--- a/compiler/dex/quick/quick_compiler.h
+++ b/compiler/dex/quick/quick_compiler.h
@@ -50,7 +50,7 @@
                              const DexFile& dex_file) const OVERRIDE;
 
   uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit);
 
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index d692d26..03bf57b 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,7 +38,7 @@
     ~QuickCompilerCallbacks() { }
 
     bool MethodVerified(verifier::MethodVerifier* verifier)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+        SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
 
     void ClassRejected(ClassReference ref) OVERRIDE;
 
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 7fc2a23..9934f6b 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -43,15 +43,15 @@
     ~VerificationResults();
 
     bool ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        LOCKS_EXCLUDED(verified_methods_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES(!verified_methods_lock_);
 
     const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
-        LOCKS_EXCLUDED(verified_methods_lock_);
-    void RemoveVerifiedMethod(MethodReference ref) LOCKS_EXCLUDED(verified_methods_lock_);
+        REQUIRES(!verified_methods_lock_);
+    void RemoveVerifiedMethod(MethodReference ref) REQUIRES(!verified_methods_lock_);
 
-    void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
-    bool IsClassRejected(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
+    void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_);
+    bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_);
 
     bool IsCandidateForCompilation(MethodReference& method_ref,
                                    const uint32_t access_flags);
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index bf11839..f7d6d67 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -44,7 +44,7 @@
   typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
 
   static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   ~VerifiedMethod() = default;
 
   const std::vector<uint8_t>& GetDexGcMap() const {
@@ -107,15 +107,15 @@
 
   // Generate devirtualizaion map into devirt_map_.
   void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Generate dequickening map into dequicken_map_. Returns false if there is an error.
   bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Generate safe case set into safe_cast_set_.
   void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   std::vector<uint8_t> dex_gc_map_;
   DevirtualizationMap devirt_map_;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a52bfae..05705a2 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -167,69 +167,69 @@
 #define STATS_LOCK()
 #endif
 
-  void TypeInDexCache() {
+  void TypeInDexCache() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     types_in_dex_cache_++;
   }
 
-  void TypeNotInDexCache() {
+  void TypeNotInDexCache() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     types_not_in_dex_cache_++;
   }
 
-  void StringInDexCache() {
+  void StringInDexCache() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     strings_in_dex_cache_++;
   }
 
-  void StringNotInDexCache() {
+  void StringNotInDexCache() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     strings_not_in_dex_cache_++;
   }
 
-  void TypeDoesntNeedAccessCheck() {
+  void TypeDoesntNeedAccessCheck() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     resolved_types_++;
   }
 
-  void TypeNeedsAccessCheck() {
+  void TypeNeedsAccessCheck() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     unresolved_types_++;
   }
 
-  void ResolvedInstanceField() {
+  void ResolvedInstanceField() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     resolved_instance_fields_++;
   }
 
-  void UnresolvedInstanceField() {
+  void UnresolvedInstanceField() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     unresolved_instance_fields_++;
   }
 
-  void ResolvedLocalStaticField() {
+  void ResolvedLocalStaticField() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     resolved_local_static_fields_++;
   }
 
-  void ResolvedStaticField() {
+  void ResolvedStaticField() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     resolved_static_fields_++;
   }
 
-  void UnresolvedStaticField() {
+  void UnresolvedStaticField() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     unresolved_static_fields_++;
   }
 
   // Indicate that type information from the verifier led to devirtualization.
-  void PreciseTypeDevirtualization() {
+  void PreciseTypeDevirtualization() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     type_based_devirtualization_++;
   }
 
   // Indicate that a method of the given type was resolved at compile time.
-  void ResolvedMethod(InvokeType type) {
+  void ResolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
     DCHECK_LE(type, kMaxInvokeType);
     STATS_LOCK();
     resolved_methods_[type]++;
@@ -237,7 +237,7 @@
 
   // Indicate that a method of the given type was unresolved at compile time as it was in an
   // unknown dex file.
-  void UnresolvedMethod(InvokeType type) {
+  void UnresolvedMethod(InvokeType type) REQUIRES(!stats_lock_) {
     DCHECK_LE(type, kMaxInvokeType);
     STATS_LOCK();
     unresolved_methods_[type]++;
@@ -245,27 +245,27 @@
 
   // Indicate that a type of virtual method dispatch has been converted into a direct method
   // dispatch.
-  void VirtualMadeDirect(InvokeType type) {
+  void VirtualMadeDirect(InvokeType type) REQUIRES(!stats_lock_) {
     DCHECK(type == kVirtual || type == kInterface || type == kSuper);
     STATS_LOCK();
     virtual_made_direct_[type]++;
   }
 
   // Indicate that a method of the given type was able to call directly into boot.
-  void DirectCallsToBoot(InvokeType type) {
+  void DirectCallsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
     DCHECK_LE(type, kMaxInvokeType);
     STATS_LOCK();
     direct_calls_to_boot_[type]++;
   }
 
   // Indicate that a method of the given type was able to be resolved directly from boot.
-  void DirectMethodsToBoot(InvokeType type) {
+  void DirectMethodsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
     DCHECK_LE(type, kMaxInvokeType);
     STATS_LOCK();
     direct_methods_to_boot_[type]++;
   }
 
-  void ProcessedInvoke(InvokeType type, int flags) {
+  void ProcessedInvoke(InvokeType type, int flags) REQUIRES(!stats_lock_) {
     STATS_LOCK();
     if (flags == 0) {
       unresolved_methods_[type]++;
@@ -290,13 +290,13 @@
   }
 
   // A check-cast could be eliminated due to verifier type analysis.
-  void SafeCast() {
+  void SafeCast() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     safe_casts_++;
   }
 
   // A check-cast couldn't be eliminated due to verifier type analysis.
-  void NotASafeCast() {
+  void NotASafeCast() REQUIRES(!stats_lock_) {
     STATS_LOCK();
     not_safe_casts_++;
   }
@@ -692,7 +692,7 @@
 
 static void ResolveExceptionsForMethod(
     ArtMethod* method_handle, std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
   if (code_item == nullptr) {
     return;  // native or abstract method
@@ -729,7 +729,7 @@
 }
 
 static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   auto* exceptions_to_resolve =
       reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg);
   const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
@@ -743,7 +743,7 @@
 }
 
 static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::unordered_set<std::string>* image_classes =
       reinterpret_cast<std::unordered_set<std::string>*>(arg);
   std::string temp;
@@ -752,8 +752,7 @@
 }
 
 // Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_) {
+void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
   CHECK(timings != nullptr);
   if (!IsImage()) {
     return;
@@ -819,7 +818,7 @@
 
 static void MaybeAddToImageClasses(Handle<mirror::Class> c,
                                    std::unordered_set<std::string>* image_classes)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Thread* self = Thread::Current();
   StackHandleScope<1> hs(self);
   // Make a copy of the handle so that we don't clobber it doing Assign.
@@ -876,7 +875,7 @@
 
   // Visitor for VisitReferences.
   void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
     if (ref != nullptr) {
       VisitClinitClassesObject(ref);
@@ -887,7 +886,7 @@
   void operator()(mirror::Class* /* klass */, mirror::Reference* /* ref */) const {
   }
 
-  void Walk() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Walk() SHARED_REQUIRES(Locks::mutator_lock_) {
     // Use the initial classes as roots for a search.
     for (mirror::Class* klass_root : image_classes_) {
       VisitClinitClassesObject(klass_root);
@@ -897,7 +896,7 @@
  private:
   ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
                     ClassLinker* linker)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+      SHARED_REQUIRES(Locks::mutator_lock_) :
       image_class_descriptors_(image_class_descriptors), self_(self) {
     CHECK(linker != nullptr);
     CHECK(image_class_descriptors != nullptr);
@@ -915,7 +914,7 @@
   }
 
   static bool FindImageClasses(mirror::Class* klass, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ClinitImageUpdate* data = reinterpret_cast<ClinitImageUpdate*>(arg);
     std::string temp;
     const char* name = klass->GetDescriptor(&temp);
@@ -933,7 +932,7 @@
   }
 
   void VisitClinitClassesObject(mirror::Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(object != nullptr);
     if (marked_objects_.find(object) != marked_objects_.end()) {
       // Already processed.
@@ -1569,10 +1568,14 @@
   return result;
 }
 
+class CompilationVisitor {
+ public:
+  virtual ~CompilationVisitor() {}
+  virtual void Visit(size_t index) = 0;
+};
+
 class ParallelCompilationManager {
  public:
-  typedef void Callback(const ParallelCompilationManager* manager, size_t index);
-
   ParallelCompilationManager(ClassLinker* class_linker,
                              jobject class_loader,
                              CompilerDriver* compiler,
@@ -1610,14 +1613,15 @@
     return dex_files_;
   }
 
-  void ForAll(size_t begin, size_t end, Callback callback, size_t work_units) {
+  void ForAll(size_t begin, size_t end, CompilationVisitor* visitor, size_t work_units)
+      REQUIRES(!*Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     self->AssertNoPendingException();
     CHECK_GT(work_units, 0U);
 
     index_.StoreRelaxed(begin);
     for (size_t i = 0; i < work_units; ++i) {
-      thread_pool_->AddTask(self, new ForAllClosure(this, end, callback));
+      thread_pool_->AddTask(self, new ForAllClosure(this, end, visitor));
     }
     thread_pool_->StartWorkers(self);
 
@@ -1636,10 +1640,10 @@
  private:
   class ForAllClosure : public Task {
    public:
-    ForAllClosure(ParallelCompilationManager* manager, size_t end, Callback* callback)
+    ForAllClosure(ParallelCompilationManager* manager, size_t end, CompilationVisitor* visitor)
         : manager_(manager),
           end_(end),
-          callback_(callback) {}
+          visitor_(visitor) {}
 
     virtual void Run(Thread* self) {
       while (true) {
@@ -1647,7 +1651,7 @@
         if (UNLIKELY(index >= end_)) {
           break;
         }
-        callback_(manager_, index);
+        visitor_->Visit(index);
         self->AssertNoPendingException();
       }
     }
@@ -1659,7 +1663,7 @@
    private:
     ParallelCompilationManager* const manager_;
     const size_t end_;
-    Callback* const callback_;
+    CompilationVisitor* const visitor_;
   };
 
   AtomicInteger index_;
@@ -1676,7 +1680,7 @@
 // A fast version of SkipClass above if the class pointer is available
 // that avoids the expensive FindInClassPath search.
 static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(klass != nullptr);
   const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile();
   if (&dex_file != &original_dex_file) {
@@ -1691,7 +1695,7 @@
 }
 
 static void CheckAndClearResolveException(Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   CHECK(self->IsExceptionPending());
   mirror::Throwable* exception = self->GetException();
   std::string temp;
@@ -1717,134 +1721,148 @@
   self->ClearException();
 }
 
-static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manager,
-                                         size_t class_def_index)
-    LOCKS_EXCLUDED(Locks::mutator_lock_) {
-  ATRACE_CALL();
-  Thread* self = Thread::Current();
-  jobject jclass_loader = manager->GetClassLoader();
-  const DexFile& dex_file = *manager->GetDexFile();
-  ClassLinker* class_linker = manager->GetClassLinker();
+class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
+ public:
+  explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
+      : manager_(manager) {}
 
-  // If an instance field is final then we need to have a barrier on the return, static final
-  // fields are assigned within the lock held for class initialization. Conservatively assume
-  // constructor barriers are always required.
-  bool requires_constructor_barrier = true;
+  virtual void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+    ATRACE_CALL();
+    Thread* const self = Thread::Current();
+    jobject jclass_loader = manager_->GetClassLoader();
+    const DexFile& dex_file = *manager_->GetDexFile();
+    ClassLinker* class_linker = manager_->GetClassLinker();
 
-  // Method and Field are the worst. We can't resolve without either
-  // context from the code use (to disambiguate virtual vs direct
-  // method and instance vs static field) or from class
-  // definitions. While the compiler will resolve what it can as it
-  // needs it, here we try to resolve fields and methods used in class
-  // definitions, since many of them many never be referenced by
-  // generated code.
-  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
-  ScopedObjectAccess soa(self);
-  StackHandleScope<2> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
-  // Resolve the class.
-  mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
-                                                   class_loader);
-  bool resolve_fields_and_methods;
-  if (klass == nullptr) {
-    // Class couldn't be resolved, for example, super-class is in a different dex file. Don't
-    // attempt to resolve methods and fields when there is no declaring class.
-    CheckAndClearResolveException(soa.Self());
-    resolve_fields_and_methods = false;
-  } else {
-    // We successfully resolved a class, should we skip it?
-    if (SkipClass(jclass_loader, dex_file, klass)) {
-      return;
-    }
-    // We want to resolve the methods and fields eagerly.
-    resolve_fields_and_methods = true;
-  }
-  // Note the class_data pointer advances through the headers,
-  // static fields, instance fields, direct methods, and virtual
-  // methods.
-  const uint8_t* class_data = dex_file.GetClassData(class_def);
-  if (class_data == nullptr) {
-    // Empty class such as a marker interface.
-    requires_constructor_barrier = false;
-  } else {
-    ClassDataItemIterator it(dex_file, class_data);
-    while (it.HasNextStaticField()) {
-      if (resolve_fields_and_methods) {
-        ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
-                                                             dex_cache, class_loader, true);
-        if (field == nullptr) {
-          CheckAndClearResolveException(soa.Self());
-        }
+    // If an instance field is final then we need to have a barrier on the return, static final
+    // fields are assigned within the lock held for class initialization. Conservatively assume
+    // constructor barriers are always required.
+    bool requires_constructor_barrier = true;
+
+    // Method and Field are the worst. We can't resolve without either
+    // context from the code use (to disambiguate virtual vs direct
+    // method and instance vs static field) or from class
+    // definitions. While the compiler will resolve what it can as it
+    // needs it, here we try to resolve fields and methods used in class
+    // definitions, since many of them may never be referenced by
+    // generated code.
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    ScopedObjectAccess soa(self);
+    StackHandleScope<2> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+    Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+    // Resolve the class.
+    mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
+                                                     class_loader);
+    bool resolve_fields_and_methods;
+    if (klass == nullptr) {
+      // Class couldn't be resolved, for example, super-class is in a different dex file. Don't
+      // attempt to resolve methods and fields when there is no declaring class.
+      CheckAndClearResolveException(soa.Self());
+      resolve_fields_and_methods = false;
+    } else {
+      // We successfully resolved a class, should we skip it?
+      if (SkipClass(jclass_loader, dex_file, klass)) {
+        return;
       }
-      it.Next();
+      // We want to resolve the methods and fields eagerly.
+      resolve_fields_and_methods = true;
     }
-    // We require a constructor barrier if there are final instance fields.
-    requires_constructor_barrier = false;
-    while (it.HasNextInstanceField()) {
-      if (it.MemberIsFinal()) {
-        requires_constructor_barrier = true;
-      }
-      if (resolve_fields_and_methods) {
-        ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
-                                                             dex_cache, class_loader, false);
-        if (field == nullptr) {
-          CheckAndClearResolveException(soa.Self());
-        }
-      }
-      it.Next();
-    }
-    if (resolve_fields_and_methods) {
-      while (it.HasNextDirectMethod()) {
-        ArtMethod* method = class_linker->ResolveMethod(
-            dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
-            it.GetMethodInvokeType(class_def));
-        if (method == nullptr) {
-          CheckAndClearResolveException(soa.Self());
+    // Note the class_data pointer advances through the headers,
+    // static fields, instance fields, direct methods, and virtual
+    // methods.
+    const uint8_t* class_data = dex_file.GetClassData(class_def);
+    if (class_data == nullptr) {
+      // Empty class such as a marker interface.
+      requires_constructor_barrier = false;
+    } else {
+      ClassDataItemIterator it(dex_file, class_data);
+      while (it.HasNextStaticField()) {
+        if (resolve_fields_and_methods) {
+          ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
+                                                               dex_cache, class_loader, true);
+          if (field == nullptr) {
+            CheckAndClearResolveException(soa.Self());
+          }
         }
         it.Next();
       }
-      while (it.HasNextVirtualMethod()) {
-        ArtMethod* method = class_linker->ResolveMethod(
-            dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
-            it.GetMethodInvokeType(class_def));
-        if (method == nullptr) {
-          CheckAndClearResolveException(soa.Self());
+      // We require a constructor barrier if there are final instance fields.
+      requires_constructor_barrier = false;
+      while (it.HasNextInstanceField()) {
+        if (it.MemberIsFinal()) {
+          requires_constructor_barrier = true;
+        }
+        if (resolve_fields_and_methods) {
+          ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
+                                                               dex_cache, class_loader, false);
+          if (field == nullptr) {
+            CheckAndClearResolveException(soa.Self());
+          }
         }
         it.Next();
       }
-      DCHECK(!it.HasNext());
+      if (resolve_fields_and_methods) {
+        while (it.HasNextDirectMethod()) {
+          ArtMethod* method = class_linker->ResolveMethod(
+              dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
+              it.GetMethodInvokeType(class_def));
+          if (method == nullptr) {
+            CheckAndClearResolveException(soa.Self());
+          }
+          it.Next();
+        }
+        while (it.HasNextVirtualMethod()) {
+          ArtMethod* method = class_linker->ResolveMethod(
+              dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
+              it.GetMethodInvokeType(class_def));
+          if (method == nullptr) {
+            CheckAndClearResolveException(soa.Self());
+          }
+          it.Next();
+        }
+        DCHECK(!it.HasNext());
+      }
+    }
+    if (requires_constructor_barrier) {
+      manager_->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index);
     }
   }
-  if (requires_constructor_barrier) {
-    manager->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index);
-  }
-}
 
-static void ResolveType(const ParallelCompilationManager* manager, size_t type_idx)
-    LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ private:
+  const ParallelCompilationManager* const manager_;
+};
+
+class ResolveTypeVisitor : public CompilationVisitor {
+ public:
+  explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) {
+  }
+  virtual void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
   // Class derived values are more complicated, they require the linker and loader.
-  ScopedObjectAccess soa(Thread::Current());
-  ClassLinker* class_linker = manager->GetClassLinker();
-  const DexFile& dex_file = *manager->GetDexFile();
-  StackHandleScope<2> hs(soa.Self());
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader())));
-  mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
+    ScopedObjectAccess soa(Thread::Current());
+    ClassLinker* class_linker = manager_->GetClassLinker();
+    const DexFile& dex_file = *manager_->GetDexFile();
+    StackHandleScope<2> hs(soa.Self());
+    Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader())));
+    mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
 
-  if (klass == nullptr) {
-    CHECK(soa.Self()->IsExceptionPending());
-    mirror::Throwable* exception = soa.Self()->GetException();
-    VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
-    if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
-      // There's little point continuing compilation if the heap is exhausted.
-      LOG(FATAL) << "Out of memory during type resolution for compilation";
+    if (klass == nullptr) {
+      soa.Self()->AssertPendingException();
+      mirror::Throwable* exception = soa.Self()->GetException();
+      VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
+      if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
+        // There's little point continuing compilation if the heap is exhausted.
+        LOG(FATAL) << "Out of memory during type resolution for compilation";
+      }
+      soa.Self()->ClearException();
     }
-    soa.Self()->ClearException();
   }
-}
+
+ private:
+  const ParallelCompilationManager* const manager_;
+};
 
 void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file,
                                     const std::vector<const DexFile*>& dex_files,
@@ -1860,17 +1878,18 @@
     // For images we resolve all types, such as array, whereas for applications just those with
     // classdefs are resolved by ResolveClassFieldsAndMethods.
     TimingLogger::ScopedTiming t("Resolve Types", timings);
-    context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
+    ResolveTypeVisitor visitor(&context);
+    context.ForAll(0, dex_file.NumTypeIds(), &visitor, thread_count_);
   }
 
   TimingLogger::ScopedTiming t("Resolve MethodsAndFields", timings);
-  context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
+  ResolveClassFieldsAndMethodsVisitor visitor(&context);
+  context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
 }
 
 void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                                  ThreadPool* thread_pool, TimingLogger* timings) {
-  for (size_t i = 0; i != dex_files.size(); ++i) {
-    const DexFile* dex_file = dex_files[i];
+  for (const DexFile* dex_file : dex_files) {
     CHECK(dex_file != nullptr);
     SetVerifiedDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
   }
@@ -1878,67 +1897,73 @@
 
 void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                             ThreadPool* thread_pool, TimingLogger* timings) {
-  for (size_t i = 0; i != dex_files.size(); ++i) {
-    const DexFile* dex_file = dex_files[i];
+  for (const DexFile* dex_file : dex_files) {
     CHECK(dex_file != nullptr);
     VerifyDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
   }
 }
 
-static void VerifyClass(const ParallelCompilationManager* manager, size_t class_def_index)
-    LOCKS_EXCLUDED(Locks::mutator_lock_) {
-  ATRACE_CALL();
-  ScopedObjectAccess soa(Thread::Current());
-  const DexFile& dex_file = *manager->GetDexFile();
-  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
-  const char* descriptor = dex_file.GetClassDescriptor(class_def);
-  ClassLinker* class_linker = manager->GetClassLinker();
-  jobject jclass_loader = manager->GetClassLoader();
-  StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-  Handle<mirror::Class> klass(
-      hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
-  if (klass.Get() == nullptr) {
-    CHECK(soa.Self()->IsExceptionPending());
-    soa.Self()->ClearException();
+class VerifyClassVisitor : public CompilationVisitor {
+ public:
+  explicit VerifyClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
 
-    /*
-     * At compile time, we can still structurally verify the class even if FindClass fails.
-     * This is to ensure the class is structurally sound for compilation. An unsound class
-     * will be rejected by the verifier and later skipped during compilation in the compiler.
-     */
-    Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
-    std::string error_msg;
-    if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
-                                              &class_def, true, &error_msg) ==
-                                                  verifier::MethodVerifier::kHardFailure) {
-      LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
-                 << " because: " << error_msg;
-      manager->GetCompiler()->SetHadHardVerifierFailure();
-    }
-  } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
-    CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
-    class_linker->VerifyClass(soa.Self(), klass);
-
-    if (klass->IsErroneous()) {
-      // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
+  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+    ATRACE_CALL();
+    ScopedObjectAccess soa(Thread::Current());
+    const DexFile& dex_file = *manager_->GetDexFile();
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    const char* descriptor = dex_file.GetClassDescriptor(class_def);
+    ClassLinker* class_linker = manager_->GetClassLinker();
+    jobject jclass_loader = manager_->GetClassLoader();
+    StackHandleScope<3> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+    Handle<mirror::Class> klass(
+        hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+    if (klass.Get() == nullptr) {
       CHECK(soa.Self()->IsExceptionPending());
       soa.Self()->ClearException();
-      manager->GetCompiler()->SetHadHardVerifierFailure();
+
+      /*
+       * At compile time, we can still structurally verify the class even if FindClass fails.
+       * This is to ensure the class is structurally sound for compilation. An unsound class
+       * will be rejected by the verifier and later skipped during compilation in the compiler.
+       */
+      Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+      std::string error_msg;
+      if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
+                                                &class_def, true, &error_msg) ==
+                                                    verifier::MethodVerifier::kHardFailure) {
+        LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
+                   << " because: " << error_msg;
+        manager_->GetCompiler()->SetHadHardVerifierFailure();
+      }
+    } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
+      CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
+      class_linker->VerifyClass(soa.Self(), klass);
+
+      if (klass->IsErroneous()) {
+        // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
+        CHECK(soa.Self()->IsExceptionPending());
+        soa.Self()->ClearException();
+        manager_->GetCompiler()->SetHadHardVerifierFailure();
+      }
+
+      CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
+          << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
+
+      // It is *very* problematic if there are verification errors in the boot classpath. For example,
+      // we rely on things working OK without verification when the decryption dialog is brought up.
+      // So abort in a debug build if we find this violated.
+      DCHECK(!manager_->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class "
+          << PrettyClass(klass.Get()) << " failed to fully verify.";
     }
-
-    CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
-        << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
-
-    // It is *very* problematic if there are verification errors in the boot classpath. For example,
-    // we rely on things working OK without verification when the decryption dialog is brought up.
-    // So abort in a debug build if we find this violated.
-    DCHECK(!manager->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class " <<
-        PrettyClass(klass.Get()) << " failed to fully verify.";
+    soa.Self()->AssertNoPendingException();
   }
-  soa.Self()->AssertNoPendingException();
-}
+
+ private:
+  const ParallelCompilationManager* const manager_;
+};
 
 void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
                                    const std::vector<const DexFile*>& dex_files,
@@ -1947,48 +1972,56 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
                                      thread_pool);
-  context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
+  VerifyClassVisitor visitor(&context);
+  context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
 }
 
-static void SetVerifiedClass(const ParallelCompilationManager* manager, size_t class_def_index)
-    LOCKS_EXCLUDED(Locks::mutator_lock_) {
-  ATRACE_CALL();
-  ScopedObjectAccess soa(Thread::Current());
-  const DexFile& dex_file = *manager->GetDexFile();
-  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
-  const char* descriptor = dex_file.GetClassDescriptor(class_def);
-  ClassLinker* class_linker = manager->GetClassLinker();
-  jobject jclass_loader = manager->GetClassLoader();
-  StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-  Handle<mirror::Class> klass(
-      hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
-  // Class might have failed resolution. Then don't set it to verified.
-  if (klass.Get() != nullptr) {
-    // Only do this if the class is resolved. If even resolution fails, quickening will go very,
-    // very wrong.
-    if (klass->IsResolved()) {
-      if (klass->GetStatus() < mirror::Class::kStatusVerified) {
-        ObjectLock<mirror::Class> lock(soa.Self(), klass);
-        // Set class status to verified.
-        mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
-        // Mark methods as pre-verified. If we don't do this, the interpreter will run with
-        // access checks.
-        klass->SetPreverifiedFlagOnAllMethods(
-            GetInstructionSetPointerSize(manager->GetCompiler()->GetInstructionSet()));
-        klass->SetPreverified();
+class SetVerifiedClassVisitor : public CompilationVisitor {
+ public:
+  explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+
+  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+    ATRACE_CALL();
+    ScopedObjectAccess soa(Thread::Current());
+    const DexFile& dex_file = *manager_->GetDexFile();
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    const char* descriptor = dex_file.GetClassDescriptor(class_def);
+    ClassLinker* class_linker = manager_->GetClassLinker();
+    jobject jclass_loader = manager_->GetClassLoader();
+    StackHandleScope<3> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+    Handle<mirror::Class> klass(
+        hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+    // Class might have failed resolution. Then don't set it to verified.
+    if (klass.Get() != nullptr) {
+      // Only do this if the class is resolved. If even resolution fails, quickening will go very,
+      // very wrong.
+      if (klass->IsResolved()) {
+        if (klass->GetStatus() < mirror::Class::kStatusVerified) {
+          ObjectLock<mirror::Class> lock(soa.Self(), klass);
+          // Set class status to verified.
+          mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
+          // Mark methods as pre-verified. If we don't do this, the interpreter will run with
+          // access checks.
+          klass->SetPreverifiedFlagOnAllMethods(
+              GetInstructionSetPointerSize(manager_->GetCompiler()->GetInstructionSet()));
+          klass->SetPreverified();
+        }
+        // Record the final class status if necessary.
+        ClassReference ref(manager_->GetDexFile(), class_def_index);
+        manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
       }
-      // Record the final class status if necessary.
-      ClassReference ref(manager->GetDexFile(), class_def_index);
-      manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
+    } else {
+      Thread* self = soa.Self();
+      DCHECK(self->IsExceptionPending());
+      self->ClearException();
     }
-  } else {
-    Thread* self = soa.Self();
-    DCHECK(self->IsExceptionPending());
-    self->ClearException();
   }
-}
+
+ private:
+  const ParallelCompilationManager* const manager_;
+};
 
 void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
                                         const std::vector<const DexFile*>& dex_files,
@@ -1997,99 +2030,107 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
                                      thread_pool);
-  context.ForAll(0, dex_file.NumClassDefs(), SetVerifiedClass, thread_count_);
+  SetVerifiedClassVisitor visitor(&context);
+  context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
 }
 
-static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index)
-    LOCKS_EXCLUDED(Locks::mutator_lock_) {
-  ATRACE_CALL();
-  jobject jclass_loader = manager->GetClassLoader();
-  const DexFile& dex_file = *manager->GetDexFile();
-  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
-  const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
-  const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
+class InitializeClassVisitor : public CompilationVisitor {
+ public:
+  explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
 
-  ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-  Handle<mirror::Class> klass(
-      hs.NewHandle(manager->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
+  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+    ATRACE_CALL();
+    jobject jclass_loader = manager_->GetClassLoader();
+    const DexFile& dex_file = *manager_->GetDexFile();
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
+    const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
 
-  if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
-    // Only try to initialize classes that were successfully verified.
-    if (klass->IsVerified()) {
-      // Attempt to initialize the class but bail if we either need to initialize the super-class
-      // or static fields.
-      manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
-      if (!klass->IsInitialized()) {
-        // We don't want non-trivial class initialization occurring on multiple threads due to
-        // deadlock problems. For example, a parent class is initialized (holding its lock) that
-        // refers to a sub-class in its static/class initializer causing it to try to acquire the
-        // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
-        // after first initializing its parents, whose locks are acquired. This leads to a
-        // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
-        // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
-        // than use a special Object for the purpose we use the Class of java.lang.Class.
-        Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
-        ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
-        // Attempt to initialize allowing initialization of parent classes but still not static
-        // fields.
-        manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<3> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+    Handle<mirror::Class> klass(
+        hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
+
+    if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
+      // Only try to initialize classes that were successfully verified.
+      if (klass->IsVerified()) {
+        // Attempt to initialize the class but bail if we either need to initialize the super-class
+        // or static fields.
+        manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
         if (!klass->IsInitialized()) {
-          // We need to initialize static fields, we only do this for image classes that aren't
-          // marked with the $NoPreloadHolder (which implies this should not be initialized early).
-          bool can_init_static_fields = manager->GetCompiler()->IsImage() &&
-              manager->GetCompiler()->IsImageClass(descriptor) &&
-              !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
-          if (can_init_static_fields) {
-            VLOG(compiler) << "Initializing: " << descriptor;
-            // TODO multithreading support. We should ensure the current compilation thread has
-            // exclusive access to the runtime and the transaction. To achieve this, we could use
-            // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
-            // checks in Thread::AssertThreadSuspensionIsAllowable.
-            Runtime* const runtime = Runtime::Current();
-            Transaction transaction;
+          // We don't want non-trivial class initialization occurring on multiple threads due to
+          // deadlock problems. For example, a parent class is initialized (holding its lock) that
+          // refers to a sub-class in its static/class initializer causing it to try to acquire the
+          // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock)
+          // after first initializing its parents, whose locks are acquired. This leads to a
+          // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
+          // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
+          // than use a special Object for the purpose we use the Class of java.lang.Class.
+          Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
+          ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
+          // Attempt to initialize allowing initialization of parent classes but still not static
+          // fields.
+          manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
+          if (!klass->IsInitialized()) {
+            // We need to initialize static fields, we only do this for image classes that aren't
+            // marked with the $NoPreloadHolder (which implies this should not be initialized early).
+            bool can_init_static_fields = manager_->GetCompiler()->IsImage() &&
+                manager_->GetCompiler()->IsImageClass(descriptor) &&
+                !StringPiece(descriptor).ends_with("$NoPreloadHolder;");
+            if (can_init_static_fields) {
+              VLOG(compiler) << "Initializing: " << descriptor;
+              // TODO multithreading support. We should ensure the current compilation thread has
+              // exclusive access to the runtime and the transaction. To achieve this, we could use
+              // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
+              // checks in Thread::AssertThreadSuspensionIsAllowable.
+              Runtime* const runtime = Runtime::Current();
+              Transaction transaction;
 
-            // Run the class initializer in transaction mode.
-            runtime->EnterTransactionMode(&transaction);
-            const mirror::Class::Status old_status = klass->GetStatus();
-            bool success = manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
-                                                                        true);
-            // TODO we detach transaction from runtime to indicate we quit the transactional
-            // mode which prevents the GC from visiting objects modified during the transaction.
-            // Ensure GC is not run so don't access freed objects when aborting transaction.
+              // Run the class initializer in transaction mode.
+              runtime->EnterTransactionMode(&transaction);
+              const mirror::Class::Status old_status = klass->GetStatus();
+              bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
+                                                                           true);
+              // TODO we detach transaction from runtime to indicate we quit the transactional
+              // mode which prevents the GC from visiting objects modified during the transaction.
+              // Ensure GC is not run so don't access freed objects when aborting transaction.
 
-            ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end");
-            runtime->ExitTransactionMode();
+              ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end");
+              runtime->ExitTransactionMode();
 
-            if (!success) {
-              CHECK(soa.Self()->IsExceptionPending());
-              mirror::Throwable* exception = soa.Self()->GetException();
-              VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
-                  << exception->Dump();
-              std::ostream* file_log = manager->GetCompiler()->
-                  GetCompilerOptions().GetInitFailureOutput();
-              if (file_log != nullptr) {
-                *file_log << descriptor << "\n";
-                *file_log << exception->Dump() << "\n";
+              if (!success) {
+                CHECK(soa.Self()->IsExceptionPending());
+                mirror::Throwable* exception = soa.Self()->GetException();
+                VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
+                    << exception->Dump();
+                std::ostream* file_log = manager_->GetCompiler()->
+                    GetCompilerOptions().GetInitFailureOutput();
+                if (file_log != nullptr) {
+                  *file_log << descriptor << "\n";
+                  *file_log << exception->Dump() << "\n";
+                }
+                soa.Self()->ClearException();
+                transaction.Rollback();
+                CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
               }
-              soa.Self()->ClearException();
-              transaction.Rollback();
-              CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
             }
           }
+          soa.Self()->AssertNoPendingException();
         }
-        soa.Self()->AssertNoPendingException();
       }
+      // Record the final class status if necessary.
+      ClassReference ref(manager_->GetDexFile(), class_def_index);
+      manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
     }
-    // Record the final class status if necessary.
-    ClassReference ref(manager->GetDexFile(), class_def_index);
-    manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
+    // Clear any class not found or verification exceptions.
+    soa.Self()->ClearException();
   }
-  // Clear any class not found or verification exceptions.
-  soa.Self()->ClearException();
-}
+
+ private:
+  const ParallelCompilationManager* const manager_;
+};
 
 void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
                                        const std::vector<const DexFile*>& dex_files,
@@ -2105,7 +2146,8 @@
   } else {
     thread_count = thread_count_;
   }
-  context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count);
+  InitializeClassVisitor visitor(&context);
+  context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count);
 }
 
 void CompilerDriver::InitializeClasses(jobject class_loader,
@@ -2132,101 +2174,108 @@
   VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
 }
 
-void CompilerDriver::CompileClass(const ParallelCompilationManager* manager,
-                                  size_t class_def_index) {
-  ATRACE_CALL();
-  const DexFile& dex_file = *manager->GetDexFile();
-  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
-  ClassLinker* class_linker = manager->GetClassLinker();
-  jobject jclass_loader = manager->GetClassLoader();
-  Thread* self = Thread::Current();
-  {
-    // Use a scoped object access to perform to the quick SkipClass check.
-    const char* descriptor = dex_file.GetClassDescriptor(class_def);
-    ScopedObjectAccess soa(self);
-    StackHandleScope<3> hs(soa.Self());
-    Handle<mirror::ClassLoader> class_loader(
-        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-    Handle<mirror::Class> klass(
-        hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
-    if (klass.Get() == nullptr) {
-      CHECK(soa.Self()->IsExceptionPending());
-      soa.Self()->ClearException();
-    } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+class CompileClassVisitor : public CompilationVisitor {
+ public:
+  explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+
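+  // Negative capability: callers must not hold the mutator lock on entry, since Visit acquires
+  // it itself (via ScopedObjectAccess) and may suspend.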
+  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+    ATRACE_CALL();
+    const DexFile& dex_file = *manager_->GetDexFile();
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    ClassLinker* class_linker = manager_->GetClassLinker();
+    jobject jclass_loader = manager_->GetClassLoader();
+    Thread* self = Thread::Current();
+    {
+      // Use a scoped object access to perform the quick SkipClass check.
+      const char* descriptor = dex_file.GetClassDescriptor(class_def);
+      ScopedObjectAccess soa(self);
+      StackHandleScope<3> hs(soa.Self());
+      Handle<mirror::ClassLoader> class_loader(
+          hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+      Handle<mirror::Class> klass(
+          hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+      if (klass.Get() == nullptr) {
+        CHECK(soa.Self()->IsExceptionPending());
+        soa.Self()->ClearException();
+      } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+        return;
+      }
+    }
+    ClassReference ref(&dex_file, class_def_index);
+    // Skip compiling classes with generic verifier failures, since they will still fail at runtime.
+    if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) {
       return;
     }
-  }
-  ClassReference ref(&dex_file, class_def_index);
-  // Skip compiling classes with generic verifier failures since they will still fail at runtime
-  if (manager->GetCompiler()->verification_results_->IsClassRejected(ref)) {
-    return;
-  }
-  const uint8_t* class_data = dex_file.GetClassData(class_def);
-  if (class_data == nullptr) {
-    // empty class, probably a marker interface
-    return;
-  }
-
-  CompilerDriver* const driver = manager->GetCompiler();
-
-  // Can we run DEX-to-DEX compiler on this class ?
-  DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
-  {
-    ScopedObjectAccess soa(self);
-    StackHandleScope<1> hs(soa.Self());
-    Handle<mirror::ClassLoader> class_loader(
-        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-    dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel(
-        soa.Self(), class_loader, dex_file, class_def);
-  }
-  ClassDataItemIterator it(dex_file, class_data);
-  // Skip fields
-  while (it.HasNextStaticField()) {
-    it.Next();
-  }
-  while (it.HasNextInstanceField()) {
-    it.Next();
-  }
-
-  bool compilation_enabled = driver->IsClassToCompile(
-      dex_file.StringByTypeIdx(class_def.class_idx_));
-
-  // Compile direct methods
-  int64_t previous_direct_method_idx = -1;
-  while (it.HasNextDirectMethod()) {
-    uint32_t method_idx = it.GetMemberIndex();
-    if (method_idx == previous_direct_method_idx) {
-      // smali can create dex files with two encoded_methods sharing the same method_idx
-      // http://code.google.com/p/smali/issues/detail?id=119
-      it.Next();
-      continue;
+    const uint8_t* class_data = dex_file.GetClassData(class_def);
+    if (class_data == nullptr) {
+      // Empty class, probably a marker interface.
+      return;
     }
-    previous_direct_method_idx = method_idx;
-    driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
-                          it.GetMethodInvokeType(class_def), class_def_index,
-                          method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
-                          compilation_enabled);
-    it.Next();
-  }
-  // Compile virtual methods
-  int64_t previous_virtual_method_idx = -1;
-  while (it.HasNextVirtualMethod()) {
-    uint32_t method_idx = it.GetMemberIndex();
-    if (method_idx == previous_virtual_method_idx) {
-      // smali can create dex files with two encoded_methods sharing the same method_idx
-      // http://code.google.com/p/smali/issues/detail?id=119
-      it.Next();
-      continue;
+
+    CompilerDriver* const driver = manager_->GetCompiler();
+
+    // Can we run the DEX-to-DEX compiler on this class?
+    DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
+    {
+      ScopedObjectAccess soa(self);
+      StackHandleScope<1> hs(soa.Self());
+      Handle<mirror::ClassLoader> class_loader(
+          hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+      dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel(
+          soa.Self(), class_loader, dex_file, class_def);
     }
-    previous_virtual_method_idx = method_idx;
-    driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
-                          it.GetMethodInvokeType(class_def), class_def_index,
-                          method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
-                          compilation_enabled);
-    it.Next();
+    ClassDataItemIterator it(dex_file, class_data);
+    // Skip fields
+    while (it.HasNextStaticField()) {
+      it.Next();
+    }
+    while (it.HasNextInstanceField()) {
+      it.Next();
+    }
+
+    bool compilation_enabled = driver->IsClassToCompile(
+        dex_file.StringByTypeIdx(class_def.class_idx_));
+
+    // Compile direct methods
+    int64_t previous_direct_method_idx = -1;
+    while (it.HasNextDirectMethod()) {
+      uint32_t method_idx = it.GetMemberIndex();
+      if (method_idx == previous_direct_method_idx) {
+        // smali can create dex files with two encoded_methods sharing the same method_idx
+        // http://code.google.com/p/smali/issues/detail?id=119
+        it.Next();
+        continue;
+      }
+      previous_direct_method_idx = method_idx;
+      driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+                            it.GetMethodInvokeType(class_def), class_def_index,
+                            method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+                            compilation_enabled);
+      it.Next();
+    }
+    // Compile virtual methods
+    int64_t previous_virtual_method_idx = -1;
+    while (it.HasNextVirtualMethod()) {
+      uint32_t method_idx = it.GetMemberIndex();
+      if (method_idx == previous_virtual_method_idx) {
+        // smali can create dex files with two encoded_methods sharing the same method_idx
+        // http://code.google.com/p/smali/issues/detail?id=119
+        it.Next();
+        continue;
+      }
+      previous_virtual_method_idx = method_idx;
+      driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+                            it.GetMethodInvokeType(class_def), class_def_index,
+                            method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+                            compilation_enabled);
+      it.Next();
+    }
+    DCHECK(!it.HasNext());
   }
-  DCHECK(!it.HasNext());
-}
+
+ private:
+  const ParallelCompilationManager* const manager_;
+};
 
 void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
                                     const std::vector<const DexFile*>& dex_files,
@@ -2234,7 +2283,8 @@
   TimingLogger::ScopedTiming t("Compile Dex File", timings);
   ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
                                      &dex_file, dex_files, thread_pool);
-  context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
+  CompileClassVisitor visitor(&context);
+  context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_);
 }
 
 // Does the runtime for the InstructionSet provide an implementation returned by
@@ -2453,7 +2503,7 @@
                               const std::vector<const art::DexFile*>& dex_files,
                               OatWriter* oat_writer,
                               art::File* file)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (kProduce64BitELFFiles && Is64BitInstructionSet(GetInstructionSet())) {
     return art::ElfWriterQuick64::Create(file, oat_writer, dex_files, android_root, is_host, *this);
   } else {
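
Aside: the visitor classes above replace the old static callbacks passed to
ForAll because the thread-safety annotations now attach to the Visit override
itself. A minimal sketch of the assumed interface (the real definition lives
elsewhere in compiler_driver.cc; shape inferred from the call sites above):

    #include <cstddef>

    // Sketch only, for illustration.
    class CompilationVisitor {
     public:
      virtual ~CompilationVisitor() {}
      // Called once per class_def index, potentially from a worker thread of
      // the ParallelCompilationManager's thread pool; each override carries
      // its own annotations, e.g. REQUIRES(!Locks::mutator_lock_).
      virtual void Visit(size_t index) = 0;
    };
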
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 5cf4044..88e03a2 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -114,14 +114,15 @@
 
   void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                   TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
 
   CompiledMethod* CompileMethod(Thread* self, ArtMethod*)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED;
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!compiled_methods_lock_) WARN_UNUSED;
 
   // Compile a single Method.
   void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_);
 
   VerificationResults* GetVerificationResults() const {
     return verification_results_;
@@ -162,54 +163,56 @@
 
   // Generate the trampolines that are invoked by unresolved direct methods.
   const std::vector<uint8_t>* CreateInterpreterToInterpreterBridge() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateInterpreterToCompiledCodeBridge() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateJniDlsymLookup() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateQuickToInterpreterBridge() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   CompiledClass* GetCompiledClass(ClassReference ref) const
-      LOCKS_EXCLUDED(compiled_classes_lock_);
+      REQUIRES(!compiled_classes_lock_);
 
   CompiledMethod* GetCompiledMethod(MethodReference ref) const
-      LOCKS_EXCLUDED(compiled_methods_lock_);
+      REQUIRES(!compiled_methods_lock_);
   size_t GetNonRelativeLinkerPatchCount() const
-      LOCKS_EXCLUDED(compiled_methods_lock_);
+      REQUIRES(!compiled_methods_lock_);
 
   // Remove and delete a compiled method.
-  void RemoveCompiledMethod(const MethodReference& method_ref);
+  void RemoveCompiledMethod(const MethodReference& method_ref) REQUIRES(!compiled_methods_lock_);
 
   void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
-                                     uint16_t class_def_index);
+                                     uint16_t class_def_index)
+      REQUIRES(!freezing_constructor_lock_);
   bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
-                                  uint16_t class_def_index) const;
+                                  uint16_t class_def_index) const
+      REQUIRES(!freezing_constructor_lock_);
 
   // Callbacks from compiler to see what runtime checks must be generated.
 
   bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx);
 
   bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   // Are runtime access checks necessary in the compiled code?
   bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
                                   uint32_t type_idx, bool* type_known_final = nullptr,
                                   bool* type_known_abstract = nullptr,
                                   bool* equals_referrers_class = nullptr)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   // Are runtime access and instantiable checks necessary in the code?
   bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
                                               uint32_t type_idx)
-     LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
                           bool* is_type_initialized, bool* use_direct_type_ptr,
@@ -223,22 +226,22 @@
 
   // Get the DexCache for the compilation unit (mUnit).
   mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolve compiling method's class. Returns null on failure.
   mirror::Class* ResolveCompilingMethodsClass(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Class* ResolveClass(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, uint16_t type_index,
       const DexCompilationUnit* mUnit)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolve a field. Returns null on failure, including incompatible class change.
   // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
@@ -246,40 +249,40 @@
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
       uint32_t field_idx, bool is_static)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolve a field with a given dex file.
   ArtField* ResolveFieldWithDexFile(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
       uint32_t field_idx, bool is_static)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get declaration location of a resolved field.
   void GetResolvedFieldDexFileLocation(
       ArtField* resolved_field, const DexFile** declaring_dex_file,
       uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsFieldVolatile(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  MemberOffset GetFieldOffset(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
+  MemberOffset GetFieldOffset(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Find a dex cache for a dex file.
   inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
   std::pair<bool, bool> IsFastInstanceField(
       mirror::DexCache* dex_cache, mirror::Class* referrer_class,
       ArtField* resolved_field, uint16_t field_idx)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
   // of the declaring class in the referrer's dex file.
   std::pair<bool, bool> IsFastStaticField(
       mirror::DexCache* dex_cache, mirror::Class* referrer_class,
       ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Return whether the declaring class of `resolved_method` is
   // available to `referrer_class`. If this is true, compute the type
@@ -291,34 +294,34 @@
                                                 ArtMethod* resolved_method,
                                                 uint16_t method_idx,
                                                 uint32_t* storage_index)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Is the static field in the referrer's class?
   bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Is the static field's class initialized?
   bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
                                       ArtField* resolved_field)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolve a method. Returns null on failure, including incompatible class change.
   ArtMethod* ResolveMethod(
       ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
       uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get declaration location of a resolved method.
   void GetResolvedMethodDexFileLocation(
       ArtMethod* resolved_method, const DexFile** declaring_dex_file,
       uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the index in the vtable of the method.
   uint16_t GetResolvedMethodVTableIndex(
       ArtMethod* resolved_method, InvokeType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
   // for ProcessedInvoke() and computes the necessary lowering info.
@@ -328,13 +331,13 @@
       mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
       MethodReference* target_method, const MethodReference* devirt_target,
       uintptr_t* direct_code, uintptr_t* direct_method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Is method's class initialized for an invoke?
   // For static invokes to determine whether we need to consider potential call to <clinit>().
   // For non-static invokes, assuming a non-null reference, the class is always initialized.
   bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the layout of dex cache arrays for a dex file. Returns invalid layout if the
   // dex cache arrays don't have a fixed layout.
@@ -349,18 +352,18 @@
                         ArtField** resolved_field,
                         mirror::Class** referrer_class,
                         mirror::DexCache** dex_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can we fast path instance field access? Computes field's offset and volatility.
   bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
                                 MemberOffset* field_offset, bool* is_volatile)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   ArtField* ComputeInstanceFieldInfo(uint32_t field_idx,
                                              const DexCompilationUnit* mUnit,
                                              bool is_put,
                                              const ScopedObjectAccess& soa)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
 
   // Can we fastpath static field access? Computes field's offset, volatility and whether the
@@ -369,7 +372,7 @@
                               MemberOffset* field_offset, uint32_t* storage_index,
                               bool* is_referrers_class, bool* is_volatile, bool* is_initialized,
                               Primitive::Type* type)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   // Can we fastpath an interface, super class or virtual method call? Computes the method's vtable
   // index.
@@ -377,7 +380,7 @@
                          bool update_stats, bool enable_devirtualization,
                          InvokeType* type, MethodReference* target_method, int* vtable_idx,
                          uintptr_t* direct_code, uintptr_t* direct_method)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
   bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc);
@@ -445,7 +448,7 @@
   bool IsMethodToCompile(const MethodReference& method_ref) const;
 
   void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
-      LOCKS_EXCLUDED(compiled_classes_lock_);
+      REQUIRES(!compiled_classes_lock_);
 
   // Checks if the specified method has been verified without failures. Returns
   // false if the method is not in the verification results (GetVerificationResults).
@@ -487,7 +490,7 @@
                                                                  ArtMember* resolved_member,
                                                                  uint16_t member_idx,
                                                                  uint32_t* storage_index)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can `referrer_class` access the resolved `member`?
   // Dispatch call to mirror::Class::CanAccessResolvedField or
@@ -499,17 +502,17 @@
                                       ArtMember* member,
                                       mirror::DexCache* dex_cache,
                                       uint32_t field_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can we assume that the klass is initialized?
   bool CanAssumeClassIsInitialized(mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can we assume that the klass is loaded?
   bool CanAssumeClassIsLoaded(mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
   // The only external contract is that an unresolved method has flags 0 and a resolved one non-0.
@@ -540,71 +543,68 @@
                                      /*out*/int* stats_flags,
                                      MethodReference* target_method,
                                      uintptr_t* direct_code, uintptr_t* direct_method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   DexToDexCompilationLevel GetDexToDexCompilationlevel(
       Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
-      const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      const DexFile::ClassDef& class_def) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                   ThreadPool* thread_pool, TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
 
-  void LoadImageClasses(TimingLogger* timings);
+  void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
 
   // Attempt to resolve all types, methods, fields, and strings
   // referenced from code in the dex file following PathClassLoader
   // ordering semantics.
   void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                ThreadPool* thread_pool, TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
   void ResolveDexFile(jobject class_loader, const DexFile& dex_file,
                       const std::vector<const DexFile*>& dex_files,
                       ThreadPool* thread_pool, TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
               ThreadPool* thread_pool, TimingLogger* timings);
   void VerifyDexFile(jobject class_loader, const DexFile& dex_file,
                      const std::vector<const DexFile*>& dex_files,
                      ThreadPool* thread_pool, TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   void SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                    ThreadPool* thread_pool, TimingLogger* timings);
   void SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
                           const std::vector<const DexFile*>& dex_files,
                           ThreadPool* thread_pool, TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                          ThreadPool* thread_pool, TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
   void InitializeClasses(jobject class_loader, const DexFile& dex_file,
                          const std::vector<const DexFile*>& dex_files,
                          ThreadPool* thread_pool, TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
+      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
 
-  void UpdateImageClasses(TimingLogger* timings) LOCKS_EXCLUDED(Locks::mutator_lock_);
+  void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
   static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                ThreadPool* thread_pool, TimingLogger* timings);
   void CompileDexFile(jobject class_loader, const DexFile& dex_file,
                       const std::vector<const DexFile*>& dex_files,
                       ThreadPool* thread_pool, TimingLogger* timings)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
   void CompileMethod(Thread* self, const DexFile::CodeItem* code_item, uint32_t access_flags,
                      InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
                      jobject class_loader, const DexFile& dex_file,
                      DexToDexCompilationLevel dex_to_dex_compilation_level,
                      bool compilation_enabled)
-      LOCKS_EXCLUDED(compiled_methods_lock_);
-
-  static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!compiled_methods_lock_);
 
   // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first
   // as other fields rely on this.
@@ -776,6 +776,7 @@
   DedupeSet<ArrayRef<const uint8_t>,
             SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_cfi_info_;
 
+  friend class CompileClassVisitor;
   DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
 };
 
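Aside: the renames above are mechanical, but the LOCKS_EXCLUDED ->
REQUIRES(!...) change is semantic. A sketch of how the macros are assumed to
map onto clang attributes (names per the clang thread-safety documentation;
ART's real definitions live in its macros header):

    // Condensed mapping, for illustration only.
    #define REQUIRES(...) \
        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) \
        __attribute__((requires_shared_capability(__VA_ARGS__)))

LOCKS_EXCLUDED(mu) merely documented that mu must not be held and was checked
only at direct call sites. REQUIRES(!mu) is a negative capability: with
-Wthread-safety-negative enabled, a function that acquires mu must declare
REQUIRES(!mu), and that obligation propagates to its callers, so potential
self-deadlock is flagged at compile time.
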
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index b358f4f..e35d07d 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -37,7 +37,7 @@
 
 class CompilerDriverTest : public CommonCompilerTest {
  protected:
-  void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) {
+  void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
     TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
     TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
     compiler_driver_->CompileAll(class_loader,
@@ -49,7 +49,7 @@
 
   void EnsureCompiled(jobject class_loader, const char* class_name, const char* method,
                       const char* signature, bool is_virtual)
-      LOCKS_EXCLUDED(Locks::mutator_lock_) {
+      REQUIRES(!Locks::mutator_lock_) {
     CompileAll(class_loader);
     Thread::Current()->TransitionFromSuspendedToRunnable();
     bool started = runtime_->Start();
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index 8e13b51..03f8ceb 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -57,7 +57,7 @@
                      const std::vector<const DexFile*>& dex_files,
                      const std::string& android_root,
                      bool is_host)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   const CompilerDriver* const compiler_driver_;
   File* const elf_file_;
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index fd202ee..83781ab 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -33,7 +33,7 @@
                      const std::string& android_root,
                      bool is_host,
                      const CompilerDriver& driver)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void EncodeOatPatches(const std::vector<uintptr_t>& locations,
                                std::vector<uint8_t>* buffer);
@@ -44,7 +44,7 @@
              const std::string& android_root,
              bool is_host)
       OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   ElfWriterQuick(const CompilerDriver& driver, File* elf_file)
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 2b65aa9..3ba1415 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -73,7 +73,7 @@
 static constexpr bool kComputeEagerResolvedStrings = false;
 
 static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Class* klass = obj->GetClass();
   CHECK_NE(PrettyClass(klass), "com.android.dex.Dex");
 }
@@ -1035,7 +1035,7 @@
   }
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       *roots[i] = ImageAddress(*roots[i]);
     }
@@ -1043,7 +1043,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr()));
     }
@@ -1052,7 +1052,7 @@
  private:
   ImageWriter* const image_writer_;
 
-  mirror::Object* ImageAddress(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
     const size_t offset = image_writer_->GetImageOffset(obj);
     auto* const dest = reinterpret_cast<Object*>(image_writer_->image_begin_ + offset);
     VLOG(compiler) << "Update root from " << obj << " to " << dest;
@@ -1190,7 +1190,7 @@
   }
 
   void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
     // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
     // image.
@@ -1200,8 +1200,8 @@
 
   // java.lang.ref.Reference visitor.
   void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
         mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent()));
   }
@@ -1217,15 +1217,15 @@
   }
 
   void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     DCHECK(obj->IsClass());
     FixupVisitor::operator()(obj, offset, /*is_static*/false);
   }
 
   void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
                   mirror::Reference* ref ATTRIBUTE_UNUSED) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     LOG(FATAL) << "Reference not expected here.";
   }
 };
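
Aside: plain REQUIRES asserts exclusive access, so EXCLUSIVE_LOCKS_REQUIRED(a, b)
maps directly onto REQUIRES(a, b), as in the fixup operators above. A condensed
illustration (FixupOneReference is a hypothetical name):

    // Every call site must hold both locks exclusively; the analysis checks
    // this at each call site via the callers' own annotations.
    void FixupOneReference(mirror::Object* obj, MemberOffset offset)
        REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
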
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 1523383..42b1cbf 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -69,15 +69,15 @@
   }
 
   template <typename T>
-  T* GetImageAddress(T* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
     return object == nullptr ? nullptr :
         reinterpret_cast<T*>(image_begin_ + GetImageOffset(object));
   }
 
-  ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::HeapReference<mirror::Object>* GetDexCacheArrayElementImageAddress(
-      const DexFile* dex_file, uint32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      const DexFile* dex_file, uint32_t offset) const SHARED_REQUIRES(Locks::mutator_lock_) {
     auto it = dex_cache_array_starts_.find(dex_file);
     DCHECK(it != dex_cache_array_starts_.end());
     return reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
@@ -88,7 +88,7 @@
 
   bool Write(const std::string& image_filename, const std::string& oat_filename,
              const std::string& oat_location)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   uintptr_t GetOatDataBegin() {
     return reinterpret_cast<uintptr_t>(oat_data_begin_);
@@ -98,7 +98,7 @@
   bool AllocMemory();
 
   // Mark the objects defined in this space in the given live bitmap.
-  void RecordImageAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Classify different kinds of bins that objects end up getting packed into during image writing.
   enum Bin {
@@ -165,32 +165,32 @@
 
   // We use the lock word to store the offset of the object in the image.
   void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void SetImageOffset(mirror::Object* object, size_t offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsImageOffsetAssigned(mirror::Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
   void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void PrepareDexCacheArraySlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void AssignImageBinSlot(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
+  void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
   void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsImageBinSlotAssigned(mirror::Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
   }
 
   mirror::Object* GetLocalAddress(mirror::Object* object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     size_t offset = GetImageOffset(object);
     uint8_t* dst = image_->Begin() + offset;
     return reinterpret_cast<mirror::Object*>(dst);
@@ -209,74 +209,74 @@
   }
 
   // Returns true if the class was in the original requested image classes list.
-  bool IsImageClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Debug aid that lists the requested image classes.
   void DumpImageClasses();
 
   // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
   void ComputeLazyFieldsForImageClasses()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Wire dex cache resolved strings to strings in the image to avoid runtime resolution.
-  void ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ComputeEagerResolvedStrings() SHARED_REQUIRES(Locks::mutator_lock_);
   static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Remove unwanted classes from various roots.
-  void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
   static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Verify unwanted classes removed.
-  void CheckNonImageClassesRemoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
   static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Lays out where the image objects will be at runtime.
   void CalculateNewObjectOffsets()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void CreateHeader(size_t oat_loaded_size, size_t oat_data_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::ObjectArray<mirror::Object>* CreateImageRoots() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void CalculateObjectBinSlots(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void UnbinObjectsIntoOffset(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void WalkFieldsInOrder(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void WalkFieldsCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Creates the contiguous image in memory and adjusts pointers.
-  void CopyAndFixupNativeData() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CopyAndFixupObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CopyAndFixupNativeData() SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
   static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CopyAndFixupObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupClass(mirror::Class* orig, mirror::Class* copy)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupObject(mirror::Object* orig, mirror::Object* copy)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass,
-                         Bin array_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                         Bin array_type) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get quick code for non-resolution/imt_conflict/abstract method.
   const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   const uint8_t* GetQuickEntryPoint(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Patches references in OatFile to expect runtime addresses.
   void SetOatChecksumFromElfFile(File* elf_file);
@@ -285,10 +285,10 @@
   size_t GetBinSizeSum(Bin up_to = kBinSize) const;
 
   // Return true if a method is likely to be dirtied at runtime.
-  bool WillMethodBeDirty(ArtMethod* m) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Assign the offset for an ArtMethod.
-  void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_REQUIRES(Locks::mutator_lock_);
 
   const CompilerDriver& compiler_driver_;
 
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index a122ceb..d70211f 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -55,7 +55,7 @@
 }
 
 extern "C" bool jit_compile_method(void* handle, ArtMethod* method, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
   DCHECK(jit_compiler != nullptr);
   return jit_compiler->CompileMethod(self, method);
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index b0010e0..ef68caa 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -38,11 +38,11 @@
   static JitCompiler* Create();
   virtual ~JitCompiler();
   bool CompileMethod(Thread* self, ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // This is in the compiler since the runtime doesn't have access to the compiled method
   // structures.
   bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
-                      OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                      OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
   CompilerCallbacks* GetCompilerCallbacks() const;
   size_t GetTotalCompileTime() const {
     return total_time_;
@@ -63,7 +63,7 @@
       const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
       const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
   bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(JitCompiler);
 };
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 0747756..c98a5f8 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -44,7 +44,7 @@
   void CheckMethod(ArtMethod* method,
                    const OatFile::OatMethod& oat_method,
                    const DexFile& dex_file)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const CompiledMethod* compiled_method =
         compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
                                                             method->GetDexMethodIndex()));
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 4318ea5..64e7487 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -365,7 +365,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -560,7 +560,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -601,7 +601,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -665,7 +665,7 @@
   }
 
   bool StartClass(const DexFile* dex_file, size_t class_def_index)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     OatDexMethodVisitor::StartClass(dex_file, class_def_index);
     if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
       dex_cache_ = class_linker_->FindDexCache(*dex_file);
@@ -673,7 +673,7 @@
     return true;
   }
 
-  bool EndClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool EndClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     bool result = OatDexMethodVisitor::EndClass();
     if (oat_class_index_ == writer_->oat_classes_.size()) {
       DCHECK(result);  // OatDexMethodVisitor::EndClass() never fails.
@@ -687,7 +687,7 @@
   }
 
   bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -793,7 +793,7 @@
   }
 
   ArtMethod* GetTargetMethod(const LinkerPatch& patch)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     MethodReference ref = patch.TargetMethod();
     mirror::DexCache* dex_cache =
         (dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(*ref.dex_file);
@@ -803,7 +803,7 @@
     return method;
   }
 
-  uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
     auto target_it = writer_->method_offset_map_.map.find(patch.TargetMethod());
     uint32_t target_offset =
         (target_it != writer_->method_offset_map_.map.end()) ? target_it->second : 0u;
@@ -828,7 +828,7 @@
   }
 
   mirror::Class* GetTargetType(const LinkerPatch& patch)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile())
         ? dex_cache_ : class_linker_->FindDexCache(*patch.TargetTypeDexFile());
     mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
@@ -836,7 +836,7 @@
     return type;
   }
 
-  uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
     if (writer_->image_writer_ != nullptr) {
       auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress(
               patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
@@ -849,7 +849,7 @@
   }
 
   void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
     // type pointers across oat files do. (TODO: Investigate why.)
     if (writer_->image_writer_ != nullptr) {
@@ -865,7 +865,7 @@
   }
 
   void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
     // type pointers across oat files do. (TODO: Investigate why.)
     if (writer_->image_writer_ != nullptr) {
@@ -882,7 +882,7 @@
   }
 
   void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t address = writer_->image_writer_ == nullptr ? target_offset :
         PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
                               writer_->oat_data_offset_ + target_offset);
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 82b9377..3baf438 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -165,9 +165,9 @@
   size_t InitOatClasses(size_t offset);
   size_t InitOatMaps(size_t offset);
   size_t InitOatCode(size_t offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   size_t InitOatCodeDexFiles(size_t offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool WriteTables(OutputStream* out, const size_t file_offset);
   size_t WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index cea7dd9..c185b58 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -86,7 +86,7 @@
 }
 
 static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return method->IsFinal() || method->GetDeclaringClass()->IsFinal();
 }
 
@@ -96,7 +96,7 @@
  * Return nullptr if the runtime target cannot be proven.
  */
 static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (IsMethodOrDeclaringClassFinal(resolved_method)) {
     // No need to lookup further, the resolved method will be the target.
     return resolved_method;
@@ -164,7 +164,7 @@
 static uint32_t FindMethodIndexIn(ArtMethod* method,
                                   const DexFile& dex_file,
                                   uint32_t referrer_index)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (method->GetDexFile()->GetLocation().compare(dex_file.GetLocation()) == 0) {
     return method->GetDexMethodIndex();
   } else {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index f7a8486..7f446d4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1461,7 +1461,7 @@
   typedef Handle<mirror::Class> TypeHandle;
 
   static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (type_handle->IsObjectClass()) {
       // Override the type handle to be consistent with the case when we get to
       // Top but don't have the Object class available. It avoids having to guess
@@ -1478,13 +1478,13 @@
 
   bool IsExact() const { return is_exact_; }
   bool IsTop() const { return is_top_; }
-  bool IsInterface() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return !IsTop() && GetTypeHandle()->IsInterface();
   }
 
   Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
 
-  bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
     if (IsTop()) {
       // Top (the equivalent of java.lang.Object) is a supertype of anything.
       return true;
@@ -1499,7 +1499,7 @@
   // Returns true if the type information provides the same amount of detail.
   // Note that it does not mean that the instructions have the same actual type
   // (e.g. tops are equal but they can be the result of a merge).
-  bool IsEqual(ReferenceTypeInfo rti) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) {
     if (IsExact() != rti.IsExact()) {
       return false;
     }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index b8ce04e..1c0123e 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -256,7 +256,7 @@
   }
 
   uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
         InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
   }
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 17cfed4..11f5ac9 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -42,8 +42,8 @@
  private:
   void VisitPhi(HPhi* phi);
   void VisitBasicBlock(HBasicBlock* block);
-  void UpdateBoundType(HBoundType* bound_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void UpdatePhi(HPhi* phi) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_);
+  void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_);
   void BoundTypeForIfNotNull(HBasicBlock* block);
   void BoundTypeForIfInstanceOf(HBasicBlock* block);
   void ProcessWorklist();
@@ -54,7 +54,7 @@
   bool UpdateReferenceTypeInfo(HInstruction* instr);
 
   ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   StackHandleScopeCollection* handles_;
 
diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h
index bdab279..9fb2245 100644
--- a/compiler/trampolines/trampoline_compiler.h
+++ b/compiler/trampolines/trampoline_compiler.h
@@ -27,10 +27,10 @@
 // Create code that will invoke the function held in thread local storage.
 const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi,
                                                ThreadOffset<4> entry_point_offset)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi,
                                                ThreadOffset<8> entry_point_offset)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 }  // namespace art
 
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 325ee4f..42ed881 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -143,7 +143,6 @@
     LOG(ERROR) << "Unable to mmap new swap file chunk.";
     LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size;
     LOG(ERROR) << "Free list:";
-    MutexLock lock(Thread::Current(), lock_);
     DumpFreeMap(free_by_size_);
     LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_);
     LOG(FATAL) << "Aborting...";
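
Note: the MutexLock removed above was more than redundant. NewFileChunk() is
reached from the allocation path with lock_ already held, so re-acquiring
lock_ in this error path would self-deadlock (or abort) on a non-recursive
mutex. The REQUIRES(lock_) annotation added to NewFileChunk() in swap_space.h
below turns that implicit precondition into one the analysis checks; see the
sketch after that hunk.
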
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index 691df4a..f7c772d 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -60,15 +60,15 @@
  public:
   SwapSpace(int fd, size_t initial_size);
   ~SwapSpace();
-  void* Alloc(size_t size) LOCKS_EXCLUDED(lock_);
-  void Free(void* ptr, size_t size) LOCKS_EXCLUDED(lock_);
+  void* Alloc(size_t size) REQUIRES(!lock_);
+  void Free(void* ptr, size_t size) REQUIRES(!lock_);
 
   size_t GetSize() {
     return size_;
   }
 
  private:
-  SpaceChunk NewFileChunk(size_t min_size);
+  SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
 
   int fd_;
   size_t size_;
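
The swap_space.h hunk shows both capability flavours side by side: the public
entry points carry the negative capability (callers must not hold lock_),
while the private helper carries the positive one (lock_ must already be
held). A minimal self-contained sketch of the pattern, with hypothetical
names rather than the real SwapSpace code; compile with
-Wthread-safety -Wthread-safety-negative:

  #include <cstddef>

  #define CAPABILITY(x)  __attribute__((capability(x)))
  #define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
  #define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
  #define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))

  class CAPABILITY("mutex") Mutex {
   public:
    void Lock() ACQUIRE() {}
    void Unlock() RELEASE() {}
  };

  class Pool {
   public:
    // Negative capability: every caller must be able to prove it does not
    // already hold lock_.
    void* Alloc(size_t size) REQUIRES(!lock_) {
      lock_.Lock();
      void* chunk = NewChunk(size);  // OK: lock_ is held at this point.
      lock_.Unlock();
      return chunk;
    }

   private:
    // Positive capability: lock_ is held on entry, so taking lock_ again
    // inside this function (as the old swap_space.cc error path did) is
    // flagged as a double acquire.
    void* NewChunk(size_t size) REQUIRES(lock_) { return nullptr; }

    Mutex lock_;
  };

With -Wthread-safety-negative the !lock_ requirement propagates: a function
calling Alloc() must itself be annotated REQUIRES(!lock_) or have visibly
released lock_, which is what surfaces latent double-acquire bugs like the
one removed above.
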
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 74ec2ed..bffb3b5 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1660,7 +1660,7 @@
 
   // Let the ImageWriter write the image file. If we do not compile PIC, also fix up the oat file.
   bool CreateImageFile()
-      LOCKS_EXCLUDED(Locks::mutator_lock_) {
+      REQUIRES(!Locks::mutator_lock_) {
     CHECK(image_writer_ != nullptr);
     if (!image_writer_->Write(image_filename_, oat_unstripped_, oat_location_)) {
       LOG(ERROR) << "Failed to create image file " << image_filename_;
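
Note: the same LOCKS_EXCLUDED -> REQUIRES(!Locks::mutator_lock_)
strengthening applies here. LOCKS_EXCLUDED only warned when the analysis
could see the lock held at a call site; the negative capability also
propagates the requirement to callers once -Wthread-safety-negative is on.
CreateImageFile() must be entered without the mutator lock, presumably
because the image-writing path acquires it internally.
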
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index dce5206..304d4e5 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -56,7 +56,7 @@
         image_location_(image_location),
         image_diff_pid_(image_diff_pid) {}
 
-  bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) {
     std::ostream& os = *os_;
     os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
 
@@ -92,7 +92,7 @@
     return str.substr(idx + 1);
   }
 
-  bool DumpImageDiff(pid_t image_diff_pid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool DumpImageDiff(pid_t image_diff_pid) SHARED_REQUIRES(Locks::mutator_lock_) {
     std::ostream& os = *os_;
 
     {
@@ -140,7 +140,7 @@
 
     // Look at /proc/$pid/mem and only diff the regions found there.
   bool DumpImageDiffMap(pid_t image_diff_pid, const backtrace_map_t& boot_map)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
     std::ostream& os = *os_;
     const size_t pointer_size = InstructionSetPointerSize(
         Runtime::Current()->GetInstructionSet());
@@ -683,7 +683,7 @@
   }
 
   static std::string GetClassDescriptor(mirror::Class* klass)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK(klass != nullptr);
 
     std::string descriptor;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 9325454..b8b6a5f 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -499,7 +499,7 @@
     return oat_file_.GetOatHeader().GetInstructionSet();
   }
 
-  const void* GetQuickOatCode(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const void* GetQuickOatCode(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
     for (size_t i = 0; i < oat_dex_files_.size(); i++) {
       const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
       CHECK(oat_dex_file != nullptr);
@@ -1462,7 +1462,7 @@
         image_header_(image_header),
         oat_dumper_options_(oat_dumper_options) {}
 
-  bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) {
     std::ostream& os = *os_;
     std::ostream& indent_os = vios_.Stream();
 
@@ -1664,7 +1664,7 @@
 
  private:
   static void PrettyObjectValue(std::ostream& os, mirror::Class* type, mirror::Object* value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK(type != nullptr);
     if (value == nullptr) {
       os << StringPrintf("null   %s\n", PrettyDescriptor(type).c_str());
@@ -1681,7 +1681,7 @@
   }
 
   static void PrintField(std::ostream& os, ArtField* field, mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     os << StringPrintf("%s: ", field->GetName());
     switch (field->GetTypeAsPrimitiveType()) {
       case Primitive::kPrimLong:
@@ -1734,7 +1734,7 @@
   }
 
   static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Class* super = klass->GetSuperClass();
     if (super != nullptr) {
       DumpFields(os, obj, super);
@@ -1750,7 +1750,7 @@
   }
 
   const void* GetQuickOatCodeBegin(ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize(
         InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet()));
     if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
@@ -1763,7 +1763,7 @@
   }
 
   uint32_t GetQuickOatCodeSize(ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const uint32_t* oat_code_begin = reinterpret_cast<const uint32_t*>(GetQuickOatCodeBegin(m));
     if (oat_code_begin == nullptr) {
       return 0;
@@ -1772,7 +1772,7 @@
   }
 
   const void* GetQuickOatCodeEnd(ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const uint8_t* oat_code_begin = reinterpret_cast<const uint8_t*>(GetQuickOatCodeBegin(m));
     if (oat_code_begin == nullptr) {
       return nullptr;
@@ -1780,7 +1780,7 @@
     return oat_code_begin + GetQuickOatCodeSize(m);
   }
 
-  static void Callback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
     DCHECK(arg != nullptr);
     ImageDumper* state = reinterpret_cast<ImageDumper*>(arg);
@@ -1882,7 +1882,7 @@
   }
 
   void DumpMethod(ArtMethod* method, ImageDumper* state, std::ostream& indent_os)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(method != nullptr);
     const auto image_pointer_size =
         InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
@@ -2070,7 +2070,7 @@
     }
 
     void DumpOutliers(std::ostream& os)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+        SHARED_REQUIRES(Locks::mutator_lock_) {
       size_t sum_of_sizes = 0;
       size_t sum_of_sizes_squared = 0;
       size_t sum_of_expansion = 0;
@@ -2171,7 +2171,7 @@
     }
 
     void Dump(std::ostream& os, std::ostream& indent_os)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+        SHARED_REQUIRES(Locks::mutator_lock_) {
       {
         os << "art_file_bytes = " << PrettySize(file_bytes) << "\n\n"
            << "art_file_bytes = header_bytes + object_bytes + alignment_bytes\n";
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index dbd1d23..1ed6597 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -445,7 +445,7 @@
   }
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
     }
@@ -453,7 +453,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
     }
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 23abca8..6da516c 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -94,16 +94,16 @@
                                         bool new_oat_out);  // Output oat was newly created?
 
   static void BitmapCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     reinterpret_cast<PatchOat*>(arg)->VisitObject(obj);
   }
 
   void VisitObject(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupMethod(ArtMethod* object, ArtMethod* copy)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupNativePointerArray(mirror::PointerArray* object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool InHeap(mirror::Object*);
 
   // Patches oat in place, modifying the oat_file given to the constructor.
@@ -113,13 +113,13 @@
   template <typename ElfFileImpl>
   bool PatchOatHeader(ElfFileImpl* oat_file);
 
-  bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void PatchArtFields(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void PatchArtMethods(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool PatchImage() SHARED_REQUIRES(Locks::mutator_lock_);
+  void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
+  void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchInternedStrings(const ImageHeader* image_header)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool WriteElf(File* out);
   bool WriteImage(File* out);
@@ -177,10 +177,10 @@
     PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {}
     ~PatchVisitor() {}
     void operator() (mirror::Object* obj, MemberOffset off, bool b) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
     // For reference classes.
     void operator() (mirror::Class* cls, mirror::Reference* ref) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
   private:
     PatchOat* const patcher_;
     mirror::Object* const copy_;
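
REQUIRES takes any number of capabilities, so EXCLUSIVE_LOCKS_REQUIRED(a, b)
maps directly onto REQUIRES(a, b): both locks must be held exclusively at
every call site. A short self-contained sketch with hypothetical names:

  #define CAPABILITY(x) __attribute__((capability(x)))
  #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))

  class CAPABILITY("mutex") Mutex {};

  Mutex mutator_lock_;
  Mutex heap_bitmap_lock_;

  struct Object;

  // Every caller must hold both capabilities exclusively.
  void PatchReference(Object* obj) REQUIRES(mutator_lock_, heap_bitmap_lock_);
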
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index a58aecb..77bb5c8 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -35,7 +35,7 @@
 
   void Reset() OVERRIDE;
 
-  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetSP(uintptr_t new_sp) OVERRIDE {
     SetGPR(SP, new_sp);
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index 0383ad6..1c99f3c 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -35,7 +35,7 @@
 
   void Reset() OVERRIDE;
 
-  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetSP(uintptr_t new_sp) OVERRIDE {
     SetGPR(SP, new_sp);
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index f86f9ae..9ef761e 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -42,7 +42,7 @@
   // Reads values from callee saves in the given frame. The frame also holds
   // the method that holds the layout.
   virtual void FillCalleeSaves(const StackVisitor& fr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   // Sets the stack pointer value.
   virtual void SetSP(uintptr_t new_sp) = 0;
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index d01b95e..38cf29a 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -34,7 +34,7 @@
 
   void Reset() OVERRIDE;
 
-  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetSP(uintptr_t new_sp) OVERRIDE {
     SetGPR(SP, new_sp);
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index ebc036c..e4a144f 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -34,7 +34,7 @@
 
   void Reset() OVERRIDE;
 
-  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetSP(uintptr_t new_sp) OVERRIDE {
     SetGPR(SP, new_sp);
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 05b42f5..0831c26 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1557,7 +1557,7 @@
 
 static void GetSetBooleanStatic(ArtField* f, Thread* self,
                                 ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   constexpr size_t num_values = 5;
@@ -1588,7 +1588,7 @@
 }
 static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                              StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1619,7 +1619,7 @@
 
 static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
                                   ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint8_t values[] = { 0, true, 2, 128, 0xFF };
@@ -1654,7 +1654,7 @@
 }
 static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1689,7 +1689,7 @@
 
 static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                              StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1719,7 +1719,7 @@
 }
 static void GetSetShortStatic(ArtField* f, Thread* self,
                               ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1750,7 +1750,7 @@
 
 static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
                                Thread* self, ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1784,7 +1784,7 @@
 }
 static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1819,7 +1819,7 @@
 
 static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1855,7 +1855,7 @@
 
 static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1896,7 +1896,7 @@
 
 static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
                                  ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                             reinterpret_cast<size_t>(val),
                             0U,
@@ -1916,7 +1916,7 @@
 
 static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
@@ -1940,7 +1940,7 @@
 static void set_and_check_instance(ArtField* f, mirror::Object* trg,
                                    mirror::Object* val, Thread* self, ArtMethod* referrer,
                                    StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                             reinterpret_cast<size_t>(trg),
                             reinterpret_cast<size_t>(val),
@@ -1963,7 +1963,7 @@
 
 static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
@@ -1986,7 +1986,7 @@
 
 static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
     defined(__aarch64__)
   uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
@@ -2017,7 +2017,7 @@
 
 static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
 #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
     defined(__aarch64__)
   uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index a783d48..c4a11d8 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -34,7 +34,7 @@
 
   void Reset() OVERRIDE;
 
-  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetSP(uintptr_t new_sp) OVERRIDE {
     SetGPR(ESP, new_sp);
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index c9b0ff6..30bb9ec 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -34,7 +34,7 @@
 
   void Reset() OVERRIDE;
 
-  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetSP(uintptr_t new_sp) OVERRIDE {
     SetGPR(RSP, new_sp);
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 73beb1f..5138cc9 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -253,7 +253,7 @@
   SetObj<kTransactionActive>(object, l);
 }
 
-inline const char* ArtField::GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline const char* ArtField::GetName() SHARED_REQUIRES(Locks::mutator_lock_) {
   uint32_t field_index = GetDexFieldIndex();
   if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
     DCHECK(IsStatic());
@@ -264,7 +264,7 @@
   return dex_file->GetFieldName(dex_file->GetFieldId(field_index));
 }
 
-inline const char* ArtField::GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline const char* ArtField::GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
   uint32_t field_index = GetDexFieldIndex();
   if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
     DCHECK(IsStatic());
@@ -278,11 +278,11 @@
 }
 
 inline Primitive::Type ArtField::GetTypeAsPrimitiveType()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return Primitive::GetType(GetTypeDescriptor()[0]);
 }
 
-inline bool ArtField::IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline bool ArtField::IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_) {
   return GetTypeAsPrimitiveType() != Primitive::kPrimNot;
 }
 
@@ -304,15 +304,15 @@
   return type;
 }
 
-inline size_t ArtField::FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline size_t ArtField::FieldSize() SHARED_REQUIRES(Locks::mutator_lock_) {
   return Primitive::ComponentSize(GetTypeAsPrimitiveType());
 }
 
-inline mirror::DexCache* ArtField::GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline mirror::DexCache* ArtField::GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_) {
   return GetDeclaringClass()->GetDexCache();
 }
 
-inline const DexFile* ArtField::GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline const DexFile* ArtField::GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_) {
   return GetDexCache()->GetDexFile();
 }
 
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 7a03723..1a0ee0f 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -42,27 +42,27 @@
  public:
   ArtField();
 
-  mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetDeclaringClass(mirror::Class *new_declaring_class)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Not called within a transaction.
     access_flags_ = new_access_flags;
   }
 
-  bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
-  bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccStatic) != 0;
   }
 
-  bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
@@ -76,115 +76,115 @@
   }
 
   // Offset to field within an Object.
-  MemberOffset GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  MemberOffset GetOffset() SHARED_REQUIRES(Locks::mutator_lock_);
 
   static MemberOffset OffsetOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
   }
 
-  MemberOffset GetOffsetDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  MemberOffset GetOffsetDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetOffset(MemberOffset num_bytes) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Field access; use a null object for static fields.
-  uint8_t GetBoolean(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint8_t GetBoolean(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void SetBoolean(mirror::Object* object, uint8_t z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetBoolean(mirror::Object* object, uint8_t z) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int8_t GetByte(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int8_t GetByte(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void SetByte(mirror::Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetByte(mirror::Object* object, int8_t b) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint16_t GetChar(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint16_t GetChar(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void SetChar(mirror::Object* object, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetChar(mirror::Object* object, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int16_t GetShort(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int16_t GetShort(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void SetShort(mirror::Object* object, int16_t s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetShort(mirror::Object* object, int16_t s) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int32_t GetInt(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t GetInt(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void SetInt(mirror::Object* object, int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetInt(mirror::Object* object, int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int64_t GetLong(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int64_t GetLong(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void SetLong(mirror::Object* object, int64_t j) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetLong(mirror::Object* object, int64_t j) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  float GetFloat(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  float GetFloat(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void SetFloat(mirror::Object* object, float f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetFloat(mirror::Object* object, float f) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  double GetDouble(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  double GetDouble(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void SetDouble(mirror::Object* object, double d) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetDouble(mirror::Object* object, double d) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::Object* GetObject(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* GetObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
   void SetObject(mirror::Object* object, mirror::Object* l)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Raw field accesses.
-  uint32_t Get32(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t Get32(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
   void Set32(mirror::Object* object, uint32_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint64_t Get64(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint64_t Get64(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void Set64(mirror::Object* object, uint64_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Set64(mirror::Object* object, uint64_t new_value) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::Object* GetObj(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* GetObj(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
   void SetObj(mirror::Object* object, mirror::Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<typename RootVisitorType>
-  void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VisitRoots(RootVisitorType& visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccVolatile) != 0;
   }
 
   // Returns an instance field with this offset in the given class or null if not found.
   static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Returns a static field with this offset in the given class or null if not found.
   static ArtField* FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolves / returns the name from the dex cache.
   mirror::String* GetStringName(Thread* self, bool resolve)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const char* GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  Primitive::Type GetTypeAsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Primitive::Type GetTypeAsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kResolve>
-  mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t FieldSize() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
 
   GcRoot<mirror::Class>& DeclaringClassRoot() {
     return declaring_class_;
@@ -192,11 +192,11 @@
 
  private:
   mirror::Class* ProxyFindSystemClass(const char* descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx,
                                        mirror::DexCache* dex_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   GcRoot<mirror::Class> declaring_class_;
 
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 7673418..17c9fe4 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -94,7 +94,7 @@
 }
 
 static bool HasSameNameAndSignature(ArtMethod* method1, ArtMethod* method2)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedAssertNoThreadSuspension ants(Thread::Current(), "HasSameNameAndSignature");
   const DexFile* dex_file = method1->GetDexFile();
   const DexFile::MethodId& mid = dex_file->GetMethodId(method1->GetDexMethodIndex());
@@ -455,7 +455,7 @@
 // Counts the number of references in the parameter list of the corresponding method.
 // Note: This does _not_ include "this" for non-static methods.
 static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   uint32_t shorty_len;
   const char* shorty = method->GetShorty(&shorty_len);
   uint32_t refs = 0;
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 4169c5e..a5bd2f0 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -54,24 +54,24 @@
 
   static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
                                         jobject jlr_method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE mirror::Class* GetDeclaringClassNoBarrier()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetDeclaringClass(mirror::Class *new_declaring_class)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static MemberOffset DeclaringClassOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
   }
 
-  ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetAccessFlags(uint32_t new_access_flags) {
     // Not called within a transaction.
@@ -79,35 +79,35 @@
   }
 
   // Approximate what kind of method call would be used for this method.
-  InvokeType GetInvokeType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  InvokeType GetInvokeType() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if the method is declared public.
-  bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
   // Returns true if the method is declared private.
-  bool IsPrivate() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrivate() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPrivate) != 0;
   }
 
   // Returns true if the method is declared static.
-  bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccStatic) != 0;
   }
 
   // Returns true if the method is a constructor.
-  bool IsConstructor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsConstructor() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccConstructor) != 0;
   }
 
   // Returns true if the method is a class initializer.
-  bool IsClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsClassInitializer() SHARED_REQUIRES(Locks::mutator_lock_) {
     return IsConstructor() && IsStatic();
   }
 
   // Returns true if the method is static, private, or a constructor.
-  bool IsDirect() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsDirect() SHARED_REQUIRES(Locks::mutator_lock_) {
     return IsDirect(GetAccessFlags());
   }
 
@@ -116,56 +116,56 @@
   }
 
   // Returns true if the method is declared synchronized.
-  bool IsSynchronized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsSynchronized() SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t synchronized = kAccSynchronized | kAccDeclaredSynchronized;
     return (GetAccessFlags() & synchronized) != 0;
   }
 
-  bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  bool IsMiranda() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsMiranda() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccMiranda) != 0;
   }
 
-  bool IsNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsNative() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccNative) != 0;
   }
 
-  bool ShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool ShouldNotInline() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccDontInline) != 0;
   }
 
-  void SetShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetShouldNotInline() SHARED_REQUIRES(Locks::mutator_lock_) {
     SetAccessFlags(GetAccessFlags() | kAccDontInline);
   }
 
-  bool IsFastNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsFastNative() SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t mask = kAccFastNative | kAccNative;
     return (GetAccessFlags() & mask) == mask;
   }
 
-  bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAbstract) != 0;
   }
 
-  bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
-  bool IsProxyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPreverified) != 0;
   }
 
-  void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!IsPreverified());
     SetAccessFlags(GetAccessFlags() | kAccPreverified);
   }
 
-  bool IsOptimized(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Temporary solution for detecting if a method has been optimized: the compiler
     // does not create a GC map. Instead, the vmap table contains the stack map
     // (as in stack_map.h).
@@ -175,18 +175,18 @@
         && GetNativeGcMap(pointer_size) == nullptr;
   }
 
-  bool CheckIncompatibleClassChange(InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint16_t GetMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint16_t GetMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Doesn't do erroneous / unresolved class checks.
-  uint16_t GetMethodIndexDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint16_t GetMethodIndexDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  size_t GetVtableIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t GetVtableIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetMethodIndex();
   }
 
-  void SetMethodIndex(uint16_t new_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetMethodIndex(uint16_t new_method_index) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Not called within a transaction.
     method_index_ = new_method_index;
   }
@@ -211,7 +211,7 @@
   // Number of 32-bit registers that would be required to hold all the arguments.
   static size_t NumArgRegisters(const StringPiece& shorty);
 
-  ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetDexMethodIndex(uint32_t new_idx) {
     // Not called within a transaction.
@@ -227,36 +227,36 @@
   }
 
   ALWAYS_INLINE mirror::PointerArray* GetDexCacheResolvedMethods()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx, size_t ptr_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method,
                                                size_t ptr_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   ALWAYS_INLINE void SetDexCacheResolvedMethods(mirror::PointerArray* new_dex_cache_methods)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool HasDexCacheResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_);
   bool HasSameDexCacheResolvedMethods(ArtMethod* other)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool HasSameDexCacheResolvedMethods(mirror::PointerArray* other_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kWithCheck = true>
   mirror::Class* GetDexCacheResolvedType(uint32_t type_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void SetDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* new_dex_cache_types)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool HasDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool HasDexCacheResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_REQUIRES(Locks::mutator_lock_);
   bool HasSameDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* other_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the Class* from the type index into this method's dex cache.
   mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Find the method that this method overrides.
-  ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Find the method index for this method within other_dexfile. If this method isn't present then
   // return DexFile::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same
@@ -264,10 +264,10 @@
   // in the other_dexfile.
   uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile,
                                             uint32_t name_and_signature_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   const void* GetEntryPointFromQuickCompiledCode() {
     return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
@@ -287,7 +287,7 @@
                   entry_point_from_quick_compiled_code, pointer_size);
   }
 
-  uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t GetCodeSize() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Check whether the given PC is within the quick compiled code associated with this method's
   // quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for
@@ -297,12 +297,12 @@
         reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
   }
 
-  void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if the entrypoint points to the interpreter, as
   // opposed to the compiled code, that is, this method will be
   // interpreted on invocation.
-  bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsEntrypointInterpreter() SHARED_REQUIRES(Locks::mutator_lock_);
 
   uint32_t GetQuickOatCodeOffset();
   void SetQuickOatCodeOffset(uint32_t code_offset);
@@ -317,37 +317,37 @@
 
   // Actual entry point pointer to compiled oat code or null.
   const void* GetQuickOatEntryPoint(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Actual pointer to compiled oat code or null.
   const void* GetQuickOatCodePointer(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
   }
 
   // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
   const uint8_t* GetMappingTable(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
   const uint8_t* GetVmapTable(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const uint8_t* GetQuickenedInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
   const uint8_t* GetNativeGcMap(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const uint8_t* GetNativeGcMap(const void* code_pointer, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kCheckFrameSize = true>
-  uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
     if (kCheckFrameSize) {
       DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
@@ -355,30 +355,30 @@
     return result;
   }
 
-  QuickMethodFrameInfo GetQuickFrameInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
   QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  FrameOffset GetReturnPcOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetReturnPcOffset(GetFrameSizeInBytes());
   }
 
   FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
     return FrameOffset(frame_size_in_bytes - sizeof(void*));
   }
 
-  FrameOffset GetHandleScopeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
     constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
     DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
     return FrameOffset(handle_scope_offset);
   }
 
   void RegisterNative(const void* native_method, bool is_fast)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void UnregisterNative() SHARED_REQUIRES(Locks::mutator_lock_);
 
   static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
     return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
@@ -397,7 +397,7 @@
     return GetEntryPoint<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
   }
 
-  void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetEntryPointFromJni(const void* entrypoint) SHARED_REQUIRES(Locks::mutator_lock_) {
     SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
   }
   ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) {
@@ -409,34 +409,34 @@
   ALWAYS_INLINE bool IsRuntimeMethod();
 
   // Is this a hand crafted method used for something like describing callee saves?
-  bool IsCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
 #ifdef NDEBUG
   uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
   }
 #else
   uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 #endif
 
   // Converts a native PC to a dex PC.
   uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Converts a dex PC to a native PC.
   uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  MethodReference ToMethodReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) {
     return MethodReference(GetDexFile(), GetDexMethodIndex());
   }
 
@@ -445,63 +445,63 @@
   // a move-exception instruction is present.
   uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc,
                           bool* has_no_move_exception)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<typename RootVisitorType>
-  void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VisitRoots(RootVisitorType& visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const char* GetDeclaringClassDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const char* GetDeclaringClassDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const char* GetShorty() SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t unused_length;
     return GetShorty(&unused_length);
   }
 
-  const char* GetShorty(uint32_t* out_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const char* GetShorty(uint32_t* out_length) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const Signature GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const Signature GetSignature() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ALWAYS_INLINE const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::String* GetNameAsString(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const DexFile::CodeItem* GetCodeItem() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const DexFile::CodeItem* GetCodeItem() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const DexFile::ProtoId& GetPrototype() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const DexFile::ProtoId& GetPrototype() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const DexFile::TypeList* GetParameterTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const DexFile::TypeList* GetParameterTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const char* GetDeclaringClassSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const char* GetDeclaringClassSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint16_t GetClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint16_t GetClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const DexFile::ClassDef& GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const DexFile::ClassDef& GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const char* GetReturnTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const char* GetReturnTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
 
   const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // May cause thread suspension due to GetClassFromTypeIdx calling ResolveType; this has caused
   // a large number of bugs at call sites.
-  mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Class* GetReturnType(bool resolve = true) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // May cause thread suspension due to class resolution.
   bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Size of an instance of this object.
   static size_t ObjectSize(size_t pointer_size) {
@@ -510,10 +510,10 @@
   }
 
   void CopyFrom(const ArtMethod* src, size_t image_pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE mirror::ObjectArray<mirror::Class>* GetDexCacheResolvedTypes()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  protected:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 0e7f61e..02f9f58 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -39,10 +39,10 @@
   virtual ~Barrier();
 
   // Pass through the barrier, decrement the count but do not block.
-  void Pass(Thread* self);
+  void Pass(Thread* self) REQUIRES(!lock_);
 
   // Wait on the barrier, decrement the count.
-  void Wait(Thread* self);
+  void Wait(Thread* self) REQUIRES(!lock_);
 
   // The following three calls are only safe if we somehow know that no other thread both
   // - has been woken up, and
@@ -51,18 +51,18 @@
   // to sleep, resulting in a deadlock.
 
   // Increment the count by delta, wait on condition if count is non zero.
-  void Increment(Thread* self, int delta) LOCKS_EXCLUDED(lock_);
+  void Increment(Thread* self, int delta) REQUIRES(!lock_);
 
   // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
   // true if time out occurred.
-  bool Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_);
+  bool Increment(Thread* self, int delta, uint32_t timeout_ms) REQUIRES(!lock_);
 
   // Set the count to a new value.  This should only be used if there is no possibility that
   // another thread is still in Wait().  See above.
-  void Init(Thread* self, int count);
+  void Init(Thread* self, int count) REQUIRES(!lock_);
 
  private:
-  void SetCountLocked(Thread* self, int count) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void SetCountLocked(Thread* self, int count) REQUIRES(lock_);
 
   // Counter, when this reaches 0 all people blocked on the barrier are signalled.
   int count_ GUARDED_BY(lock_);
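
A sketch, not part of this change, of what REQUIRES(!lock_) on Pass() and Wait() buys
once -Wthread-safety-negative is enabled: the negative capability is a precondition
that propagates to callers, so self-deadlock on a non-reentrant mutex is caught at
compile time. All names below are invented for illustration.

  Mutex gLock("example lock");        // toy file-scope lock

  void LockedWork(Thread* self) REQUIRES(!gLock) {
    MutexLock mu(self, gLock);        // acquiring requires proving !gLock first
  }

  void GoodCaller(Thread* self) REQUIRES(!gLock) {
    LockedWork(self);                 // OK: !gLock is part of this contract
  }

  void BadCaller(Thread* self) {      // no !gLock in the contract...
    LockedWork(self);                 // ...so -Wthread-safety-negative warns here
  }
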
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index d977941..c4b36ee 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -182,12 +182,12 @@
  public:
   explicit ArenaPool(bool use_malloc = true, bool low_4gb = false);
   ~ArenaPool();
-  Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
-  void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
-  size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_);
+  Arena* AllocArena(size_t size) REQUIRES(!lock_);
+  void FreeArenaChain(Arena* first) REQUIRES(!lock_);
+  size_t GetBytesAllocated() const REQUIRES(!lock_);
   // Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works
   // when use_malloc is false.
-  void TrimMaps() LOCKS_EXCLUDED(lock_);
+  void TrimMaps() REQUIRES(!lock_);
 
  private:
   const bool use_malloc_;
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 93d4edc..2cd1a4d 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -237,7 +237,7 @@
  public:
   LogMessage(const char* file, unsigned int line, LogSeverity severity, int error);
 
-  ~LogMessage();  // TODO: enable LOCKS_EXCLUDED(Locks::logging_lock_).
+  ~LogMessage();  // TODO: enable REQUIRES(!Locks::logging_lock_).
 
   // Returns the stream associated with the message, the LogMessage performs output when it goes
   // out of scope.
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 5c59647..1d5dee2 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -244,18 +244,14 @@
 
 #define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
 #define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
-#define EXCLUSIVE_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
 #define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
 #define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
-#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
 #define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
-#define LOCKS_EXCLUDED(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
 #define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
 #define PT_GUARDED_BY(x)
 // THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
 #define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded)
 #define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
-#define SHARED_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
 
 #if defined(__clang__)
 #define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
@@ -263,12 +259,43 @@
 #define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
 #define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
 #define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
+#define SHARED_REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
+#define CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(capability(__VA_ARGS__))
+#define SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_capability(__VA_ARGS__))
+#define ASSERT_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(__VA_ARGS__))
+#define ASSERT_SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(__VA_ARGS__))
+#define RETURN_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(__VA_ARGS__))
+#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
+#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
+#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
+#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
+#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
+#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
 #else
 #define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(__VA_ARGS__))
 #define EXCLUSIVE_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(__VA_ARGS__))
 #define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(__VA_ARGS__))
 #define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(__VA_ARGS__))
 #define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock(__VA_ARGS__))
+#define REQUIRES(...)
+#define SHARED_REQUIRES(...)
+#define CAPABILITY(...)
+#define SHARED_CAPABILITY(...)
+#define ASSERT_CAPABILITY(...)
+#define ASSERT_SHARED_CAPABILITY(...)
+#define RETURN_CAPABILITY(...)
+#define TRY_ACQUIRE(...)
+#define TRY_ACQUIRE_SHARED(...)
+#define ACQUIRE(...)
+#define ACQUIRE_SHARED(...)
+#define RELEASE(...)
+#define RELEASE_SHARED(...)
+#define SCOPED_CAPABILITY
 #endif
 
+#define LOCKABLE CAPABILITY("mutex")
+#define SHARED_LOCKABLE SHARED_CAPABILITY("mutex")
+
 #endif  // ART_RUNTIME_BASE_MACROS_H_
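
For reference, the retired macros map onto the new capability forms roughly as follows;
the SpinLock class and my_lock below are invented for illustration:

  class CAPABILITY("mutex") SpinLock {
   public:
    void Acquire() ACQUIRE();              // was EXCLUSIVE_LOCK_FUNCTION()
    bool TryAcquire() TRY_ACQUIRE(true);   // was EXCLUSIVE_TRYLOCK_FUNCTION(true)
    void Release() RELEASE();              // was UNLOCK_FUNCTION()
    const SpinLock& operator!() const { return *this; }  // enables REQUIRES(!x)
  };

  SpinLock my_lock;

  void Update() REQUIRES(my_lock);         // was EXCLUSIVE_LOCKS_REQUIRED(my_lock)
  int Read() SHARED_REQUIRES(my_lock);     // was SHARED_LOCKS_REQUIRED(my_lock)
  void Rebuild() REQUIRES(!my_lock);       // was LOCKS_EXCLUDED(my_lock)

Unlike LOCKS_EXCLUDED, which was only checked at call sites where the analysis already
knew the lock state, REQUIRES(!x) is a negative capability that callers must in turn be
able to prove, which is what -Wthread-safety-negative enforces.
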
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 5b258e5..6f82f28 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -43,8 +43,8 @@
 
 namespace art {
 
-class LOCKABLE ReaderWriterMutex;
-class LOCKABLE MutatorMutex;
+class SHARED_LOCKABLE ReaderWriterMutex;
+class SHARED_LOCKABLE MutatorMutex;
 class ScopedContentionRecorder;
 class Thread;
 
@@ -214,35 +214,37 @@
   virtual bool IsMutex() const { return true; }
 
   // Block until mutex is free then acquire exclusive access.
-  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
-  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }
+  void ExclusiveLock(Thread* self) ACQUIRE();
+  void Lock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }
 
   // Returns true if acquires exclusive access, false otherwise.
-  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
-  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }
+  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
+  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }
 
   // Release exclusive access.
-  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
-  void Unlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }
+  void ExclusiveUnlock(Thread* self) RELEASE();
+  void Unlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }
 
   // Is the current thread the exclusive holder of the Mutex.
   bool IsExclusiveHeld(const Thread* self) const;
 
   // Assert that the Mutex is exclusively held by the current thread.
-  void AssertExclusiveHeld(const Thread* self) {
+  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(IsExclusiveHeld(self)) << *this;
     }
   }
-  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }
+  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
 
   // Assert that the Mutex is not held by the current thread.
-  void AssertNotHeldExclusive(const Thread* self) {
+  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(!IsExclusiveHeld(self)) << *this;
     }
   }
-  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }
+  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
+    AssertNotHeldExclusive(self);
+  }
 
   // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
   // than the owner.
@@ -255,6 +257,9 @@
 
   virtual void Dump(std::ostream& os) const;
 
+  // For negative capabilities in clang annotations.
+  const Mutex& operator!() const { return *this; }
+
  private:
 #if ART_USE_FUTEXES
   // 0 is unheld, 1 is held.
@@ -290,7 +295,7 @@
 // Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
 // * for large values of n the SharedLock may block.
 std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
-class LOCKABLE ReaderWriterMutex : public BaseMutex {
+class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
  public:
   explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
   ~ReaderWriterMutex();
@@ -298,12 +303,12 @@
   virtual bool IsReaderWriterMutex() const { return true; }
 
   // Block until ReaderWriterMutex is free then acquire exclusive access.
-  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
-  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }
+  void ExclusiveLock(Thread* self) ACQUIRE();
+  void WriterLock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }
 
   // Release exclusive access.
-  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
-  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }
+  void ExclusiveUnlock(Thread* self) RELEASE();
+  void WriterUnlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }
 
   // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
   // or false if timeout is reached.
@@ -313,15 +318,15 @@
 #endif
 
   // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
-  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
-  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }
+  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
+  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }
 
   // Try to acquire share of ReaderWriterMutex.
-  bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
+  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);
 
   // Release a share of the access.
-  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
-  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }
+  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
+  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }
 
   // Is the current thread the exclusive holder of the ReaderWriterMutex.
   bool IsExclusiveHeld(const Thread* self) const;
@@ -368,6 +373,9 @@
 
   virtual void Dump(std::ostream& os) const;
 
+  // For negative capabilities in clang annotations.
+  const ReaderWriterMutex& operator!() const { return *this; }
+
  private:
 #if ART_USE_FUTEXES
   // Out-of-inline path for handling contention for a SharedLock.
@@ -402,7 +410,7 @@
 // suspended states before exclusive ownership of the mutator mutex is sought.
 //
 std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
-class LOCKABLE MutatorMutex : public ReaderWriterMutex {
+class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
  public:
   explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
     : ReaderWriterMutex(name, level) {}
@@ -410,6 +418,9 @@
 
   virtual bool IsMutatorMutex() const { return true; }
 
+  // For negative capabilities in clang annotations.
+  const MutatorMutex& operator!() const { return *this; }
+
  private:
   friend class Thread;
   void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
@@ -458,7 +469,7 @@
 
 // Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
 // upon destruction.
-class SCOPED_LOCKABLE MutexLock {
+class SCOPED_CAPABILITY MutexLock {
  public:
   explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
     mu_.ExclusiveLock(self_);
@@ -478,7 +489,7 @@
 
 // Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
 // construction and releases it upon destruction.
-class SCOPED_LOCKABLE ReaderMutexLock {
+class SCOPED_CAPABILITY ReaderMutexLock {
  public:
   explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
       self_(self), mu_(mu) {
@@ -500,7 +511,7 @@
 
 // Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
 // construction and releases it upon destruction.
-class SCOPED_LOCKABLE WriterMutexLock {
+class SCOPED_CAPABILITY WriterMutexLock {
  public:
   explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
       self_(self), mu_(mu) {
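
The operator!() overloads added to Mutex, ReaderWriterMutex and MutatorMutex above are
a syntactic device: REQUIRES(...) takes an ordinary C++ expression, and for a by-value
Mutex member, !lock_ would not otherwise compile (Mutex has no conversion to bool). The
overload returns the mutex itself, and the analysis interprets the ! as a negative
capability. A sketch with invented names:

  class Cache {
   public:
    Cache() : lock_("Cache lock") {}
    // '!lock_' compiles only because Mutex defines operator!().
    void Flush(Thread* self) REQUIRES(!lock_);
   private:
    Mutex lock_;
  };

For pointer-valued locks such as Locks::mutator_lock_, negation is already well-formed
on the pointer, so no overload is needed there.
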
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 3750c81..340550f 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -101,18 +101,18 @@
       : mu("test mutex", kDefaultMutexLevel, true), cv("test condition variable", mu) {
   }
 
-  static void* Callback(void* arg) {
-    RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg);
-    state->mu.Lock(Thread::Current());
-    state->cv.Signal(Thread::Current());
-    state->mu.Unlock(Thread::Current());
-    return nullptr;
-  }
-
   Mutex mu;
   ConditionVariable cv;
 };
 
+static void* RecursiveLockWaitCallback(void* arg) {
+  RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg);
+  state->mu.Lock(Thread::Current());
+  state->cv.Signal(Thread::Current());
+  state->mu.Unlock(Thread::Current());
+  return nullptr;
+}
+
 // GCC has trouble with our mutex tests, so we have to turn off thread safety analysis.
 static void RecursiveLockWaitTest() NO_THREAD_SAFETY_ANALYSIS {
   RecursiveLockWait state;
@@ -120,8 +120,7 @@
   state.mu.Lock(Thread::Current());
 
   pthread_t pthread;
-  int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback,
-                                             &state);
+  int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWaitCallback, &state);
   ASSERT_EQ(0, pthread_create_result);
 
   state.cv.Wait(Thread::Current());
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index b300109..e10cd24 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -33,17 +33,17 @@
   explicit CumulativeLogger(const std::string& name);
   ~CumulativeLogger();
   void Start();
-  void End() LOCKS_EXCLUDED(lock_);
-  void Reset() LOCKS_EXCLUDED(lock_);
-  void Dump(std::ostream& os) const LOCKS_EXCLUDED(lock_);
+  void End() REQUIRES(!lock_);
+  void Reset() REQUIRES(!lock_);
+  void Dump(std::ostream& os) const REQUIRES(!lock_);
   uint64_t GetTotalNs() const {
     return GetTotalTime() * kAdjust;
   }
   // Allow the name to be modified, particularly when the cumulative logger is a field within a
   // parent class that is unable to determine the "name" of a sub-class.
-  void SetName(const std::string& name) LOCKS_EXCLUDED(lock_);
-  void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_);
-  size_t GetIterations() const;
+  void SetName(const std::string& name) REQUIRES(!lock_);
+  void AddLogger(const TimingLogger& logger) REQUIRES(!lock_);
+  size_t GetIterations() const REQUIRES(!lock_);
 
  private:
   class HistogramComparator {
@@ -58,8 +58,8 @@
   static constexpr size_t kInitialBucketSize = 50;  // 50 microseconds.
 
   void AddPair(const std::string &label, uint64_t delta_time)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  void DumpHistogram(std::ostream &os) const EXCLUSIVE_LOCKS_REQUIRED(lock_);
+      REQUIRES(lock_);
+  void DumpHistogram(std::ostream &os) const REQUIRES(lock_);
   uint64_t GetTotalTime() const {
     return total_time_;
   }
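
The CumulativeLogger hunks above show the split used throughout this change: public
entry points that take the lock themselves get the negative form REQUIRES(!lock_),
while private helpers that run under the lock keep the positive REQUIRES(lock_). A toy
sketch, with all names invented:

  class Stats {
   public:
    Stats() : lock_("Stats lock") {}
    void Record(Thread* self, uint64_t v) REQUIRES(!lock_) {
      MutexLock mu(self, lock_);
      RecordLocked(v);               // legal: lock_ is held for this scope
    }
   private:
    void RecordLocked(uint64_t v) REQUIRES(lock_) { total_ += v; }
    Mutex lock_;
    uint64_t total_ GUARDED_BY(lock_) = 0;
  };

This mirrors AddPair() and DumpHistogram() (REQUIRES(lock_)) being reachable only from
End(), Reset() and Dump() (REQUIRES(!lock_)).
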
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 0ae32f4..40b3669 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -155,7 +155,7 @@
    * Assumes "jobj" has already been validated.
    */
   bool CheckInstanceFieldID(ScopedObjectAccess& soa, jobject java_object, jfieldID fid)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
     if (o == nullptr) {
       AbortF("field operation on NULL object: %p", java_object);
@@ -199,7 +199,7 @@
    */
   bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
                          jmethodID mid, Primitive::Type type, InvokeType invoke)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = CheckMethodID(soa, mid);
     if (m == nullptr) {
       return false;
@@ -246,7 +246,7 @@
    * Assumes "java_class" has already been validated.
    */
   bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
     ArtField* f = CheckFieldID(soa, fid);
     if (f == nullptr) {
@@ -269,7 +269,7 @@
    * Instances of "java_class" must be instances of the method's declaring class.
    */
   bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = CheckMethodID(soa, mid);
     if (m == nullptr) {
       return false;
@@ -290,7 +290,7 @@
    * will be handled automatically by the instanceof check.)
    */
   bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = CheckMethodID(soa, mid);
     if (m == nullptr) {
       return false;
@@ -343,7 +343,7 @@
    * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
    */
   bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* traceMethod = nullptr;
     if (has_method_ && soa.Vm()->IsTracingEnabled()) {
       // We need to guard some of the invocation interface's calls: a bad caller might
@@ -443,7 +443,7 @@
   }
 
   bool CheckReflectedMethod(ScopedObjectAccess& soa, jobject jmethod)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Object* method = soa.Decode<mirror::Object*>(jmethod);
     if (method == nullptr) {
       AbortF("expected non-null method");
@@ -461,7 +461,7 @@
   }
 
   bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* method = soa.DecodeMethod(mid);
     if (method == nullptr) {
       AbortF("expected non-null constructor");
@@ -475,7 +475,7 @@
   }
 
   bool CheckReflectedField(ScopedObjectAccess& soa, jobject jfield)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Object* field = soa.Decode<mirror::Object*>(jfield);
     if (field == nullptr) {
       AbortF("expected non-null java.lang.reflect.Field");
@@ -491,7 +491,7 @@
   }
 
   bool CheckThrowable(ScopedObjectAccess& soa, jthrowable jobj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Object* obj = soa.Decode<mirror::Object*>(jobj);
     if (!obj->GetClass()->IsThrowableClass()) {
       AbortF("expected java.lang.Throwable but got object of type "
@@ -502,7 +502,7 @@
   }
 
   bool CheckThrowableClass(ScopedObjectAccess& soa, jclass jc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Class* c = soa.Decode<mirror::Class*>(jc);
     if (!c->IsThrowableClass()) {
       AbortF("expected java.lang.Throwable class but got object of "
@@ -533,7 +533,7 @@
   }
 
   bool CheckInstantiableNonArray(ScopedObjectAccess& soa, jclass jc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Class* c = soa.Decode<mirror::Class*>(jc);
     if (!c->IsInstantiableNonArray()) {
       AbortF("can't make objects of type %s: %p", PrettyDescriptor(c).c_str(), c);
@@ -543,7 +543,7 @@
   }
 
   bool CheckPrimitiveArrayType(ScopedObjectAccess& soa, jarray array, Primitive::Type type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!CheckArray(soa, array)) {
       return false;
     }
@@ -558,7 +558,7 @@
 
   bool CheckFieldAccess(ScopedObjectAccess& soa, jobject obj, jfieldID fid, bool is_static,
                         Primitive::Type type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (is_static && !CheckStaticFieldID(soa, down_cast<jclass>(obj), fid)) {
       return false;
     }
@@ -619,7 +619,7 @@
    * to "running" mode before doing the checks.
    */
   bool CheckInstance(ScopedObjectAccess& soa, InstanceKind kind, jobject java_object, bool null_ok)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const char* what = nullptr;
     switch (kind) {
     case kClass:
@@ -715,7 +715,7 @@
   }
 
   bool CheckPossibleHeapValue(ScopedObjectAccess& soa, char fmt, JniValueType arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     switch (fmt) {
       case 'a':  // jarray
         return CheckArray(soa, arg.a);
@@ -785,7 +785,7 @@
 
   void TracePossibleHeapValue(ScopedObjectAccess& soa, bool entry, char fmt, JniValueType arg,
                               std::string* msg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     switch (fmt) {
       case 'L':  // jobject fall-through.
       case 'a':  // jarray fall-through.
@@ -946,7 +946,7 @@
    * Since we're dealing with objects, switch to "running" mode.
    */
   bool CheckArray(ScopedObjectAccess& soa, jarray java_array)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(java_array == nullptr)) {
       AbortF("jarray was NULL");
       return false;
@@ -983,7 +983,7 @@
   }
 
   ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (fid == nullptr) {
       AbortF("jfieldID was NULL");
       return nullptr;
@@ -999,7 +999,7 @@
   }
 
   ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (mid == nullptr) {
       AbortF("jmethodID was NULL");
       return nullptr;
@@ -1014,7 +1014,7 @@
     return m;
   }
 
-  bool CheckThread(JNIEnv* env) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool CheckThread(JNIEnv* env) SHARED_REQUIRES(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     if (self == nullptr) {
       AbortF("a thread (tid %d) is making JNI calls without being attached", GetTid());
@@ -2670,7 +2670,7 @@
 
   static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj,
                             jclass c, jmethodID mid, InvokeType invoke)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     bool checked;
     switch (invoke) {
       case kVirtual: {
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 504b753..3155b51 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -28,10 +28,10 @@
 // holding references.
 class CheckReferenceMapVisitor : public StackVisitor {
  public:
-  explicit CheckReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  explicit CheckReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m->IsCalleeSaveMethod() || m->IsNative()) {
       CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
@@ -52,7 +52,7 @@
   }
 
   void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (GetMethod()->IsOptimized(sizeof(void*))) {
       CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
     } else {
@@ -62,7 +62,7 @@
 
  private:
   void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     CodeInfo code_info = m->GetOptimizedCodeInfo();
     StackMapEncoding encoding = code_info.ExtractEncoding();
@@ -104,7 +104,7 @@
   }
 
   void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     NativePcOffsetToReferenceMap map(m->GetNativeGcMap(sizeof(void*)));
     const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 21b63c6..11901b3 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -187,7 +187,7 @@
 }
 
 inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(!class_roots_.IsNull());
   mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
   mirror::Class* klass = class_roots->Get(class_root);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 8f7862a..e0be5af 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -91,7 +91,7 @@
 
 static void ThrowNoClassDefFoundError(const char* fmt, ...)
     __attribute__((__format__(__printf__, 1, 2)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 static void ThrowNoClassDefFoundError(const char* fmt, ...) {
   va_list args;
   va_start(args, fmt);
@@ -100,14 +100,12 @@
   va_end(args);
 }
 
-bool ClassLinker::HasInitWithString(
-    Thread* self, ClassLinker* class_linker, const char* descriptor) {
+bool ClassLinker::HasInitWithString(Thread* self, const char* descriptor) {
   ArtMethod* method = self->GetCurrentMethod(nullptr);
   StackHandleScope<1> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ?
-      method->GetDeclaringClass()->GetClassLoader()
-      : nullptr));
-  mirror::Class* exception_class = class_linker->FindClass(self, descriptor, class_loader);
+      method->GetDeclaringClass()->GetClassLoader() : nullptr));
+  mirror::Class* exception_class = FindClass(self, descriptor, class_loader);
 
   if (exception_class == nullptr) {
     // No exc class ~ no <init>-with-string.
@@ -144,7 +142,7 @@
       std::string temp;
       const char* descriptor = c->GetVerifyErrorClass()->GetDescriptor(&temp);
 
-      if (HasInitWithString(self, this, descriptor)) {
+      if (HasInitWithString(self, descriptor)) {
         self->ThrowNewException(descriptor, PrettyDescriptor(c).c_str());
       } else {
         self->ThrowNewException(descriptor, nullptr);
@@ -157,7 +155,7 @@
 }
 
 static void VlogClassInitializationFailure(Handle<mirror::Class> klass)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (VLOG_IS_ON(class_linker)) {
     std::string temp;
     LOG(INFO) << "Failed to initialize class " << klass->GetDescriptor(&temp) << " from "
@@ -166,7 +164,7 @@
 }
 
 static void WrapExceptionInInitializer(Handle<mirror::Class> klass)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Thread* self = Thread::Current();
   JNIEnv* env = self->GetJniEnv();
 
@@ -228,7 +226,7 @@
                            MemberOffset* field_offset,
                            std::deque<ArtField*>* grouped_and_sorted_fields,
                            FieldGaps* gaps)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(current_field_idx != nullptr);
   DCHECK(grouped_and_sorted_fields != nullptr);
   DCHECK(gaps != nullptr);
@@ -1021,7 +1019,7 @@
 
 static void SanityCheckArtMethod(ArtMethod* m, mirror::Class* expected_class,
                                  gc::space::ImageSpace* space)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (m->IsRuntimeMethod()) {
     CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m);
   } else if (m->IsMiranda()) {
@@ -1039,7 +1037,7 @@
 
 static void SanityCheckArtMethodPointerArray(
     mirror::PointerArray* arr, mirror::Class* expected_class, size_t pointer_size,
-    gc::space::ImageSpace* space) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    gc::space::ImageSpace* space) SHARED_REQUIRES(Locks::mutator_lock_) {
   CHECK(arr != nullptr);
   for (int32_t j = 0; j < arr->GetLength(); ++j) {
     auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size);
@@ -1054,7 +1052,7 @@
 }
 
 static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(obj != nullptr);
   CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
   CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
@@ -1374,7 +1372,7 @@
 };
 
 static bool GetClassesVisitorArray(mirror::Class* c, void* varg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   GetClassesVisitorArrayArg* arg = reinterpret_cast<GetClassesVisitorArrayArg*>(varg);
   if (arg->index < (*arg->classes)->GetLength()) {
     (*arg->classes)->Set(arg->index, c);
@@ -1597,7 +1595,7 @@
 
 static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                               mirror::ClassLoader* class_loader)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return class_loader == nullptr ||
       class_loader->GetClass() ==
           soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader);
@@ -2118,7 +2116,7 @@
 
 // Returns true if the method must run with interpreter, false otherwise.
 static bool NeedsInterpreter(ArtMethod* method, const void* quick_code)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (quick_code == nullptr) {
     // No code: need interpreter.
     // May return true for native code, in the case of generic JNI
@@ -2906,7 +2904,7 @@
 }
 
 static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
   CHECK(image != nullptr);
   mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
@@ -3277,14 +3275,13 @@
   }
   const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0);
   uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
-  ClassLinker* linker = Runtime::Current()->GetClassLinker();
   for (uint32_t idx = 0; idx < handlers_size; idx++) {
     CatchHandlerIterator iterator(handlers_ptr);
     for (; iterator.HasNext(); iterator.Next()) {
       // Ensure exception types are resolved so that they don't need resolution to be delivered,
       // unresolved exception types will be ignored by exception delivery
       if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
-        mirror::Class* exception_type = linker->ResolveType(iterator.GetHandlerTypeIndex(), method);
+        mirror::Class* exception_type = ResolveType(iterator.GetHandlerTypeIndex(), method);
         if (exception_type == nullptr) {
           DCHECK(Thread::Current()->IsExceptionPending());
           Thread::Current()->ClearException();
@@ -3780,7 +3777,7 @@
 
 bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
                                          ObjectLock<mirror::Class>& lock)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   while (true) {
     self->AssertNoPendingException();
     CHECK(!klass->IsInitialized());
@@ -3824,7 +3821,7 @@
                                                           Handle<mirror::Class> super_klass,
                                                           ArtMethod* method,
                                                           ArtMethod* m)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(Thread::Current()->IsExceptionPending());
   DCHECK(!m->IsProxyMethod());
   const DexFile* dex_file = m->GetDexFile();
@@ -3848,7 +3845,7 @@
                                                    ArtMethod* method,
                                                    ArtMethod* m,
                                                    uint32_t index, uint32_t arg_type_idx)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(Thread::Current()->IsExceptionPending());
   DCHECK(!m->IsProxyMethod());
   const DexFile* dex_file = m->GetDexFile();
@@ -3868,7 +3865,7 @@
                                    Handle<mirror::Class> super_klass,
                                    ArtMethod* method,
                                    const std::string& error_msg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ThrowLinkageError(klass.Get(),
                     "Class %s method %s resolves differently in %s %s: %s",
                     PrettyDescriptor(klass.Get()).c_str(),
@@ -3883,7 +3880,7 @@
                                                       Handle<mirror::Class> super_klass,
                                                       ArtMethod* method1,
                                                       ArtMethod* method2)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   {
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> return_type(hs.NewHandle(method1->GetReturnType()));
@@ -4245,7 +4242,7 @@
                                   const DexFile& dex_file,
                                   const DexFile::ClassDef& class_def,
                                   mirror::Class* super_class)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Check for unexpected changes in the superclass.
   // Quick check 1) is the super_class class-loader the boot class loader? This always has
   // precedence.
@@ -4438,7 +4435,7 @@
 class MethodNameAndSignatureComparator FINAL : public ValueObject {
  public:
   explicit MethodNameAndSignatureComparator(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+      SHARED_REQUIRES(Locks::mutator_lock_) :
       dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())),
       name_(nullptr), name_len_(0) {
     DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
@@ -4452,7 +4449,7 @@
   }
 
   bool HasSameNameAndSignature(ArtMethod* other)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!other->IsProxyMethod()) << PrettyMethod(other);
     const DexFile* other_dex_file = other->GetDexFile();
     const DexFile::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex());
@@ -4488,7 +4485,7 @@
        image_pointer_size_(image_pointer_size) {
     std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_);
   }
-  void Add(uint32_t virtual_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Add(uint32_t virtual_method_index) SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking(
         virtual_method_index, image_pointer_size_);
     const char* name = local_method->GetInterfaceMethodIfProxy(image_pointer_size_)->GetName();
@@ -4503,7 +4500,7 @@
     hash_table_[index] = virtual_method_index;
   }
   uint32_t FindAndRemove(MethodNameAndSignatureComparator* comparator)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const char* name = comparator->GetName();
     uint32_t hash = ComputeModifiedUtf8Hash(name);
     size_t index = hash % hash_size_;
@@ -5105,7 +5102,7 @@
 }
 
 struct LinkFieldsComparator {
-  explicit LinkFieldsComparator() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  explicit LinkFieldsComparator() SHARED_REQUIRES(Locks::mutator_lock_) {
   }
   // No thread safety analysis, as this is called from STL. The checked lock is held in the constructor.
   bool operator()(ArtField* field1, ArtField* field2)
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index b60cba4..0239b64 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -109,16 +109,16 @@
 
   // Initialize class linker by bootstraping from dex files.
   void InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Initialize class linker from one or more images.
-  void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void InitFromImage() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Finds a class by its descriptor, loading it if necessary.
   // If class_loader is null, searches boot_class_path_.
   mirror::Class* FindClass(Thread* self, const char* descriptor,
                            Handle<mirror::ClassLoader> class_loader)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Finds a class in the path class loader, loading it if necessary without using JNI. Hash
   // function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the
@@ -129,16 +129,16 @@
                                   Thread* self, const char* descriptor, size_t hash,
                                   Handle<mirror::ClassLoader> class_loader,
                                   mirror::Class** result)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Finds a class by its descriptor using the "system" class loader, i.e. by searching the
   // boot_class_path_.
   mirror::Class* FindSystemClass(Thread* self, const char* descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Finds the array class given for the element class.
   mirror::Class* FindArrayClass(Thread* self, mirror::Class** element_class)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Returns true if the class linker is initialized.
   bool IsInitialized() const {
@@ -149,65 +149,62 @@
   mirror::Class* DefineClass(Thread* self, const char* descriptor, size_t hash,
                              Handle<mirror::ClassLoader> class_loader,
                              const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Finds a class by its descriptor, returning null if it wasn't loaded
   // by the given 'class_loader'.
   mirror::Class* LookupClass(Thread* self, const char* descriptor, size_t hash,
                              mirror::ClassLoader* class_loader)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Finds all the classes with the given descriptor, regardless of ClassLoader.
   void LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Class* FindPrimitiveClass(char type) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // General class unloading is not supported, this is used to prune
   // unwanted classes during image writing.
   bool RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpAllClasses(int flags)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpForSigQuit(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_);
 
   size_t NumLoadedClasses()
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolve a String with the given index from the DexFile, storing the
   // result in the DexCache. The referrer is used to identify the
   // target DexCache and ClassLoader to use for resolution.
   mirror::String* ResolveString(uint32_t string_idx, ArtMethod* referrer)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolve a String with the given index from the DexFile, storing the
   // result in the DexCache.
   mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
                                 Handle<mirror::DexCache> dex_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolve a Type with the given index from the DexFile, storing the
   // result in the DexCache. The referrer is used to identify the
   // target DexCache and ClassLoader to use for resolution.
   mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, mirror::Class* referrer)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Resolve a Type with the given index from the DexFile, storing the
   // result in the DexCache. The referrer is used to identify the
   // target DexCache and ClassLoader to use for resolution.
   mirror::Class* ResolveType(uint16_t type_idx, ArtMethod* referrer)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   mirror::Class* ResolveType(uint16_t type_idx, ArtField* referrer)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Resolve a type with the given ID from the DexFile, storing the
   // result in DexCache. The ClassLoader is used to search for the
@@ -216,7 +213,7 @@
   mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
                              Handle<mirror::DexCache> dex_cache,
                              Handle<mirror::ClassLoader> class_loader)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Resolve a method with a given ID from the DexFile, storing the
   // result in DexCache. The ClassLinker and ClassLoader are used as
@@ -227,31 +224,29 @@
                            Handle<mirror::DexCache> dex_cache,
                            Handle<mirror::ClassLoader> class_loader, ArtMethod* referrer,
                            InvokeType type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   ArtField* GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Resolve a field with a given ID from the DexFile, storing the
   // result in DexCache. The ClassLinker and ClassLoader are used as
   // in ResolveType. What is unique is the is_static argument which is
   // used to determine if we are resolving a static or non-static
   // field.
-  ArtField* ResolveField(const DexFile& dex_file,
-                                 uint32_t field_idx,
-                                 Handle<mirror::DexCache> dex_cache,
-                                 Handle<mirror::ClassLoader> class_loader,
-                                 bool is_static)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtField* ResolveField(const DexFile& dex_file, uint32_t field_idx,
+                         Handle<mirror::DexCache> dex_cache,
+                         Handle<mirror::ClassLoader> class_loader, bool is_static)
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Resolve a field with a given ID from the DexFile, storing the
   // result in DexCache. The ClassLinker and ClassLoader are used as
@@ -260,32 +255,30 @@
   ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
                             Handle<mirror::DexCache> dex_cache,
                             Handle<mirror::ClassLoader> class_loader)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Get shorty from method index without resolution. Used to do handlerization.
   const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true on success, false if there's an exception pending.
   // can_init_fields=false and can_init_parents=false allow the compiler to
   // attempt to init a class given the restriction that no <clinit> execution
   // is possible.
   bool EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool can_init_fields,
                          bool can_init_parents)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Initializes classes that have instances in the image but that have
   // <clinit> methods so they could not be initialized by the compiler.
-  void RunRootClinits() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void RunRootClinits() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   void RegisterDexFile(const DexFile& dex_file)
-      LOCKS_EXCLUDED(dex_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
-      LOCKS_EXCLUDED(dex_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   const OatFile* RegisterOatFile(const OatFile* oat_file)
-      LOCKS_EXCLUDED(dex_lock_);
+      REQUIRES(!dex_lock_);
 
   const std::vector<const DexFile*>& GetBootClassPath() {
     return boot_class_path_;
@@ -293,34 +286,29 @@
 
   // Returns the first non-image oat file in the class path.
   const OatFile* GetPrimaryOatFile()
-      LOCKS_EXCLUDED(dex_lock_);
+      REQUIRES(!dex_lock_);
 
   void VisitClasses(ClassVisitor* visitor, void* arg)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Less efficient variant of VisitClasses that copies the class_table_ into secondary storage
   // so that it can visit individual classes without holding the
   // Locks::classlinker_classes_lock_. As that lock isn't held, this code can race with
   // insertion and deletion of classes while the visitor is being called.
   void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
-      LOCKS_EXCLUDED(dex_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::DexCache* FindDexCache(const DexFile& dex_file)
-      LOCKS_EXCLUDED(dex_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsDexFileRegistered(const DexFile& dex_file)
-      LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupDexCaches(ArtMethod* resolution_method)
-      LOCKS_EXCLUDED(dex_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Finds or creates the oat file holding dex_location. Then loads and returns
   // all corresponding dex files (there may be more than one dex file loaded
@@ -336,83 +324,83 @@
   // This method should not be called with the mutator_lock_ held, because it
   // could end up starving GC if we need to generate or relocate any oat
   // files.
-  std::vector<std::unique_ptr<const DexFile>>  OpenDexFilesFromOat(
+  std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(
       const char* dex_location, const char* oat_location,
       std::vector<std::string>* error_msgs)
-      LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
+      REQUIRES(!dex_lock_, !Locks::mutator_lock_);
 
   // Allocate an instance of a java.lang.Object.
-  mirror::Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* AllocObject(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // TODO: replace this with multiple methods that allocate the correct managed type.
   template <class T>
   mirror::ObjectArray<T>* AllocObjectArray(Thread* self, size_t length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::ObjectArray<mirror::Class>* AllocClassArray(Thread* self, size_t length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* AllocArtMethodArray(Thread* self, size_t length);
 
   mirror::PointerArray* AllocPointerArray(Thread* self, size_t length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtField* AllocArtFieldArray(Thread* self, size_t length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray(Thread* self,
                                                                               size_t length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void VerifyClass(Thread* self, Handle<mirror::Class> klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
   bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
                                mirror::Class::Status& oat_file_class_status)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
   void ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
                                          Handle<mirror::Class> klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
   void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, ArtMethod* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name,
                                   jobjectArray interfaces, jobject loader, jobjectArray methods,
                                   jobjectArray throws)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   std::string GetDescriptorForProxy(mirror::Class* proxy_class)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method)
-      LOCKS_EXCLUDED(dex_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!dex_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the oat code for a method when its class isn't yet initialized
   const void* GetQuickOatCodeFor(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the oat code for a method from a method index.
   const void* GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
                                  uint32_t method_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get compiled code for a method, return null if no code
   // exists. This is unlike Get..OatCodeFor which will return a bridge
   // or interpreter entrypoint.
   const void* GetOatMethodQuickCodeFor(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   pid_t GetClassesLockOwner();  // For SignalCatcher.
   pid_t GetDexLockOwner();  // For SignalCatcher.
 
-  mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static const char* GetClassRootDescriptor(ClassRoot class_root);
 
@@ -431,20 +419,20 @@
 
   // Set the entrypoints up for the method to the given code.
   void SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Set the entrypoints up for the method to enter the interpreter.
   void SetEntryPointsToInterpreter(ArtMethod* method) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Attempts to insert a class into a class table.  Returns null if
   // the class was inserted, otherwise returns an existing class with
   // the same descriptor and ClassLoader.
   mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
     DCHECK(class_roots != nullptr);
     return class_roots;
@@ -452,23 +440,23 @@
 
   // Move all of the image classes into the class table for faster lookups.
   void MoveImageClassesToClassTable()
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
   // that no more classes are ever added to the pre-zygote table, so that its pages
   // always remain shared dirty instead of private dirty.
   void MoveClassTableToPreZygote()
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if the method can be called with its direct code pointer, false otherwise.
   bool MayBeCalledWithDirectCodePointer(ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files.
   // Note: the objects are not completely set up. Do not use this outside of tests and the compiler.
   jobject CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   size_t GetImagePointerSize() const {
     DCHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_;
@@ -477,48 +465,47 @@
 
   // Used by image writer for checking.
   bool ClassInClassTable(mirror::Class* klass)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* CreateRuntimeMethod();
 
   // Clear the ArrayClass cache. This is necessary when cleaning up for the image, as the cache
   // entries are roots, but potentially not image classes.
-  void DropFindArrayClassCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   OatFile& GetImageOatFile(gc::space::ImageSpace* space)
-      LOCKS_EXCLUDED(dex_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!dex_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void FinishInit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FinishInit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // For early bootstrapping by Init
   mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, uint32_t class_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Alloc* convenience functions to avoid needing to pass in mirror::Class*
   // values that are known to the ClassLinker such as
   // kObjectArrayClass and kJavaLangString etc.
   mirror::Class* AllocClass(Thread* self, uint32_t class_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::Class* InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Class* CreateArrayClass(Thread* self, const char* descriptor, size_t hash,
                                   Handle<mirror::ClassLoader> class_loader)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   void AppendToBootClassPath(Thread* self, const DexFile& dex_file)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
   void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   // Precomputes the size needed for Class; in the case of a non-temporary class this size must be
   // sufficient to hold all static fields.
@@ -529,131 +516,131 @@
   // table.
   void SetupClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
                   Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void LoadClass(Thread* self, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
                  Handle<mirror::Class> klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data,
                         Handle<mirror::Class> klass, const OatFile::OatClass* oat_class)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass,
                  ArtField* dst)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it,
                   Handle<mirror::Class> klass, ArtMethod* dst)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupStaticTrampolines(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Finds the associated oat class for a dex_file and descriptor. Returns an invalid OatClass on
   // error and sets found to false.
   OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
-      EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(dex_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsDexFileRegisteredLocked(const DexFile& dex_file)
-      SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(dex_lock_, Locks::mutator_lock_);
 
   bool InitializeClass(Thread* self, Handle<mirror::Class> klass, bool can_run_clinit,
                        bool can_init_parents)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
   bool WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
                               ObjectLock<mirror::Class>& lock);
   bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool IsSameDescriptorInDifferentClassContexts(Thread* self, const char* descriptor,
                                                 Handle<mirror::ClassLoader> class_loader1,
                                                 Handle<mirror::ClassLoader> class_loader2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, ArtMethod* method,
                                                      mirror::Class* klass1, mirror::Class* klass2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
                  Handle<mirror::ObjectArray<mirror::Class>> interfaces,
                  MutableHandle<mirror::Class>* h_new_class_out)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool LinkSuperClass(Handle<mirror::Class> klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   bool LinkMethods(Thread* self, Handle<mirror::Class> klass,
                    Handle<mirror::ObjectArray<mirror::Class>> interfaces,
                    ArtMethod** out_imt)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass,
                             Handle<mirror::ObjectArray<mirror::Class>> interfaces,
                             ArtMethod** out_imt)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool LinkInstanceFields(Thread* self, Handle<mirror::Class> klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class,
                 uint32_t class_def_method_index)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CheckProxyConstructor(ArtMethod* constructor) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // For use by ImageWriter to find DexCaches for its roots
   ReaderWriterMutex* DexLock()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) {
     return &dex_lock_;
   }
-  size_t GetDexCacheCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_) {
+  size_t GetDexCacheCount() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
     return dex_caches_.size();
   }
-  mirror::DexCache* GetDexCache(size_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_);
+  mirror::DexCache* GetDexCache(size_t idx) SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_);
 
   const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
-      LOCKS_EXCLUDED(dex_lock_);
+      REQUIRES(!dex_lock_);
 
   // Returns the boot image oat file.
-  const OatFile* GetBootOatFile() SHARED_LOCKS_REQUIRED(dex_lock_);
+  const OatFile* GetBootOatFile() SHARED_REQUIRES(dex_lock_);
 
   void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Ensures that methods have the kAccPreverified bit set. We use the kAccPreverified bit on the
   // class access flags to determine whether this has been done before.
   void EnsurePreverifiedMethods(Handle<mirror::Class> c)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Class* LookupClassFromTableLocked(const char* descriptor,
                                             mirror::ClassLoader* class_loader,
                                             size_t hash)
-      SHARED_LOCKS_REQUIRED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
 
   mirror::Class* UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Class* LookupClassFromImage(const char* descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // EnsureResolved is called to make sure that a class in the class_table_ has been resolved
   // before returning it to the caller. It's the responsibility of the thread that placed the class
@@ -662,13 +649,13 @@
   // retire a class, the version of the class in the table is returned and this may differ from
   // the class passed in.
   mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
-      WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      WARN_UNUSED SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetClassRoot(ClassRoot class_root, mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Return the quick generic JNI stub for testing.
   const void* GetRuntimeQuickGenericJniStub() const;
@@ -677,20 +664,21 @@
   // class.
   // Note: Currently we only store the descriptor, so we cannot throw the exact throwable, only
   //       a recreation with a custom string.
-  void ThrowEarlierClassFailure(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ThrowEarlierClassFailure(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!dex_lock_);
 
   // Check for duplicate class definitions of the given oat file against all open oat files.
-  bool HasCollisions(const OatFile* oat_file, std::string* error_msg) LOCKS_EXCLUDED(dex_lock_);
+  bool HasCollisions(const OatFile* oat_file, std::string* error_msg) REQUIRES(!dex_lock_);
 
-  bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool HasInitWithString(Thread* self, const char* descriptor)
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
 
   bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods,
                                  size_t new_num_methods)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::classlinker_classes_lock_);
 
   std::vector<const DexFile*> boot_class_path_;
   std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
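
The LOCKS_EXCLUDED(dex_lock_) to REQUIRES(!dex_lock_) rewrites above are the substance of this file's changes: LOCKS_EXCLUDED was only an advisory check at direct call sites, while a negative capability is a precondition the analysis propagates up the call graph once -Wthread-safety-negative is enabled. A minimal sketch of the mechanism, written against the raw clang attribute spellings; the macro, class, and member names below are illustrative stand-ins, not ART's real base/macros.h or ClassLinker:

    #define CAPABILITY(x)  __attribute__((capability(x)))
    #define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
    #define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() {}    // no-op stand-ins for a real mutex
      void Unlock() RELEASE() {}
    };

    class ClassLinkerSketch {
     public:
      // Takes dex_lock_ itself, so callers must prove they do not hold it.
      void RegisterDexFile() REQUIRES(!dex_lock_) {
        dex_lock_.Lock();
        // ... insert into the dex cache table ...
        dex_lock_.Unlock();
      }

      void GoodCaller() REQUIRES(!dex_lock_) {
        RegisterDexFile();  // OK: the negative capability is restated here.
      }

      void UnannotatedCaller() {
        RegisterDexFile();  // -Wthread-safety-negative: this caller cannot
                            // establish '!dex_lock_'.
      }

      void HoldingCaller() REQUIRES(dex_lock_) {
        RegisterDexFile();  // plain -Wthread-safety: dex_lock_ is held, so
                            // the Lock() inside would self-deadlock.
      }

     private:
      Mutex dex_lock_;
    };

Once a leaf function declares the negative capability, every transitive caller must either restate REQUIRES(!dex_lock_) or be a point where the analysis can already prove the lock is free, which is why so many public entry points gain the annotation in one patch. The *Locked helpers such as RegisterDexFileLocked express the opposite contract, REQUIRES(dex_lock_): the caller must already hold the lock exclusively.
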
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index a4e0227..3f8259a 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -46,7 +46,7 @@
 class ClassLinkerTest : public CommonRuntimeTest {
  protected:
   void AssertNonExistentClass(const std::string& descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == nullptr);
     EXPECT_TRUE(self->IsExceptionPending());
@@ -58,13 +58,13 @@
   }
 
   void AssertPrimitiveClass(const std::string& descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     AssertPrimitiveClass(descriptor, class_linker_->FindSystemClass(self, descriptor.c_str()));
   }
 
   void AssertPrimitiveClass(const std::string& descriptor, mirror::Class* primitive)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ASSERT_TRUE(primitive != nullptr);
     ASSERT_TRUE(primitive->GetClass() != nullptr);
     ASSERT_EQ(primitive->GetClass(), primitive->GetClass()->GetClass());
@@ -102,7 +102,7 @@
   void AssertArrayClass(const std::string& array_descriptor,
                         const std::string& component_type,
                         mirror::ClassLoader* class_loader)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     StackHandleScope<2> hs(self);
     Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
@@ -116,7 +116,7 @@
   }
 
   void AssertArrayClass(const std::string& array_descriptor, Handle<mirror::Class> array)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ASSERT_TRUE(array.Get() != nullptr);
     ASSERT_TRUE(array->GetClass() != nullptr);
     ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass());
@@ -159,7 +159,7 @@
     EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
   }
 
-  void AssertMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void AssertMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
     EXPECT_TRUE(method != nullptr);
     EXPECT_TRUE(method->GetDeclaringClass() != nullptr);
     EXPECT_TRUE(method->GetName() != nullptr);
@@ -174,7 +174,7 @@
   }
 
   void AssertField(mirror::Class* klass, ArtField* field)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     EXPECT_TRUE(field != nullptr);
     EXPECT_EQ(klass, field->GetDeclaringClass());
     EXPECT_TRUE(field->GetName() != nullptr);
@@ -182,7 +182,7 @@
   }
 
   void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     std::string temp;
     EXPECT_STREQ(descriptor.c_str(), klass->GetDescriptor(&temp));
     if (descriptor == "Ljava/lang/Object;") {
@@ -319,7 +319,7 @@
   }
 
   void AssertDexFileClass(mirror::ClassLoader* class_loader, const std::string& descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ASSERT_FALSE(descriptor.empty());
     Thread* self = Thread::Current();
     StackHandleScope<1> hs(self);
@@ -339,7 +339,7 @@
   }
 
   void AssertDexFile(const DexFile& dex, mirror::ClassLoader* class_loader)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Verify all the classes defined in this file
     for (size_t i = 0; i < dex.NumClassDefs(); i++) {
       const DexFile::ClassDef& class_def = dex.GetClassDef(i);
@@ -385,7 +385,7 @@
   std::string class_descriptor;
   std::vector<CheckOffset> offsets;
 
-  bool Check() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool Check() SHARED_REQUIRES(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     mirror::Class* klass =
         Runtime::Current()->GetClassLinker()->FindSystemClass(self, class_descriptor.c_str());
@@ -1107,7 +1107,7 @@
 }
 
 static void CheckMethod(ArtMethod* method, bool verified)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (!method->IsNative() && !method->IsAbstract()) {
     EXPECT_EQ((method->GetAccessFlags() & kAccPreverified) != 0U, verified)
         << PrettyMethod(method, true);
@@ -1115,7 +1115,7 @@
 }
 
 static void CheckPreverified(mirror::Class* c, bool preverified)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   EXPECT_EQ((c->GetAccessFlags() & kAccPreverified) != 0U, preverified)
       << "Class " << PrettyClass(c) << " not as expected";
   for (auto& m : c->GetDirectMethods(sizeof(void*))) {
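
Nearly every helper in this fixture carries SHARED_REQUIRES(Locks::mutator_lock_) because it only reads managed objects, and reading needs just shared (reader) ownership of the lock. A sketch of how such a requirement is satisfied through a scoped reader guard; the attribute spellings are clang's, and the type and variable names are illustrative rather than ART's mutex.h:

    #define CAPABILITY(x)         __attribute__((capability(x)))
    #define SCOPED_CAPABILITY     __attribute__((scoped_lockable))
    #define ACQUIRE_SHARED(...)   __attribute__((acquire_shared_capability(__VA_ARGS__)))
    #define RELEASE_SHARED(...)   __attribute__((release_shared_capability(__VA_ARGS__)))
    #define RELEASE(...)          __attribute__((release_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...)  __attribute__((requires_shared_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") ReaderWriterMutex {
     public:
      void SharedLock() ACQUIRE_SHARED() {}     // no-op stand-ins
      void SharedUnlock() RELEASE_SHARED() {}
    };

    ReaderWriterMutex mutator_lock;

    class SCOPED_CAPABILITY ReaderMutexLock {
     public:
      explicit ReaderMutexLock(ReaderWriterMutex& mu) ACQUIRE_SHARED(mu) : mu_(mu) {
        mu_.SharedLock();
      }
      ~ReaderMutexLock() RELEASE() { mu_.SharedUnlock(); }
     private:
      ReaderWriterMutex& mu_;
    };

    int heap_state = 0;

    int ReadHeapState() SHARED_REQUIRES(mutator_lock) { return heap_state; }

    void Test() {
      ReaderMutexLock rl(mutator_lock);  // shared (reader) acquisition
      ReadHeapState();                   // OK for the lifetime of rl
    }                                    // rl releases; calling after this warns
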
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 2332f97..a474ae6 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -122,12 +122,12 @@
   std::string GetTestDexFileName(const char* name);
 
   std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   std::unique_ptr<const DexFile> OpenTestDexFile(const char* name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  jobject LoadDex(const char* dex_name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  jobject LoadDex(const char* dex_name) SHARED_REQUIRES(Locks::mutator_lock_);
 
   std::string android_data_;
   std::string dalvik_cache_;
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 3acd366..de692d1 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -34,7 +34,7 @@
 namespace art {
 
 static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (referrer != nullptr) {
     std::string location(referrer->GetLocation());
     if (!location.empty()) {
@@ -46,7 +46,7 @@
 
 static void ThrowException(const char* exception_descriptor,
                            mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::ostringstream msg;
   if (args != nullptr) {
     std::string vmsg;
@@ -62,7 +62,7 @@
 
 static void ThrowWrappedException(const char* exception_descriptor,
                                   mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::ostringstream msg;
   if (args != nullptr) {
     std::string vmsg;
@@ -336,7 +336,7 @@
 static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
                                                          const DexFile& dex_file,
                                                          InvokeType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::ostringstream msg;
   msg << "Attempt to invoke " << type << " method '"
       << PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index b391c5b..2402e6f 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -33,169 +33,169 @@
 // AbstractMethodError
 
 void ThrowAbstractMethodError(ArtMethod* method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // ArithmeticException
 
-void ThrowArithmeticExceptionDivideByZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+void ThrowArithmeticExceptionDivideByZero() SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // ArrayIndexOutOfBoundsException
 
 void ThrowArrayIndexOutOfBoundsException(int index, int length)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // ArrayStoreException
 
 void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // ClassCircularityError
 
 void ThrowClassCircularityError(mirror::Class* c)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // ClassCastException
 
 void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowClassCastException(const char* msg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // ClassFormatError
 
 void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...)
     __attribute__((__format__(__printf__, 2, 3)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // IllegalAccessError
 
 void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
                                                    ArtMethod* called,
                                                    InvokeType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
     __attribute__((__format__(__printf__, 2, 3)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // IllegalAccessException
 
 void ThrowIllegalAccessException(const char* msg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // IllegalArgumentException
 
 void ThrowIllegalArgumentException(const char* msg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // IncompatibleClassChangeError
 
 void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
                                        ArtMethod* method, ArtMethod* referrer)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
                                                                 mirror::Object* this_object,
                                                                 ArtMethod* referrer)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
                                             ArtMethod* referrer)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...)
     __attribute__((__format__(__printf__, 2, 3)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // IOException
 
 void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowWrappedIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // LinkageError
 
 void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...)
     __attribute__((__format__(__printf__, 2, 3)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...)
     __attribute__((__format__(__printf__, 2, 3)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // NegativeArraySizeException
 
 void ThrowNegativeArraySizeException(int size)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowNegativeArraySizeException(const char* msg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 
 // NoSuchFieldError
 
 void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
                            const StringPiece& type, const StringPiece& name)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // NoSuchMethodError
 
 void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
                             const Signature& signature)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowNoSuchMethodError(uint32_t method_idx)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // NullPointerException
 
 void ThrowNullPointerExceptionForFieldAccess(ArtField* field,
                                              bool is_read)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
                                               InvokeType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
                                               InvokeType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowNullPointerExceptionFromDexPC()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 void ThrowNullPointerException(const char* msg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // RuntimeException
 
 void ThrowRuntimeException(const char* fmt, ...)
     __attribute__((__format__(__printf__, 1, 2)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 // VerifyError
 
 void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...)
     __attribute__((__format__(__printf__, 2, 3)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+    SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
 
 }  // namespace art
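
Every thrower above takes the mutator lock shared because constructing a Throwable allocates and inspects managed objects. When a caller holds the lock in a way the static analysis cannot see, clang's assertion attributes let a runtime check stand in for the proof. A minimal sketch; assert_shared_capability is clang's spelling, while the AssertSharedHeld name and its empty body are illustrative assumptions:

    #define CAPABILITY(x)                __attribute__((capability(x)))
    #define ASSERT_SHARED_CAPABILITY(x)  __attribute__((assert_shared_capability(x)))
    #define SHARED_REQUIRES(...)         __attribute__((requires_shared_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") ReaderWriterMutex {
     public:
      // A real implementation would abort unless the lock is shared-held;
      // past this call the analysis treats the shared capability as held.
      void AssertSharedHeld() ASSERT_SHARED_CAPABILITY(this) {}
    };

    ReaderWriterMutex mutator_lock;

    void ThrowNullPointerException(const char* msg) SHARED_REQUIRES(mutator_lock) {
      (void)msg;  // a real thrower allocates the managed Throwable here
    }

    void FromRuntimeStub(const char* msg) {
      mutator_lock.AssertSharedHeld();  // runtime check doubles as static proof
      ThrowNullPointerException(msg);   // OK: shared capability established
    }
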
 
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index b296e39..af7b04f 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -38,7 +38,7 @@
   virtual ~CompilerCallbacks() { }
 
   virtual bool MethodVerified(verifier::MethodVerifier* verifier)
-  SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  SHARED_REQUIRES(Locks::mutator_lock_) = 0;
   virtual void ClassRejected(ClassReference ref) = 0;
 
   // Return true if we should attempt to relocate to a random base address if we have not already
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index eccebf1..287a50b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -72,7 +72,7 @@
  public:
   Breakpoint(ArtMethod* method, uint32_t dex_pc,
              DeoptimizationRequest::Kind deoptimization_kind)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+    SHARED_REQUIRES(Locks::mutator_lock_)
     : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
     CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
           deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
@@ -81,14 +81,14 @@
     method_ = soa.EncodeMethod(method);
   }
 
-  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
     : method_(nullptr), dex_pc_(other.dex_pc_),
       deoptimization_kind_(other.deoptimization_kind_) {
     ScopedObjectAccessUnchecked soa(Thread::Current());
     method_ = soa.EncodeMethod(other.Method());
   }
 
-  ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_) {
     ScopedObjectAccessUnchecked soa(Thread::Current());
     return soa.DecodeMethod(method_);
   }
@@ -111,7 +111,7 @@
 };
 
 static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
   return os;
 }
@@ -123,7 +123,7 @@
 
   void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                      uint32_t dex_pc)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     if (method->IsNative()) {
       // TODO: posting location events is a suspension point, but native method entry stubs aren't.
       return;
@@ -149,7 +149,7 @@
 
   void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                     uint32_t dex_pc, const JValue& return_value)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     if (method->IsNative()) {
       // TODO: posting location events is a suspension point, but native method entry stubs aren't.
       return;
@@ -166,7 +166,7 @@
 
   void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
                     ArtMethod* method, uint32_t dex_pc)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     // We're not registered to listen to this kind of event, so complain.
     LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
                << " " << dex_pc;
@@ -174,7 +174,7 @@
 
   void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                   uint32_t new_dex_pc)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
       // We also listen to kMethodExited instrumentation event and the current instruction is a
       // RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -195,47 +195,47 @@
 
   void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                  ArtMethod* method, uint32_t dex_pc, ArtField* field)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
   }
 
   void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                     ArtMethod* method, uint32_t dex_pc, ArtField* field,
                     const JValue& field_value)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
   }
 
   void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     Dbg::PostException(exception_object);
   }
 
   // We only care about how many backward branches were executed in the Jit.
   void BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t dex_pc_offset)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method)
                << " " << dex_pc_offset;
   }
 
  private:
   static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const DexFile::CodeItem* code_item = method->GetCodeItem();
     const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
     return instruction->IsReturn();
   }
 
-  static bool IsListeningToDexPcMoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) {
     return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
   }
 
-  static bool IsListeningToMethodExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) {
     return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
   }
 
   static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return (Dbg::GetInstrumentationEvents() & event) != 0;
   }
 
@@ -298,8 +298,8 @@
 }
 
 static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc)
-    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    REQUIRES(!Locks::breakpoint_lock_)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
   for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
     if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
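
IsBreakpoint() above shows the recurring pattern in these debugger helpers (IsSuspendedForDebugger() just below is the same): the function is annotated REQUIRES(!lock) precisely because it acquires that lock itself through a scoped guard, so a caller that already held it would self-deadlock, and the analysis can now reject such callers statically. A self-contained sketch of the pattern, with assumed macro spellings and names:

    #define CAPABILITY(x)      __attribute__((capability(x)))
    #define SCOPED_CAPABILITY  __attribute__((scoped_lockable))
    #define ACQUIRE(...)       __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)       __attribute__((release_capability(__VA_ARGS__)))
    #define REQUIRES(...)      __attribute__((requires_capability(__VA_ARGS__)))
    #define GUARDED_BY(x)      __attribute__((guarded_by(x)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() {}      // no-op stand-ins for a real mutex
      void Unlock() RELEASE() {}
    };

    class SCOPED_CAPABILITY MutexLock {
     public:
      explicit MutexLock(Mutex& mu) ACQUIRE(mu) : mu_(mu) { mu_.Lock(); }
      ~MutexLock() RELEASE() { mu_.Unlock(); }
     private:
      Mutex& mu_;
    };

    Mutex thread_suspend_count_lock;
    int debug_suspend_count GUARDED_BY(thread_suspend_count_lock) = 0;

    bool IsSuspendedForDebugger() REQUIRES(!thread_suspend_count_lock) {
      MutexLock mu(thread_suspend_count_lock);  // held for this scope only
      return debug_suspend_count > 0;           // read is legal: lock is held
    }                                           // released by ~MutexLock
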
@@ -311,7 +311,7 @@
 }
 
 static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
-    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
+    REQUIRES(!Locks::thread_suspend_count_lock_) {
   MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
   // A thread may be suspended for GC; in this code, we really want to know whether
   // there's a debugger suspension active.
@@ -319,7 +319,7 @@
 }
 
 static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
   if (o == nullptr) {
     *error = JDWP::ERR_INVALID_OBJECT;
@@ -334,7 +334,7 @@
 }
 
 static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
   if (o == nullptr) {
     *error = JDWP::ERR_INVALID_OBJECT;
@@ -350,8 +350,8 @@
 
 static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
                             JDWP::JdwpError* error)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-    LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_)
+    REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
   mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
   if (thread_peer == nullptr) {
     // This isn't even an object.
@@ -381,14 +381,14 @@
 }
 
 static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::string temp;
   const char* descriptor = klass->GetDescriptor(&temp);
   return BasicTagFromDescriptor(descriptor);
 }
 
 static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   CHECK(c != nullptr);
   if (c->IsArrayClass()) {
     return JDWP::JT_ARRAY;
@@ -764,7 +764,7 @@
     OwnedMonitorVisitor(Thread* thread, Context* context,
                         std::vector<JDWP::ObjectId>* monitor_vector,
                         std::vector<uint32_t>* stack_depth_vector)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         current_stack_depth(0),
         monitors(monitor_vector),
@@ -781,7 +781,7 @@
     }
 
     static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+        SHARED_REQUIRES(Locks::mutator_lock_) {
       OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
       visitor->monitors->push_back(gRegistry->Add(owned_monitor));
       visitor->stack_depths->push_back(visitor->current_stack_depth);
@@ -1270,17 +1270,17 @@
 }
 
 static JDWP::MethodId ToMethodId(const ArtMethod* m)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
 }
 
 static ArtField* FromFieldId(JDWP::FieldId fid)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
 }
 
 static ArtMethod* FromMethodId(JDWP::MethodId mid)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
 }
 
@@ -1326,10 +1326,7 @@
   return modifier_instance == event_instance;
 }
 
-void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-    LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                   Locks::thread_suspend_count_lock_) {
+void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) {
   if (m == nullptr) {
     memset(location, 0, sizeof(*location));
   } else {
@@ -1376,7 +1373,7 @@
  * the end.
  */
 static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   const DexFile::CodeItem* code_item = m->GetCodeItem();
   if (code_item == nullptr) {
     // We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1398,7 +1395,7 @@
  * slots to dex style argument placement.
  */
 static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   const DexFile::CodeItem* code_item = m->GetCodeItem();
   if (code_item == nullptr) {
     // We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1424,7 +1421,8 @@
   return DexFile::kDexNoIndex16;
 }
 
-JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
+JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic,
+                                          JDWP::ExpandBuf* pReply) {
   JDWP::JdwpError error;
   mirror::Class* c = DecodeClass(class_id, &error);
   if (c == nullptr) {
@@ -1437,7 +1435,8 @@
   expandBufAdd4BE(pReply, instance_field_count + static_field_count);
 
   for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
-    ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
+    ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) :
+        c->GetStaticField(i - instance_field_count);
     expandBufAddFieldId(pReply, ToFieldId(f));
     expandBufAddUtf8String(pReply, f->GetName());
     expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
@@ -1553,7 +1552,7 @@
 
     static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
                          const char* name, const char* descriptor, const char* signature)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+        SHARED_REQUIRES(Locks::mutator_lock_) {
       DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
 
       VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
@@ -1641,7 +1640,7 @@
 }
 
 static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
   JValue field_value;
   switch (fieldType) {
@@ -1688,7 +1687,7 @@
 static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
                                          JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
                                          bool is_static)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   JDWP::JdwpError error;
   mirror::Class* c = DecodeClass(ref_type_id, &error);
   if (ref_type_id != 0 && c == nullptr) {
@@ -1744,7 +1743,7 @@
 }
 
 static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
   // Debugging only happens at runtime so we know we are not running in a transaction.
   static constexpr bool kNoTransactionMode = false;
@@ -1815,7 +1814,7 @@
 
 static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                          uint64_t value, int width, bool is_static)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   JDWP::JdwpError error;
   mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
   if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
@@ -1945,7 +1944,7 @@
 
 static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
                                          JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
                                                                                 error);
   if (*error != JDWP::ERR_NONE) {
@@ -2004,7 +2003,7 @@
 
 static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
                                  std::vector<JDWP::ObjectId>* child_thread_group_ids)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   CHECK(thread_group != nullptr);
 
   // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
@@ -2158,7 +2157,7 @@
 
 static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
                                    mirror::Object* desired_thread_group, mirror::Object* peer)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Do we want threads from all thread groups?
   if (desired_thread_group == nullptr) {
     return true;
@@ -2202,7 +2201,7 @@
   }
 }
 
-static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) {
   struct CountStackDepthVisitor : public StackVisitor {
     explicit CountStackDepthVisitor(Thread* thread_in)
         : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -2245,7 +2244,7 @@
    public:
     GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
                     JDWP::ExpandBuf* buf_in)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        SHARED_REQUIRES(Locks::mutator_lock_)
         : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
           depth_(0),
           start_frame_(start_frame_in),
@@ -2254,7 +2253,7 @@
       expandBufAdd4BE(buf_, frame_count_);
     }
 
-    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
       if (GetMethod()->IsRuntimeMethod()) {
         return true;  // The debugger can't do anything useful with a frame that has no Method*.
       }
@@ -2366,7 +2365,7 @@
 
 struct GetThisVisitor : public StackVisitor {
   GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         this_object(nullptr),
         frame_id(frame_id_in) {}
@@ -2408,7 +2407,7 @@
 class FindFrameVisitor FINAL : public StackVisitor {
  public:
   FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         frame_id_(frame_id),
         error_(JDWP::ERR_INVALID_FRAMEID) {}
@@ -2482,14 +2481,14 @@
 constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
 
 static std::string GetStackContextAsString(const StackVisitor& visitor)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
                       PrettyMethod(visitor.GetMethod()).c_str());
 }
 
 static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
                                          JDWP::JdwpTag tag)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
              << GetStackContextAsString(visitor);
   return kStackFrameLocalAccessError;
@@ -2651,7 +2650,7 @@
 template<typename T>
 static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
                                          JDWP::JdwpTag tag, T value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   LOG(ERROR) << "Failed to write " << tag << " local " << value
              << " (0x" << std::hex << value << ") into register v" << vreg
              << GetStackContextAsString(visitor);
@@ -2736,7 +2735,7 @@
 }
 
 static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(location != nullptr);
   if (m == nullptr) {
     memset(location, 0, sizeof(*location));
@@ -2814,7 +2813,7 @@
 class CatchLocationFinder : public StackVisitor {
  public:
   CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
     : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
       self_(self),
       exception_(exception),
@@ -2826,7 +2825,7 @@
       throw_dex_pc_(DexFile::kDexNoIndex) {
   }
 
-  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     DCHECK(method != nullptr);
     if (method->IsRuntimeMethod()) {
@@ -2860,15 +2859,15 @@
     return true;  // Continue stack walk.
   }
 
-  ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
     return catch_method_;
   }
 
-  ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
     return throw_method_;
   }
 
-  mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) {
     return this_at_throw_.Get();
   }
 
@@ -3170,7 +3169,7 @@
 }
 
 static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   const DexFile::CodeItem* code_item = m->GetCodeItem();
   if (code_item == nullptr) {
     // TODO We should not be asked to watch location in a native or abstract method so the code item
@@ -3191,7 +3190,7 @@
 }
 
 static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
   for (Breakpoint& breakpoint : gBreakpoints) {
     if (breakpoint.Method() == m) {
       return &breakpoint;
@@ -3208,7 +3207,7 @@
 // Sanity checks all existing breakpoints on the same method.
 static void SanityCheckExistingBreakpoints(ArtMethod* m,
                                            DeoptimizationRequest::Kind deoptimization_kind)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
   for (const Breakpoint& breakpoint : gBreakpoints) {
     if (breakpoint.Method() == m) {
       CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
@@ -3237,7 +3236,7 @@
 static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
                                                                  ArtMethod* m,
                                                                  const Breakpoint** existing_brkpt)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (!Dbg::RequiresDeoptimization()) {
     // We already run in interpreter-only mode so we don't need to deoptimize anything.
     VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
@@ -3498,8 +3497,8 @@
 class ScopedThreadSuspension {
  public:
   ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+      REQUIRES(!Locks::thread_list_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) :
       thread_(nullptr),
       error_(JDWP::ERR_NONE),
       self_suspend_(false),
@@ -3560,7 +3559,7 @@
   // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
   // is for step-out.
   struct SingleStepStackVisitor : public StackVisitor {
-    explicit SingleStepStackVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+    explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
         : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
           stack_depth(0),
           method(nullptr),
@@ -4419,7 +4418,7 @@
     needHeader_ = false;
   }
 
-  void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (pieceLenField_ == nullptr) {
       // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
       CHECK(needHeader_);
@@ -4435,13 +4434,13 @@
   }
 
   static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
-                            Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_,
+                      Locks::mutator_lock_) {
     reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
   }
 
   static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
   }
 
@@ -4461,7 +4460,7 @@
   }
 
   // Returns true if the object is not an empty chunk.
-  bool ProcessRecord(void* start, size_t used_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Note: heap callbacks cannot manipulate the heap upon which they are crawling; care is taken
     // in the following code not to allocate memory, by ensuring buf_ is of the correct size.
     if (used_bytes == 0) {
@@ -4498,7 +4497,7 @@
   }
 
   void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (ProcessRecord(start, used_bytes)) {
       uint8_t state = ExamineNativeObject(start);
       AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
@@ -4507,7 +4506,7 @@
   }
 
   void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     if (ProcessRecord(start, used_bytes)) {
       // Determine the type of this chunk.
       // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
@@ -4519,7 +4518,7 @@
   }
 
   void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Make sure there's enough room left in the buffer.
     // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
     // 17 bytes for any header.
@@ -4552,12 +4551,12 @@
     *p_++ = length - 1;
   }
 
-  uint8_t ExamineNativeObject(const void* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) {
     return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
   }
 
   uint8_t ExamineJavaObject(mirror::Object* o)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     if (o == nullptr) {
       return HPSG_STATE(SOLIDITY_FREE, 0);
     }
@@ -4607,7 +4606,7 @@
 };
 
 static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
   const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
   HeapChunkContext::HeapChunkJavaCallback(
       obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
@@ -4772,7 +4771,7 @@
 };
 
 static const char* GetMethodSourceFile(ArtMethod* method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(method != nullptr);
   const char* source_file = method->GetDeclaringClassSourceFile();
   return (source_file != nullptr) ? source_file : "";
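
For reference, a minimal sketch of what the capability macros used above boil
down to. This is not ART's base/macros.h; the expansions are assumed from the
Clang thread-safety annotation docs, and the names (Mutex, gLock, gValue) are
made up for illustration:

// mini_caps.cc -- standalone sketch; compile with:
//   clang++ -std=c++11 -c -Wthread-safety -Wthread-safety-negative mini_caps.cc
#define CAPABILITY(x)        __attribute__((capability(x)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() {}    // Acquires the 'this' capability exclusively.
  void Unlock() RELEASE() {}  // Releases it.
};

Mutex gLock;
int gValue GUARDED_BY(gLock);

// Caller must hold gLock exclusively -- the old EXCLUSIVE_LOCKS_REQUIRED.
void WriteValue(int v) REQUIRES(gLock) { gValue = v; }

// Caller must hold gLock at least shared -- the old SHARED_LOCKS_REQUIRED.
int ReadValue() SHARED_REQUIRES(gLock) { return gValue; }

// Negative capability -- the old LOCKS_EXCLUDED. The caller must provably
// NOT hold gLock, so acquiring it here cannot self-deadlock. Unlike
// LOCKS_EXCLUDED, this requirement is also checked in callers once
// -Wthread-safety-negative is enabled.
int ReadValueLocked() REQUIRES(!gLock) {
  gLock.Lock();
  int v = gValue;  // OK: gLock is held here.
  gLock.Unlock();
  return v;
}
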
diff --git a/runtime/debugger.h b/runtime/debugger.h
index fd7d46c..a9fa6ce 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -79,7 +79,7 @@
   JDWP::ExpandBuf* const reply;
 
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq);
@@ -155,15 +155,15 @@
   DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {}
 
   DeoptimizationRequest(const DeoptimizationRequest& other)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) {
     // Create a new JNI global reference for the method.
     SetMethod(other.Method());
   }
 
-  ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetMethod(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Name 'Kind()' would collide with the above enum name.
   Kind GetKind() const {
@@ -205,7 +205,7 @@
   static void StopJdwp();
 
   // Invoked by the GC in case we need to keep DDMS informed.
-  static void GcDidFinish() LOCKS_EXCLUDED(Locks::mutator_lock_);
+  static void GcDidFinish() REQUIRES(!Locks::mutator_lock_);
 
   // Return the DebugInvokeReq for the current thread.
   static DebugInvokeReq* GetInvokeReq();
@@ -219,8 +219,8 @@
    */
   static void Connected();
   static void GoActive()
-      LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_, Locks::mutator_lock_);
-  static void Disconnected() LOCKS_EXCLUDED(Locks::deoptimization_lock_, Locks::mutator_lock_);
+      REQUIRES(!Locks::breakpoint_lock_, !Locks::deoptimization_lock_, !Locks::mutator_lock_);
+  static void Disconnected() REQUIRES(!Locks::deoptimization_lock_, !Locks::mutator_lock_);
   static void Dispose() {
     gDisposed = true;
   }
@@ -239,8 +239,7 @@
 
   // Returns true if a method has any breakpoints.
   static bool MethodHasAnyBreakpoints(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::breakpoint_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_);
 
   static bool IsDisposed() {
     return gDisposed;
@@ -254,248 +253,233 @@
   static int64_t LastDebuggerActivity();
 
   static void UndoDebuggerSuspensions()
-    LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                   Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   /*
    * Class, Object, Array
    */
   static std::string GetClassName(JDWP::RefTypeId id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static std::string GetClassName(mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void GetClassList(std::vector<JDWP::RefTypeId>* classes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                       uint32_t* pStatus, std::string* pDescriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string* source_file)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static size_t GetTagWidth(JDWP::JdwpTag tag);
 
   static JDWP::JdwpError GetArrayLength(JDWP::ObjectId array_id, int32_t* length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError OutputArray(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                           JDWP::Request* request)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static JDWP::JdwpError CreateString(const std::string& str, JDWP::ObjectId* new_string_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                            JDWP::ObjectId* new_array_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   //
   // Event filtering.
   //
   static bool MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool MatchLocation(const JDWP::JdwpLocation& expected_location,
                             const JDWP::EventLocation& event_location)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
                          ArtField* event_field)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   //
   // Monitors.
   //
   static JDWP::JdwpError GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetOwnedMonitors(JDWP::ObjectId thread_id,
                                           std::vector<JDWP::ObjectId>* monitors,
                                           std::vector<uint32_t>* stack_depths)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id,
                                              JDWP::ObjectId* contended_monitor)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   //
   // Heap.
   //
   static JDWP::JdwpError GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                            std::vector<uint64_t>* counts)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
                                       std::vector<JDWP::ObjectId>* instances)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                              std::vector<JDWP::ObjectId>* referring_objects)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError DisableCollection(JDWP::ObjectId object_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError EnableCollection(JDWP::ObjectId object_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError IsCollected(JDWP::ObjectId object_id, bool* is_collected)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   //
   // Methods and fields.
   //
   static std::string GetMethodName(JDWP::MethodId method_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic,
                                               JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId ref_type_id, bool with_generic,
                                                JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId ref_type_id,
                                                   JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void OutputLineTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId method_id,
                               JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void OutputVariableTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId id, bool with_generic,
                                   JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
                                       JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
                                JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetBytecodes(JDWP::RefTypeId class_id, JDWP::MethodId method_id,
                                       std::vector<uint8_t>* bytecodes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static std::string GetFieldName(JDWP::FieldId field_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId field_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId field_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                        JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                        uint64_t value, int width)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
                                              JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Thread, ThreadGroup, Frame
    */
   static JDWP::JdwpError GetThreadName(JDWP::ObjectId thread_id, std::string* name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
   static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
   static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id,
                                             JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id,
                                               JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
                                                 JDWP::ExpandBuf* pReply)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::ObjectId GetSystemThreadGroupId()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
   static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
                                          JDWP::JdwpThreadStatus* pThreadStatus,
                                          JDWP::JdwpSuspendStatus* pSuspendStatus)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      REQUIRES(!Locks::thread_list_lock_);
   static JDWP::JdwpError GetThreadDebugSuspendCount(JDWP::ObjectId thread_id,
                                                     JDWP::ExpandBuf* pReply)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
   // static void WaitForSuspend(JDWP::ObjectId thread_id);
 
   // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
   // returns all threads.
   static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      REQUIRES(!Locks::thread_list_lock_);
   static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
                                          size_t frame_count, JDWP::ExpandBuf* buf)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static JDWP::ObjectId GetThreadSelfId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static JDWP::ObjectId GetThreadSelfId() SHARED_REQUIRES(Locks::mutator_lock_);
+  static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void SuspendVM()
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
   static void ResumeVM()
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
   static JDWP::JdwpError SuspendThread(JDWP::ObjectId thread_id, bool request_suspension = true)
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_);
 
   static void ResumeThread(JDWP::ObjectId thread_id)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void SuspendSelf();
 
   static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
                                        JDWP::ObjectId* result)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError SetLocalValues(JDWP::Request* request)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      REQUIRES(!Locks::thread_list_lock_);
 
   /*
    * Debugger notification
@@ -508,47 +492,42 @@
   };
   static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
                                    ArtField* f)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void PostFieldModificationEvent(ArtMethod* m, int dex_pc,
                                          mirror::Object* this_object, ArtField* f,
                                          const JValue* field_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void PostException(mirror::Throwable* exception)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void PostThreadStart(Thread* t)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void PostThreadDeath(Thread* t)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void PostClassPrepare(mirror::Class* c)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void UpdateDebugger(Thread* thread, mirror::Object* this_object,
                              ArtMethod* method, uint32_t new_dex_pc,
                              int event_flags, const JValue* return_value)
-      LOCKS_EXCLUDED(Locks::breakpoint_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Indicates whether we need deoptimization for debugging.
   static bool RequiresDeoptimization();
 
   // Records deoptimization request in the queue.
   static void RequestDeoptimization(const DeoptimizationRequest& req)
-      LOCKS_EXCLUDED(Locks::deoptimization_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Manage deoptimization after updating the JDWP event list. Suspends all threads, processes each
   // request and finally resumes all threads.
   static void ManageDeoptimization()
-      LOCKS_EXCLUDED(Locks::deoptimization_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Breakpoints.
   static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
-      LOCKS_EXCLUDED(Locks::breakpoint_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
-      LOCKS_EXCLUDED(Locks::breakpoint_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Forced interpreter checkers for single-step and continue support.
@@ -557,7 +536,7 @@
   // Indicates whether we need to force the use of the interpreter to invoke a method.
   // This allows single-stepping or continuing into the called method.
   static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!IsDebuggerActive()) {
       return false;
     }
@@ -568,7 +547,7 @@
   // method through the resolution trampoline. This allows single-stepping or continuing
   // into the called method.
   static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!IsDebuggerActive()) {
       return false;
     }
@@ -579,7 +558,7 @@
   // a method through the resolution trampoline. This allows deoptimizing the stack for
   // debugging when we return from the called method.
   static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!IsDebuggerActive()) {
       return false;
     }
@@ -590,7 +569,7 @@
   // interpreter into the runtime. This allows deoptimizing the stack and continuing
   // execution in the interpreter for debugging.
   static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!IsDebuggerActive()) {
       return false;
     }
@@ -600,10 +579,9 @@
   // Single-stepping.
   static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
                                        JDWP::JdwpStepDepth depth)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void UnconfigureStep(JDWP::ObjectId thread_id)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Invoke support
@@ -623,9 +601,8 @@
                                              JDWP::MethodId method_id, uint32_t arg_count,
                                              uint64_t arg_values[], JDWP::JdwpTag* arg_types,
                                              uint32_t options)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Called by the event thread to execute a method prepared by the JDWP thread in the given
   // DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply
@@ -642,30 +619,29 @@
    * DDM support.
    */
   static void DdmSendThreadNotification(Thread* t, uint32_t type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void DdmSetThreadNotification(bool enable)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      REQUIRES(!Locks::thread_list_lock_);
   static bool DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen);
-  static void DdmConnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void DdmDisconnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void DdmConnected() SHARED_REQUIRES(Locks::mutator_lock_);
+  static void DdmDisconnected() SHARED_REQUIRES(Locks::mutator_lock_);
   static void DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void VisitRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Allocation tracking support.
    */
-  static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+  static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
   static jbyteArray GetRecentAllocations()
-      LOCKS_EXCLUDED(Locks::alloc_tracker_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void DumpRecentAllocations() LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+      REQUIRES(!Locks::alloc_tracker_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void DumpRecentAllocations() REQUIRES(!Locks::alloc_tracker_lock_);
 
   enum HpifWhen {
     HPIF_WHEN_NEVER = 0,
@@ -674,7 +650,7 @@
     HPIF_WHEN_EVERY_GC = 3
   };
   static int DdmHandleHpifChunk(HpifWhen when)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   enum HpsgWhen {
     HPSG_WHEN_NEVER = 0,
@@ -687,78 +663,76 @@
   static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native);
 
   static void DdmSendHeapInfo(HpifWhen reason)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void DdmSendHeapSegments(bool native)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static ObjectRegistry* GetObjectRegistry() {
     return gRegistry;
   }
 
   static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static JDWP::FieldId ToFieldId(const ArtField* f)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   static JDWP::JdwpState* GetJdwpState();
 
-  static uint32_t GetInstrumentationEvents() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static uint32_t GetInstrumentationEvents() SHARED_REQUIRES(Locks::mutator_lock_) {
     return instrumentation_events_;
   }
 
  private:
   static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id,
                                JDWP::JdwpTag result_tag, uint64_t result_value,
                                JDWP::ObjectId exception)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
                                        ScopedObjectAccessUnchecked& soa, int slot,
                                        JDWP::JdwpTag tag, uint8_t* buf, size_t width)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   static JDWP::JdwpError SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
                                        uint64_t value, size_t width)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void DdmBroadcast(bool connect) SHARED_REQUIRES(Locks::mutator_lock_);
   static void PostThreadStartOrStop(Thread*, uint32_t)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void PostLocationEvent(ArtMethod* method, int pcOffset,
                                 mirror::Object* thisPtr, int eventFlags,
                                 const JValue* return_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_);
 
   static void RequestDeoptimizationLocked(const DeoptimizationRequest& req)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::deoptimization_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Indicates whether the debugger is making requests.
   static bool gDebuggerActive;
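
The pattern throughout debugger.h is that every LOCKS_EXCLUDED(x) becomes
REQUIRES(!x), a negative capability. A short sketch of the extra checking this
buys, reusing the Mutex and macro definitions from the sketch above
(SuspendVM here is a stand-in, not the real Dbg::SuspendVM):

Mutex gThreadListLock;

void SuspendVM() REQUIRES(!gThreadListLock) {
  gThreadListLock.Lock();
  // ... walk and suspend threads ...
  gThreadListLock.Unlock();
}

void BadCaller() REQUIRES(gThreadListLock) {
  SuspendVM();  // Plain -Wthread-safety already warns: SuspendVM cannot be
                // called while gThreadListLock is held.
}

void LazyCaller() {
  SuspendVM();  // -Wthread-safety-negative additionally warns here: this
                // caller never established '!gThreadListLock', so it must
                // itself be annotated REQUIRES(!gThreadListLock). This is
                // how the !-requirements propagate up the call graph.
}
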
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 3a15f1a..ceefdec 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -870,7 +870,7 @@
   //
   // This is used by the runtime; therefore use art::Method, not art::DexFile::Method.
   int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
                        DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
@@ -1314,10 +1314,10 @@
   EncodedStaticFieldValueIterator(const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
                                   Handle<mirror::ClassLoader>* class_loader,
                                   ClassLinker* linker, const DexFile::ClassDef& class_def)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void ReadValueToField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ReadValueToField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool HasNext() const { return pos_ < array_size_; }
 
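
Note that clang's analysis picks thread-safety attributes up from a function's
declaration, so the definition does not need to repeat them; that is why
Dbg::SetJdwpLocation's annotations could simply move from debugger.cc into
debugger.h above. A sketch, with the same assumed macros and names as before:

int GetValue() SHARED_REQUIRES(gLock);  // Annotated once, at the declaration.

int GetValue() {   // The definition inherits the attributes from above...
  return gValue;   // ...and is analyzed as if it carried SHARED_REQUIRES(gLock).
}
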
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index de925b7..3e15cc5 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -41,7 +41,7 @@
 inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
                                     const InlineInfo& inline_info,
                                     uint8_t inlining_depth)
-  SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  SHARED_REQUIRES(Locks::mutator_lock_) {
   uint32_t method_index = inline_info.GetMethodIndexAtDepth(inlining_depth);
   InvokeType invoke_type = static_cast<InvokeType>(
         inline_info.GetInvokeTypeAtDepth(inlining_depth));
@@ -74,7 +74,7 @@
 inline ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
                                             Runtime::CalleeSaveType type,
                                             bool do_caller_check = false)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
 
   const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
@@ -110,7 +110,7 @@
 }
 
 inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return GetCalleeSaveMethodCaller(
       self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */);
 }
@@ -403,7 +403,7 @@
 
 // Explicit template declarations of FindFieldFromCode for all field access types.
 #define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
 ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
                                                   ArtMethod* referrer, \
                                                   Thread* self, size_t expected_size) \
@@ -531,7 +531,7 @@
 
 // Explicit template declarations of FindMethodFromCode for all invoke types.
 #define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check)                 \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                       \
+  template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE                             \
   ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx,         \
                                                       mirror::Object** this_object, \
                                                       ArtMethod** referrer, \
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index fc7f8b7..eaf26bc 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -38,7 +38,7 @@
                                                       ArtMethod* referrer,
                                                       Thread* self,
                                                       bool access_check)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (UNLIKELY(component_count < 0)) {
     ThrowNegativeArraySizeException(component_count);
     return nullptr;  // Failure
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 47865a2..dc04c0a 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -45,12 +45,12 @@
 ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
                                                      ArtMethod* method,
                                                      Thread* self, bool* slow_path)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
                                                                         Thread* self,
                                                                         bool* slow_path)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
 // cannot be resolved, throw an error. If it can, use it to create an instance.
@@ -61,21 +61,21 @@
                                                          ArtMethod* method,
                                                          Thread* self,
                                                          gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Given the context of a calling Method and a resolved class, create an instance.
 template <bool kInstrumented>
 ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
                                                                  Thread* self,
                                                                  gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Given the context of a calling Method and an initialized class, create an instance.
 template <bool kInstrumented>
 ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
                                                                     Thread* self,
                                                                     gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 
 template <bool kAccessCheck>
@@ -83,7 +83,7 @@
                                                     int32_t component_count,
                                                     ArtMethod* method,
                                                     bool* slow_path)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
 // it cannot be resolved, throw an error. If it can, use it to create an array.
@@ -95,7 +95,7 @@
                                                        ArtMethod* method,
                                                        Thread* self,
                                                        gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 template <bool kAccessCheck, bool kInstrumented>
 ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
@@ -103,13 +103,13 @@
                                                                ArtMethod* method,
                                                                Thread* self,
                                                                gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count,
                                                  ArtMethod* method, Thread* self,
                                                  bool access_check,
                                                  gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
                                                              int32_t component_count,
@@ -117,7 +117,7 @@
                                                              Thread* self,
                                                              bool access_check,
                                                              gc::AllocatorType allocator_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Type of find field operation for fast and slow case.
 enum FindFieldType {
@@ -134,47 +134,47 @@
 template<FindFieldType type, bool access_check>
 inline ArtField* FindFieldFromCode(
     uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 template<InvokeType type, bool access_check>
 inline ArtMethod* FindMethodFromCode(
     uint32_t method_idx, mirror::Object** this_object, ArtMethod** referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Fast path field resolution that can't initialize classes or throw exceptions.
 inline ArtField* FindFieldFast(
     uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Fast path method resolution that can't throw exceptions.
 inline ArtMethod* FindMethodFast(
     uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check,
     InvokeType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 inline mirror::Class* ResolveVerifyAndClinit(
     uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
-extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+extern void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
 
 inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // TODO: annotalysis disabled as monitor semantics are maintained in Java code.
 inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
     NO_THREAD_SAFETY_ANALYSIS;
 
 void CheckReferenceResult(mirror::Object* o, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
                                     jobject rcvr_jobj, jobject interface_art_method_jobj,
                                     std::vector<jvalue>& args)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 template <typename INT_TYPE, typename FLOAT_TYPE>
 inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
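
For readers new to the capability spellings used throughout this change, here is a minimal sketch of how the renamed macros can map onto clang's thread safety attributes. The exact definitions live in ART's base macros header, so the spellings below are an illustrative assumption, not the actual definitions:

// Illustrative mapping only (assumed, not ART's real macro definitions).
#define CAPABILITY(x)         __attribute__((capability(x)))
#define REQUIRES(...)         __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...)  __attribute__((requires_shared_capability(__VA_ARGS__)))

Under that mapping, SHARED_REQUIRES(Locks::mutator_lock_) tells the analysis that a function may only be called while the mutator lock capability is held at least shared, and REQUIRES demands an exclusive hold.
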
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 7a44158..331de91 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -39,32 +39,32 @@
   explicit ScopedQuickEntrypointChecks(Thread *self,
                                        bool entry_check = kIsDebugBuild,
                                        bool exit_check = kIsDebugBuild)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
+      SHARED_REQUIRES(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
     if (entry_check) {
       TestsOnEntry();
     }
   }
 
-  ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_)
       : self_(kIsDebugBuild ? Thread::Current() : nullptr), exit_check_(kIsDebugBuild) {
     if (kIsDebugBuild) {
       TestsOnEntry();
     }
   }
 
-  ~ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ~ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (exit_check_) {
       TestsOnExit();
     }
   }
 
  private:
-  void TestsOnEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void TestsOnEntry() SHARED_REQUIRES(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(self_);
     self_->VerifyStack();
   }
 
-  void TestsOnExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void TestsOnExit() SHARED_REQUIRES(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(self_);
     self_->VerifyStack();
   }
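
The RAII class above gives every quick entrypoint its debug-build entry/exit checks in a single line. A hedged usage sketch, assuming ART's existing headers (artExampleEntrypoint is a hypothetical name, not an ART entrypoint):

extern "C" void artExampleEntrypoint(Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  // Asserts the shared mutator lock and verifies the stack on entry;
  // the destructor repeats both checks on exit in debug builds.
  ScopedQuickEntrypointChecks sqec(self);
  // ... entrypoint body ...
}
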
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index f56b5e4..9311791 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -30,7 +30,7 @@
 #define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
 extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
     uint32_t type_idx, ArtMethod* method, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   ScopedQuickEntrypointChecks sqec(self); \
   if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
     mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx); \
@@ -57,7 +57,7 @@
 } \
 extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
     mirror::Class* klass, ArtMethod* method, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   UNUSED(method); \
   ScopedQuickEntrypointChecks sqec(self); \
   if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
@@ -84,7 +84,7 @@
 } \
 extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
     mirror::Class* klass, ArtMethod* method, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   UNUSED(method); \
   ScopedQuickEntrypointChecks sqec(self); \
   if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
@@ -109,34 +109,34 @@
 } \
 extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
     uint32_t type_idx, ArtMethod* method, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   ScopedQuickEntrypointChecks sqec(self); \
   return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
 } \
 extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
     uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   ScopedQuickEntrypointChecks sqec(self); \
   return AllocArrayFromCode<false, instrumented_bool>(type_idx, component_count, method, self, \
                                                       allocator_type); \
 } \
 extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
     mirror::Class* klass, int32_t component_count, ArtMethod* method, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   ScopedQuickEntrypointChecks sqec(self); \
   return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, component_count, method, self, \
                                                               allocator_type); \
 } \
 extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
     uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   ScopedQuickEntrypointChecks sqec(self); \
   return AllocArrayFromCode<true, instrumented_bool>(type_idx, component_count, method, self, \
                                                      allocator_type); \
 } \
 extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
     uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   ScopedQuickEntrypointChecks sqec(self); \
   if (!instrumented_bool) { \
     return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, false, allocator_type); \
@@ -146,7 +146,7 @@
 } \
 extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
     uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   ScopedQuickEntrypointChecks sqec(self); \
   if (!instrumented_bool) { \
     return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, true, allocator_type); \
@@ -157,7 +157,7 @@
 extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \
     mirror::ByteArray* byte_array, int32_t high, int32_t offset, int32_t byte_count, \
     Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   ScopedQuickEntrypointChecks sqec(self); \
   StackHandleScope<1> hs(self); \
   Handle<mirror::ByteArray> handle_array(hs.NewHandle(byte_array)); \
@@ -166,7 +166,7 @@
 } \
 extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \
     int32_t offset, int32_t char_count, mirror::CharArray* char_array, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   StackHandleScope<1> hs(self); \
   Handle<mirror::CharArray> handle_array(hs.NewHandle(char_array)); \
   return mirror::String::AllocFromCharArray<instrumented_bool>(self, char_count, handle_array, \
@@ -174,7 +174,7 @@
 } \
 extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( \
     mirror::String* string, Thread* self) \
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+    SHARED_REQUIRES(Locks::mutator_lock_) { \
   StackHandleScope<1> hs(self); \
   Handle<mirror::String> handle_string(hs.NewHandle(string)); \
   return mirror::String::AllocFromString<instrumented_bool>(self, handle_string->GetLength(), \
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.h b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
index ec0aef5..14a8e04 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
@@ -31,10 +31,10 @@
 // holding the runtime shutdown lock and the mutator lock when we update the entrypoints.
 
 void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator)
-    EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
+    REQUIRES(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
 
 void SetQuickAllocEntryPointsInstrumented(bool instrumented)
-    EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
+    REQUIRES(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
 
 }  // namespace art
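
A sketch of what the combined REQUIRES clause enforces at call sites, assuming ART's headers (UpdateAllocatorLocked is a hypothetical caller, not an ART function):

// Both capabilities must be held exclusively here; without them, clang
// reports a -Wthread-safety diagnostic at the call below.
void UpdateAllocatorLocked(gc::AllocatorType allocator)
    REQUIRES(Locks::mutator_lock_, Locks::runtime_shutdown_lock_) {
  SetQuickAllocEntryPointsAllocator(allocator);  // OK: both locks held.
}
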
 
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index 37de380..968ac53 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -21,7 +21,7 @@
 
 // Assignable test for code; won't throw. Null and equality tests have already been performed.
 extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(klass != nullptr);
   DCHECK(ref_class != nullptr);
   return klass->IsAssignableFrom(ref_class) ? 1 : 0;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index f1b5445..a4feac1 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -28,7 +28,7 @@
 
 namespace art {
 
-extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
 
   if (VLOG_IS_ON(deopt)) {
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 3cefc47..b12b118 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -26,7 +26,7 @@
 namespace art {
 
 extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Called to ensure static storage base is initialized for direct static field reads and writes.
   // A class may be accessing another class' fields when it doesn't have access, as access has been
   // given by inheritance.
@@ -36,7 +36,7 @@
 }
 
 extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Called when method->dex_cache_resolved_types_[] misses.
   ScopedQuickEntrypointChecks sqec(self);
   auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
@@ -44,7 +44,7 @@
 }
 
 extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Called when caller isn't guaranteed to have access to a type and the dex cache may be
   // unpopulated.
   ScopedQuickEntrypointChecks sqec(self);
@@ -53,7 +53,7 @@
 }
 
 extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
   return ResolveStringFromCode(caller, string_idx);
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 871cf3c..25a943a 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -27,7 +27,7 @@
 
 extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
                                            Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
   if (LIKELY(field != nullptr)) {
@@ -42,7 +42,7 @@
 
 extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
                                                Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
   if (LIKELY(field != nullptr)) {
@@ -57,7 +57,7 @@
 
 extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
                                              Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
   if (LIKELY(field != nullptr)) {
@@ -73,7 +73,7 @@
 extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx,
                                              ArtMethod* referrer,
                                              Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
   if (LIKELY(field != nullptr)) {
@@ -89,7 +89,7 @@
 extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
                                            ArtMethod* referrer,
                                            Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
   if (LIKELY(field != nullptr)) {
@@ -105,7 +105,7 @@
 extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
                                            ArtMethod* referrer,
                                            Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
   if (LIKELY(field != nullptr)) {
@@ -121,7 +121,7 @@
 extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
                                                    ArtMethod* referrer,
                                                    Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
                                   sizeof(mirror::HeapReference<mirror::Object>));
@@ -138,7 +138,7 @@
 
 extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                              ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -158,7 +158,7 @@
 
 extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                                  ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -177,7 +177,7 @@
 }
 extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                                ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -197,7 +197,7 @@
 
 extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                                ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -217,7 +217,7 @@
 
 extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                              ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -237,7 +237,7 @@
 
 extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                              ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -258,7 +258,7 @@
 extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                                      ArtMethod* referrer,
                                                      Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
                                   sizeof(mirror::HeapReference<mirror::Object>));
@@ -279,7 +279,7 @@
 
 extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value,
                                      ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
   if (LIKELY(field != nullptr)) {
@@ -310,7 +310,7 @@
 
 extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value,
                                       ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
   if (LIKELY(field != nullptr)) {
@@ -341,7 +341,7 @@
 
 extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
                                       ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
   if (LIKELY(field != nullptr)) {
@@ -360,7 +360,7 @@
 
 extern "C" int artSet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer,
                                       uint64_t new_value, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
   if (LIKELY(field != nullptr)) {
@@ -379,7 +379,7 @@
 
 extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
                                        ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
                                   sizeof(mirror::HeapReference<mirror::Object>));
@@ -402,7 +402,7 @@
 
 extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
                                        ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -441,7 +441,7 @@
 
 extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
                                         ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -481,7 +481,7 @@
 
 extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
                                         ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -509,7 +509,7 @@
 
 extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value,
                                         ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
   if (LIKELY(field != nullptr  && obj != nullptr)) {
@@ -534,7 +534,7 @@
 extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                          mirror::Object* new_value,
                                          ArtMethod* referrer, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
                                   sizeof(mirror::HeapReference<mirror::Object>));
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index d3991cd..22b2fa3 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -26,7 +26,7 @@
  */
 extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
                                               ArtMethod* method, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   const uint16_t* const insns = method->GetCodeItem()->insns_;
   const Instruction::ArrayDataPayload* payload =
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 2b5c15b..ad5ee84 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -28,7 +28,7 @@
                                                              mirror::Object* this_object,
                                                              Thread* self,
                                                              uintptr_t lr)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
   // that part.
   ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
@@ -50,7 +50,7 @@
 extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp,
                                                               uint64_t gpr_result,
                                                               uint64_t fpr_result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Compute address of return PC and sanity check that it currently holds 0.
   size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsOnly);
   uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index de225ad..f69c39e 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -63,7 +63,7 @@
 }
 
 static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   JNIEnvExt* env = self->GetJniEnv();
   env->locals.SetSegmentState(env->local_ref_cookie);
   env->local_ref_cookie = saved_local_ref_cookie;
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 4423c08..3bf001e 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -21,7 +21,7 @@
 namespace art {
 
 extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+    SHARED_REQUIRES(Locks::mutator_lock_)
     NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
   ScopedQuickEntrypointChecks sqec(self);
   if (UNLIKELY(obj == nullptr)) {
@@ -41,7 +41,7 @@
 }
 
 extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+    SHARED_REQUIRES(Locks::mutator_lock_)
     NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
   ScopedQuickEntrypointChecks sqec(self);
   if (UNLIKELY(obj == nullptr)) {
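diff note: the two lock entrypoints above keep SHARED_REQUIRES for the mutator lock but opt their monitor bookkeeping out of the analysis, since the monitor lock is taken and released from Java code. A sketch of the escape hatch, with the attribute spelling given as an assumption rather than ART's actual definition:

// Assumed definition: disables clang's thread safety checks per function.
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
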
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 87e0c6e..47b3eff 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -19,7 +19,7 @@
 
 namespace art {
 
-extern "C" void artTestSuspendFromCode(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+extern "C" void artTestSuspendFromCode(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
   // Called when suspend count check value is 0 and thread->suspend_count_ != 0
   ScopedQuickEntrypointChecks sqec(self);
   self->CheckSuspend();
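
With the annotation on artTestSuspendFromCode, a caller that does not hold the shared mutator lock is now flagged at compile time. A hedged sketch (BadCaller is hypothetical):

void BadCaller(Thread* self) {
  // clang emits roughly: calling function 'artTestSuspendFromCode' requires
  // holding mutex 'mutator_lock_' [-Wthread-safety-analysis]
  artTestSuspendFromCode(self);
}
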
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index f22edc1..5a82b3a 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -25,14 +25,14 @@
 
 // Deliver an exception that is pending on the thread, setting up a callee-save frame on the way.
 extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   self->QuickDeliverException();
 }
 
 // Called by generated code to throw an exception.
 extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   /*
    * exception may be null, in which case this routine should
    * throw NPE.  NOTE: this is a convenience for generated code,
@@ -51,7 +51,7 @@
 
 // Called by generated code to throw a NullPointerException.
 extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   self->NoteSignalBeingHandled();
   ThrowNullPointerExceptionFromDexPC();
@@ -61,7 +61,7 @@
 
 // Called by generated code to throw an arithmetic divide-by-zero exception.
 extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowArithmeticExceptionDivideByZero();
   self->QuickDeliverException();
@@ -69,14 +69,14 @@
 
 // Called by generated code to throw an array index out of bounds exception.
 extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowArrayIndexOutOfBoundsException(index, length);
   self->QuickDeliverException();
 }
 
 extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   self->NoteSignalBeingHandled();
   ThrowStackOverflowError(self);
@@ -85,7 +85,7 @@
 }
 
 extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowNoSuchMethodError(method_idx);
   self->QuickDeliverException();
@@ -94,7 +94,7 @@
 extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
                                                      mirror::Class* src_type,
                                                      Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   DCHECK(!dest_type->IsAssignableFrom(src_type));
   ThrowClassCastException(dest_type, src_type);
@@ -103,7 +103,7 @@
 
 extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
                                                       Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowArrayStoreException(value->GetClass(), array->GetClass());
   self->QuickDeliverException();
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 4f76ebd..6fe2bb6 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -280,7 +280,7 @@
   // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
   // 1st GPR.
   static mirror::Object* GetProxyThisObject(ArtMethod** sp)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK((*sp)->IsProxyMethod());
     CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes());
     CHECK_GT(kNumQuickGprArgs, 0u);
@@ -291,19 +291,19 @@
     return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
   }
 
-  static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
     return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs);
   }
 
-  static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
     uint8_t* previous_sp =
         reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
     return *reinterpret_cast<ArtMethod**>(previous_sp);
   }
 
-  static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
     const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
     ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
@@ -329,14 +329,14 @@
   }
 
   // For the given quick ref and args quick frame, return the caller's PC.
-  static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
     uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
     return *reinterpret_cast<uintptr_t*>(lr);
   }
 
   QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
-                       uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+                       uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) :
           is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
           gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
           fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
@@ -421,7 +421,7 @@
     }
   }
 
-  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) {
     // (a) 'stack_args_' should point to the first method's argument
     // (b) whatever the argument type is, the 'stack_index_' should
     //     be moved forward with every visit.
@@ -571,7 +571,7 @@
 // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
 // allows using the QuickArgumentVisitor constants without moving all the code into its own module.
 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return QuickArgumentVisitor::GetProxyThisObject(sp);
 }
 
@@ -582,7 +582,7 @@
                                uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
 
-  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
 
  private:
   ShadowFrame* const sf_;
@@ -625,7 +625,7 @@
 }
 
 extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Ensure we don't get thread suspension until the object arguments are safely in the shadow
   // frame.
   ScopedQuickEntrypointChecks sqec(self);
@@ -692,9 +692,9 @@
                             ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
 
-  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
 
-  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   ScopedObjectAccessUnchecked* const soa_;
@@ -753,7 +753,7 @@
 // field within the proxy object, which will box the primitive arguments and deal with error cases.
 extern "C" uint64_t artQuickProxyInvokeHandler(
     ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
   DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
   // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
@@ -809,9 +809,9 @@
                                uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
 
-  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
 
-  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   ScopedObjectAccessUnchecked* const soa_;
@@ -842,7 +842,7 @@
 // Lazily resolve a method for quick. Called by stub code.
 extern "C" const void* artQuickResolutionTrampoline(
     ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // The resolution trampoline stashes the resolved method into the callee-save frame to transport
   // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
   // does not have the same stack layout as the callee-save method).
@@ -1196,7 +1196,7 @@
     return gpr_index_ > 0;
   }
 
-  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
     uintptr_t handle = PushHandle(ptr);
     if (HaveHandleScopeGpr()) {
       gpr_index_--;
@@ -1384,7 +1384,7 @@
   void PushStack(uintptr_t val) {
     delegate_->PushStack(val);
   }
-  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
     return delegate_->PushHandle(ref);
   }
 
@@ -1443,11 +1443,11 @@
   }
 
   virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     UNUSED(sm);
   }
 
-  void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
     BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
 
     WalkHeader(&sm);
@@ -1519,7 +1519,7 @@
   //
   // Note: assumes ComputeAll() has been run before.
   void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* method = **m;
 
     DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
@@ -1560,7 +1560,7 @@
   // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
   // Returns the new bottom. Note: this may be unaligned.
   uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // First, fix up the layout of the callee-save frame.
     // We have to squeeze in the HandleScope, and relocate the method pointer.
     LayoutCalleeSaveFrame(self, m, sp, handle_scope);
@@ -1578,7 +1578,7 @@
   uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
                          HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
                          uint32_t** start_fpr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Walk(shorty, shorty_len);
 
     // JNI part.
@@ -1594,7 +1594,7 @@
 
   // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
   void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   uint32_t num_handle_scope_references_;
@@ -1650,7 +1650,7 @@
     cur_stack_arg_++;
   }
 
-  virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) {
     LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
     UNREACHABLE();
   }
@@ -1688,16 +1688,16 @@
     }
   }
 
-  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+  void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
 
-  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
 
   StackReference<mirror::Object>* GetFirstHandleScopeEntry()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return handle_scope_->GetHandle(0).GetReference();
   }
 
-  jobject GetFirstHandleScopeJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return handle_scope_->GetHandle(0).ToJObject();
   }
 
@@ -1713,7 +1713,7 @@
                 HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
                                              handle_scope_(handle_scope), cur_entry_(0) {}
 
-    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
     void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
       FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
@@ -1721,7 +1721,7 @@
       cur_entry_ = 0U;
     }
 
-    void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) {
       // Initialize padding entries.
       size_t expected_slots = handle_scope_->NumberOfReferences();
       while (cur_entry_ < expected_slots) {
@@ -1841,7 +1841,7 @@
  * 2) An error, if the value is negative.
  */
 extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ArtMethod* called = *sp;
   DCHECK(called->IsNative()) << PrettyMethod(called, true);
   uint32_t shorty_len = 0;
@@ -1914,7 +1914,7 @@
  * unlocking.
  */
 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   ArtMethod* called = *sp;
@@ -1971,7 +1971,7 @@
 // for the method pointer.
 //
 // It is valid to use this, as at the usage points here (returns from C functions) we are assuming
-// to hold the mutator lock (see SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).
+// to hold the mutator lock (see SHARED_REQUIRES(Locks::mutator_lock_) annotations).
 
 template<InvokeType type, bool access_check>
 static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
@@ -2013,7 +2013,7 @@
 
 // Explicit artInvokeCommon template function declarations to please analysis tool.
 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
   TwoWordReturn artInvokeCommon<type, access_check>(                                            \
       uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
 
@@ -2032,31 +2032,31 @@
 // See comments in runtime_support_asm.S
 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
     uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
 }
 
 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
     uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
 }
 
 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
     uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
 }
 
 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
     uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
 }
 
 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
     uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
 }
 
@@ -2064,7 +2064,7 @@
 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t dex_method_idx,
                                                       mirror::Object* this_object,
                                                       Thread* self, ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   // The optimizing compiler currently does not inline methods that have an interface
   // invocation. We use the outer method directly to avoid fetching a stack map, which is
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 93f32e8..55b1772 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -74,12 +74,12 @@
   // Beware: Mixing atomic pushes and atomic pops will cause an ABA problem.
 
   // Returns false if we overflowed the stack.
-  bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
     return AtomicPushBackInternal(value, capacity_);
   }
 
   // Returns false if we overflowed the stack.
-  bool AtomicPushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool AtomicPushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
     return AtomicPushBackInternal(value, growth_limit_);
   }
 
@@ -87,7 +87,7 @@
   // slots. Returns false if we overflowed the stack.
   bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
                       StackReference<T>** end_address)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (kIsDebugBuild) {
       debug_is_sorted_ = false;
     }
@@ -113,7 +113,7 @@
     return true;
   }
 
-  void AssertAllZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void AssertAllZero() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (kIsDebugBuild) {
       for (size_t i = 0; i < capacity_; ++i) {
         DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
@@ -121,7 +121,7 @@
     }
   }
 
-  void PushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void PushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
     if (kIsDebugBuild) {
       debug_is_sorted_ = false;
     }
@@ -131,7 +131,7 @@
     begin_[index].Assign(value);
   }
 
-  T* PopBack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  T* PopBack() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
     // Decrement the back index non-atomically.
     back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
@@ -194,12 +194,12 @@
     }
   }
 
-  bool ContainsSorted(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool ContainsSorted(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(debug_is_sorted_);
     return std::binary_search(Begin(), End(), value, ObjectComparator());
   }
 
-  bool Contains(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool Contains(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
     for (auto cur = Begin(), end = End(); cur != end; ++cur) {
       if (cur->AsMirrorPtr() == value) {
         return true;
@@ -221,7 +221,7 @@
 
   // Returns false if we overflowed the stack.
   bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (kIsDebugBuild) {
       debug_is_sorted_ = false;
     }
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 34e6aa3..88a6c6c 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -107,8 +107,8 @@
   size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
               const Visitor& visitor,
               const uint8_t minimum_age = kCardDirty) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Assertion used to check the given address is covered by the card table
   void CheckAddrIsInCardTable(const uint8_t* addr) const;
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 1648aef..0b96979 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -35,34 +35,34 @@
 
 class HeapBitmap {
  public:
-  bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  bool Test(const mirror::Object* obj) SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+  void Clear(const mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_);
   template<typename LargeObjectSetVisitor>
   bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
   template<typename LargeObjectSetVisitor>
   bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
   ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
   LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const;
 
   void Walk(ObjectCallback* callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_);
 
   template <typename Visitor>
   void Visit(const Visitor& visitor)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Find and replace a bitmap pointer; this is used for the bitmap swapping in the GC.
   void ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap, ContinuousSpaceBitmap* new_bitmap)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_);
 
   // Find and replace an object set pointer; this is used for the bitmap swapping in the GC.
   void ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap, LargeObjectBitmap* new_bitmap)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_);
 
   explicit HeapBitmap(Heap* heap) : heap_(heap) {}
 
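
The heap_bitmap.h hunk keeps the exclusive/shared split: mutators such as
Clear() and the bitmap-swap helpers carry REQUIRES (exclusive hold), while
read-only Test() carries SHARED_REQUIRES. A sketch of the distinction, again
with illustrative stand-in types rather than ART's own:

#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

class CAPABILITY("mutex") RwLock {
 public:
  void WriterLock() ACQUIRE() {}
  void WriterUnlock() RELEASE() {}
  void ReaderLock() ACQUIRE_SHARED() {}
  void ReaderUnlock() RELEASE_SHARED() {}
};

RwLock heap_bitmap_lock;
bool bits[1024];

bool Test(unsigned i) SHARED_REQUIRES(heap_bitmap_lock) { return bits[i]; }
void Clear(unsigned i) REQUIRES(heap_bitmap_lock) { bits[i] = false; }

void Reader() {
  heap_bitmap_lock.ReaderLock();
  Test(0);  // OK: a shared hold satisfies SHARED_REQUIRES.
  // Clear(0) here would warn: REQUIRES needs an exclusive hold.
  heap_bitmap_lock.ReaderUnlock();
}
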
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 009254b..68e7fa0 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -100,7 +100,7 @@
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
   void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Only add the reference if it is non null and fits our criteria.
     mirror::HeapReference<Object>* const obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
     mirror::Object* ref = obj_ptr->AsMirrorPtr();
@@ -131,8 +131,8 @@
         contains_reference_to_other_space_(contains_reference_to_other_space) {}
 
   void operator()(Object* root) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(root != nullptr);
     ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_, from_space_, immune_space_,
                                                       contains_reference_to_other_space_);
@@ -164,7 +164,7 @@
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
   void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::HeapReference<Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
     mirror::Object* ref = ref_ptr->AsMirrorPtr();
     // Only add the reference if it is non null and fits our criteria.
@@ -188,7 +188,7 @@
   }
 
   void operator()(Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     // We don't have an early exit since we use the visitor pattern; an early
     // exit would significantly speed this up.
     AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
@@ -209,7 +209,7 @@
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
   void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
     if (ref != nullptr && mod_union_table_->ShouldAddReference(ref) &&
         references_.find(ref) == references_.end()) {
@@ -237,7 +237,7 @@
  public:
   explicit ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table,
                                    const std::set<const Object*>& references)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_)
       : mod_union_table_(mod_union_table), references_(references) {
   }
 
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 520cc1c..5888193 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -82,7 +82,7 @@
   // for said cards. Exclusive lock is required since verify sometimes uses
   // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the
   // bitmap or not.
-  virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
+  virtual void Verify() REQUIRES(Locks::heap_bitmap_lock_) = 0;
 
   // Returns true if a card is marked inside the mod union table. Used for testing. The address
   // doesn't need to be aligned.
@@ -118,21 +118,21 @@
 
   // Update table based on cleared cards and mark all references to the other spaces.
   void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_);
 
   // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
   // VisitMarkedRange can't know if the callback will modify the bitmap or not.
   void Verify() OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_);
 
   // Function that tells whether or not to add a reference to the table.
   virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
 
   virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
 
-  virtual void Dump(std::ostream& os) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void Dump(std::ostream& os) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   virtual void SetCards() OVERRIDE;
 
@@ -158,8 +158,8 @@
 
   // Mark all references to the alloc space(s).
   virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Nothing to verify.
   virtual void Verify() OVERRIDE {}
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index aad8a25..edab1b0 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -46,7 +46,7 @@
   }
   mirror::ObjectArray<mirror::Object>* AllocObjectArray(
       Thread* self, space::ContinuousMemMapAllocSpace* space, size_t component_count)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     auto* klass = GetObjectArrayClass(self, space);
     const size_t size = mirror::ComputeArraySize(component_count, 2);
     size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
@@ -67,7 +67,7 @@
 
  private:
   mirror::Class* GetObjectArrayClass(Thread* self, space::ContinuousMemMapAllocSpace* space)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (java_lang_object_array_ == nullptr) {
       java_lang_object_array_ =
           Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kObjectArrayClass);
@@ -97,12 +97,12 @@
  public:
   explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(ref != nullptr);
     MarkObject(ref->AsMirrorPtr());
   }
   virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
     out_->insert(obj);
     return obj;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 23ab8df..994a0ad 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -68,7 +68,7 @@
         contains_reference_to_target_space_(contains_reference_to_target_space) {}
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
     mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
     if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
@@ -79,8 +79,8 @@
   }
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     if (target_space_->HasAddress(ref->GetReferent())) {
       *contains_reference_to_target_space_ = true;
       collector_->DelayReferenceReferent(klass, ref);
@@ -101,8 +101,8 @@
       : collector_(collector), target_space_(target_space),
         contains_reference_to_target_space_(contains_reference_to_target_space) {}
 
-  void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void operator()(mirror::Object* obj) const REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_,
                                           collector_);
     obj->VisitReferences<kMovingClasses>(visitor, visitor);
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index affe863..3a0dcf7 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -56,8 +56,8 @@
   // Mark through all references to the target space.
   void UpdateAndMarkReferences(space::ContinuousSpace* target_space,
                                collector::GarbageCollector* collector)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void Dump(std::ostream& os);
 
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index cdeaa50..7914b66 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -188,7 +188,7 @@
 void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visited,
                                                  ObjectCallback* callback, mirror::Object* obj,
                                                  mirror::Class* klass, void* arg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Visit fields of parent classes first.
   mirror::Class* super = klass->GetSuperClass();
   if (super != nullptr) {
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index e0661b6..b8ff471 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -123,7 +123,7 @@
 
   // Visit the live objects in the range [visit_begin, visit_end).
   // TODO: Use lock annotations when clang is fixed.
-  // EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // REQUIRES(Locks::heap_bitmap_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   template <typename Visitor>
   void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
       NO_THREAD_SAFETY_ANALYSIS;
@@ -131,12 +131,12 @@
   // Visits set bits in address order.  The callback is not permitted to change the bitmap bits or
   // max during the traversal.
   void Walk(ObjectCallback* callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_);
 
   // Visits set bits with an in-order traversal.  The callback is not permitted to change the bitmap
   // bits or max during the traversal.
   void InOrderWalk(ObjectCallback* callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Walk through the bitmaps in increasing address order, and find the object pointers that
   // correspond to garbage objects.  Call <callback> zero or more times with lists of these object
@@ -204,12 +204,12 @@
 
   // For an unvisited object, visit it then all its children found via fields.
   static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
-                                void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                                void* arg) SHARED_REQUIRES(Locks::mutator_lock_);
   // Walk instance fields of the given Class. Separate function to allow recursion on the super
   // class.
   static void WalkInstanceFields(SpaceBitmap<kAlignment>* visited, ObjectCallback* callback,
                                  mirror::Object* obj, mirror::Class* klass, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Backing storage for bitmap.
   std::unique_ptr<MemMap> mem_map_;
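
VisitMarkedRange above still opts out via NO_THREAD_SAFETY_ANALYSIS and
spells the intended annotation in a comment, since the analysis cannot follow
an arbitrary template visitor. A sketch of that escape hatch, with
illustrative names:

#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

// The analysis cannot tell what an arbitrary Visitor touches, so the
// function opts out entirely and documents the intended lock contract
// in a comment instead, as the TODO in space_bitmap.h does.
template <typename Visitor>
void VisitRange(unsigned begin, unsigned end, const Visitor& visitor)
    NO_THREAD_SAFETY_ANALYSIS {
  for (unsigned i = begin; i != end; ++i) {
    visitor(i);  // May touch state guarded by locks the analysis cannot see.
  }
}
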
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 3108b7c..ec4d626 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -111,8 +111,8 @@
 }
 
 static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-    EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_)
+    REQUIRES(Locks::alloc_tracker_lock_) {
   GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
   // This does not need a read barrier because this is called by GC.
   mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
@@ -177,7 +177,7 @@
 
 struct AllocRecordStackVisitor : public StackVisitor {
   AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         trace(trace_in),
         depth(0),
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 933363b..0a4f532 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -39,7 +39,7 @@
  public:
   AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {}
 
-  int32_t ComputeLineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t ComputeLineNumber() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* GetMethod() const {
     return method_;
@@ -184,14 +184,14 @@
     return trace_->GetTid();
   }
 
-  mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return klass_.Read();
   }
 
   const char* GetClassDescriptor(std::string* storage) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  GcRoot<mirror::Class>& GetClassGcRoot() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  GcRoot<mirror::Class>& GetClassGcRoot() SHARED_REQUIRES(Locks::mutator_lock_) {
     return klass_;
   }
 
@@ -221,12 +221,12 @@
   // in order to make sure the AllocRecordObjectMap object is not null.
   static void RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass,
                                size_t byte_count)
-      LOCKS_EXCLUDED(Locks::alloc_tracker_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::alloc_tracker_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+  static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
 
-  AllocRecordObjectMap() EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_)
+  AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_)
       : alloc_record_max_(kDefaultNumAllocRecords),
         recent_record_max_(kDefaultNumRecentRecords),
         max_stack_depth_(kDefaultAllocStackDepth),
@@ -238,8 +238,8 @@
   ~AllocRecordObjectMap();
 
   void Put(mirror::Object* obj, AllocRecord* record)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_) {
     if (entries_.size() == alloc_record_max_) {
       delete entries_.front().second;
       entries_.pop_front();
@@ -247,23 +247,23 @@
     entries_.emplace_back(GcRoot<mirror::Object>(obj), record);
   }
 
-  size_t Size() const SHARED_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+  size_t Size() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
     return entries_.size();
   }
 
-  size_t GetRecentAllocationSize() const SHARED_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+  size_t GetRecentAllocationSize() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
     CHECK_LE(recent_record_max_, alloc_record_max_);
     size_t sz = entries_.size();
     return std::min(recent_record_max_, sz);
   }
 
   void VisitRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_);
 
   void SweepAllocationRecords(IsMarkedVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_);
 
   // Allocation tracking could be enabled by the user in between DisallowNewAllocationRecords() and
   // AllowNewAllocationRecords(), in which case new allocation records can be added although they
@@ -272,34 +272,34 @@
   // swept from the list. But missing the first few records is acceptable for using the button to
   // enable allocation tracking.
   void DisallowNewAllocationRecords()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_);
   void AllowNewAllocationRecords()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_);
 
   // TODO: Is there a better way to hide the type of entries_?
   EntryList::iterator Begin()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_) {
     return entries_.begin();
   }
 
   EntryList::iterator End()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_) {
     return entries_.end();
   }
 
   EntryList::reverse_iterator RBegin()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_) {
     return entries_.rbegin();
   }
 
   EntryList::reverse_iterator REnd()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_) {
     return entries_.rend();
   }
 
@@ -318,7 +318,7 @@
   // See the comment in the typedef of EntryList.
   EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_);
 
-  void SetProperties() EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+  void SetProperties() REQUIRES(Locks::alloc_tracker_lock_);
 };
 
 }  // namespace gc
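
The allocation_record.h hunks show the substantive part of the migration:
LOCKS_EXCLUDED(Locks::alloc_tracker_lock_) becomes
REQUIRES(!Locks::alloc_tracker_lock_). LOCKS_EXCLUDED is not transitive, so it
only catches direct misuse; a negative capability is a real precondition that
-Wthread-safety-negative makes every caller establish, which statically rules
out re-entrant acquisition. A small sketch of the pattern (stand-in macros and
Mutex; verify with: clang++ -fsyntax-only -Wthread-safety -Wthread-safety-negative):

#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRE(...)  __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)  __attribute__((release_capability(__VA_ARGS__)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() {}
  void Unlock() RELEASE() {}
};

Mutex alloc_tracker_lock;
int num_records GUARDED_BY(alloc_tracker_lock) = 0;

// Acquires the lock itself, so it must not already be held: a negative
// capability that -Wthread-safety-negative checks at every call site.
void SetAllocTrackingEnabled(bool enabled) REQUIRES(!alloc_tracker_lock) {
  alloc_tracker_lock.Lock();
  num_records = enabled ? 0 : -1;
  alloc_tracker_lock.Unlock();
}

void Caller() REQUIRES(!alloc_tracker_lock) {
  SetAllocTrackingEnabled(true);  // OK: the lock is provably not held.
  alloc_tracker_lock.Lock();
  // Calling SetAllocTrackingEnabled here would warn: deadlock on re-lock.
  alloc_tracker_lock.Unlock();
}
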
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index c356a39..a7f29af 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -51,7 +51,7 @@
     bool IsFree() const {
       return !kIsDebugBuild || magic_num_ == kMagicNumFree;
     }
-    size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+    size_t ByteSize(RosAlloc* rosalloc) const REQUIRES(rosalloc->lock_) {
       const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
       size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
@@ -60,7 +60,7 @@
       return byte_size;
     }
     void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
-        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+        REQUIRES(rosalloc->lock_) {
       DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
       uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
@@ -69,20 +69,20 @@
     void* Begin() {
       return reinterpret_cast<void*>(this);
     }
-    void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+    void* End(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
       uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
       uint8_t* end = fpr_base + ByteSize(rosalloc);
       return end;
     }
     bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
-        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+        REQUIRES(rosalloc->lock_) {
       return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
     }
     bool IsAtEndOfSpace(RosAlloc* rosalloc)
-        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+        REQUIRES(rosalloc->lock_) {
       return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
     }
-    bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+    bool ShouldReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
       switch (rosalloc->page_release_mode_) {
         case kPageReleaseModeNone:
           return false;
@@ -99,7 +99,7 @@
           return false;
       }
     }
-    void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
+    void ReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
       uint8_t* start = reinterpret_cast<uint8_t*>(this);
       size_t byte_size = ByteSize(rosalloc);
       DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
@@ -254,8 +254,8 @@
     std::string Dump();
     // Verify for debugging.
     void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
+        REQUIRES(Locks::mutator_lock_)
+        REQUIRES(Locks::thread_list_lock_);
 
    private:
     // The common part of MarkFreeBitMap() and MarkThreadLocalFreeBitMap(). Returns the bracket
@@ -512,51 +512,51 @@
 
   // Page-granularity alloc/free
   void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+      REQUIRES(lock_);
   // Returns how many bytes were freed.
-  size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  size_t FreePages(Thread* self, void* ptr, bool already_zero) REQUIRES(lock_);
 
   // Allocate/free a run slot.
   void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
                      size_t* bytes_tl_bulk_allocated)
-      LOCKS_EXCLUDED(lock_);
+      REQUIRES(!lock_);
   // Allocate/free a run slot without acquiring locks.
-  // TODO: EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+  // TODO: REQUIRES(Locks::mutator_lock_)
   void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                  size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      LOCKS_EXCLUDED(lock_);
-  void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx);
+      REQUIRES(!lock_);
+  void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx) REQUIRES(!lock_);
 
   // Returns the bracket size.
   size_t FreeFromRun(Thread* self, void* ptr, Run* run)
-      LOCKS_EXCLUDED(lock_);
+      REQUIRES(!lock_);
 
   // Used to allocate a new thread local run for a size bracket.
-  Run* AllocRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);
+  Run* AllocRun(Thread* self, size_t idx) REQUIRES(!lock_);
 
   // Used to acquire a new/reused run for a size bracket. Used when a
   // thread-local or current run gets full.
-  Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);
+  Run* RefillRun(Thread* self, size_t idx) REQUIRES(!lock_);
 
   // The internal implementation of the non-bulk Free().
-  size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
+  size_t FreeInternal(Thread* self, void* ptr) REQUIRES(!lock_);
 
   // Allocates large objects.
   void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                          size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      LOCKS_EXCLUDED(lock_);
+      REQUIRES(!lock_);
 
   // Revoke a run by adding it to non_full_runs_ or freeing the pages.
-  void RevokeRun(Thread* self, size_t idx, Run* run);
+  void RevokeRun(Thread* self, size_t idx, Run* run) REQUIRES(!lock_);
 
   // Revoke the current runs which share an index with the thread local runs.
-  void RevokeThreadUnsafeCurrentRuns();
+  void RevokeThreadUnsafeCurrentRuns() REQUIRES(!lock_);
 
   // Release a range of pages.
-  size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  size_t ReleasePageRange(uint8_t* start, uint8_t* end) REQUIRES(lock_);
 
   // Dumps the page map for debugging.
-  std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  std::string DumpPageMap() REQUIRES(lock_);
 
  public:
   RosAlloc(void* base, size_t capacity, size_t max_capacity,
@@ -570,11 +570,11 @@
   template<bool kThreadSafe = true>
   void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
               size_t* bytes_tl_bulk_allocated)
-      LOCKS_EXCLUDED(lock_);
+      REQUIRES(!lock_);
   size_t Free(Thread* self, void* ptr)
-      LOCKS_EXCLUDED(bulk_free_lock_);
+      REQUIRES(!bulk_free_lock_, !lock_);
   size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
-      LOCKS_EXCLUDED(bulk_free_lock_);
+      REQUIRES(!bulk_free_lock_, !lock_);
 
   // Returns true if the given allocation request can be allocated in
   // an existing thread local run without allocating a new run.
@@ -589,7 +589,7 @@
   ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size);
 
   // Returns the size of the allocated slot for a given allocated memory chunk.
-  size_t UsableSize(const void* ptr);
+  size_t UsableSize(const void* ptr) REQUIRES(!lock_);
   // Returns the size of the allocated slot for a given size.
   size_t UsableSize(size_t bytes) {
     if (UNLIKELY(bytes > kLargeSizeThreshold)) {
@@ -600,33 +600,33 @@
   }
   // Try to reduce the current footprint by releasing the free page
   // run at the end of the memory region, if any.
-  bool Trim();
+  bool Trim() REQUIRES(!lock_);
   // Iterates over all the memory slots and applies the given function.
   void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                   void* arg)
-      LOCKS_EXCLUDED(lock_);
+      REQUIRES(!lock_);
 
   // Release empty pages.
-  size_t ReleasePages() LOCKS_EXCLUDED(lock_);
+  size_t ReleasePages() REQUIRES(!lock_);
   // Returns the current footprint.
-  size_t Footprint() LOCKS_EXCLUDED(lock_);
+  size_t Footprint() REQUIRES(!lock_);
   // Returns the current capacity, i.e. the maximum footprint.
-  size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
+  size_t FootprintLimit() REQUIRES(!lock_);
   // Update the current capacity.
-  void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
+  void SetFootprintLimit(size_t bytes) REQUIRES(!lock_);
 
   // Releases the thread-local runs assigned to the given thread back to the common set of runs.
   // Returns the total bytes of free slots in the revoked thread local runs. This is to be
   // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
-  size_t RevokeThreadLocalRuns(Thread* thread);
+  size_t RevokeThreadLocalRuns(Thread* thread) REQUIRES(!lock_, !bulk_free_lock_);
   // Releases the thread-local runs assigned to all the threads back to the common set of runs.
   // Returns the total bytes of free slots in the revoked thread local runs. This is to be
   // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
-  size_t RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
+  size_t RevokeAllThreadLocalRuns() REQUIRES(!Locks::thread_list_lock_, !lock_, !bulk_free_lock_);
   // Assert the thread local runs of a thread are revoked.
-  void AssertThreadLocalRunsAreRevoked(Thread* thread);
+  void AssertThreadLocalRunsAreRevoked(Thread* thread) REQUIRES(!bulk_free_lock_);
   // Assert all the thread local runs are revoked.
-  void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
+  void AssertAllThreadLocalRunsAreRevoked() REQUIRES(!Locks::thread_list_lock_, !bulk_free_lock_);
 
   static Run* GetDedicatedFullRun() {
     return dedicated_full_run_;
@@ -647,9 +647,11 @@
   }
 
   // Verify for debugging.
-  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Verify() REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !bulk_free_lock_,
+                         !lock_);
 
-  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes);
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes)
+      REQUIRES(!bulk_free_lock_, !lock_);
 
  private:
   friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
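
The RosAlloc hunk shows how negative requirements propagate: once the leaf
helpers state REQUIRES(lock_) and mid-level helpers take lock_ themselves,
every entry point above them (Free, BulkFree, RevokeThreadLocalRuns, ...) must
carry REQUIRES(!lock_), which is why previously unannotated helpers such as
RevokeRun and UsableSize gain annotations here. A minimal chain as a sketch,
with stand-in types rather than RosAlloc's real code:

#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRE(...)  __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)  __attribute__((release_capability(__VA_ARGS__)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() {}
  void Unlock() RELEASE() {}
};

class AllocatorSketch {
 public:
  // Public entry point: callable only with lock_ known to be free.
  void Free(void* ptr) REQUIRES(!lock_) { FreeInternal(ptr); }

 private:
  // Takes lock_ itself, so it inherits the negative requirement...
  void FreeInternal(void* ptr) REQUIRES(!lock_) {
    lock_.Lock();
    FreePages(ptr);
    lock_.Unlock();
  }

  // ...while leaf code running under the lock states a positive one.
  void FreePages(void* ptr) REQUIRES(lock_) { (void)ptr; }

  Mutex lock_;
};
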
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c803655..baa33b3 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -185,7 +185,7 @@
       : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
   }
 
-  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -221,7 +221,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
     ConcurrentCopying* cc = concurrent_copying_;
     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
     // Note: self is not necessarily equal to thread since thread may be suspended.
@@ -290,8 +290,8 @@
   explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
       : collector_(cc) {}
 
-  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
     DCHECK(obj != nullptr);
     DCHECK(collector_->immune_region_.ContainsObject(obj));
     accounting::ContinuousSpaceBitmap* cc_bitmap =
@@ -599,7 +599,7 @@
       : collector_(collector) {}
 
   void operator()(mirror::Object* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     if (ref == nullptr) {
       // OK.
       return;
@@ -624,7 +624,7 @@
   }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(root != nullptr);
     operator()(root);
   }
@@ -639,14 +639,14 @@
       : collector_(collector) {}
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref =
         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
     ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
     visitor(ref);
   }
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     CHECK(klass->IsTypeOfReferenceClass());
     this->operator()(ref, mirror::Reference::ReferentOffset(), false);
   }
@@ -660,11 +660,11 @@
   explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
       : collector_(collector) {}
   void operator()(mirror::Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ObjectCallback(obj, collector_);
   }
   static void ObjectCallback(mirror::Object* obj, void *arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK(obj != nullptr);
     ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
     space::RegionSpace* region_space = collector->RegionSpace();
@@ -733,7 +733,7 @@
       : collector_(collector) {}
 
   void operator()(mirror::Object* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     if (ref == nullptr) {
       // OK.
       return;
@@ -751,14 +751,14 @@
       : collector_(collector) {}
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref =
         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
     ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
     visitor(ref);
   }
   void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     CHECK(klass->IsTypeOfReferenceClass());
   }
 
@@ -771,11 +771,11 @@
   explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
       : collector_(collector) {}
   void operator()(mirror::Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ObjectCallback(obj, collector_);
   }
   static void ObjectCallback(mirror::Object* obj, void *arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK(obj != nullptr);
     ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
     space::RegionSpace* region_space = collector->RegionSpace();
@@ -1130,8 +1130,8 @@
 #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
   NO_RETURN
 #endif
-  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
     DCHECK(obj != nullptr);
     DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
     DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
@@ -1277,8 +1277,8 @@
  public:
   explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
       : collector_(cc) {}
-  void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
     DCHECK(ref != nullptr);
     DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
     DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
@@ -1335,7 +1335,7 @@
 
   template <class MirrorType>
   ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!root->IsNull()) {
       VisitRoot(root);
     }
@@ -1343,13 +1343,13 @@
 
   template <class MirrorType>
   void VisitRoot(mirror::Object** root)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
   }
 
   template <class MirrorType>
   void VisitRoot(mirror::CompressedReference<MirrorType>* root)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
   }
 };
@@ -1489,13 +1489,13 @@
       : collector_(collector) {}
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
-      const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
     collector_->Process(obj, offset);
   }
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     CHECK(klass->IsTypeOfReferenceClass());
     collector_->DelayReferenceReferent(klass, ref);
   }
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index f1317b8..d324ce1 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -62,14 +62,15 @@
   ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
   ~ConcurrentCopying();
 
-  virtual void RunPhases() OVERRIDE;
-  void InitializePhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void FinishPhase();
+  virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+  void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+  void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+  void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+  void FinishPhase() REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
 
-  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_);
   virtual GcType GetGcType() const OVERRIDE {
     return kGcTypePartial;
   }
@@ -85,14 +86,15 @@
     return region_space_;
   }
   void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsInToSpace(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsInToSpace(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(ref != nullptr);
     return IsMarked(ref) == ref;
   }
-  mirror::Object* Mark(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
   bool IsMarking() const {
     return is_marking_;
   }
@@ -105,68 +107,77 @@
   bool IsWeakRefAccessEnabled() {
     return weak_ref_access_enabled_.LoadRelaxed();
   }
-  void RevokeThreadLocalMarkStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void RevokeThreadLocalMarkStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
 
  private:
-  void PushOntoMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* Copy(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void Scan(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
+  mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!skipped_blocks_lock_, !mark_stack_lock_);
+  void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
   void Process(mirror::Object* obj, MemberOffset offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
   virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
   virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
   accounting::ObjectStack* GetAllocationStack();
   accounting::ObjectStack* GetLiveStack();
-  virtual void ProcessMarkStack() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool ProcessMarkStackOnce() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void ProcessMarkStack() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
+  bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+  void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
   size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SwitchToSharedMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SwitchToGcExclusiveMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void SwitchToSharedMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
+  void SwitchToGcExclusiveMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_);
   virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ProcessReferences(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
   virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
   virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void SweepSystemWeaks(Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
   void Sweep(bool swap_bitmaps)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
   void SweepLargeObjects(bool swap_bitmaps)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
   void ClearBlackPtrs()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
   void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CheckEmptyMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void IssueEmptyCheckpoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!skipped_blocks_lock_);
+  void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+  void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::Object* GetFwdPtr(mirror::Object* from_ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);
-  void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
+  void SwapStacks(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
   void RecordLiveStackFreezeSize(Thread* self);
   void ComputeUnevacFromSpaceLiveRatio();
   void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ReenableWeakRefAccess(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
 
   space::RegionSpace* region_space_;      // The underlying region space.
   std::unique_ptr<Barrier> gc_barrier_;
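
concurrent_copying.h combines both forms on single declarations: a phase runs
under a shared mutator_lock_ hold while promising that the collector's own
mark_stack_lock_ and skipped_blocks_lock_ are free. A sketch of such a
combined signature, with the same illustrative stand-ins:

#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

class CAPABILITY("mutex") RwLock {
 public:
  void ReaderLock() ACQUIRE_SHARED() {}
  void ReaderUnlock() RELEASE_SHARED() {}
};

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() {}
  void Unlock() RELEASE() {}
};

RwLock mutator_lock;

class CollectorSketch {
 public:
  // Runs under a shared mutator_lock hold, and must not already hold
  // its own mark_stack_lock_, which it takes internally.
  void MarkingPhase() SHARED_REQUIRES(mutator_lock)
      REQUIRES(!mark_stack_lock_) {
    mark_stack_lock_.Lock();
    // ... drain the mark stack ...
    mark_stack_lock_.Unlock();
  }

 private:
  Mutex mark_stack_lock_;
};
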
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index cfc4f96..954c80e 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -142,7 +142,7 @@
   virtual GcType GetGcType() const = 0;
   virtual CollectorType GetCollectorType() const = 0;
   // Run the garbage collector.
-  void Run(GcCause gc_cause, bool clear_soft_references);
+  void Run(GcCause gc_cause, bool clear_soft_references) REQUIRES(!pause_histogram_lock_);
   Heap* GetHeap() const {
     return heap_;
   }
@@ -150,11 +150,11 @@
   const CumulativeLogger& GetCumulativeTimings() const {
     return cumulative_timings_;
   }
-  void ResetCumulativeStatistics();
+  void ResetCumulativeStatistics() REQUIRES(!pause_histogram_lock_);
   // Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
   // this is the allocation space; for full GC we swap the zygote bitmaps too.
-  void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  uint64_t GetTotalPausedTimeNs() LOCKS_EXCLUDED(pause_histogram_lock_);
+  void SwapBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
+  uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
   int64_t GetTotalFreedBytes() const {
     return total_freed_bytes_;
   }
@@ -162,7 +162,7 @@
     return total_freed_objects_;
   }
   // Reset the cumulative timings and pause histogram.
-  void ResetMeasurements();
+  void ResetMeasurements() REQUIRES(!pause_histogram_lock_);
   // Returns the estimated throughput in bytes / second.
   uint64_t GetEstimatedMeanThroughput() const;
   // Returns how many GC iterations have been run.
@@ -179,23 +179,23 @@
   void RecordFree(const ObjectBytePair& freed);
   // Record a free of large objects.
   void RecordFreeLOS(const ObjectBytePair& freed);
-  void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_);
+  void DumpPerformanceInfo(std::ostream& os) REQUIRES(!pause_histogram_lock_);
 
   // Helper functions for querying if objects are marked. These are used for processing references,
   // and will be used for reading system weaks while the GC is running.
   virtual mirror::Object* IsMarked(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
   virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
   // Used by reference processor.
-  virtual void ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual void ProcessMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) = 0;
   // Force mark an object.
   virtual mirror::Object* MarkObject(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
   virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
  protected:
   // Run all of the GC phases.
diff --git a/runtime/gc/collector/immune_region.h b/runtime/gc/collector/immune_region.h
index 30144f0..3ead501 100644
--- a/runtime/gc/collector/immune_region.h
+++ b/runtime/gc/collector/immune_region.h
@@ -41,7 +41,7 @@
   ImmuneRegion();
   void Reset();
   bool AddContinuousSpace(space::ContinuousSpace* space)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_);
   bool ContainsSpace(const space::ContinuousSpace* space) const;
   // Returns true if an object is inside of the immune region (assumed to be marked).
   bool ContainsObject(const mirror::Object* obj) const ALWAYS_INLINE {
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 0623fd4..c5ad613 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -89,7 +89,7 @@
  public:
   explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector)
       : collector_(collector) {}
-  void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
-                                                                      Locks::heap_bitmap_lock_) {
+  void operator()(mirror::Object* obj) const REQUIRES(Locks::mutator_lock_,
+                                                      Locks::heap_bitmap_lock_) {
     DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
     DCHECK(collector_->IsMarked(obj) != nullptr);
@@ -301,8 +301,8 @@
   }
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      OVERRIDE REQUIRES(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mirror::Object* obj = *roots[i];
       mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
@@ -315,8 +315,8 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      OVERRIDE REQUIRES(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mirror::Object* obj = roots[i]->AsMirrorPtr();
       mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
@@ -335,8 +335,8 @@
  public:
   explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {
   }
-  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-          EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+          REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     collector_->UpdateObjectReferences(obj);
   }
 
@@ -428,12 +428,12 @@
   }
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
-      ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
   }
 
   void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     collector_->UpdateHeapReference(
         ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
   }
@@ -491,8 +491,8 @@
  public:
   explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) {
   }
-  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-          EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+          REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
       collector_->MoveObject(obj, obj->SizeOf());
   }
 
@@ -564,14 +564,14 @@
   }
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // Object was already verified when we scanned it.
     collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
   }
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     collector_->DelayReferenceReferent(klass, ref);
   }
 
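The mark_compact.cc hunks above show the mechanical core of this change:
EXCLUSIVE_LOCKS_REQUIRED becomes REQUIRES and SHARED_LOCKS_REQUIRED becomes
SHARED_REQUIRES. A minimal standalone sketch of the capability attributes
presumably behind those macros (ToyMutex and the exact macro spellings are
illustrative; the real definitions live in ART's base/macros.h):

    // Compile with: clang++ -std=c++11 -fsyntax-only -Wthread-safety toy.cc
    #define CAPABILITY(name)     __attribute__((capability(name)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

    // A toy lock carrying the "capability" that the analysis tracks.
    class CAPABILITY("mutex") ToyMutex {
     public:
      void Lock() ACQUIRE() {}
      void Unlock() RELEASE() {}
    };

    ToyMutex heap_bitmap_lock;

    // REQUIRES: the caller must hold the capability exclusively (what
    // EXCLUSIVE_LOCKS_REQUIRED used to say); SHARED_REQUIRES accepts a
    // shared hold (the old SHARED_LOCKS_REQUIRED).
    void MarkObject() REQUIRES(heap_bitmap_lock) {}

    int main() {
      heap_bitmap_lock.Lock();
      MarkObject();             // OK: capability held.
      heap_bitmap_lock.Unlock();
      // MarkObject();          // would warn: requires holding heap_bitmap_lock.
      return 0;
    }
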
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 89d66b5..8d91939 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -64,13 +64,13 @@
 
   virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
   void InitializePhase();
-  void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-  void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-  void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MarkingPhase() REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_);
+  void ReclaimPhase() REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_);
+  void FinishPhase() REQUIRES(Locks::mutator_lock_);
   void MarkReachableObjects()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
   virtual GcType GetGcType() const OVERRIDE {
     return kGcTypePartial;
   }
@@ -88,106 +88,105 @@
   void FindDefaultMarkBitmap();
 
   void ScanObject(mirror::Object* obj)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Marks the root set at the start of a garbage collection.
   void MarkRoots()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
   // the image. Mark that portion of the heap as immune.
-  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_);
 
   void UnBindBitmaps()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_);
 
-  void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ProcessReferences(Thread* self) REQUIRES(Locks::mutator_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
-  void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
-  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
 
   void SweepSystemWeaks()
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
-      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info)
-      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   // Schedules an unmarked object for reference processing.
   void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
  protected:
   // Returns null if the object is not marked, otherwise returns the forwarding address (same as
   // object for non-movable things).
   mirror::Object* GetMarkedForwardAddress(mirror::Object* object)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_);
 
   // Marks or unmarks a large object depending on the value of set: if set is true we mark,
   // otherwise we unmark.
   bool MarkLargeObject(const mirror::Object* obj)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Expand mark stack to 2x its current size.
-  void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if we should sweep the space.
   bool ShouldSweepSpace(space::ContinuousSpace* space) const;
 
   // Push an object onto the mark stack.
-  void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void UpdateAndMarkModUnion()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Recursively blackens objects on the mark stack.
   void ProcessMarkStack()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   // Three-pass mark-compact approach.
-  void Compact() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  void Compact() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
   // Calculate the forwarding address of objects marked as "live" in the objects_before_forwarding
   // bitmap.
   void CalculateObjectForwardingAddresses()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
   // Update the references of objects by using the forwarding addresses.
-  void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  void UpdateReferences() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
   // Move objects and restore lock words.
-  void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MoveObjects() REQUIRES(Locks::mutator_lock_);
   // Move a single object to its forward address.
-  void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MoveObject(mirror::Object* obj, size_t len) REQUIRES(Locks::mutator_lock_);
   // Mark a single object.
   virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
   virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(Locks::mutator_lock_);
   virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(Locks::mutator_lock_);
+  void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
-                                                                   Locks::mutator_lock_);
+                                                   Locks::mutator_lock_);
   // Update a single heap reference.
   void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(Locks::mutator_lock_);
   // Update all of the references of a single object.
   void UpdateObjectReferences(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(Locks::mutator_lock_);
 
   // Revoke all the thread-local buffers.
   void RevokeAllThreadLocalBuffers();
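
mark_compact.h also retires LOCKS_EXCLUDED(x) in favor of REQUIRES(!x). The
distinction matters for the newly enabled -Wthread-safety-negative: a negative
capability is a checked precondition that must be restated up the call graph,
while the old exclusion was only enforced one level deep. A hedged sketch with
the same illustrative macro spellings as above:

    // Compile with: clang++ -std=c++11 -fsyntax-only -Wthread-safety \
    //   -Wthread-safety-negative toy.cc
    #define CAPABILITY(name) __attribute__((capability(name)))
    #define REQUIRES(...)    __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...)     __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)     __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") ToyMutex {
     public:
      void Lock() ACQUIRE() {}
      void Unlock() RELEASE() {}
    };

    ToyMutex heap_bitmap_lock;

    // Acquires the lock itself, so the caller must NOT already hold it.
    void BindBitmaps() REQUIRES(!heap_bitmap_lock) {
      heap_bitmap_lock.Lock();
      // ... bind live bits to mark bits ...
      heap_bitmap_lock.Unlock();
    }

    // The caller restates the negative capability, so the "not held"
    // precondition propagates instead of stopping at the first caller.
    void MarkingPhase() REQUIRES(!heap_bitmap_lock) {
      BindBitmaps();  // OK: we promised !heap_bitmap_lock ourselves.
    }

    // With -Wthread-safety-negative, even the entry point declares the
    // locks it must not hold.
    int main() REQUIRES(!heap_bitmap_lock) {
      MarkingPhase();
      return 0;
    }
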
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index abb1d3d..92dde51 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -522,7 +522,7 @@
   explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
   }
 
@@ -547,7 +547,7 @@
 class VerifyRootVisitor : public SingleRootVisitor {
  public:
   void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // See if the root is on any space bitmap.
     auto* heap = Runtime::Current()->GetHeap();
     if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
@@ -597,8 +597,8 @@
       : mark_sweep_(mark_sweep) {}
 
   void operator()(mirror::Object* obj) const ALWAYS_INLINE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     if (kCheckLocks) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -616,8 +616,8 @@
   }
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     collector_->DelayReferenceReferent(klass, ref);
   }
 
@@ -654,7 +654,7 @@
             : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
 
     void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+        SHARED_REQUIRES(Locks::mutator_lock_) {
       mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
       if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
         if (kUseFinger) {
@@ -679,8 +679,8 @@
         : chunk_task_(chunk_task) {}
 
     // No thread safety analysis since multiple threads will use this visitor.
-    void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES(Locks::heap_bitmap_lock_) {
       MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
       MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
       DelayReferenceReferentVisitor ref_visitor(mark_sweep);
@@ -707,7 +707,7 @@
   size_t mark_stack_pos_;
 
   ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
       // Mark stack overflow; give 1/2 the stack to the thread pool as a new work task.
       mark_stack_pos_ /= 2;
@@ -725,8 +725,8 @@
   }
 
   // Scans all of the objects.
-  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  virtual void Run(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     UNUSED(self);
     ScanObjectParallelVisitor visitor(this);
     // TODO: Tune this.
@@ -1015,7 +1015,7 @@
   explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
 
   virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     mark_sweep_->VerifyIsLive(obj);
     return obj;
   }
@@ -1048,8 +1048,8 @@
   }
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
     }
@@ -1057,8 +1057,8 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
     }
@@ -1259,8 +1259,8 @@
   }
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
-      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     if (kCheckLocks) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
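
The kCheckLocks blocks above pair the static annotations with runtime
assertions. Clang can consume such assertions too: a method annotated with
assert_capability tells the analysis that, past the call, the capability is
held, without an acquire/release pair it can see. ART's
Mutex::AssertExclusiveHeld is annotated along these lines; the sketch below
uses illustrative names only:

    #include <cassert>

    #define CAPABILITY(name)       __attribute__((capability(name)))
    #define REQUIRES(...)          __attribute__((requires_capability(__VA_ARGS__)))
    #define ASSERT_CAPABILITY(...) __attribute__((assert_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") ToyMutex {
     public:
      // Aborts at runtime if the lock is not held; statically, establishes
      // the capability for the remainder of the caller.
      void AssertExclusiveHeld() ASSERT_CAPABILITY(this) { assert(held_); }
      bool held_ = false;
    };

    ToyMutex heap_bitmap_lock;

    void ScanObject() REQUIRES(heap_bitmap_lock) {}

    // E.g. a callback reached through code the analysis cannot see.
    void Callback() {
      heap_bitmap_lock.AssertExclusiveHeld();
      ScanObject();  // OK: the assertion established the capability.
    }

    int main() {
      heap_bitmap_lock.held_ = true;  // pretend a caller locked it for us
      Callback();
      return 0;
    }
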
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 7692b06..99e00f9 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -60,13 +60,12 @@
 
   virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
   void InitializePhase();
-  void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void PausePhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_, !mark_stack_lock_);
+  void PausePhase() REQUIRES(Locks::mutator_lock_, !mark_stack_lock_);
+  void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+  void FinishPhase() SHARED_REQUIRES(Locks::mutator_lock_);
   virtual void MarkReachableObjects()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
 
   bool IsConcurrent() const {
     return is_concurrent_;
@@ -88,113 +87,96 @@
 
   // Marks all objects in the root set at the start of a garbage collection.
   void MarkRoots(Thread* self)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void MarkNonThreadRoots()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void MarkConcurrentRoots(VisitRootFlags flags)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Builds a mark stack and recursively marks until it empties.
   void RecursiveMark()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
   // the image. Mark that portion of the heap as immune.
-  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
   void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Remarks the root set after completing the concurrent mark.
   void ReMarkRoots()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ProcessReferences(Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
 
   // Update and mark references from immune spaces.
   void UpdateAndMarkModUnion()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
 
   // Pre-clean cards to reduce how much work is needed in the pause.
   void PreCleanCards()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection. Virtual because, by default, it
   // sweeps all allocation spaces; partial and sticky GCs only want to sweep a subset of the heap.
-  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
-  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
 
   // Sweep only pointers within an array. WARNING: Trashes objects.
   void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
 
   // Blackens an object.
   void ScanObject(mirror::Object* obj)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // No thread safety analysis due to lambdas.
   template<typename MarkVisitor, typename ReferenceVisitor>
   void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
                        const ReferenceVisitor& ref_visitor)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-    EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
 
   void SweepSystemWeaks(Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
 
   static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   void VerifySystemWeaks()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   // Verify that an object is live, either in a live bitmap or in the allocation stack.
   void VerifyIsLive(const mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
 
   virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
 
   virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
 
   // Marks an object.
   virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
   void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
 
   Barrier& GetBarrier() {
     return *gc_barrier_;
@@ -202,21 +184,20 @@
 
   // Schedules an unmarked object for reference processing.
   void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
  protected:
   // Returns object if the object is marked in the heap bitmap, otherwise null.
   virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_);
 
   void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr,
                          MemberOffset offset = MemberOffset(0))
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
 
   // Marks an object atomically, safe to use from multiple threads.
   void MarkObjectNonNullParallel(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
 
   // Returns true if we need to add obj to a mark stack.
   bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -227,36 +208,34 @@
       NO_THREAD_SAFETY_ANALYSIS;
 
   // Expand mark stack to 2x its current size.
-  void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ExpandMarkStack() REQUIRES(mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void ResizeMarkStack(size_t new_size) REQUIRES(mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns how many threads we should use for the current GC phase, based on whether we are
   // paused and whether we care about pauses.
   size_t GetThreadCount(bool paused) const;
 
   // Push a single reference on a mark stack.
-  void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PushOnMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
 
   // Blackens objects grayed during a garbage collection.
   void ScanGrayObjects(bool paused, uint8_t minimum_age)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  virtual void ProcessMarkStack() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  virtual void ProcessMarkStack() OVERRIDE REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ProcessMarkStack(false);
   }
 
   // Recursively blackens objects on the mark stack.
   void ProcessMarkStack(bool paused)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ProcessMarkStackParallel(size_t thread_count)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
   // IsExclusiveHeld.
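
Unlike the headers above, mark_sweep.h threads a negative capability on a
private member: public entry points take mark_stack_lock_ themselves and so
declare REQUIRES(!mark_stack_lock_), while helpers such as ExpandMarkStack run
under it and declare REQUIRES(mark_stack_lock_). A hedged standalone sketch of
that split (ToyMutex and the class body are illustrative, not the real
MarkSweep):

    #include <vector>

    #define CAPABILITY(name) __attribute__((capability(name)))
    #define REQUIRES(...)    __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...)     __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)     __attribute__((release_capability(__VA_ARGS__)))
    #define GUARDED_BY(x)    __attribute__((guarded_by(x)))

    class CAPABILITY("mutex") ToyMutex {
     public:
      void Lock() ACQUIRE() {}
      void Unlock() RELEASE() {}
    };

    class ToyMarkSweep {
     public:
      // Takes mark_stack_lock_ internally, so callers must not hold it.
      void MarkObjectNonNullParallel(void* obj) REQUIRES(!mark_stack_lock_) {
        mark_stack_lock_.Lock();
        if (mark_stack_.size() == mark_stack_.capacity()) {
          ExpandMarkStack();  // OK: the lock is held on this path.
        }
        mark_stack_.push_back(obj);
        mark_stack_lock_.Unlock();
      }

     private:
      // Only callable with the lock already held.
      void ExpandMarkStack() REQUIRES(mark_stack_lock_) {
        mark_stack_.reserve(2 * mark_stack_.capacity() + 1);
      }

      ToyMutex mark_stack_lock_;
      std::vector<void*> mark_stack_ GUARDED_BY(mark_stack_lock_);
    };

    int main() {
      ToyMarkSweep ms;
      int dummy = 0;
      ms.MarkObjectNonNullParallel(&dummy);
      return 0;
    }
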
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 1a211cd..7b69bce 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -37,7 +37,7 @@
   // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
   // collections, i.e. the Zygote space. Also mark this space as immune. Virtual as overridden by
   // StickyMarkSweep.
-  virtual void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2a9f47a..e93ff05 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -273,8 +273,7 @@
 class SemiSpaceScanObjectVisitor {
  public:
   explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
-  void operator()(Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
-                                                              Locks::heap_bitmap_lock_) {
+  void operator()(Object* obj) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     DCHECK(obj != nullptr);
     semi_space_->ScanObject(obj);
   }
@@ -289,7 +288,7 @@
       from_space_(from_space) {}
 
   void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
     if (from_space_->HasAddress(ref)) {
       Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
@@ -310,7 +309,7 @@
  public:
   explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
   void operator()(Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
     semi_space_->VerifyNoFromSpaceReferences(obj);
   }
@@ -665,14 +664,14 @@
   }
 
   void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // Object was already verified when we scanned it.
     collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
   }
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_) {
     collector_->DelayReferenceReferent(klass, ref);
   }
 
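The semi_space.cc visitors above put the annotations on operator() of small
functor classes. That is deliberate: a lambda's call operator cannot carry
these attributes, which is why templated scanners such as ScanObjectVisit in
mark_sweep.h fall back to NO_THREAD_SAFETY_ANALYSIS ("no thread safety
analysis due to lambdas"). An illustrative sketch:

    #define CAPABILITY(name) __attribute__((capability(name)))
    #define REQUIRES(...)    __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...)     __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)     __attribute__((release_capability(__VA_ARGS__)))
    #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

    class CAPABILITY("mutex") ToyMutex {
     public:
      void Lock() ACQUIRE() {}
      void Unlock() RELEASE() {}
    };

    ToyMutex mutator_lock;

    // A functor can annotate its call operator...
    struct ScanVisitor {
      void operator()(void* /*obj*/) const REQUIRES(mutator_lock) {}
    };

    // ...a lambda cannot, so generic code invoking either kind opts out.
    template <typename Visitor>
    void VisitAll(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS {
      visitor(nullptr);
    }

    int main() {
      mutator_lock.Lock();
      ScanVisitor scan;
      scan(nullptr);  // checked: the capability is held here.
      mutator_lock.Unlock();
      VisitAll([](void*) {});  // unchecked inside VisitAll.
      return 0;
    }
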
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 6b7ea0d..d5772a0 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -66,13 +66,13 @@
 
   virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
   virtual void InitializePhase();
-  virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-  virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-  virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_);
+  virtual void ReclaimPhase() REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_);
+  virtual void FinishPhase() REQUIRES(Locks::mutator_lock_);
   void MarkReachableObjects()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
   virtual GcType GetGcType() const OVERRIDE {
     return kGcTypePartial;
   }
@@ -101,94 +101,93 @@
   // Updates obj_ptr if the object has moved.
   template<bool kPoisonReferences>
   void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   void ScanObject(mirror::Object* obj)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   void VerifyNoFromSpaceReferences(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Marks the root set at the start of a garbage collection.
   void MarkRoots()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
   // the image. Mark that portion of the heap as immune.
-  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_);
 
   void UnBindBitmaps()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_);
 
-  void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ProcessReferences(Thread* self) REQUIRES(Locks::mutator_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
-  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
-  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
 
   void SweepSystemWeaks()
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Schedules an unmarked object for reference processing.
   void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
  protected:
   // Returns null if the object is not marked, otherwise returns the forwarding address (same as
   // object for non-movable things).
   virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_);
 
   virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_);
 
   // Marks or unmarks a large object depending on the value of set: if set is true we mark,
   // otherwise we unmark.
   bool MarkLargeObject(const mirror::Object* obj)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Expand mark stack to 2x its current size.
-  void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if we should sweep the space.
   virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;
 
   // Push an object onto the mark stack.
-  void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void UpdateAndMarkModUnion()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Recursively blackens objects on the mark stack.
   void ProcessMarkStack()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Revoke all the thread-local buffers.
   void RevokeAllThreadLocalBuffers();
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index b9ef137..e8e70de 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -36,15 +36,15 @@
  protected:
   // Bind the live bits to the mark bits of bitmaps for all spaces; all spaces other than the
   // alloc space will be marked as immune.
-  void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   void MarkReachableObjects() OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_);
 
   void Sweep(bool swap_bitmaps) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep);
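
sticky_mark_sweep.h keeps the shared/exclusive split: sweeping runs with only
a shared hold on the mutator lock but still needs the bitmap lock exclusively.
The split maps onto reader-writer locks; a sketch with an illustrative toy
lock (an exclusive hold satisfies a shared requirement, not vice versa):

    #define CAPABILITY(name)     __attribute__((capability(name)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
    #define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") ToyReaderWriterMutex {
     public:
      void ExclusiveLock() ACQUIRE() {}
      void ExclusiveUnlock() RELEASE() {}
      void SharedLock() ACQUIRE_SHARED() {}
      void SharedUnlock() RELEASE_SHARED() {}
    };

    ToyReaderWriterMutex heap_bitmap_lock;

    bool IsMarked() SHARED_REQUIRES(heap_bitmap_lock) { return true; }  // readers
    void MarkObject() REQUIRES(heap_bitmap_lock) {}                     // writers

    int main() {
      heap_bitmap_lock.SharedLock();
      IsMarked();       // OK: a shared hold satisfies SHARED_REQUIRES.
      // MarkObject();  // would warn: REQUIRES needs an exclusive hold.
      heap_bitmap_lock.SharedUnlock();

      heap_bitmap_lock.ExclusiveLock();
      MarkObject();     // OK.
      IsMarked();       // also OK: exclusive implies shared.
      heap_bitmap_lock.ExclusiveUnlock();
      return 0;
    }
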
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2b94cf1..7a9e418 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1695,11 +1695,11 @@
 class InstanceCounter {
  public:
   InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
   }
   static void Callback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
     mirror::Class* instance_class = obj->GetClass();
     CHECK(instance_class != nullptr);
@@ -1731,11 +1731,11 @@
 class InstanceCollector {
  public:
   InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : class_(c), max_count_(max_count), instances_(instances) {
   }
   static void Callback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     DCHECK(arg != nullptr);
     InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
     if (obj->GetClass() == instance_collector->class_) {
@@ -1763,12 +1763,12 @@
  public:
   ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
                          std::vector<mirror::Object*>& referring_objects)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
   }
 
   static void Callback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
   }
 
@@ -1781,7 +1781,7 @@
 
   // For Object::VisitReferences.
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
     if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
       referring_objects_.push_back(obj);
@@ -2111,7 +2111,7 @@
   const bool is_running_on_memory_tool_;
 
   static void Callback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(arg != nullptr);
     BinContext* context = reinterpret_cast<BinContext*>(arg);
     ZygoteCompactingCollector* collector = context->collector_;
@@ -2139,7 +2139,7 @@
   }
 
   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     size_t obj_size = obj->SizeOf();
     size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
     mirror::Object* forward_address;
@@ -2583,7 +2583,7 @@
   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     if (root == obj_) {
       LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
     }
@@ -2605,7 +2605,7 @@
 class VerifyReferenceVisitor : public SingleRootVisitor {
  public:
   explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
 
   size_t GetFailureCount() const {
@@ -2613,7 +2613,7 @@
   }
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     UNUSED(klass);
     if (verify_referent_) {
       VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
@@ -2621,7 +2621,7 @@
   }
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
   }
 
@@ -2630,7 +2630,7 @@
   }
 
   void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (root == nullptr) {
       LOG(ERROR) << "Root is null with info " << root_info.GetType();
     } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
@@ -2748,7 +2748,7 @@
   }
 
   void operator()(mirror::Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // Note: we are verifying the references in obj but not obj itself; this is because obj must
     // be live, or else how did we find it in the live bitmap?
     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -2757,13 +2757,13 @@
   }
 
   static void VisitCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
     visitor->operator()(obj);
   }
 
-  void VerifyRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) {
+  void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_) {
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
     Runtime::Current()->VisitRoots(&visitor);
@@ -2855,7 +2855,7 @@
 class VerifyReferenceCardVisitor {
  public:
   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
-                            Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_,
+                      Locks::heap_bitmap_lock_)
       : heap_(heap), failed_(failed) {
   }
@@ -2932,7 +2932,7 @@
         failed_(false) {}
 
   void operator()(mirror::Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
     obj->VisitReferences<true>(visitor, VoidFunctor());
   }
@@ -3425,7 +3425,7 @@
   const bool force_full_;  // If true, force full (or partial) collection.
 };
 
-static bool CanAddHeapTask(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_) {
+static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
   Runtime* runtime = Runtime::Current();
   return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
       !self->IsHandlingStackOverflow();
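
The heap.h hunks below spell the new negatives as !*gc_complete_lock_ rather
than !gc_complete_lock_: those Mutexes sit behind pointer members, and the
dereference makes the annotation name the lock object instead of the pointer.
A minimal illustrative sketch:

    #define CAPABILITY(name) __attribute__((capability(name)))
    #define REQUIRES(...)    __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...)     __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)     __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") ToyMutex {
     public:
      void Lock() ACQUIRE() {}
      void Unlock() RELEASE() {}
    };

    class ToyHeap {
     public:
      ToyHeap() : gc_complete_lock_(new ToyMutex) {}
      ~ToyHeap() { delete gc_complete_lock_; }

      // The * dereference refers to the pointee, i.e. the Mutex itself.
      void WaitForGcToComplete() REQUIRES(!*gc_complete_lock_) {
        gc_complete_lock_->Lock();
        // ... wait for the collector to become idle ...
        gc_complete_lock_->Unlock();
      }

     private:
      ToyMutex* gc_complete_lock_;
    };

    int main() {
      ToyHeap heap;
      heap.WaitForGcToComplete();
      return 0;
    }
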
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index ee3d510..790a98c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -188,26 +188,27 @@
   template <bool kInstrumented, typename PreFenceVisitor>
   mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                               const PreFenceVisitor& pre_fence_visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
-                                                         GetCurrentAllocator(),
-                                                         pre_fence_visitor);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_) {
+    return AllocObjectWithAllocator<kInstrumented, true>(
+        self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor);
   }
 
   template <bool kInstrumented, typename PreFenceVisitor>
   mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                                         const PreFenceVisitor& pre_fence_visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
-                                                         GetCurrentNonMovingAllocator(),
-                                                         pre_fence_visitor);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_) {
+    return AllocObjectWithAllocator<kInstrumented, true>(
+        self, klass, num_bytes, GetCurrentNonMovingAllocator(), pre_fence_visitor);
   }
 
   template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
       Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
       const PreFenceVisitor& pre_fence_visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
 
   AllocatorType GetCurrentAllocator() const {
     return current_allocator_;
@@ -219,29 +220,29 @@
 
   // Visit all of the live objects in the heap.
   void VisitObjects(ObjectCallback callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
   void VisitObjectsPaused(ObjectCallback callback, void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
 
   void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void RegisterNativeAllocation(JNIEnv* env, size_t bytes);
-  void RegisterNativeFree(JNIEnv* env, size_t bytes);
+  void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+  void RegisterNativeFree(JNIEnv* env, size_t bytes)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
 
   // Change the allocator and update the entrypoints.
   void ChangeAllocator(AllocatorType allocator)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
 
   // Transition the garbage collector during runtime; may copy objects from one space to another.
-  void TransitionCollector(CollectorType collector_type);
+  void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_);
 
   // Change the collector to be one of the possible options (MS, CMS, SS).
   void ChangeCollector(CollectorType collector_type)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_);
 
   // The given reference is believed to point to an object in the Java heap; check its soundness.
   // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
@@ -249,61 +250,64 @@
   void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
 
   // Check sanity of all live references.
-  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
   // Returns how many failures occurred.
   size_t VerifyHeapReferences(bool verify_referents = true)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
   bool VerifyMissingCardMarks()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
   // and doesn't abort on error, allowing the caller to report more
   // meaningful diagnostics.
   bool IsValidObjectAddress(const mirror::Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Faster alternative to IsHeapAddress, since determining whether an object is in the large
   // object space is very slow.
   bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
   // Requires the heap lock to be held.
   bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
                           bool search_live_stack = true, bool sorted = false)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Returns true if there is any chance that the object (obj) will move.
-  bool IsMovableObject(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsMovableObject(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Lets us temporarily disable moving (compacting) GC until the objects are released.
-  void IncrementDisableMovingGC(Thread* self);
-  void DecrementDisableMovingGC(Thread* self);
+  void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
+  void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
 
   // Clear all of the mark bits; this doesn't clear bitmaps which have the same live bits as mark bits.
-  void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void ClearMarkedObjects() REQUIRES(Locks::heap_bitmap_lock_);
 
   // Initiates an explicit garbage collection.
-  void CollectGarbage(bool clear_soft_references);
+  void CollectGarbage(bool clear_soft_references)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
 
   // Does a concurrent GC; should only be called by the GC daemon thread
   // through runtime.
-  void ConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+  void ConcurrentGC(Thread* self, bool force_full)
+      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);
 
   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
   void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                       uint64_t* counts)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Implements JDWP RT_Instances.
   void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Implements JDWP OR_ReferringObjects.
-  void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void GetReferringObjects(mirror::Object* o, int32_t max_count,
+                           std::vector<mirror::Object*>& referring_objects)
+      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
   // implement dalvik.system.VMRuntime.clearGrowthLimit.
@@ -311,7 +315,7 @@
 
   // Make the current growth limit the new maximum capacity and unmap pages at the end of spaces
   // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
-  void ClampGrowthLimit() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);
 
   // Target ideal heap utilization ratio, implements
   // dalvik.system.VMRuntime.getTargetHeapUtilization.
@@ -326,9 +330,9 @@
   // Set the heap's private space pointers to be the same as the space based on its type. Public
   // due to usage by tests.
   void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-  void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-  void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+      REQUIRES(!Locks::heap_bitmap_lock_);
+  void AddSpace(space::Space* space) REQUIRES(!Locks::heap_bitmap_lock_);
+  void RemoveSpace(space::Space* space) REQUIRES(!Locks::heap_bitmap_lock_);
 
   // Set target ideal heap utilization ratio, implements
   // dalvik.system.VMRuntime.setTargetHeapUtilization.
@@ -341,10 +345,11 @@
   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
   // waited for.
   collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
-      LOCKS_EXCLUDED(gc_complete_lock_);
+      REQUIRES(!*gc_complete_lock_);
 
   // Update the heap's process state to a new value, may cause compaction to occur.
-  void UpdateProcessState(ProcessState process_state);
+  void UpdateProcessState(ProcessState process_state)
+      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
 
   const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
     return continuous_spaces_;
@@ -428,7 +433,7 @@
   }
 
   // Returns the number of objects currently allocated.
-  size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  size_t GetObjectsAllocated() const REQUIRES(!Locks::heap_bitmap_lock_);
 
   // Returns the total number of objects allocated since the heap was created.
   uint64_t GetObjectsAllocatedEver() const;
@@ -487,13 +492,13 @@
                                                               bool fail_ok) const;
   space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
 
-  void DumpForSigQuit(std::ostream& os);
+  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
 
   // Do a pending collector transition.
-  void DoPendingCollectorTransition();
+  void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_);
 
   // Deflate monitors, ... and trim the spaces.
-  void Trim(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
+  void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
 
   void RevokeThreadLocalBuffers(Thread* thread);
   void RevokeRosAllocThreadLocalBuffers(Thread* thread);
@@ -501,17 +506,17 @@
   void AssertThreadLocalBuffersAreRevoked(Thread* thread);
   void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
   void RosAllocVerification(TimingLogger* timings, const char* name)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_);
 
-  accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  accounting::HeapBitmap* GetLiveBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
     return live_bitmap_.get();
   }
 
-  accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  accounting::HeapBitmap* GetMarkBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
     return mark_bitmap_.get();
   }
 
-  accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  accounting::ObjectStack* GetLiveStack() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
     return live_stack_.get();
   }
 
@@ -519,13 +524,12 @@
 
   // Mark and empty stack.
   void FlushAllocStack()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_);
 
   // Revoke all the thread-local allocation stacks.
   void RevokeAllThreadLocalAllocationStacks(Thread* self)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
 
   // Mark all the objects in the allocation stack in the specified bitmap.
   // TODO: Refactor?
@@ -533,23 +537,21 @@
                       accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                       accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                       accounting::ObjectStack* stack)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
 
   // Mark the specified allocation stack as live.
   void MarkAllocStackAsLive(accounting::ObjectStack* stack)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
 
   // Unbind any bound bitmaps.
-  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
 
   // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
   // Assumes there is only one image space.
   space::ImageSpace* GetImageSpace() const;
 
   // Permanently disable moving garbage collection.
-  void DisableMovingGc();
+  void DisableMovingGc() REQUIRES(!*gc_complete_lock_);
 
   space::DlMallocSpace* GetDlMallocSpace() const {
     return dlmalloc_space_;
@@ -595,8 +597,8 @@
   std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
 
   // GC performance measuring
-  void DumpGcPerformanceInfo(std::ostream& os);
-  void ResetGcPerformanceInfo();
+  void DumpGcPerformanceInfo(std::ostream& os) REQUIRES(!*gc_complete_lock_);
+  void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
 
   // Returns true if we currently care about pause times.
   bool CareAboutPauseTimes() const {
@@ -656,16 +658,16 @@
     return false;
   }
 
-  bool IsMovingGCDisabled(Thread* self) {
+  bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
     MutexLock mu(self, *gc_complete_lock_);
     return disable_moving_gc_count_ > 0;
   }
 
   // Request an asynchronous trim.
-  void RequestTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+  void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);
 
   // Request asynchronous GC.
-  void RequestConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(pending_task_lock_);
+  void RequestConcurrentGC(Thread* self, bool force_full) REQUIRES(!*pending_task_lock_);
 
   // Whether or not we may use a garbage collector, used so that we only create collectors we need.
   bool MayUseCollector(CollectorType type) const;
@@ -680,8 +682,8 @@
   uint64_t GetGcTime() const;
   uint64_t GetBlockingGcCount() const;
   uint64_t GetBlockingGcTime() const;
-  void DumpGcCountRateHistogram(std::ostream& os) const;
-  void DumpBlockingGcCountRateHistogram(std::ostream& os) const;
+  void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
+  void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
 
   // Allocation tracking support
   // Callers to this function use double-checked locking to ensure safety on allocation_records_
@@ -689,33 +691,33 @@
     return alloc_tracking_enabled_.LoadRelaxed();
   }
 
-  void SetAllocTrackingEnabled(bool enabled) EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+  void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
     alloc_tracking_enabled_.StoreRelaxed(enabled);
   }
 
   AllocRecordObjectMap* GetAllocationRecords() const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
+      REQUIRES(Locks::alloc_tracker_lock_) {
     return allocation_records_.get();
   }
 
   void SetAllocationRecords(AllocRecordObjectMap* records)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+      REQUIRES(Locks::alloc_tracker_lock_);
 
   void VisitAllocationRecords(RootVisitor* visitor) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::alloc_tracker_lock_);
 
   void SweepAllocationRecords(IsMarkedVisitor* visitor) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::alloc_tracker_lock_);
 
   void DisallowNewAllocationRecords() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::alloc_tracker_lock_);
 
   void AllowNewAllocationRecords() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::alloc_tracker_lock_);
 
  private:
   class ConcurrentGCTask;
@@ -726,10 +728,10 @@
   collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
                                        space::ContinuousMemMapAllocSpace* source_space,
                                        GcCause gc_cause)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_);
 
   void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
-  void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
+  void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
 
   // Create a mem map with a preferred base address.
   static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
@@ -758,10 +760,10 @@
         collector_type == kCollectorTypeHomogeneousSpaceCompact;
   }
   bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                        mirror::Object** obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
 
   accounting::ObjectStack* GetMarkStack() {
     return mark_stack_.get();
@@ -771,7 +773,8 @@
   template <bool kInstrumented, typename PreFenceVisitor>
   mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count,
                                    const PreFenceVisitor& pre_fence_visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
 
   // Handles Allocate()'s slow allocation path with GC involved after
   // an initial allocation attempt failed.
@@ -779,17 +782,17 @@
                                          size_t* bytes_allocated, size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated,
                                          mirror::Class** klass)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Allocate into a specific space.
   mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
                                size_t bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
   // wrong space.
-  void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);
 
   // Try to allocate a number of bytes; this function never does any GCs. Needs to be inlined so
   // that the switch statement is constant optimized in the entrypoints.
@@ -798,17 +801,17 @@
                                               size_t alloc_size, size_t* bytes_allocated,
                                               size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kGrow>
   ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
 
   // Returns true if the address passed in is within the address range of a continuous space.
   bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Run the finalizers. If timeout is non-zero, then we use the VMRuntime version.
   void RunFinalization(JNIEnv* env, uint64_t timeout);
@@ -816,36 +819,34 @@
   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
   // waited for.
   collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
-      EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+      REQUIRES(gc_complete_lock_);
 
   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
-      LOCKS_EXCLUDED(pending_task_lock_);
+      REQUIRES(!*pending_task_lock_);
 
   void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*pending_task_lock_);
   bool IsGCRequestPending() const;
 
   // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
   // which type of Gc was actually run.
   collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
                                            bool clear_soft_references)
-      LOCKS_EXCLUDED(gc_complete_lock_,
-                     Locks::heap_bitmap_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
+               !*pending_task_lock_);
 
   void PreGcVerification(collector::GarbageCollector* gc)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
   void PreGcVerificationPaused(collector::GarbageCollector* gc)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
   void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_);
   void PreSweepingGcVerification(collector::GarbageCollector* gc)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
   void PostGcVerification(collector::GarbageCollector* gc)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
   void PostGcVerificationPaused(collector::GarbageCollector* gc)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
 
   // Update the watermark for the native allocated bytes based on the current number of native
   // bytes allocated and the target utilization ratio.
@@ -855,7 +856,7 @@
   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
 
   // Create a new alloc space and compact default alloc space to it.
-  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact();
+  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
 
   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
   void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
@@ -876,10 +877,10 @@
   size_t GetPercentFree();
 
   static void VerificationCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::heap_bitmap_lock_);
 
   // Swap the allocation stack with the live stack.
-  void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SwapStacks(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Clear cards and update the mod union table. When process_alloc_space_cards is true,
   // if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do
@@ -889,15 +890,15 @@
 
   // Push an object onto the allocation stack.
   void PushOnAllocationStack(Thread* self, mirror::Object** obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
   void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
   void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
 
   void ClearConcurrentGCRequest();
-  void ClearPendingTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
-  void ClearPendingCollectorTransition(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+  void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
+  void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
 
   // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
   // sweep GC, false for other GC types.
@@ -906,23 +907,23 @@
   }
 
   // Trim the managed and native spaces by releasing unused memory back to the OS.
-  void TrimSpaces(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
+  void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);
 
   // Trim 0 pages at the end of reference tables.
   void TrimIndirectReferenceTables(Thread* self);
 
   void VisitObjectsInternal(ObjectCallback callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
   void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
 
-  void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+  void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
 
   // GC stress mode attempts to do one GC per unique backtrace.
   void CheckGcStressMode(Thread* self, mirror::Object** obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
 
   // All-known continuous spaces, where objects lie within fixed bounds.
   std::vector<space::ContinuousSpace*> continuous_spaces_;
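
A note on the recurring shape of these heap.h changes: REQUIRES(!x) is a negative capability, an assertion that the caller does not hold x, which is precisely what a function needs in order to acquire x itself. The '*' in !*gc_complete_lock_ and !*pending_task_lock_ is ordinary C++: those members are Mutex pointers, so the annotation names the pointee. A minimal compilable sketch of the pattern, written against the raw clang attributes that macros like these conventionally expand to (all names below are illustrative, not ART code):

    #define CAPABILITY(x)  __attribute__((capability(x)))
    #define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") Mutex {
      void Lock() ACQUIRE();
      void Unlock() RELEASE();
    };

    class HeapSketch {
     public:
      // Negative capability: the caller must NOT already hold the lock,
      // because this method takes it itself.
      void FinishGC() REQUIRES(!*gc_complete_lock_) {
        gc_complete_lock_->Lock();
        UpdateHistogramsLocked();  // ok: the analysis sees the lock as held
        gc_complete_lock_->Unlock();
      }

     private:
      // Positive capability: the caller must already hold the lock.
      void UpdateHistogramsLocked() REQUIRES(*gc_complete_lock_) {}

      Mutex* gc_complete_lock_;  // pointer member, hence the '!*' spelling
    };
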
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 95877d1..d9dfedb 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -48,39 +48,39 @@
   explicit ReferenceProcessor();
   void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
                          gc::collector::GarbageCollector* collector)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      LOCKS_EXCLUDED(Locks::reference_processor_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!Locks::reference_processor_lock_);
   // The slow path bool is contained in the reference class object and can only be set once.
   // Only allow setting this with mutators suspended so that we can avoid using a lock in the
   // GetReferent fast path as an optimization.
-  void EnableSlowPath() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnableSlowPath() SHARED_REQUIRES(Locks::mutator_lock_);
   void BroadcastForSlowPath(Thread* self);
   // Decode the referent, may block if references are being processed.
   mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
-  void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
+  void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_);
   void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                               collector::GarbageCollector* collector)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void UpdateRoots(IsMarkedVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
   // Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
   bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::reference_processor_lock_,
-                     Locks::reference_queue_finalizer_references_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::reference_processor_lock_,
+               !Locks::reference_queue_finalizer_references_lock_);
 
  private:
-  bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool SlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
   // Called by ProcessReferences.
-  void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DisableSlowPath(Thread* self) REQUIRES(Locks::reference_processor_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // If we are preserving references, it means that some dead objects may become live. We use start
   // and stop preserving to block mutators using GetReferent from getting access to these
   // referents.
-  void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
-  void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
+  void StartPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
+  void StopPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
   // Collector which is clearing references, used by the GetReferent to return referents which are
   // already marked.
   collector::GarbageCollector* collector_ GUARDED_BY(Locks::reference_processor_lock_);
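
ProcessReferences and GetReferent above combine both flavors: SHARED_REQUIRES(Locks::mutator_lock_) means the caller holds the mutator lock in at least shared (reader) mode, while REQUIRES(!Locks::reference_processor_lock_) means it must not hold the processor lock, which the methods acquire themselves. A compilable sketch of how shared versus exclusive requirements interact with a GUARDED_BY field such as collector_ (macro spellings assumed from the clang thread-safety documentation, not taken from ART):

    #define CAPABILITY(x)         __attribute__((capability(x)))
    #define GUARDED_BY(x)         __attribute__((guarded_by(x)))
    #define REQUIRES(...)         __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...)  __attribute__((requires_shared_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") RWLock {};

    class ProcessorSketch {
     public:
      // A shared (reader) hold is enough to read the guarded field...
      const void* Collector() const SHARED_REQUIRES(lock_) { return collector_; }

      // ...but writing it demands the exclusive capability; the analysis
      // rejects this store if lock_ is only held shared at the call site.
      void SetCollector(const void* c) REQUIRES(lock_) { collector_ = c; }

     private:
      RWLock lock_;
      const void* collector_ GUARDED_BY(lock_);
    };
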
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 7d9ddf6..aabac97 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -22,6 +22,7 @@
 #include <vector>
 
 #include "atomic.h"
+#include "base/mutex.h"
 #include "base/timing_logger.h"
 #include "globals.h"
 #include "jni.h"
@@ -53,39 +54,39 @@
   // since it uses a lock to avoid a race between checking for the reference's presence and adding
   // it.
   void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*lock_);
 
   // Enqueue a reference; unlike EnqueuePendingReference, this checks that the
   // reference IsEnqueueable. Not thread safe; used when mutators are paused to minimize lock
   // overhead.
-  void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnqueueReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Enqueue a reference without checking that it is enqueueable.
-  void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnqueuePendingReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Dequeue the first reference (returns list_).
-  mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Reference* DequeuePendingReference() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Enqueues finalizer references with white referents.  White referents are blackened, moved to
   // the zombie field, and the referent field is cleared.
   void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                   collector::GarbageCollector* collector)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Walks the reference list marking any references subject to the reference clearing policy.
   // References with a black referent are removed from the list.  References with white referents
   // biased toward saving are blackened and also removed from the list.
   void ForwardSoftReferences(MarkObjectVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Unlink the reference list, clearing reference objects with white referents. Cleared references
   // registered to a reference queue are scheduled for appending by the heap worker thread.
   void ClearWhiteReferences(ReferenceQueue* cleared_references,
                             collector::GarbageCollector* collector)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  size_t GetLength() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
+  size_t GetLength() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool IsEmpty() const {
     return list_ == nullptr;
@@ -93,13 +94,13 @@
   void Clear() {
     list_ = nullptr;
   }
-  mirror::Reference* GetList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Reference* GetList() SHARED_REQUIRES(Locks::mutator_lock_) {
     return list_;
   }
 
   // Visits list_, currently only used for the mark compact GC.
   void UpdateRoots(IsMarkedVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 338a41e..2263797 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -87,7 +87,7 @@
 }
 
 inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   size_t num_bytes = obj->SizeOf();
   if (usable_size != nullptr) {
     *usable_size = RoundUp(num_bytes, kAlignment);
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index df43606..0e27d84 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -51,14 +51,14 @@
   // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      OVERRIDE REQUIRES(Locks::mutator_lock_);
 
   mirror::Object* AllocNonvirtual(size_t num_bytes);
   mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
 
   // Return the storage space required by obj.
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
 
@@ -72,7 +72,7 @@
   }
 
   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
   // maximum reserved size of the heap.
@@ -99,19 +99,21 @@
   }
 
   // Reset the space to empty.
-  void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_);
+  void Clear() OVERRIDE REQUIRES(!block_lock_);
 
   void Dump(std::ostream& os) const;
 
-  size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_);
-  size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
-                                                      Locks::thread_list_lock_);
-  void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(block_lock_);
-  void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
-                                                              Locks::thread_list_lock_);
+  size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!block_lock_);
+  size_t RevokeAllThreadLocalBuffers()
+      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
+  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
+  void AssertAllThreadLocalBuffersAreRevoked()
+      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
 
-  uint64_t GetBytesAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  uint64_t GetObjectsAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint64_t GetBytesAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
+  uint64_t GetObjectsAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
   bool IsEmpty() const {
     return Begin() == End();
   }
@@ -130,10 +132,10 @@
 
   // Return the object which comes after obj, while ensuring alignment.
   static mirror::Object* GetNextObject(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Allocate a new TLAB; returns false if the allocation failed.
-  bool AllocNewTlab(Thread* self, size_t bytes);
+  bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
 
   BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
     return this;
@@ -141,7 +143,7 @@
 
   // Go through all of the blocks and visit the continuous objects.
   void Walk(ObjectCallback* callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!block_lock_);
 
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
 
@@ -152,7 +154,7 @@
   }
 
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Object alignment within the space.
   static constexpr size_t kAlignment = 8;
@@ -161,13 +163,13 @@
   BumpPointerSpace(const std::string& name, MemMap* mem_map);
 
   // Allocate a raw block of bytes.
-  uint8_t* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
-  void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
+  uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
+  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_);
 
   // The main block is an unbounded block where objects go when there are no other blocks. This
   // enables us to maintain tightly packed objects when you are not using thread local buffers for
   // allocation. The main block starts at the space Begin().
-  void UpdateMainBlock() EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
+  void UpdateMainBlock() REQUIRES(block_lock_);
 
   uint8_t* growth_end_;
   AtomicInteger objects_allocated_;  // Accumulated from revoked thread local regions.
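
The Revoke hunks above show why this is more than a renaming exercise. The old LOCKS_EXCLUDED on RevokeAllThreadLocalBuffers listed only the two global locks; the new REQUIRES adds !block_lock_ as well, because the function calls RevokeThreadLocalBuffers, which needs block_lock_ free, and with -Wthread-safety-negative such requirements have to be restated by every caller instead of being silently assumed. A sketch of that propagation (illustrative names, not ART code):

    #include <cstddef>

    #define CAPABILITY(x)  __attribute__((capability(x)))
    #define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") Mutex {};

    class SpaceSketch {
     public:
      size_t RevokeOne() REQUIRES(!block_lock_);

      // Must restate !block_lock_: without it, -Wthread-safety-negative
      // flags the RevokeOne() call below. The old advisory LOCKS_EXCLUDED
      // never forced callers to carry an exclusion upward like this.
      size_t RevokeAll() REQUIRES(!block_lock_) {
        return RevokeOne();
      }

     private:
      Mutex block_lock_;
    };
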
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index ab527a4..eab757a 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -50,11 +50,11 @@
   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated)
-      OVERRIDE LOCKS_EXCLUDED(lock_);
+      OVERRIDE REQUIRES(!lock_);
   // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE LOCKS_EXCLUDED(lock_) {
+      OVERRIDE REQUIRES(!lock_) {
     return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                            bytes_tl_bulk_allocated);
   }
@@ -64,12 +64,12 @@
   }
   // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
-      LOCKS_EXCLUDED(lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
-      LOCKS_EXCLUDED(lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
     return num_bytes;
@@ -86,7 +86,7 @@
   // Faster non-virtual allocation path.
   mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      LOCKS_EXCLUDED(lock_);
+      REQUIRES(!lock_);
 
   // Faster non-virtual allocation size path.
   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size);
@@ -104,7 +104,7 @@
 
   // Perform an mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
   // in use, indicated by num_bytes equaling zero.
-  void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
+  void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
 
   // Returns the number of bytes that the space has currently obtained from the system. This is
   // greater than or equal to the amount of live data in the space.
@@ -136,7 +136,7 @@
   }
 
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  protected:
   DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
@@ -147,7 +147,7 @@
   mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                            size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+      REQUIRES(lock_);
 
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                         size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 93ff8aa..215c18b 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -44,7 +44,7 @@
   // used to transfer ownership of the OatFile to the ClassLinker when
   // it is initialized.
   static ImageSpace* Create(const char* image, InstructionSet image_isa, std::string* error_msg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Reads the image header from the specified image location for the
   // instruction set image_isa or dies trying.
@@ -64,10 +64,10 @@
   // Releases the OatFile from the ImageSpace so it can be transferred to
   // the caller, presumably the ClassLinker.
   OatFile* ReleaseOatFile()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void VerifyImageAllocations()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   const ImageHeader& GetImageHeader() const {
     return *reinterpret_cast<ImageHeader*>(Begin());
@@ -130,13 +130,13 @@
   // the OatFile in /data/dalvik-cache if necessary.
   static ImageSpace* Init(const char* image_filename, const char* image_location,
                           bool validate_oat_file, std::string* error_msg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   OatFile* OpenOatFile(const char* image, std::string* error_msg) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool ValidateOatFile(std::string* error_msg) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   friend class Space;
 
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 45ed0cd..c726998 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -96,7 +96,7 @@
     return Begin() <= byte_obj && byte_obj < End();
   }
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Return true if the large object is a zygote large object. Potentially slow.
   virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
@@ -130,11 +130,12 @@
   // of malloc.
   static LargeObjectMapSpace* Create(const std::string& name);
   // Return the storage space required by obj.
-  size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated);
-  size_t Free(Thread* self, mirror::Object* ptr);
-  void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+      REQUIRES(!lock_);
+  size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
+  void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
@@ -146,8 +147,8 @@
   explicit LargeObjectMapSpace(const std::string& name);
   virtual ~LargeObjectMapSpace() {}
 
-  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE LOCKS_EXCLUDED(lock_);
-  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE LOCKS_EXCLUDED(lock_);
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
 
   // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -163,12 +164,13 @@
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+      REQUIRES(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
-  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
-  void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
-  void Dump(std::ostream& os) const;
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+      OVERRIDE REQUIRES(!lock_);
+  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
+  void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
+  void Dump(std::ostream& os) const REQUIRES(!lock_);
 
  protected:
   FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
@@ -186,9 +188,9 @@
     return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
   }
   // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
-  void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
   bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
-  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE;
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
 
   class SortByPrevFree {
    public:
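
One escape hatch survives the migration: Contains() above keeps NO_THREAD_SAFETY_ANALYSIS (note the TODO) because it can be reached both with and without lock_ held, and a single declaration cannot express "either is acceptable". The attribute turns checking off for that one function body only; everything it touches goes unverified. A small sketch of the trade-off (illustrative, not ART code):

    #include <cstddef>

    #define CAPABILITY(x)             __attribute__((capability(x)))
    #define GUARDED_BY(x)             __attribute__((guarded_by(x)))
    #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

    struct CAPABILITY("mutex") Mutex {};

    class LargeObjectSketch {
     public:
      // Neither REQUIRES(lock_) nor REQUIRES(!lock_) fits a function with
      // both locked and unlocked callers, so analysis is disabled here.
      // The cost: the read of count_ below is not checked against lock_.
      bool Contains(const void* ptr) const NO_THREAD_SAFETY_ANALYSIS {
        return ptr != nullptr && count_ > 0;
      }

     private:
      Mutex lock_;
      size_t count_ GUARDED_BY(lock_) = 0;
    };
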
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6c689cd..4e56c4a 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -63,9 +63,9 @@
   // amount of the storage space that may be used by obj.
   virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
   virtual size_t Free(Thread* self, mirror::Object* ptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   // Returns the maximum bytes that could be allocated for the given
   // size in bulk, that is the maximum value for the
@@ -160,8 +160,8 @@
                                 size_t maximum_size, bool low_memory_mode) = 0;
 
   virtual void RegisterRecentFree(mirror::Object* ptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(lock_);
 
   virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
     return &SweepCallback;
@@ -196,7 +196,7 @@
 
  private:
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(MallocSpace);
 };
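
Annotating the pure-virtual Free and FreeList above is what makes the interface checkable: a call through a base-class pointer is verified against the static type's declaration, and the concrete spaces restate the requirement on their OVERRIDEs, as the dlmalloc and rosalloc hunks in this patch do. A compilable sketch (assumed names, not ART code):

    #include <cstddef>

    #define CAPABILITY(x)         __attribute__((capability(x)))
    #define SHARED_REQUIRES(...)  __attribute__((requires_shared_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") Mutex {};
    extern Mutex gMutatorLock;  // stand-in for Locks::mutator_lock_

    class MallocSpaceSketch {
     public:
      // The annotation on the interface is what call sites through a
      // MallocSpaceSketch* are checked against.
      virtual size_t Free(void* ptr) SHARED_REQUIRES(gMutatorLock) = 0;
      virtual ~MallocSpaceSketch() {}
    };

    // Checked: the caller's shared hold on gMutatorLock satisfies Free().
    size_t FreeThrough(MallocSpaceSketch* space, void* ptr)
        SHARED_REQUIRES(gMutatorLock) {
      return space->Free(ptr);
    }
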
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index 64c6f35..fe39e05 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -38,15 +38,15 @@
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      OVERRIDE REQUIRES(Locks::mutator_lock_);
 
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
 
   size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
     UNUSED(ptr);
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index db005f7..66fd62c 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -138,8 +138,7 @@
   return reinterpret_cast<mirror::Object*>(old_top);
 }
 
-inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
   size_t num_bytes = obj->SizeOf();
   if (usable_size != nullptr) {
     if (LIKELY(num_bytes <= kRegionSize)) {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 19109f0..14e8005 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -42,29 +42,31 @@
 
   // Allocate num_bytes; returns null if the space is full.
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
+                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+      OVERRIDE REQUIRES(!region_lock_);
   // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
   // The main allocation routine.
   template<bool kForEvac>
   ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                 size_t* usable_size,
-                                                size_t* bytes_tl_bulk_allocated);
+                                                size_t* bytes_tl_bulk_allocated)
+      REQUIRES(!region_lock_);
   // Allocate/free large objects (objects that are larger than the region size).
   template<bool kForEvac>
   mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
-                             size_t* bytes_tl_bulk_allocated);
-  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated);
+                             size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
+  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
 
   // Return the storage space required by obj.
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_) {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
 
   size_t Free(Thread*, mirror::Object*) OVERRIDE {
     UNIMPLEMENTED(FATAL);
@@ -83,19 +85,19 @@
     return nullptr;
   }
 
-  void Clear() OVERRIDE LOCKS_EXCLUDED(region_lock_);
+  void Clear() OVERRIDE REQUIRES(!region_lock_);
 
   void Dump(std::ostream& os) const;
-  void DumpRegions(std::ostream& os);
-  void DumpNonFreeRegions(std::ostream& os);
+  void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
+  void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);
 
-  size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
-  void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_);
-  size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
-                                                      Locks::thread_list_lock_);
-  void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_);
-  void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
-                                                              Locks::thread_list_lock_);
+  size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
+  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
+  size_t RevokeAllThreadLocalBuffers()
+      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
+  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
+  void AssertAllThreadLocalBuffersAreRevoked()
+      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
 
   enum class RegionType : uint8_t {
     kRegionTypeAll,              // All types.
@@ -112,24 +114,24 @@
     kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
   };
 
-  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal();
-  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal();
-  uint64_t GetBytesAllocated() {
+  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
+  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
+  uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
     return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
   }
-  uint64_t GetObjectsAllocated() {
+  uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
     return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
   }
-  uint64_t GetBytesAllocatedInFromSpace() {
+  uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
     return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
   }
-  uint64_t GetObjectsAllocatedInFromSpace() {
+  uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
     return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
   }
-  uint64_t GetBytesAllocatedInUnevacFromSpace() {
+  uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
     return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
   }
-  uint64_t GetObjectsAllocatedInUnevacFromSpace() {
+  uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
     return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
   }
 
@@ -148,12 +150,12 @@
 
   // Go through all of the blocks and visit the continuous objects.
   void Walk(ObjectCallback* callback, void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      REQUIRES(Locks::mutator_lock_) {
     WalkInternal<false>(callback, arg);
   }
 
   void WalkToSpace(ObjectCallback* callback, void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      REQUIRES(Locks::mutator_lock_) {
     WalkInternal<true>(callback, arg);
   }
 
@@ -161,7 +163,7 @@
     return nullptr;
   }
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
 
   // Object alignment within the space.
   static constexpr size_t kAlignment = kObjectAlignment;
@@ -201,22 +203,22 @@
   }
 
   void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
-      LOCKS_EXCLUDED(region_lock_);
+      REQUIRES(!region_lock_);
 
-  size_t FromSpaceSize();
-  size_t UnevacFromSpaceSize();
-  size_t ToSpaceSize();
-  void ClearFromSpace();
+  size_t FromSpaceSize() REQUIRES(!region_lock_);
+  size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
+  size_t ToSpaceSize() REQUIRES(!region_lock_);
+  void ClearFromSpace() REQUIRES(!region_lock_);
 
   void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
     Region* reg = RefToRegionUnlocked(ref);
     reg->AddLiveBytes(alloc_size);
   }
 
-  void AssertAllRegionLiveBytesZeroOrCleared();
+  void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_);
 
-  void RecordAlloc(mirror::Object* ref);
-  bool AllocNewTlab(Thread* self);
+  void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
+  bool AllocNewTlab(Thread* self) REQUIRES(!region_lock_);
 
   uint32_t Time() {
     return time_;
@@ -476,7 +478,7 @@
     friend class RegionSpace;
   };
 
-  Region* RefToRegion(mirror::Object* ref) LOCKS_EXCLUDED(region_lock_) {
+  Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
     MutexLock mu(Thread::Current(), region_lock_);
     return RefToRegionLocked(ref);
   }
@@ -492,7 +494,7 @@
     return RefToRegionLocked(ref);
   }
 
-  Region* RefToRegionLocked(mirror::Object* ref) EXCLUSIVE_LOCKS_REQUIRED(region_lock_) {
+  Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
     DCHECK(HasAddress(ref));
     uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
     size_t reg_idx = offset / kRegionSize;
@@ -504,7 +506,7 @@
   }
 
   mirror::Object* GetNextObject(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 9dc6f31..bc14738 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -48,7 +48,7 @@
 
   mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE LOCKS_EXCLUDED(lock_);
+      OVERRIDE REQUIRES(!lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
     return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
@@ -56,7 +56,7 @@
   }
   mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated)
-      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE REQUIRES(Locks::mutator_lock_) {
     return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
                                        bytes_tl_bulk_allocated);
   }
@@ -64,9 +64,9 @@
     return AllocationSizeNonvirtual<true>(obj, usable_size);
   }
   size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
@@ -104,7 +104,7 @@
   }
 
   size_t Trim() OVERRIDE;
-  void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
+  void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
   size_t GetFootprint() OVERRIDE;
   size_t GetFootprintLimit() OVERRIDE;
   void SetFootprintLimit(size_t limit) OVERRIDE;
@@ -134,7 +134,7 @@
     return this;
   }
 
-  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Verify() REQUIRES(Locks::mutator_lock_) {
     rosalloc_->Verify();
   }
 
@@ -166,11 +166,11 @@
 
   void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                           void* arg, bool do_null_callback_at_end)
-      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
+      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
   void InspectAllRosAllocWithSuspendAll(
       void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
       void* arg, bool do_null_callback_at_end)
-      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
+      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
 
   // Underlying rosalloc.
   allocator::RosAlloc* rosalloc_;
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 871ebac..fc558cf 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -219,7 +219,7 @@
   virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                             size_t* usable_size,
                                             size_t* bytes_tl_bulk_allocated)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      REQUIRES(Locks::mutator_lock_) {
     return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
   }
 
@@ -420,10 +420,9 @@
     return this;
   }
 
-  bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  void BindLiveToMarkBitmap()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  bool HasBoundBitmaps() const REQUIRES(Locks::heap_bitmap_lock_);
+  void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_);
+  void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
   // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
   void SwapBitmaps();
 
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 6e0e0d2..4d2db11 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -49,7 +49,7 @@
     heap->SetSpaceAsDefault(space);
   }
 
-  mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self);
     auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
     if (byte_array_class_ == nullptr) {
@@ -65,7 +65,7 @@
   mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                         size_t* bytes_allocated, size_t* usable_size,
                         size_t* bytes_tl_bulk_allocated)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
     mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size,
@@ -79,7 +79,7 @@
   mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                                   size_t* bytes_allocated, size_t* usable_size,
                                   size_t* bytes_tl_bulk_allocated)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
     mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
@@ -91,7 +91,7 @@
   }
 
   void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Note the minimum size, which is the size of a zero-length byte array.
     EXPECT_GE(size, SizeOfZeroLengthByteArray());
     EXPECT_TRUE(byte_array_class != nullptr);
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 934a234..f2889e2 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -33,7 +33,7 @@
   static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
                              accounting::ContinuousSpaceBitmap* live_bitmap,
                              accounting::ContinuousSpaceBitmap* mark_bitmap)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void Dump(std::ostream& os) const;
 
@@ -77,7 +77,7 @@
   }
 
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  protected:
   virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index 5f48619..e40fa06 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -54,17 +54,17 @@
  public:
   TaskProcessor();
   virtual ~TaskProcessor();
-  void AddTask(Thread* self, HeapTask* task) LOCKS_EXCLUDED(lock_);
-  HeapTask* GetTask(Thread* self) LOCKS_EXCLUDED(lock_);
-  void Start(Thread* self) LOCKS_EXCLUDED(lock_);
+  void AddTask(Thread* self, HeapTask* task) REQUIRES(!*lock_);
+  HeapTask* GetTask(Thread* self) REQUIRES(!*lock_);
+  void Start(Thread* self) REQUIRES(!*lock_);
   // Stop tells RunAllTasks to finish up the remaining tasks as soon as
   // possible, then return.
-  void Stop(Thread* self) LOCKS_EXCLUDED(lock_);
-  void RunAllTasks(Thread* self) LOCKS_EXCLUDED(lock_);
-  bool IsRunning() const LOCKS_EXCLUDED(lock_);
+  void Stop(Thread* self) REQUIRES(!*lock_);
+  void RunAllTasks(Thread* self) REQUIRES(!*lock_);
+  bool IsRunning() const REQUIRES(!*lock_);
   void UpdateTargetRunTime(Thread* self, HeapTask* target_time, uint64_t new_target_time)
-      LOCKS_EXCLUDED(lock_);
-  Thread* GetRunningThread() const LOCKS_EXCLUDED(lock_);
+      REQUIRES(!*lock_);
+  Thread* GetRunningThread() const REQUIRES(!*lock_);
 
  private:
   class CompareByTargetRunTime {
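 
TaskProcessor holds its mutex through a pointer member (lock_ is presumably a Mutex*, not visible in this hunk), so the capability expression dereferences it: REQUIRES(!*lock_) means "the caller must not hold the Mutex that lock_ points to". A sketch reusing the shims above (TaskQueue and its members are made up):

  class TaskQueue {
   public:
    TaskQueue() : lock_(new Mutex) {}  // owned and freed elsewhere in real code
    void SetPending(int task) REQUIRES(!*lock_) {
      lock_->Lock();
      pending_ = task;
      lock_->Unlock();
    }
    int GetPending() REQUIRES(!*lock_) {
      lock_->Lock();
      int result = pending_;
      lock_->Unlock();
      return result;
    }
   private:
    Mutex* const lock_;
    int pending_ GUARDED_BY(lock_) = 0;  // the analysis accepts the pointer form
  };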
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index bb604f0..83471e6 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -91,24 +91,24 @@
 
   // Single root version, not overridable.
   ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     VisitRoots(&roots, 1, info);
   }
 
   // Single root version, not overridable.
   ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (*roots != nullptr) {
       VisitRoot(roots, info);
     }
   }
 
   virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 };
 
 // Only visits roots one at a time, doesn't handle updating roots. Used when performance isn't
@@ -116,7 +116,7 @@
 class SingleRootVisitor : public RootVisitor {
  private:
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       VisitRoot(*roots[i], info);
     }
@@ -124,7 +124,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       VisitRoot(roots[i]->AsMirrorPtr(), info);
     }
@@ -169,10 +169,10 @@
  public:
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE MirrorType* Read(GcRootSource* gc_root_source = nullptr) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void VisitRoot(RootVisitor* visitor, const RootInfo& info) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!IsNull());
     mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ };
     visitor->VisitRoots(roots, 1u, info);
@@ -180,7 +180,7 @@
   }
 
   void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!IsNull()) {
       VisitRoot(visitor, info);
     }
@@ -195,7 +195,7 @@
     return root_.IsNull();
   }
 
-  ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   // Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
@@ -222,7 +222,7 @@
 
   template <class MirrorType>
   ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!root.IsNull()) {
       VisitRoot(root);
     }
@@ -230,27 +230,27 @@
 
   template <class MirrorType>
   ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!root->IsNull()) {
       VisitRoot(root);
     }
   }
 
   template <class MirrorType>
-  void VisitRoot(GcRoot<MirrorType>& root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void VisitRoot(GcRoot<MirrorType>& root) SHARED_REQUIRES(Locks::mutator_lock_) {
     VisitRoot(root.AddressWithoutBarrier());
   }
 
   template <class MirrorType>
   void VisitRoot(mirror::CompressedReference<MirrorType>* root)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(buffer_pos_ >= kBufferSize)) {
       Flush();
     }
     roots_[buffer_pos_++] = root;
   }
 
-  void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
     visitor_->VisitRoots(roots_, buffer_pos_, root_info_);
     buffer_pos_ = 0;
   }
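 
mutator_lock_ is a reader-writer lock, so nearly everything here takes the shared form: SHARED_LOCKS_REQUIRED(l) becomes SHARED_REQUIRES(l), presumably expanding to requires_shared_capability, i.e. "held at least for reading". A sketch on top of the shims above (names hypothetical; std::shared_timed_mutex is only a stand-in for art::ReaderWriterMutex):

  #include <shared_mutex>  // C++14

  #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
  #define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
  #define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))

  class CAPABILITY("mutex") ReaderWriterMutex {
   public:
    void ReaderLock() ACQUIRE_SHARED() { mu_.lock_shared(); }
    void ReaderUnlock() RELEASE_SHARED() { mu_.unlock_shared(); }
    void WriterLock() ACQUIRE() { mu_.lock(); }
    void WriterUnlock() RELEASE() { mu_.unlock(); }
   private:
    std::shared_timed_mutex mu_;
  };

  ReaderWriterMutex gc_lock;
  int root_count GUARDED_BY(gc_lock) = 0;

  // Shared capability: satisfied by a reader hold or a writer hold.
  int CountRoots() SHARED_REQUIRES(gc_lock) { return root_count; }
  // Exclusive capability: only a writer hold will do.
  void AddRoot() REQUIRES(gc_lock) { ++root_count; }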
diff --git a/runtime/handle.h b/runtime/handle.h
index d94d875..f939ec5 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -50,19 +50,19 @@
   ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) {
   }
 
-  ALWAYS_INLINE T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE T& operator*() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return *Get();
   }
 
-  ALWAYS_INLINE T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE T* operator->() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return Get();
   }
 
-  ALWAYS_INLINE T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE T* Get() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return down_cast<T*>(reference_->AsMirrorPtr());
   }
 
-  ALWAYS_INLINE jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE jobject ToJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) {
       // Special case so that we work with NullHandles.
       return nullptr;
@@ -71,12 +71,12 @@
   }
 
   ALWAYS_INLINE StackReference<mirror::Object>* GetReference()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return reference_;
   }
 
   ALWAYS_INLINE const StackReference<mirror::Object>* GetReference() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return reference_;
   }
 
@@ -108,22 +108,22 @@
   }
 
   ALWAYS_INLINE MutableHandle(const MutableHandle<T>& handle)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : Handle<T>(handle.reference_) {
   }
 
   ALWAYS_INLINE MutableHandle<T>& operator=(const MutableHandle<T>& handle)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Handle<T>::operator=(handle);
     return *this;
   }
 
   ALWAYS_INLINE explicit MutableHandle(StackReference<T>* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : Handle<T>(reference) {
   }
 
-  ALWAYS_INLINE T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE T* Assign(T* reference) SHARED_REQUIRES(Locks::mutator_lock_) {
     StackReference<mirror::Object>* ref = Handle<T>::GetReference();
     T* old = down_cast<T*>(ref->AsMirrorPtr());
     ref->Assign(reference);
@@ -131,12 +131,12 @@
   }
 
   template<typename S>
-  explicit MutableHandle(const MutableHandle<S>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  explicit MutableHandle(const MutableHandle<S>& handle) SHARED_REQUIRES(Locks::mutator_lock_)
       : Handle<T>(handle) {
   }
 
   template<typename S>
-  explicit MutableHandle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  explicit MutableHandle(StackReference<S>* reference) SHARED_REQUIRES(Locks::mutator_lock_)
       : Handle<T>(reference) {
   }
 
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 9a0e52e..5ed8ef0 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -60,16 +60,16 @@
   }
 
   ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
 
@@ -150,14 +150,14 @@
   ALWAYS_INLINE ~StackHandleScope();
 
   template<class T>
-  ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<class T>
   ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   Thread* Self() const {
     return self_;
@@ -165,7 +165,7 @@
 
  private:
   template<class T>
-  ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK_LT(i, kNumReferences);
     return MutableHandle<T>(&GetReferences()[i]);
   }
@@ -209,7 +209,7 @@
   }
 
   template<class T>
-  MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) {
     if (scopes_.empty() || current_scope_num_refs_ >= kNumReferencesPerScope) {
       StackHandleScope<kNumReferencesPerScope>* scope =
           new StackHandleScope<kNumReferencesPerScope>(self_);
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 71a69aa..e67ea3f 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -240,7 +240,7 @@
   }
 
   void AddIdList(mirror::ObjectArray<mirror::Object>* values)
-  SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const int32_t length = values->GetLength();
     for (int32_t i = 0; i < length; ++i) {
       AddObjectId(values->GetWithoutChecks(i));
@@ -429,8 +429,7 @@
   }
 
   void Dump()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_, Locks::alloc_tracker_lock_) {
+      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !Locks::alloc_tracker_lock_) {
     {
       MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
       if (Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) {
@@ -471,26 +470,26 @@
 
  private:
   static void VisitObjectCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
     DCHECK(arg != nullptr);
     reinterpret_cast<Hprof*>(arg)->DumpHeapObject(obj);
   }
 
   void DumpHeapObject(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpHeapClass(mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpHeapArray(mirror::Array* obj, mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ProcessHeap(bool header_first)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      REQUIRES(Locks::mutator_lock_) {
     // Reset current heap and object count.
     current_heap_ = HPROF_HEAP_DEFAULT;
     objects_in_segment_ = 0;
@@ -504,7 +503,7 @@
     }
   }
 
-  void ProcessBody() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void ProcessBody() REQUIRES(Locks::mutator_lock_) {
     Runtime* const runtime = Runtime::Current();
     // Walk the roots and the heap.
     output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime);
@@ -517,7 +516,7 @@
     output_->EndRecord();
   }
 
-  void ProcessHeader(bool string_first) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void ProcessHeader(bool string_first) REQUIRES(Locks::mutator_lock_) {
     // Write the header.
     WriteFixedHeader();
     // Write the string and class tables, and any stack traces, to the header.
@@ -536,7 +535,7 @@
     output_->EndRecord();
   }
 
-  void WriteClassTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void WriteClassTable() SHARED_REQUIRES(Locks::mutator_lock_) {
     for (const auto& p : classes_) {
       mirror::Class* c = p.first;
       HprofClassSerialNumber sn = p.second;
@@ -585,11 +584,11 @@
   }
 
   void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
                       uint32_t thread_serial);
 
-  HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
     if (c != nullptr) {
       auto it = classes_.find(c);
       if (it == classes_.end()) {
@@ -604,7 +603,7 @@
   }
 
   HprofStackTraceSerialNumber LookupStackTraceSerialNumber(const mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     auto r = allocation_records_.find(obj);
     if (r == allocation_records_.end()) {
       return kHprofNullStackTrace;
@@ -616,7 +615,7 @@
     }
   }
 
-  HprofStringId LookupStringId(mirror::String* string) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  HprofStringId LookupStringId(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_) {
     return LookupStringId(string->ToModifiedUtf8());
   }
 
@@ -634,7 +633,7 @@
     return id;
   }
 
-  HprofStringId LookupClassNameId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  HprofStringId LookupClassNameId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
     return LookupStringId(PrettyDescriptor(c));
   }
 
@@ -662,7 +661,7 @@
     __ AddU4(static_cast<uint32_t>(nowMs & 0xFFFFFFFF));
   }
 
-  void WriteStackTraces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void WriteStackTraces() SHARED_REQUIRES(Locks::mutator_lock_) {
     // Write a dummy stack trace record so the analysis tools don't freak out.
     output_->StartNewRecord(HPROF_TAG_STACK_TRACE, kHprofTime);
     __ AddStackTraceSerialNumber(kHprofNullStackTrace);
@@ -725,7 +724,7 @@
   }
 
   bool DumpToDdmsBuffered(size_t overall_size ATTRIBUTE_UNUSED, size_t max_length ATTRIBUTE_UNUSED)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      REQUIRES(Locks::mutator_lock_) {
     LOG(FATAL) << "Unimplemented";
     UNREACHABLE();
     //        // Send the data off to DDMS.
@@ -738,7 +737,7 @@
   }
 
   bool DumpToFile(size_t overall_size, size_t max_length)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      REQUIRES(Locks::mutator_lock_) {
     // Where exactly are we writing to?
     int out_fd;
     if (fd_ >= 0) {
@@ -787,7 +786,7 @@
   }
 
   bool DumpToDdmsDirect(size_t overall_size, size_t max_length, uint32_t chunk_type)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      REQUIRES(Locks::mutator_lock_) {
     CHECK(direct_to_ddms_);
     JDWP::JdwpState* state = Dbg::GetJdwpState();
     CHECK(state != nullptr);
@@ -818,7 +817,7 @@
   }
 
   void PopulateAllocationTrackingTraces()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::alloc_tracker_lock_) {
+      REQUIRES(Locks::mutator_lock_, Locks::alloc_tracker_lock_) {
     gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
     CHECK(records != nullptr);
     HprofStackTraceSerialNumber next_trace_sn = kHprofNullStackTrace + 1;
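 
The Dump() hunk above also shows that one REQUIRES list can mix polarities, folding the old EXCLUSIVE_LOCKS_REQUIRED + LOCKS_EXCLUDED pair into a single attribute. Continuing the sketch (dump_lock is hypothetical):

  Mutex dump_lock;

  // Must hold gc_lock exclusively, and must NOT hold dump_lock on entry
  // because it is acquired (and released) inside.
  void DumpAll() REQUIRES(gc_lock, !dump_lock) {
    dump_lock.Lock();
    ++root_count;  // OK: gc_lock is held exclusively by contract.
    dump_lock.Unlock();
  }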
diff --git a/runtime/image.h b/runtime/image.h
index d856f21..cc98ba6 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -156,9 +156,9 @@
   }
 
   mirror::Object* GetImageRoot(ImageRoot image_root) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::ObjectArray<mirror::Object>* GetImageRoots() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void RelocateImage(off_t delta);
 
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 20e4222..75fc84b 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -32,10 +32,10 @@
 class MutatorLockedDumpable {
  public:
   explicit MutatorLockedDumpable(T& value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) {
+      SHARED_REQUIRES(Locks::mutator_lock_) : value_(value) {
   }
 
-  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_) {
     value_.Dump(os);
   }
 
@@ -47,7 +47,7 @@
 
 template<typename T>
 std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs)
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis
+// TODO: should be SHARED_REQUIRES(Locks::mutator_lock_) however annotalysis
 //       currently fails for this.
     NO_THREAD_SAFETY_ANALYSIS {
   rhs.Dump(os);
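 
The TODO above keeps NO_THREAD_SAFETY_ANALYSIS because annotalysis cannot yet prove this operator<< correct; the attribute simply switches the checker off for one function body. Sketch of the escape hatch (names from the earlier sketches):

  #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

  // Unchecked: reads root_count without gc_lock and is not flagged.
  // Use sparingly, and say in a comment why it is actually safe.
  int CountRootsUnchecked() NO_THREAD_SAFETY_ANALYSIS {
    return root_count;
  }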
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index dea5dfd..798b48c 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -199,7 +199,7 @@
 static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
 class IrtEntry {
  public:
-  void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
     ++serial_;
     if (serial_ == kIRTPrevCount) {
       serial_ = 0;
@@ -228,11 +228,11 @@
 class IrtIterator {
  public:
   explicit IrtIterator(IrtEntry* table, size_t i, size_t capacity)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : table_(table), i_(i), capacity_(capacity) {
   }
 
-  IrtIterator& operator++() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  IrtIterator& operator++() SHARED_REQUIRES(Locks::mutator_lock_) {
     ++i_;
     return *this;
   }
@@ -278,7 +278,7 @@
    * failed during expansion).
    */
   IndirectRef Add(uint32_t cookie, mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Given an IndirectRef in the table, return the Object it refers to.
@@ -286,14 +286,14 @@
    * Returns kInvalidIndirectRefObject if iref is invalid.
    */
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  mirror::Object* Get(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_)
       ALWAYS_INLINE;
 
   // Synchronized get which reads a reference, acquiring a lock if necessary.
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
                                   IndirectRef iref) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return Get<kReadBarrierOption>(iref);
   }
 
@@ -302,7 +302,7 @@
    *
    * Updates an existing indirect reference to point to a new object.
    */
-  void Update(IndirectRef iref, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Update(IndirectRef iref, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Remove an existing entry.
@@ -317,7 +317,7 @@
 
   void AssertEmpty();
 
-  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Return the #of entries in the entire table.  This includes holes, and
@@ -337,7 +337,7 @@
   }
 
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   uint32_t GetSegmentState() const {
     return segment_state_.all;
@@ -352,7 +352,7 @@
   }
 
   // Release pages past the end of the table that may have previously held references.
-  void Trim() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Trim() SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   // Extract the table index from an indirect reference.
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index c20002b..f376ec0 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -26,7 +26,7 @@
 class IndirectReferenceTableTest : public CommonRuntimeTest {};
 
 static void CheckDump(IndirectReferenceTable* irt, size_t num_objects, size_t num_unique)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::ostringstream oss;
   irt->Dump(oss);
   if (num_objects == 0) {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index abe9dc2..9711cf2 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -50,7 +50,7 @@
     StackVisitor::StackWalkKind::kSkipInlinedFrames;
 
 static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
-    EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    REQUIRES(Locks::mutator_lock_) {
   Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
   instrumentation->InstallStubsForClass(klass);
   return true;  // we visit all classes.
@@ -87,7 +87,7 @@
 }
 
 static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Runtime* const runtime = Runtime::Current();
   jit::Jit* jit = runtime->GetJit();
   if (jit != nullptr) {
@@ -151,7 +151,7 @@
 // Since we may already have done this previously, we need to push a new instrumentation frame
 // before existing instrumentation frames.
 static void InstrumentationInstallStack(Thread* thread, void* arg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   struct InstallStackVisitor FINAL : public StackVisitor {
     InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
         : StackVisitor(thread_in, context, kInstrumentationStackWalk),
@@ -161,7 +161,7 @@
           last_return_pc_(0) {
     }
 
-    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
       ArtMethod* m = GetMethod();
       if (m == nullptr) {
         if (kVerboseInstrumentation) {
@@ -291,7 +291,7 @@
 
 // Removes the instrumentation exit pc as the return PC for every quick frame.
 static void InstrumentationRestoreStack(Thread* thread, void* arg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   struct RestoreStackVisitor FINAL : public StackVisitor {
     RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
                         Instrumentation* instrumentation)
@@ -302,7 +302,7 @@
           instrumentation_stack_(thread_in->GetInstrumentationStack()),
           frames_removed_(0) {}
 
-    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
       if (instrumentation_stack_->size() == 0) {
         return false;  // Stop.
       }
@@ -931,7 +931,7 @@
 
 static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame,
                             int delta)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) + delta;
   if (frame_id != instrumentation_frame.frame_id_) {
     LOG(ERROR) << "Expected frame_id=" << frame_id << " but found "
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index db8e9c2..93ff567 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -63,24 +63,24 @@
   // Call-back for when a method is entered.
   virtual void MethodEntered(Thread* thread, mirror::Object* this_object,
                              ArtMethod* method,
-                             uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+                             uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   // Call-back for when a method is exited.
   virtual void MethodExited(Thread* thread, mirror::Object* this_object,
                             ArtMethod* method, uint32_t dex_pc,
                             const JValue& return_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   // Call-back for when a method is popped due to an exception throw. A method will either cause a
   // MethodExited call-back or a MethodUnwind call-back when its activation is removed.
   virtual void MethodUnwind(Thread* thread, mirror::Object* this_object,
                             ArtMethod* method, uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   // Call-back for when the dex pc moves in a method.
   virtual void DexPcMoved(Thread* thread, mirror::Object* this_object,
                           ArtMethod* method, uint32_t new_dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   // Call-back for when we read from a field.
   virtual void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method,
@@ -92,11 +92,11 @@
 
   // Call-back when an exception is caught.
   virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   // Call-back for when we get a backward branch.
   virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 };
 
 // Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -129,90 +129,83 @@
   // for saying you should have suspended all threads (installing stubs while threads are running
   // will break).
   void AddListener(InstrumentationListener* listener, uint32_t events)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
 
   // Removes a listener possibly removing instrumentation stubs.
   void RemoveListener(InstrumentationListener* listener, uint32_t events)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
 
   // Deoptimization.
   void EnableDeoptimization()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(deoptimized_methods_lock_);
+      REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_);
   void DisableDeoptimization(const char* key)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(deoptimized_methods_lock_);
+      REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_);
   bool AreAllMethodsDeoptimized() const {
     return interpreter_stubs_installed_;
   }
-  bool ShouldNotifyMethodEnterExitEvents() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool ShouldNotifyMethodEnterExitEvents() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Executes everything with interpreter.
   void DeoptimizeEverything(const char* key)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_,
+               !deoptimized_methods_lock_);
 
   // Executes everything with compiled code (or interpreter if there is no code).
   void UndeoptimizeEverything(const char* key)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_,
+               !deoptimized_methods_lock_);
 
   // Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static
   // method (except a class initializer) set to the resolution trampoline will be deoptimized only
   // once its declaring class is initialized.
   void Deoptimize(ArtMethod* method)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
 
   // Undeoptimize the method by restoring its entrypoints. Nevertheless, a static method
   // (except a class initializer) set to the resolution trampoline will be updated only once its
   // declaring class is initialized.
   void Undeoptimize(ArtMethod* method)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
 
   // Indicates whether the method has been deoptimized so it is executed with the interpreter.
   bool IsDeoptimized(ArtMethod* method)
-      LOCKS_EXCLUDED(deoptimized_methods_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!deoptimized_methods_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
   void EnableMethodTracing(const char* key,
                            bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_,
+               !deoptimized_methods_lock_);
 
   // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
   void DisableMethodTracing(const char* key)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_,
+               !deoptimized_methods_lock_);
 
   InterpreterHandlerTable GetInterpreterHandlerTable() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return interpreter_handler_table_;
   }
 
-  void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_);
-  void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_);
+  void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
+  void UninstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
   void InstrumentQuickAllocEntryPointsLocked()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::instrument_entrypoints_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
+      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
+               !Locks::runtime_shutdown_lock_);
   void UninstrumentQuickAllocEntryPointsLocked()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::instrument_entrypoints_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
-  void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
+               !Locks::runtime_shutdown_lock_);
+  void ResetQuickAllocEntryPoints() REQUIRES(Locks::runtime_shutdown_lock_);
 
   // Update the code of a method respecting any installed stubs.
   void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
 
   // Get the quick code for the given method. More efficient than asking the class linker as it
   // will short-cut to GetCode if instrumentation and static method resolution stubs aren't
   // installed.
   const void* GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ForceInterpretOnly() {
     interpret_only_ = true;
@@ -232,39 +225,39 @@
     return instrumentation_stubs_installed_;
   }
 
-  bool HasMethodEntryListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasMethodEntryListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_method_entry_listeners_;
   }
 
-  bool HasMethodExitListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasMethodExitListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_method_exit_listeners_;
   }
 
-  bool HasMethodUnwindListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasMethodUnwindListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_method_unwind_listeners_;
   }
 
-  bool HasDexPcListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasDexPcListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_dex_pc_listeners_;
   }
 
-  bool HasFieldReadListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasFieldReadListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_field_read_listeners_;
   }
 
-  bool HasFieldWriteListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasFieldWriteListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_field_write_listeners_;
   }
 
-  bool HasExceptionCaughtListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasExceptionCaughtListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_exception_caught_listeners_;
   }
 
-  bool HasBackwardBranchListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasBackwardBranchListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_backward_branch_listeners_;
   }
 
-  bool IsActive() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsActive() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
         have_field_read_listeners_ || have_field_write_listeners_ ||
         have_exception_caught_listeners_ || have_method_unwind_listeners_;
@@ -274,7 +267,7 @@
   // listeners into executing code and get method enter events for methods already on the stack.
   void MethodEnterEvent(Thread* thread, mirror::Object* this_object,
                         ArtMethod* method, uint32_t dex_pc) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(HasMethodEntryListeners())) {
       MethodEnterEventImpl(thread, this_object, method, dex_pc);
     }
@@ -284,7 +277,7 @@
   void MethodExitEvent(Thread* thread, mirror::Object* this_object,
                        ArtMethod* method, uint32_t dex_pc,
                        const JValue& return_value) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(HasMethodExitListeners())) {
       MethodExitEventImpl(thread, this_object, method, dex_pc, return_value);
     }
@@ -293,12 +286,12 @@
   // Inform listeners that a method has been exited due to an exception.
   void MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
                          ArtMethod* method, uint32_t dex_pc) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Inform listeners that the dex pc has moved (only supported by the interpreter).
   void DexPcMovedEvent(Thread* thread, mirror::Object* this_object,
                        ArtMethod* method, uint32_t dex_pc) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(HasDexPcListeners())) {
       DexPcMovedEventImpl(thread, this_object, method, dex_pc);
     }
@@ -306,7 +299,7 @@
 
   // Inform listeners that a backward branch has been taken (only supported by the interpreter).
   void BackwardBranch(Thread* thread, ArtMethod* method, int32_t offset) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(HasBackwardBranchListeners())) {
       BackwardBranchImpl(thread, method, offset);
     }
@@ -316,7 +309,7 @@
   void FieldReadEvent(Thread* thread, mirror::Object* this_object,
                       ArtMethod* method, uint32_t dex_pc,
                       ArtField* field) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(HasFieldReadListeners())) {
       FieldReadEventImpl(thread, this_object, method, dex_pc, field);
     }
@@ -326,7 +319,7 @@
   void FieldWriteEvent(Thread* thread, mirror::Object* this_object,
                        ArtMethod* method, uint32_t dex_pc,
                        ArtField* field, const JValue& field_value) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (UNLIKELY(HasFieldWriteListeners())) {
       FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
     }
@@ -334,30 +327,31 @@
 
   // Inform listeners that an exception was caught.
   void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Called when an instrumented method is entered. The intended link register (lr) is saved so
   // that returning causes a branch to the method exit stub. Generates method enter events.
   void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
                                      ArtMethod* method, uintptr_t lr,
                                      bool interpreter_entry)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Called when an instrumented method is exited. Removes the pushed instrumentation frame,
   // returning the intended link register. Generates method exit events.
   TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
                                              uint64_t gpr_result, uint64_t fpr_result)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
 
   // Pops an instrumentation frame from the current thread and generate an unwind event.
   void PopMethodForUnwind(Thread* self, bool is_deoptimization) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Call-back for ConfigureStubs.
-  void InstallStubsForClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void InstallStubsForClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!deoptimized_methods_lock_);
 
   void InstallStubsForMethod(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
 
  private:
   InstrumentationLevel GetCurrentInstrumentationLevel() const;
@@ -368,11 +362,10 @@
   // instrumentation level it needs. Therefore the current instrumentation level
   // becomes the highest instrumentation level required by a client.
   void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_,
-                     deoptimized_methods_lock_);
+      REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_, !Locks::thread_list_lock_,
+               !Locks::classlinker_classes_lock_);
 
-  void UpdateInterpreterHandlerTable() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void UpdateInterpreterHandlerTable() REQUIRES(Locks::mutator_lock_) {
     interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable;
   }
 
@@ -382,38 +375,36 @@
 
   void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
                             ArtMethod* method, uint32_t dex_pc) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
                            ArtMethod* method,
                            uint32_t dex_pc, const JValue& return_value) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
                            ArtMethod* method, uint32_t dex_pc) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void BackwardBranchImpl(Thread* thread, ArtMethod* method, int32_t offset) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
                            ArtMethod* method, uint32_t dex_pc,
                            ArtField* field) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
                            ArtMethod* method, uint32_t dex_pc,
                            ArtField* field, const JValue& field_value) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Read barrier-aware utility functions for accessing deoptimized_methods_
   bool AddDeoptimizedMethod(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
   bool IsDeoptimizedMethod(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
   bool RemoveDeoptimizedMethod(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
   ArtMethod* BeginDeoptimizedMethod()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
   bool IsDeoptimizedMethodsEmpty() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
 
   // Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
   bool instrumentation_stubs_installed_;
@@ -508,7 +499,7 @@
         interpreter_entry_(interpreter_entry) {
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Object* this_object_;
   ArtMethod* method_;
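 
The new !deoptimized_methods_lock_ clauses on ConfigureStubs(), Deoptimize(), EnableMethodTracing() and friends are the price of -Wthread-safety-negative: negative capabilities are checked transitively, so any function whose callees may acquire a lock has to promise that the lock is not already held. (Note also IsDeoptimized() above, where one declaration combines SHARED_REQUIRES on the mutator lock with REQUIRES(!...) on the private lock.) Roughly, continuing the sketch:

  void Leaf() REQUIRES(!dump_lock) {
    dump_lock.Lock();
    dump_lock.Unlock();
  }

  void Caller() REQUIRES(!dump_lock) {  // must restate the negative capability
    Leaf();
  }

  // void BadCaller() { Leaf(); }
  // warns under -Wthread-safety-negative, roughly:
  //   calling function 'Leaf' requires negative capability '!dump_lock'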
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 85bb8c4..b49f7e1 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -44,7 +44,7 @@
                      mirror::Object* this_object ATTRIBUTE_UNUSED,
                      ArtMethod* method ATTRIBUTE_UNUSED,
                      uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     received_method_enter_event = true;
   }
 
@@ -53,7 +53,7 @@
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     const JValue& return_value ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     received_method_exit_event = true;
   }
 
@@ -61,7 +61,7 @@
                     mirror::Object* this_object ATTRIBUTE_UNUSED,
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     received_method_unwind_event = true;
   }
 
@@ -69,7 +69,7 @@
                   mirror::Object* this_object ATTRIBUTE_UNUSED,
                   ArtMethod* method ATTRIBUTE_UNUSED,
                   uint32_t new_dex_pc ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     received_dex_pc_moved_event = true;
   }
 
@@ -78,7 +78,7 @@
                  ArtMethod* method ATTRIBUTE_UNUSED,
                  uint32_t dex_pc ATTRIBUTE_UNUSED,
                  ArtField* field ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     received_field_read_event = true;
   }
 
@@ -88,20 +88,20 @@
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
                     ArtField* field ATTRIBUTE_UNUSED,
                     const JValue& field_value ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     received_field_written_event = true;
   }
 
   void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
                        mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     received_exception_caught_event = true;
   }
 
   void BackwardBranch(Thread* thread ATTRIBUTE_UNUSED,
                       ArtMethod* method ATTRIBUTE_UNUSED,
                       int32_t dex_pc_offset ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     received_backward_branch_event = true;
   }
 
@@ -198,7 +198,7 @@
   }
 
   void DeoptimizeMethod(Thread* self, ArtMethod* method, bool enable_deoptimization)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     self->TransitionFromRunnableToSuspended(kSuspended);
@@ -213,7 +213,7 @@
 
   void UndeoptimizeMethod(Thread* self, ArtMethod* method,
                           const char* key, bool disable_deoptimization)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     self->TransitionFromRunnableToSuspended(kSuspended);
@@ -227,7 +227,7 @@
   }
 
   void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+        SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     self->TransitionFromRunnableToSuspended(kSuspended);
@@ -241,7 +241,7 @@
   }
 
   void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+        SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     self->TransitionFromRunnableToSuspended(kSuspended);
@@ -255,7 +255,7 @@
   }
 
   void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+        SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     self->TransitionFromRunnableToSuspended(kSuspended);
@@ -266,7 +266,7 @@
   }
 
   void DisableMethodTracing(Thread* self, const char* key)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+        SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     self->TransitionFromRunnableToSuspended(kSuspended);
@@ -278,7 +278,7 @@
 
  private:
   static bool HasEventListener(const instrumentation::Instrumentation* instr, uint32_t event_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     switch (event_type) {
       case instrumentation::Instrumentation::kMethodEntered:
         return instr->HasMethodEntryListeners();
@@ -305,7 +305,7 @@
   static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type,
                           Thread* self, ArtMethod* method, mirror::Object* obj,
                           uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     switch (event_type) {
       case instrumentation::Instrumentation::kMethodEntered:
         instr->MethodEnterEvent(self, obj, method, dex_pc);
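
The renamings below are mechanical, but the macro layer behind them is worth keeping in mind. The definitions live in base/macros.h, which this excerpt does not show; the following is a minimal sketch of the mapping, assuming clang's standard capability attribute spellings (the wrapper macro name here is illustrative, not the one in the tree):

  #if defined(__clang__)
  #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
  #else
  #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op on non-clang compilers
  #endif

  // Caller must hold the capability exclusively. REQUIRES(!x) states the
  // negative capability "caller must not hold x", which is what
  // -Wthread-safety-negative checks.
  #define REQUIRES(...) \
    THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

  // Caller must hold the capability at least in shared (reader) mode.
  #define SHARED_REQUIRES(...) \
    THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

Under that mapping, SHARED_LOCKS_REQUIRED(x) becomes SHARED_REQUIRES(x), EXCLUSIVE_LOCKS_REQUIRED(x) becomes REQUIRES(x), and LOCKS_EXCLUDED(x) becomes REQUIRES(!x) throughout the hunks that follow.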
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index ef08d74..4819b9f 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -58,70 +58,68 @@
 
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
   mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Only used by image writer.
-  mirror::String* InternImageString(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::String* InternImageString(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
-  mirror::String* InternStrong(const char* utf8_data)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
-  mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::String* InternStrong(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Interns a potentially new string in the 'weak' table. May cause thread suspension.
-  mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::String* InternWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SweepInternTableWeaks(IsMarkedVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SweepInternTableWeaks(IsMarkedVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::intern_table_lock_);
 
-  bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool ContainsWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Total number of interned strings.
-  size_t Size() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+  size_t Size() const REQUIRES(!Locks::intern_table_lock_);
   // Total number of weakly live interned strings.
-  size_t StrongSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+  size_t StrongSize() const REQUIRES(!Locks::intern_table_lock_);
   // Total number of strongly live interned strings.
-  size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+  size_t WeakSize() const REQUIRES(!Locks::intern_table_lock_);
 
   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpForSigQuit(std::ostream& os) const;
 
-  void DisallowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void EnsureNewInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void BroadcastForNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void EnsureNewWeakInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DisallowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
+  void AllowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
+  void EnsureNewInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
+  void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
+  void EnsureNewWeakInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Adds all of the resolved image strings from the image space into the intern table. The
   // advantage of doing this is preventing expensive DexFile::FindStringId calls.
   void AddImageStringsToTable(gc::space::ImageSpace* image_space)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
   // Copy the post zygote tables to pre zygote to save memory by preventing dirty pages.
   void SwapPostZygoteWithPreZygote()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
 
   // Add an intern table which was serialized to the image.
   void AddImageInternTable(gc::space::ImageSpace* image_space)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
 
   // Read the intern table from memory. The elements aren't copied; the intern hash set data will
   // point somewhere within ptr. Only reads the strong interns.
-  size_t ReadFromMemory(const uint8_t* ptr) LOCKS_EXCLUDED(Locks::intern_table_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t ReadFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Write the post zygote intern table to a pointer. Only writes the strong interns since it is
   // expected that there are no weak interns, as this is called from the image writer.
-  size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::intern_table_lock_);
+  size_t WriteToMemory(uint8_t* ptr) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::intern_table_lock_);
 
   // Change the weak root state. May broadcast to waiters.
   void ChangeWeakRootState(gc::WeakRootState new_state)
-      LOCKS_EXCLUDED(Locks::intern_table_lock_);
+      REQUIRES(!Locks::intern_table_lock_);
 
  private:
   class StringHashEquals {
@@ -144,39 +142,39 @@
   // weak interns and strong interns.
   class Table {
    public:
-    mirror::String* Find(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
-    void Insert(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+    mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES(Locks::intern_table_lock_);
+    void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES(Locks::intern_table_lock_);
     void Remove(mirror::String* s)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES(Locks::intern_table_lock_);
     void VisitRoots(RootVisitor* visitor)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES(Locks::intern_table_lock_);
     void SweepWeaks(IsMarkedVisitor* visitor)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
-    void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
-    size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES(Locks::intern_table_lock_);
+    void SwapPostZygoteWithPreZygote() REQUIRES(Locks::intern_table_lock_);
+    size_t Size() const REQUIRES(Locks::intern_table_lock_);
     // ReadIntoPreZygoteTable is called from ReadFromMemory, which happens during runtime creation
     // when we load the image intern table. Returns how many bytes were read.
     size_t ReadIntoPreZygoteTable(const uint8_t* ptr)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+        REQUIRES(Locks::intern_table_lock_)
+        SHARED_REQUIRES(Locks::mutator_lock_);
     // The image writer calls WriteFromPostZygoteTable via WriteToMemory; it writes the interns in
     // the post zygote table. Returns how many bytes were written.
     size_t WriteFromPostZygoteTable(uint8_t* ptr)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+        REQUIRES(Locks::intern_table_lock_)
+        SHARED_REQUIRES(Locks::mutator_lock_);
 
    private:
     typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
         TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
 
     void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES(Locks::intern_table_lock_);
 
     // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages
     // caused by modifying the zygote intern table hash table. The pre zygote table are the
@@ -188,57 +186,55 @@
 
   // Insert if non-null, otherwise return null.
   mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
-      LOCKS_EXCLUDED(Locks::intern_table_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::String* LookupStrong(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   mirror::String* LookupWeak(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   mirror::String* InsertStrong(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   mirror::String* InsertWeak(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   void RemoveStrong(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   void RemoveWeak(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
 
   // Transaction rollback access.
   mirror::String* LookupStringFromImage(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   mirror::String* InsertStrongFromTransaction(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   mirror::String* InsertWeakFromTransaction(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   void RemoveStrongFromTransaction(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   void RemoveWeakFromTransaction(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::intern_table_lock_);
   friend class Transaction;
 
   size_t ReadFromMemoryLocked(const uint8_t* ptr)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Change the weak root state. May broadcast to waiters.
   void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      REQUIRES(Locks::intern_table_lock_);
 
   // Wait until we can read weak roots.
-  void WaitUntilAccessible(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void WaitUntilAccessible(Thread* self)
+      REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
   bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
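
Note that replacing LOCKS_EXCLUDED with REQUIRES(!...) in this header is a semantic strengthening, not just a rename: LOCKS_EXCLUDED was advisory and did not propagate, while a negative capability must be re-established by every caller once -Wthread-safety-negative is enabled. A self-contained sketch with toy types (not ART code; compiles with clang++ -c -Wthread-safety -Wthread-safety-negative):

  struct __attribute__((capability("mutex"))) Mutex {
    void Lock() __attribute__((acquire_capability()));
    void Unlock() __attribute__((release_capability()));
  };

  Mutex table_lock;

  void InsertLocked() __attribute__((requires_capability(table_lock)));

  // The moral equivalent of REQUIRES(!table_lock): the body acquires the
  // lock, so the caller must arrive without it.
  void Insert() __attribute__((requires_capability(!table_lock))) {
    table_lock.Lock();
    InsertLocked();  // OK: table_lock is held here.
    table_lock.Unlock();
  }

  void Caller() {  // Warns: Caller() cannot prove '!table_lock'; it needs
    Insert();      // the same negative-capability annotation itself.
  }

That propagation is why Size(), SweepInternTableWeaks() and friends now advertise REQUIRES(!Locks::intern_table_lock_): each takes the lock internally, and their callers are now checked for it.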
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index c987180..b60b32d 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -62,7 +62,7 @@
 
 class TestPredicate : public IsMarkedVisitor {
  public:
-  mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     bool erased = false;
     for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
       if (*it == s) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 26860e7..6c6232c 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -28,7 +28,7 @@
 
 static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
                            Object* receiver, uint32_t* args, JValue* result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler,
   //       it should be removed and JNI compiled stubs used instead.
   ScopedObjectAccessUnchecked soa(self);
@@ -240,23 +240,23 @@
   UNREACHABLE();
 }
 // Explicit definitions of ExecuteGotoImpl.
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_REQUIRES(Locks::mutator_lock_)
 JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
                                     ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_REQUIRES(Locks::mutator_lock_)
 JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
                                      ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_REQUIRES(Locks::mutator_lock_)
 JValue ExecuteGotoImpl<true, true>(Thread* self,  const DexFile::CodeItem* code_item,
                                    ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_REQUIRES(Locks::mutator_lock_)
 JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
                                     ShadowFrame& shadow_frame, JValue result_register);
 #endif
 
 static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame,
                       JValue result_register)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
                              ShadowFrame& shadow_frame, JValue result_register) {
@@ -395,7 +395,7 @@
 }
 
 void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   JValue value;
   // Set value to last known result in case the shadow frame chain is empty.
   value.SetJ(ret_val->GetJ());
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 446c5bb..61140a2 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -35,26 +35,26 @@
 // Called by ArtMethod::Invoke, shadow frames arguments are taken from the args array.
 extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
                                        mirror::Object* receiver, uint32_t* args, JValue* result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 extern void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame,
                                            JValue* ret_val)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 extern JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* code_item,
                                              ShadowFrame* shadow_frame)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 
 }  // namespace interpreter
 
 extern "C" void artInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
                                                   ShadowFrame* shadow_frame, JValue* result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item,
                                                    ShadowFrame* shadow_frame, JValue* result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 }  // namespace art
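
Almost every interpreter entry point above carries SHARED_REQUIRES(Locks::mutator_lock_). The mutator lock is a reader-writer capability: running Java threads hold it shared, and the GC takes it exclusively to suspend them, so shared and exclusive requirements are checked differently. A toy sketch of that distinction (illustrative names, not ART code):

  struct __attribute__((capability("mutex"))) RWMutex {
    void ReaderLock() __attribute__((acquire_shared_capability()));
    void ReaderUnlock() __attribute__((release_shared_capability()));
    void WriterLock() __attribute__((acquire_capability()));
    void WriterUnlock() __attribute__((release_capability()));
  };

  RWMutex mutator_lock;

  int ReadHeap() __attribute__((requires_shared_capability(mutator_lock)));
  void MoveObjects() __attribute__((requires_capability(mutator_lock)));

  void Interpret() {
    mutator_lock.ReaderLock();
    ReadHeap();        // OK: a reader lock satisfies the shared requirement.
    // MoveObjects();  // Would warn: the capability is only held shared.
    mutator_lock.ReaderUnlock();
  }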
 
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 0980ea1..9de9e8a 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -192,7 +192,7 @@
 
 template<Primitive::Type field_type>
 static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   JValue field_value;
   switch (field_type) {
     case Primitive::kPrimBoolean:
@@ -450,7 +450,7 @@
 // Assign register 'src_reg' from shadow_frame to register 'dest_reg' of new_shadow_frame.
 static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
                                   size_t dest_reg, size_t src_reg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Uint required, so that sign extension does not make this wrong on 64b systems
   uint32_t src_value = shadow_frame.GetVReg(src_reg);
   mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
@@ -482,7 +482,7 @@
 }
 
 // Separate declaration is required solely for the attributes.
-template<bool is_range, bool do_assignability_check> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<bool is_range, bool do_assignability_check> SHARED_REQUIRES(Locks::mutator_lock_)
 static inline bool DoCallCommon(ArtMethod* called_method,
                                 Thread* self,
                                 ShadowFrame& shadow_frame,
@@ -491,7 +491,7 @@
                                 uint32_t arg[Instruction::kMaxVarArgRegs],
                                 uint32_t vregC) ALWAYS_INLINE;
 
-SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+SHARED_REQUIRES(Locks::mutator_lock_)
 static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) ALWAYS_INLINE;
 
 static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) {
@@ -834,7 +834,7 @@
   return true;
 }
 
-// TODO fix thread analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+// TODO fix thread analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
 template<typename T>
 static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array, int32_t count)
     NO_THREAD_SAFETY_ANALYSIS {
@@ -845,7 +845,7 @@
 }
 
 void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(Runtime::Current()->IsActiveTransaction());
   DCHECK(array != nullptr);
   DCHECK_LE(count, array->GetLength());
@@ -884,7 +884,7 @@
 
 // Explicit DoCall template function declarations.
 #define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check)                      \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
   bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self,              \
                                                   ShadowFrame& shadow_frame,                    \
                                                   const Instruction* inst, uint16_t inst_data,  \
@@ -897,7 +897,7 @@
 
 // Explicit DoLambdaCall template function declarations.
 #define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check)               \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
   bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self,        \
                                                         ShadowFrame& shadow_frame,              \
                                                         const Instruction* inst,                \
@@ -911,7 +911,7 @@
 
 // Explicit DoFilledNewArray template function declarations.
 #define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active)       \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                            \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                  \
   bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst,         \
                                                                  const ShadowFrame& shadow_frame, \
                                                                  Thread* self, JValue* result)
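
Two placement details recur in this file. Attributes that cannot sit on a definition ride on a separate declaration (hence the "Separate declaration is required solely for the attributes" comment above), and on an explicit instantiation the annotation follows the template keyword. Condensed from the hunks above, not new code:

  // Standalone declaration carries the attributes; the definition below it
  // is checked against them.
  SHARED_REQUIRES(Locks::mutator_lock_)
  static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) ALWAYS_INLINE;

  static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) {
    /* body analyzed as if the mutator lock were held shared */
  }

  // Explicit instantiation: 'template', then the annotation, then the
  // declaration proper, as in the EXPLICIT_*_TEMPLATE_DECL macros.
  template SHARED_REQUIRES(Locks::mutator_lock_)
  bool DoCall<false, false>(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
                            const Instruction* inst, uint16_t inst_data, JValue* result);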
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 9babb18..2486a98 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -72,7 +72,7 @@
                               ShadowFrame& shadow_frame, JValue result_register);
 
 void ThrowNullPointerExceptionFromInterpreter()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
   ref->MonitorEnter(self);
@@ -84,13 +84,13 @@
 
 void AbortTransactionF(Thread* self, const char* fmt, ...)
     __attribute__((__format__(__printf__, 2, 3)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 void AbortTransactionV(Thread* self, const char* fmt, va_list args)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Invokes the given method. This is part of the invocation support and is used by DoInvoke and
 // DoInvokeVirtualQuick functions.
@@ -114,7 +114,7 @@
 //
 // If the validation fails, return false and raise an exception.
 static inline bool IsValidLambdaTargetOrThrow(ArtMethod* called_method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   bool success = false;
 
   if (UNLIKELY(called_method == nullptr)) {
@@ -191,7 +191,7 @@
 // (Exceptions are thrown by creating a new exception and then being put in the thread TLS)
 static inline ArtMethod* ReadLambdaClosureFromVRegsOrThrow(ShadowFrame& shadow_frame,
                                                            uint32_t vreg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // TODO(iam): Introduce a closure abstraction that will contain the captured variables
   // instead of just an ArtMethod.
   // This is temporarily using 2 vregs because a native ArtMethod can be up to 64-bit,
@@ -306,32 +306,32 @@
 // Returns true on success, otherwise throws an exception and returns false.
 template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
 bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
-                uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Handles iget-quick, iget-wide-quick and iget-object-quick instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<Primitive::Type field_type>
 bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Handles iput-XXX and sput-XXX instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
          bool transaction_active>
 bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
-                uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Handles iput-quick, iput-wide-quick and iput-object-quick instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<Primitive::Type field_type, bool transaction_active>
 bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 
 // Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
 // java.lang.String class is initialized.
 static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Class* java_lang_string_class = String::GetJavaLangString();
   if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -358,7 +358,7 @@
 // Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
 static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
                                int32_t dividend, int32_t divisor)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
   if (UNLIKELY(divisor == 0)) {
     ThrowArithmeticExceptionDivideByZero();
@@ -376,7 +376,7 @@
 // Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
 static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
                                   int32_t dividend, int32_t divisor)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
   if (UNLIKELY(divisor == 0)) {
     ThrowArithmeticExceptionDivideByZero();
@@ -394,7 +394,7 @@
 // Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
 static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
                                 int64_t dividend, int64_t divisor)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   const int64_t kMinLong = std::numeric_limits<int64_t>::min();
   if (UNLIKELY(divisor == 0)) {
     ThrowArithmeticExceptionDivideByZero();
@@ -412,7 +412,7 @@
 // Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
 static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
                                    int64_t dividend, int64_t divisor)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   const int64_t kMinLong = std::numeric_limits<int64_t>::min();
   if (UNLIKELY(divisor == 0)) {
     ThrowArithmeticExceptionDivideByZero();
@@ -436,7 +436,7 @@
 // Returns the branch offset to the next instruction to execute.
 static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
                                      uint16_t inst_data)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
   const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
   int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
@@ -464,7 +464,7 @@
 // Returns the branch offset to the next instruction to execute.
 static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
                                      uint16_t inst_data)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
   const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
   int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
@@ -497,7 +497,7 @@
 
 template <bool _do_check>
 static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
-                               uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+                               uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_) {
   /*
    * box-lambda vA, vB /// opcode 0xf8, format 22x
    * - vA is the target register where the Object representation of the closure will be stored
@@ -529,7 +529,7 @@
   return true;
 }
 
-template <bool _do_check> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template <bool _do_check> SHARED_REQUIRES(Locks::mutator_lock_)
 static inline bool DoUnboxLambda(Thread* self,
                                  ShadowFrame& shadow_frame,
                                  const Instruction* inst,
@@ -565,15 +565,15 @@
 
 uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame,
     uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_);
 
 NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
   __attribute__((cold))
-  SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  SHARED_REQUIRES(Locks::mutator_lock_);
 
 static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
                                   const uint32_t dex_pc)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   constexpr bool kTracing = false;
   if (kTracing) {
 #define TRACE_LOG std::cerr
@@ -605,7 +605,7 @@
 
 // Explicitly instantiate all DoInvoke functions.
 #define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check)                      \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                     \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                           \
   bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame,      \
                                              const Instruction* inst, uint16_t inst_data,  \
                                              JValue* result)
@@ -626,7 +626,7 @@
 
 // Explicitly instantiate all DoInvokeVirtualQuick functions.
 #define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range)                    \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                               \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                     \
   bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame,      \
                                        const Instruction* inst, uint16_t inst_data,  \
                                        JValue* result)
@@ -637,7 +637,7 @@
 
 // Explicitly instantiate all DoCreateLambda functions.
 #define EXPLICIT_DO_CREATE_LAMBDA_DECL(_do_check)                                    \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                 \
+template SHARED_REQUIRES(Locks::mutator_lock_)                                       \
 bool DoCreateLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame,              \
                         const Instruction* inst)
 
@@ -647,7 +647,7 @@
 
 // Explicitly instantiate all DoInvokeLambda functions.
 #define EXPLICIT_DO_INVOKE_LAMBDA_DECL(_do_check)                                    \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                 \
+template SHARED_REQUIRES(Locks::mutator_lock_)                                       \
 bool DoInvokeLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
                                uint16_t inst_data, JValue* result);
 
@@ -657,7 +657,7 @@
 
 // Explicitly instantiate all DoBoxLambda functions.
 #define EXPLICIT_DO_BOX_LAMBDA_DECL(_do_check)                                                \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
+template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
 bool DoBoxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
                             uint16_t inst_data);
 
@@ -667,7 +667,7 @@
 
 // Explicitly instantiate all DoUnBoxLambda functions.
 #define EXPLICIT_DO_UNBOX_LAMBDA_DECL(_do_check)                                                \
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                            \
+template SHARED_REQUIRES(Locks::mutator_lock_)                                                  \
 bool DoUnboxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
                               uint16_t inst_data);
 
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index ec923b6..7027cbf 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -2536,16 +2536,16 @@
 }  // NOLINT(readability/fn_size)
 
 // Explicit definitions of ExecuteGotoImpl.
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
+template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
 JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
                                     ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
+template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
 JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
                                      ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template SHARED_REQUIRES(Locks::mutator_lock_)
 JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
                                    ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template SHARED_REQUIRES(Locks::mutator_lock_)
 JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
                                     ShadowFrame& shadow_frame, JValue result_register);
 
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 78090bb..544f788 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -2283,16 +2283,16 @@
 }  // NOLINT(readability/fn_size)
 
 // Explicit definitions of ExecuteSwitchImpl.
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
+template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
 JValue ExecuteSwitchImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
                                       ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR
+template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
 JValue ExecuteSwitchImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
                                        ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template SHARED_REQUIRES(Locks::mutator_lock_)
 JValue ExecuteSwitchImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
                                      ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template SHARED_REQUIRES(Locks::mutator_lock_)
 JValue ExecuteSwitchImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
                                       ShadowFrame& shadow_frame, JValue result_register);
 
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 43e24fa..22701ac 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -46,7 +46,7 @@
 
 static void AbortTransactionOrFail(Thread* self, const char* fmt, ...)
     __attribute__((__format__(__printf__, 2, 3)))
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) {
   va_list args;
@@ -69,7 +69,7 @@
                                       Handle<mirror::ClassLoader> class_loader, JValue* result,
                                       const std::string& method_name, bool initialize_class,
                                       bool abort_if_not_found)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   CHECK(className.Get() != nullptr);
   std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str()));
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -99,7 +99,7 @@
 // actually the transaction abort exception. This must not be wrapped, as it signals an
 // initialization abort.
 static void CheckExceptionGenerateClassNotFound(Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (self->IsExceptionPending()) {
     // If it is not the transaction abort exception, wrap it.
     std::string type(PrettyTypeOf(self->GetException()));
@@ -111,7 +111,7 @@
 }
 
 static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
   if (param == nullptr) {
     AbortTransactionOrFail(self, "Null-pointer in Class.forName.");
@@ -294,7 +294,7 @@
                                mirror::Array* src_array, int32_t src_pos,
                                mirror::Array* dst_array, int32_t dst_pos,
                                int32_t length)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) {
     AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.",
                            PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(),
@@ -490,7 +490,7 @@
 }
 
 static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   const DexFile* dex_file = dex_cache->GetDexFile();
   if (dex_file == nullptr) {
     return nullptr;
@@ -601,7 +601,7 @@
 
 static void UnstartedMemoryPeekArray(
     Primitive::Type type, Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   int64_t address_long = shadow_frame->GetVRegLong(arg_offset);
   mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 2);
   if (obj == nullptr) {
@@ -840,7 +840,7 @@
 // This allows getting the char array for the new style of String objects during compilation.
 void UnstartedRuntime::UnstartedStringToCharArray(
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
   if (string == nullptr) {
     AbortTransactionOrFail(self, "String.charAt with null object");
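
AbortTransactionOrFail() near the top of this file also shows how a GNU printf-format attribute stacks with a capability annotation on a forward declaration while the definition stays bare. Condensed from the hunk above (the body is elided there as well):

  static void AbortTransactionOrFail(Thread* self, const char* fmt, ...)
      __attribute__((__format__(__printf__, 2, 3)))
      SHARED_REQUIRES(Locks::mutator_lock_);

  static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) {
    // No attributes here; the analysis applies the ones declared above.
    va_list args;
    va_start(args, fmt);
    /* ... */
    va_end(args);
  }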
diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h
index a357d5f..03d7026 100644
--- a/runtime/interpreter/unstarted_runtime.h
+++ b/runtime/interpreter/unstarted_runtime.h
@@ -52,14 +52,14 @@
                      ShadowFrame* shadow_frame,
                      JValue* result,
                      size_t arg_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void Jni(Thread* self,
                   ArtMethod* method,
                   mirror::Object* receiver,
                   uint32_t* args,
                   JValue* result)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   // Methods that intercept available libcore implementations.
@@ -68,7 +68,7 @@
                                      ShadowFrame* shadow_frame, \
                                      JValue* result,            \
                                      size_t arg_offset)         \
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 #include "unstarted_runtime_list.h"
   UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT)
 #undef UNSTARTED_RUNTIME_DIRECT_LIST
@@ -82,7 +82,7 @@
                                         mirror::Object* receiver,  \
                                         uint32_t* args,            \
                                         JValue* result)            \
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 #include "unstarted_runtime_list.h"
   UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI)
 #undef UNSTARTED_RUNTIME_DIRECT_LIST
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 4b672e0..a1ae2aa 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -42,7 +42,7 @@
                                 ShadowFrame* shadow_frame, \
                                 JValue* result,            \
                                 size_t arg_offset)         \
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {        \
+      SHARED_REQUIRES(Locks::mutator_lock_) {              \
     interpreter::UnstartedRuntime::Unstarted ## Name(self, shadow_frame, result, arg_offset); \
   }
 #include "unstarted_runtime_list.h"
@@ -58,7 +58,7 @@
                                    mirror::Object* receiver,  \
                                    uint32_t* args,            \
                                    JValue* result)            \
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {           \
+      SHARED_REQUIRES(Locks::mutator_lock_) {                 \
     interpreter::UnstartedRuntime::UnstartedJNI ## Name(self, method, receiver, args, result); \
   }
 #include "unstarted_runtime_list.h"
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 36e3aa3..9d41018 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -87,7 +87,7 @@
    * If the call has not yet finished in another thread, wait for it.
    */
   bool CheckOnLoadResult()
-      LOCKS_EXCLUDED(jni_on_load_lock_) {
+      REQUIRES(!jni_on_load_lock_) {
     Thread* self = Thread::Current();
     bool okay;
     {
@@ -112,7 +112,7 @@
     return okay;
   }
 
-  void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) {
+  void SetResult(bool result) REQUIRES(!jni_on_load_lock_) {
     Thread* self = Thread::Current();
     MutexLock mu(self, jni_on_load_lock_);
 
@@ -210,8 +210,8 @@
 
   // See section 11.3 "Linking Native Methods" of the JNI spec.
   void* FindNativeMethod(ArtMethod* m, std::string& detail)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::jni_libraries_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      REQUIRES(Locks::jni_libraries_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     std::string jni_short_name(JniShortName(m));
     std::string jni_long_name(JniLongName(m));
     const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
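
CheckOnLoadResult() and SetResult() above are the template for most of this change: a method that acquires a member lock internally now states REQUIRES(!lock) rather than LOCKS_EXCLUDED(lock). A minimal sketch of that shape, with hypothetical names (JniGlobals, Delete and refs_ are invented for illustration; Mutex, MutexLock and GUARDED_BY are the ART primitives used above):

  #include <set>

  class JniGlobals {
   public:
    // The body takes globals_lock_, so callers must not already hold it.
    void Delete(jobject obj) REQUIRES(!globals_lock_) {
      MutexLock mu(Thread::Current(), globals_lock_);
      refs_.erase(obj);  // OK: globals_lock_ is held for the access.
    }

   private:
    Mutex globals_lock_;
    std::set<jobject> refs_ GUARDED_BY(globals_lock_);
  };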
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 97fbbc5..d70fc47 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -77,7 +77,7 @@
   // such as NewByteArray.
   // If -verbose:third-party-jni is on, we want to log any JNI function calls
   // made by a third-party native method.
-  bool ShouldTrace(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool ShouldTrace(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /**
    * Loads the given shared library. 'path' is an absolute pathname.
@@ -93,56 +93,57 @@
    * using dlsym(3) on every native library that's been loaded so far.
    */
   void* FindCodeForNativeMethod(ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpForSigQuit(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::jni_libraries_lock_, globals_lock_, weak_globals_lock_);
+      REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_);
 
   void DumpReferenceTables(std::ostream& os)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_);
 
   bool SetCheckJniEnabled(bool enabled);
 
-  void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!globals_lock_);
 
-  void DisallowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void EnsureNewWeakGlobalsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void BroadcastForNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DisallowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+  void AllowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+  void EnsureNewWeakGlobalsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!weak_globals_lock_);
+  void BroadcastForNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!weak_globals_lock_);
 
   jobject AddGlobalRef(Thread* self, mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_);
 
   jweak AddWeakGlobalRef(Thread* self, mirror::Object* obj)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
 
-  void DeleteGlobalRef(Thread* self, jobject obj);
+  void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!globals_lock_);
 
-  void DeleteWeakGlobalRef(Thread* self, jweak obj);
+  void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!weak_globals_lock_);
 
   void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
 
   mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(globals_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_);
 
   mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
 
   void UpdateWeakGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(weak_globals_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
 
   const JNIInvokeInterface* GetUncheckedFunctions() const {
     return unchecked_functions_;
   }
 
-  void TrimGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(globals_lock_);
+  void TrimGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!globals_lock_);
 
  private:
   Runtime* const runtime_;
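
One more ergonomic gain visible in this header: REQUIRES is variadic and accepts negated and plain capabilities in one list, so a function can state its whole locking contract in a single annotation. Two signatures taken from the hunks above rather than new code:

  void DumpForSigQuit(std::ostream& os)
      REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_);

  void* FindNativeMethod(ArtMethod* m, std::string& detail)
      REQUIRES(Locks::jni_libraries_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);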
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 7c48985..f5ac9d0 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -88,7 +88,7 @@
   uint64_t dex_pc;
 };
 std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs);
 bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs);
 
@@ -130,7 +130,7 @@
    * Returns a newly-allocated JdwpState struct on success, or nullptr on failure.
    */
   static JdwpState* Create(const JdwpOptions* options)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
 
   ~JdwpState();
 
@@ -155,15 +155,15 @@
   // thread (command handler) so no event thread posts an event while
   // it processes a command. This must be called only from the debugger
   // thread.
-  void AcquireJdwpTokenForCommand() LOCKS_EXCLUDED(jdwp_token_lock_);
-  void ReleaseJdwpTokenForCommand() LOCKS_EXCLUDED(jdwp_token_lock_);
+  void AcquireJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_);
+  void ReleaseJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_);
 
   // Acquires/releases the JDWP synchronization token for the event thread
   // so no other thread (debugger thread or event thread) interleaves with
   // it when posting an event. This must NOT be called from the debugger
   // thread, only event thread.
-  void AcquireJdwpTokenForEvent(ObjectId threadId) LOCKS_EXCLUDED(jdwp_token_lock_);
-  void ReleaseJdwpTokenForEvent() LOCKS_EXCLUDED(jdwp_token_lock_);
+  void AcquireJdwpTokenForEvent(ObjectId threadId) REQUIRES(!jdwp_token_lock_);
+  void ReleaseJdwpTokenForEvent() REQUIRES(!jdwp_token_lock_);
 
   /*
    * These notify the debug code that something interesting has happened.  This
@@ -183,7 +183,7 @@
    * The VM has finished initializing.  Only called when the debugger is
    * connected at the time initialization completes.
    */
-  void PostVMStart() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PostVMStart() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
 
   /*
    * A location of interest has been reached.  This is used for breakpoints,
@@ -199,8 +199,7 @@
    */
   void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
                          const JValue* returnValue)
-     LOCKS_EXCLUDED(event_list_lock_)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+     REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * A field of interest has been accessed or modified. This is used for field access and field
@@ -211,8 +210,7 @@
    */
   void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
                       const JValue* fieldValue, bool is_modification)
-      LOCKS_EXCLUDED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * An exception has been thrown.
@@ -221,22 +219,19 @@
    */
   void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
                      const EventLocation* pCatchLoc, mirror::Object* thisPtr)
-      LOCKS_EXCLUDED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * A thread has started or stopped.
    */
   void PostThreadChange(Thread* thread, bool start)
-      LOCKS_EXCLUDED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Class has been prepared.
    */
   void PostClassPrepare(mirror::Class* klass)
-      LOCKS_EXCLUDED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * The VM is about to stop.
@@ -244,7 +239,7 @@
   bool PostVMDeath();
 
   // Called if/when we realize we're talking to DDMS.
-  void NotifyDdmsActive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void NotifyDdmsActive() SHARED_REQUIRES(Locks::mutator_lock_);
 
 
   void SetupChunkHeader(uint32_t type, size_t data_len, size_t header_size, uint8_t* out_header);
@@ -253,23 +248,23 @@
    * Send up a chunk of DDM data.
    */
   void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool HandlePacket();
+  bool HandlePacket() REQUIRES(!shutdown_lock_, !jdwp_token_lock_);
 
   void SendRequest(ExpandBuf* pReq);
 
   void ResetState()
-      LOCKS_EXCLUDED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!event_list_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /* atomic ops to get next serial number */
   uint32_t NextRequestSerial();
   uint32_t NextEventSerial();
 
   void Run()
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_, !thread_start_lock_,
+               !attach_lock_, !event_list_lock_);
 
   /*
    * Register an event by adding it to the event list.
@@ -278,48 +273,45 @@
    * may discard its pointer after calling this.
    */
   JdwpError RegisterEvent(JdwpEvent* pEvent)
-      LOCKS_EXCLUDED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!event_list_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Unregister an event, given the requestId.
    */
   void UnregisterEventById(uint32_t requestId)
-      LOCKS_EXCLUDED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!event_list_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Unregister all events.
    */
   void UnregisterAll()
-      LOCKS_EXCLUDED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!event_list_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   explicit JdwpState(const JdwpOptions* options);
-  size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply);
+  size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply)
+      REQUIRES(!jdwp_token_lock_);
   bool InvokeInProgress();
   bool IsConnected();
   void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
   void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy,
                                      ObjectId threadId)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
   void CleanupMatchList(const std::vector<JdwpEvent*>& match_list)
-      EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   void EventFinish(ExpandBuf* pReq);
   bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
                           std::vector<JdwpEvent*>* match_list)
-      LOCKS_EXCLUDED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
                                 std::vector<JdwpEvent*>* match_list)
-      EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   void UnregisterEvent(JdwpEvent* pEvent)
-      EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov);
 
   /*
@@ -351,8 +343,8 @@
    * events at the same time, so we grab a mutex in the SetWaitForJdwpToken
    * call, and release it in the ClearWaitForJdwpToken call.
    */
-  void SetWaitForJdwpToken(ObjectId threadId) LOCKS_EXCLUDED(jdwp_token_lock_);
-  void ClearWaitForJdwpToken() LOCKS_EXCLUDED(jdwp_token_lock_);
+  void SetWaitForJdwpToken(ObjectId threadId) REQUIRES(!jdwp_token_lock_);
+  void ClearWaitForJdwpToken() REQUIRES(!jdwp_token_lock_);
 
  public:  // TODO: fix privacy
   const JdwpOptions* options_;
@@ -415,9 +407,9 @@
   bool processing_request_ GUARDED_BY(shutdown_lock_);
 };
 
-std::string DescribeField(const FieldId& field_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-std::string DescribeMethod(const MethodId& method_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+std::string DescribeField(const FieldId& field_id) SHARED_REQUIRES(Locks::mutator_lock_);
+std::string DescribeMethod(const MethodId& method_id) SHARED_REQUIRES(Locks::mutator_lock_);
+std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_REQUIRES(Locks::mutator_lock_);
 
 class Request {
  public:
@@ -433,9 +425,9 @@
 
   uint32_t ReadUnsigned32(const char* what);
 
-  FieldId ReadFieldId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  FieldId ReadFieldId() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  MethodId ReadMethodId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  MethodId ReadMethodId() SHARED_REQUIRES(Locks::mutator_lock_);
 
   ObjectId ReadObjectId(const char* specific_kind);
 
@@ -447,7 +439,7 @@
 
   ObjectId ReadThreadGroupId();
 
-  RefTypeId ReadRefTypeId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  RefTypeId ReadRefTypeId() SHARED_REQUIRES(Locks::mutator_lock_);
 
   FrameId ReadFrameId();
 
@@ -461,7 +453,7 @@
 
   JdwpTypeTag ReadTypeTag();
 
-  JdwpLocation ReadLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  JdwpLocation ReadLocation() SHARED_REQUIRES(Locks::mutator_lock_);
 
   JdwpModKind ReadModKind();
 
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 14f097f..5d21f17 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -447,7 +447,7 @@
  * need to do this even if later mods cause us to ignore the event.
  */
 static bool ModsMatch(JdwpEvent* pEvent, const ModBasket& basket)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   JdwpEventMod* pMod = pEvent->mods;
 
   for (int i = pEvent->modCount; i > 0; i--, pMod++) {
@@ -784,7 +784,7 @@
 
 static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list,
                                        ObjectId thread_id)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   for (size_t i = 0, e = match_list.size(); i < e; ++i) {
     JdwpEvent* pEvent = match_list[i];
     VLOG(jdwp) << "EVENT #" << i << ": " << pEvent->eventKind
@@ -800,7 +800,7 @@
 
 static void SetJdwpLocationFromEventLocation(const JDWP::EventLocation* event_location,
                                              JDWP::JdwpLocation* jdwp_location)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(event_location != nullptr);
   DCHECK(jdwp_location != nullptr);
   Dbg::SetJdwpLocation(jdwp_location, event_location->method, event_location->dex_pc);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index d4e2656..f449406 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -53,7 +53,7 @@
 }
 
 static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   uint8_t tag;
   JdwpError rc = Dbg::GetObjectTag(object_id, &tag);
   if (rc == ERR_NONE) {
@@ -64,7 +64,7 @@
 }
 
 static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<ObjectId>& objects)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   expandBufAdd4BE(reply, objects.size());
   for (size_t i = 0; i < objects.size(); ++i) {
     JdwpError rc = WriteTaggedObject(reply, objects[i]);
@@ -84,7 +84,7 @@
 static JdwpError RequestInvoke(JdwpState*, Request* request,
                                ObjectId thread_id, ObjectId object_id,
                                RefTypeId class_id, MethodId method_id, bool is_constructor)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   CHECK(!is_constructor || object_id != 0);
 
   int32_t arg_count = request->ReadSigned32("argument count");
@@ -123,7 +123,7 @@
 }
 
 static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // Text information on runtime version.
   std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion()));
   expandBufAddUtf8String(pReply, version);
@@ -147,7 +147,7 @@
  * been loaded by multiple class loaders.
  */
 static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::string classDescriptor(request->ReadUtf8String());
 
   std::vector<RefTypeId> ids;
@@ -179,7 +179,7 @@
  * to be suspended, and that violates some JDWP expectations.
  */
 static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::vector<ObjectId> thread_ids;
   Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids);
 
@@ -195,7 +195,7 @@
  * List all thread groups that do not have a parent.
  */
 static JdwpError VM_TopLevelThreadGroups(JdwpState*, Request*, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   /*
    * TODO: maintain a list of parentless thread groups in the VM.
    *
@@ -214,7 +214,7 @@
  * Respond with the sizes of the basic debugger types.
  */
 static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   expandBufAdd4BE(pReply, sizeof(FieldId));
   expandBufAdd4BE(pReply, sizeof(MethodId));
   expandBufAdd4BE(pReply, sizeof(ObjectId));
@@ -224,7 +224,7 @@
 }
 
 static JdwpError VM_Dispose(JdwpState*, Request*, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Dbg::Dispose();
   return ERR_NONE;
 }
@@ -236,7 +236,7 @@
  * This needs to increment the "suspend count" on all threads.
  */
 static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Thread* self = Thread::Current();
   self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
   Dbg::SuspendVM();
@@ -248,13 +248,13 @@
  * Resume execution.  Decrements the "suspend count" of all threads.
  */
 static JdwpError VM_Resume(JdwpState*, Request*, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Dbg::ResumeVM();
   return ERR_NONE;
 }
 
 static JdwpError VM_Exit(JdwpState* state, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   uint32_t exit_status = request->ReadUnsigned32("exit_status");
   state->ExitAfterReplying(exit_status);
   return ERR_NONE;
@@ -267,7 +267,7 @@
  * string "java.util.Arrays".)
  */
 static JdwpError VM_CreateString(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::string str(request->ReadUtf8String());
   ObjectId string_id;
   JdwpError status = Dbg::CreateString(str, &string_id);
@@ -279,7 +279,7 @@
 }
 
 static JdwpError VM_ClassPaths(JdwpState*, Request*, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   expandBufAddUtf8String(pReply, "/");
 
   std::vector<std::string> class_path;
@@ -300,7 +300,7 @@
 }
 
 static JdwpError VM_DisposeObjects(JdwpState*, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   size_t object_count = request->ReadUnsigned32("object_count");
   for (size_t i = 0; i < object_count; ++i) {
     ObjectId object_id = request->ReadObjectId();
@@ -311,7 +311,7 @@
 }
 
 static JdwpError VM_Capabilities(JdwpState*, Request*, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   expandBufAdd1(reply, true);    // canWatchFieldModification
   expandBufAdd1(reply, true);    // canWatchFieldAccess
   expandBufAdd1(reply, true);    // canGetBytecodes
@@ -323,7 +323,7 @@
 }
 
 static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // The first few capabilities are the same as those reported by the older call.
   VM_Capabilities(nullptr, request, reply);
 
@@ -350,7 +350,7 @@
 }
 
 static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::vector<JDWP::RefTypeId> classes;
   Dbg::GetClassList(&classes);
 
@@ -381,17 +381,17 @@
 }
 
 static JdwpError VM_AllClasses(JdwpState*, Request*, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return VM_AllClassesImpl(pReply, true, false);
 }
 
 static JdwpError VM_AllClassesWithGeneric(JdwpState*, Request*, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return VM_AllClassesImpl(pReply, true, true);
 }
 
 static JdwpError VM_InstanceCounts(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   int32_t class_count = request->ReadSigned32("class count");
   if (class_count < 0) {
     return ERR_ILLEGAL_ARGUMENT;
@@ -415,7 +415,7 @@
 }
 
 static JdwpError RT_Modifiers(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   return Dbg::GetModifiers(refTypeId, pReply);
 }
@@ -424,7 +424,7 @@
  * Get values from static fields in a reference type.
  */
 static JdwpError RT_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   int32_t field_count = request->ReadSigned32("field count");
   expandBufAdd4BE(pReply, field_count);
@@ -442,7 +442,7 @@
  * Get the name of the source file in which a reference type was declared.
  */
 static JdwpError RT_SourceFile(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   std::string source_file;
   JdwpError status = Dbg::GetSourceFile(refTypeId, &source_file);
@@ -457,7 +457,7 @@
  * Return the current status of the reference type.
  */
 static JdwpError RT_Status(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   JDWP::JdwpTypeTag type_tag;
   uint32_t class_status;
@@ -473,7 +473,7 @@
  * Return interfaces implemented directly by this class.
  */
 static JdwpError RT_Interfaces(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   return Dbg::OutputDeclaredInterfaces(refTypeId, pReply);
 }
@@ -482,7 +482,7 @@
  * Return the class object corresponding to this type.
  */
 static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   ObjectId class_object_id;
   JdwpError status = Dbg::GetClassObject(refTypeId, &class_object_id);
@@ -500,13 +500,13 @@
  * JDB seems interested, but DEX files don't currently support this.
  */
 static JdwpError RT_SourceDebugExtension(JdwpState*, Request*, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   /* referenceTypeId in, string out */
   return ERR_ABSENT_INFORMATION;
 }
 
 static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, bool with_generic)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
 
   std::string signature;
@@ -522,12 +522,12 @@
 }
 
 static JdwpError RT_Signature(JdwpState* state, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return RT_Signature(state, request, pReply, false);
 }
 
 static JdwpError RT_SignatureWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return RT_Signature(state, request, pReply, true);
 }
 
@@ -536,7 +536,7 @@
  * reference type, or null if it was loaded by the system loader.
  */
 static JdwpError RT_ClassLoader(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   return Dbg::GetClassLoader(refTypeId, pReply);
 }
@@ -546,14 +546,14 @@
  * fields declared by a class.
  */
 static JdwpError RT_FieldsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   return Dbg::OutputDeclaredFields(refTypeId, true, pReply);
 }
 
 // Obsolete equivalent of FieldsWithGeneric, without the generic type information.
 static JdwpError RT_Fields(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   return Dbg::OutputDeclaredFields(refTypeId, false, pReply);
 }
@@ -563,20 +563,20 @@
  * methods declared by a class.
  */
 static JdwpError RT_MethodsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   return Dbg::OutputDeclaredMethods(refTypeId, true, pReply);
 }
 
 // Obsolete equivalent of MethodsWithGeneric, without the generic type information.
 static JdwpError RT_Methods(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   return Dbg::OutputDeclaredMethods(refTypeId, false, pReply);
 }
 
 static JdwpError RT_Instances(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId class_id = request->ReadRefTypeId();
   int32_t max_count = request->ReadSigned32("max count");
   if (max_count < 0) {
@@ -596,7 +596,7 @@
  * Return the immediate superclass of a class.
  */
 static JdwpError CT_Superclass(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId class_id = request->ReadRefTypeId();
   RefTypeId superClassId;
   JdwpError status = Dbg::GetSuperclass(class_id, &superClassId);
@@ -611,7 +611,7 @@
  * Set static class values.
  */
 static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId class_id = request->ReadRefTypeId();
   int32_t values_count = request->ReadSigned32("values count");
 
@@ -641,7 +641,7 @@
  */
 static JdwpError CT_InvokeMethod(JdwpState* state, Request* request,
                                  ExpandBuf* pReply ATTRIBUTE_UNUSED)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId class_id = request->ReadRefTypeId();
   ObjectId thread_id = request->ReadThreadId();
   MethodId method_id = request->ReadMethodId();
@@ -658,7 +658,7 @@
  */
 static JdwpError CT_NewInstance(JdwpState* state, Request* request,
                                 ExpandBuf* pReply ATTRIBUTE_UNUSED)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId class_id = request->ReadRefTypeId();
   ObjectId thread_id = request->ReadThreadId();
   MethodId method_id = request->ReadMethodId();
@@ -675,7 +675,7 @@
  * Create a new array object of the requested type and length.
  */
 static JdwpError AT_newInstance(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId arrayTypeId = request->ReadRefTypeId();
   int32_t length = request->ReadSigned32("length");
 
@@ -693,7 +693,7 @@
  * Return line number information for the method, if present.
  */
 static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId refTypeId = request->ReadRefTypeId();
   MethodId method_id = request->ReadMethodId();
 
@@ -704,7 +704,7 @@
 
 static JdwpError M_VariableTable(JdwpState*, Request* request, ExpandBuf* pReply,
                                  bool generic)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId class_id = request->ReadRefTypeId();
   MethodId method_id = request->ReadMethodId();
 
@@ -717,17 +717,17 @@
 }
 
 static JdwpError M_VariableTable(JdwpState* state, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return M_VariableTable(state, request, pReply, false);
 }
 
 static JdwpError M_VariableTableWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return M_VariableTable(state, request, pReply, true);
 }
 
 static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId class_id = request->ReadRefTypeId();
   MethodId method_id = request->ReadMethodId();
 
@@ -753,7 +753,7 @@
  * passed in here.
  */
 static JdwpError OR_ReferenceType(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId object_id = request->ReadObjectId();
   return Dbg::GetReferenceType(object_id, pReply);
 }
@@ -762,7 +762,7 @@
  * Get values from the fields of an object.
  */
 static JdwpError OR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId object_id = request->ReadObjectId();
   int32_t field_count = request->ReadSigned32("field count");
 
@@ -782,7 +782,7 @@
  * Set values in the fields of an object.
  */
 static JdwpError OR_SetValues(JdwpState*, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId object_id = request->ReadObjectId();
   int32_t field_count = request->ReadSigned32("field count");
 
@@ -804,7 +804,7 @@
 }
 
 static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId object_id = request->ReadObjectId();
   return Dbg::GetMonitorInfo(object_id, reply);
 }
@@ -822,7 +822,7 @@
  */
 static JdwpError OR_InvokeMethod(JdwpState* state, Request* request,
                                  ExpandBuf* pReply ATTRIBUTE_UNUSED)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId object_id = request->ReadObjectId();
   ObjectId thread_id = request->ReadThreadId();
   RefTypeId class_id = request->ReadRefTypeId();
@@ -832,19 +832,19 @@
 }
 
 static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId object_id = request->ReadObjectId();
   return Dbg::DisableCollection(object_id);
 }
 
 static JdwpError OR_EnableCollection(JdwpState*, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId object_id = request->ReadObjectId();
   return Dbg::EnableCollection(object_id);
 }
 
 static JdwpError OR_IsCollected(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId object_id = request->ReadObjectId();
   bool is_collected;
   JdwpError rc = Dbg::IsCollected(object_id, &is_collected);
@@ -853,7 +853,7 @@
 }
 
 static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId object_id = request->ReadObjectId();
   int32_t max_count = request->ReadSigned32("max count");
   if (max_count < 0) {
@@ -873,7 +873,7 @@
  * Return the string value in a string object.
  */
 static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId stringObject = request->ReadObjectId();
   std::string str;
   JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str);
@@ -892,7 +892,7 @@
  * Return a thread's name.
  */
 static JdwpError TR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
 
   std::string name;
@@ -913,7 +913,7 @@
  * resume it; only the JDI is allowed to resume it.
  */
 static JdwpError TR_Suspend(JdwpState*, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
 
   if (thread_id == Dbg::GetThreadSelfId()) {
@@ -932,7 +932,7 @@
  * Resume the specified thread.
  */
 static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
 
   if (thread_id == Dbg::GetThreadSelfId()) {
@@ -948,7 +948,7 @@
  * Return status of specified thread.
  */
 static JdwpError TR_Status(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
 
   JDWP::JdwpThreadStatus threadStatus;
@@ -970,7 +970,7 @@
  * Return the thread group that the specified thread is a member of.
  */
 static JdwpError TR_ThreadGroup(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
   return Dbg::GetThreadGroup(thread_id, pReply);
 }
@@ -982,7 +982,7 @@
  * be THREAD_NOT_SUSPENDED.
  */
 static JdwpError TR_Frames(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
   uint32_t start_frame = request->ReadUnsigned32("start frame");
   uint32_t length = request->ReadUnsigned32("length");
@@ -1014,7 +1014,7 @@
  * Returns the #of frames on the specified thread, which must be suspended.
  */
 static JdwpError TR_FrameCount(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
 
   size_t frame_count;
@@ -1028,7 +1028,7 @@
 }
 
 static JdwpError TR_OwnedMonitors(Request* request, ExpandBuf* reply, bool with_stack_depths)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
 
   std::vector<ObjectId> monitors;
@@ -1052,17 +1052,17 @@
 }
 
 static JdwpError TR_OwnedMonitors(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return TR_OwnedMonitors(request, reply, false);
 }
 
 static JdwpError TR_OwnedMonitorsStackDepthInfo(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return TR_OwnedMonitors(request, reply, true);
 }
 
 static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
 
   ObjectId contended_monitor;
@@ -1074,7 +1074,7 @@
 }
 
 static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   UNUSED(reply);
   ObjectId thread_id = request->ReadThreadId();
   return Dbg::Interrupt(thread_id);
@@ -1087,7 +1087,7 @@
  * its suspend count recently.)
  */
 static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
   return Dbg::GetThreadDebugSuspendCount(thread_id, pReply);
 }
@@ -1098,7 +1098,7 @@
  * The Eclipse debugger recognizes "main" and "system" as special.
  */
 static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_group_id = request->ReadThreadGroupId();
   return Dbg::GetThreadGroupName(thread_group_id, pReply);
 }
@@ -1108,7 +1108,7 @@
  * thread group.
  */
 static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_group_id = request->ReadThreadGroupId();
   return Dbg::GetThreadGroupParent(thread_group_id, pReply);
 }
@@ -1118,7 +1118,7 @@
  * specified thread group.
  */
 static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_group_id = request->ReadThreadGroupId();
   return Dbg::GetThreadGroupChildren(thread_group_id, pReply);
 }
@@ -1127,7 +1127,7 @@
  * Return the #of components in the array.
  */
 static JdwpError AR_Length(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId array_id = request->ReadArrayId();
 
   int32_t length;
@@ -1146,7 +1146,7 @@
  * Return the values from an array.
  */
 static JdwpError AR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId array_id = request->ReadArrayId();
   uint32_t offset = request->ReadUnsigned32("offset");
   uint32_t length = request->ReadUnsigned32("length");
@@ -1157,7 +1157,7 @@
  * Set values in an array.
  */
 static JdwpError AR_SetValues(JdwpState*, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId array_id = request->ReadArrayId();
   uint32_t offset = request->ReadUnsigned32("offset");
   uint32_t count = request->ReadUnsigned32("count");
@@ -1165,7 +1165,7 @@
 }
 
 static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   request->ReadObjectId();  // classLoaderObject
   // TODO: we should only return classes which have the given class loader as a defining or
   // initiating loader. The former would be easy; the latter is hard, because we don't have
@@ -1179,7 +1179,7 @@
  * Reply with a requestID.
  */
 static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   JdwpEventKind event_kind = request->ReadEnum1<JdwpEventKind>("event kind");
   JdwpSuspendPolicy suspend_policy = request->ReadEnum1<JdwpSuspendPolicy>("suspend policy");
   int32_t modifier_count = request->ReadSigned32("modifier count");
@@ -1322,7 +1322,7 @@
 }
 
 static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   request->ReadEnum1<JdwpEventKind>("event kind");
   uint32_t requestId = request->ReadUnsigned32("request id");
 
@@ -1336,7 +1336,7 @@
  * Return the values of arguments and local variables.
  */
 static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return Dbg::GetLocalValues(request, pReply);
 }
 
@@ -1344,12 +1344,12 @@
  * Set the values of arguments and local variables.
  */
 static JdwpError SF_SetValues(JdwpState*, Request* request, ExpandBuf*)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return Dbg::SetLocalValues(request);
 }
 
 static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ObjectId thread_id = request->ReadThreadId();
   FrameId frame_id = request->ReadFrameId();
 
@@ -1370,7 +1370,7 @@
  * that, or I have no idea what this is for.)
  */
 static JdwpError COR_ReflectedType(JdwpState*, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   RefTypeId class_object_id = request->ReadRefTypeId();
   return Dbg::GetReflectedType(class_object_id, pReply);
 }
@@ -1379,7 +1379,7 @@
  * Handle a DDM packet with a single chunk in it.
  */
 static JdwpError DDM_Chunk(JdwpState* state, Request* request, ExpandBuf* pReply)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   state->NotifyDdmsActive();
   uint8_t* replyBuf = nullptr;
   int replyLen = -1;
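
The jdwp_handler.cc hunks above are a pure rename: every
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) becomes
SHARED_REQUIRES(Locks::mutator_lock_) with identical semantics. As a rough
sketch (not the actual base/macros.h definitions; the macro bodies here are
assumed), the old and new spellings map onto clang's capability attributes
like this:

    // Sketch only: plausible macro expansions onto clang attributes.
    // Deprecated lock-based spellings:
    #define SHARED_LOCKS_REQUIRED(...)    __attribute__((shared_locks_required(__VA_ARGS__)))
    #define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
    #define LOCKS_EXCLUDED(...)           __attribute__((locks_excluded(__VA_ARGS__)))
    // Capability-based replacements used throughout this change:
    #define SHARED_REQUIRES(...)          __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define REQUIRES(...)                 __attribute__((requires_capability(__VA_ARGS__)))

Note that LOCKS_EXCLUDED(x) has no direct capability-style replacement; it is
rewritten as the negative capability REQUIRES(!x), which is what
-Wthread-safety-negative checks.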
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 6bc5e27..260abe7 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -256,12 +256,12 @@
     default:
       LOG(FATAL) << "Unknown transport: " << options->transport;
   }
-
   {
     /*
      * Grab a mutex before starting the thread.  This ensures they
      * won't signal the cond var before we're waiting.
      */
+    state->thread_start_lock_.AssertNotHeld(self);
     MutexLock thread_start_locker(self, state->thread_start_lock_);
 
     /*
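
The AssertNotHeld() call added above pairs a runtime check with the new static
one: Run() is now annotated REQUIRES(!thread_start_lock_, ...), and the thread
that starts the JDWP state asserts the same property before taking the lock.
If the assert function additionally carried clang's assert_capability
attribute (an assumption here, not confirmed by this change), it would also
discharge the negative capability for the static analysis. A hedged sketch
with invented names:

    Mutex gLock("gLock");
    void Work() REQUIRES(!gLock);

    void Caller() {                            // carries no REQUIRES(!gLock) itself
      gLock.AssertNotHeld(Thread::Current());  // if declared ASSERT_CAPABILITY(!gLock),
      Work();                                  // the analysis accepts this call
    }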
diff --git a/runtime/jdwp/jdwp_priv.h b/runtime/jdwp/jdwp_priv.h
index d58467d..29314f6 100644
--- a/runtime/jdwp/jdwp_priv.h
+++ b/runtime/jdwp/jdwp_priv.h
@@ -86,8 +86,8 @@
 
   void Close();
 
-  ssize_t WritePacket(ExpandBuf* pReply, size_t length) LOCKS_EXCLUDED(socket_lock_);
-  ssize_t WriteBufferedPacket(const std::vector<iovec>& iov) LOCKS_EXCLUDED(socket_lock_);
+  ssize_t WritePacket(ExpandBuf* pReply, size_t length) REQUIRES(!socket_lock_);
+  ssize_t WriteBufferedPacket(const std::vector<iovec>& iov) REQUIRES(!socket_lock_);
   Mutex* GetSocketLock() {
     return &socket_lock_;
   }
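
WritePacket() and WriteBufferedPacket() show the semantic upgrade rather than
a plain rename: LOCKS_EXCLUDED(socket_lock_) only warned when a caller visibly
held socket_lock_ in the same function, whereas REQUIRES(!socket_lock_) must
be proven at every call site, so the annotation propagates up the call graph.
That propagation is why this change touches so many declarations. A minimal
self-contained illustration (raw attributes, invented names; compile with
clang -c -Wthread-safety -Wthread-safety-negative):

    struct __attribute__((capability("mutex"))) Mu {
      void Lock() __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
    };

    Mu socket_lock;

    // Self-locking function: the caller must not already hold socket_lock.
    void WritePacket() __attribute__((requires_capability(!socket_lock))) {
      socket_lock.Lock();
      socket_lock.Unlock();
    }

    // Warns under -Wthread-safety-negative: Helper() cannot prove !socket_lock.
    void Helper() {
      WritePacket();
    }

    // Fix: propagate the negative capability to the caller.
    void Caller() __attribute__((requires_capability(!socket_lock))) {
      WritePacket();
    }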
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 2b28f7d..3fbad36 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -63,13 +63,13 @@
 
 // Explicit template instantiation.
 template
-SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_)
+SHARED_REQUIRES(Locks::mutator_lock_)
+REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
 JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Object> obj_h);
 
 template
-SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_)
+SHARED_REQUIRES(Locks::mutator_lock_)
+REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
 JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Throwable> obj_h);
 
 template<class T>
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 4c149cd..17490f4 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -63,28 +63,24 @@
   ObjectRegistry();
 
   JDWP::ObjectId Add(mirror::Object* o)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
 
   JDWP::RefTypeId AddRefType(mirror::Class* c)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
 
   template<class T>
   JDWP::ObjectId Add(Handle<T> obj_h)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
 
   JDWP::RefTypeId AddRefType(Handle<mirror::Class> c_h)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
 
   template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_) {
     if (id == 0) {
       *error = JDWP::ERR_NONE;
       return nullptr;
@@ -92,47 +88,42 @@
     return down_cast<T>(InternalGet(id, error));
   }
 
-  void Clear() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Clear() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
   void DisableCollection(JDWP::ObjectId id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
   void EnableCollection(JDWP::ObjectId id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
   bool IsCollected(JDWP::ObjectId id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
   void DisposeObject(JDWP::ObjectId id, uint32_t reference_count)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
   // This is needed to get the jobject instead of the Object*.
   // Avoid using this and use standard Get when possible.
-  jobject GetJObject(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  jobject GetJObject(JDWP::ObjectId id) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
  private:
   template<class T>
   JDWP::ObjectId InternalAdd(Handle<T> obj_h)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(lock_,
-                     Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   mirror::Object* InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
   void Demote(ObjectRegistryEntry& entry)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_);
 
   void Promote(ObjectRegistryEntry& entry)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_);
 
   bool ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code,
                       ObjectRegistryEntry** out_entry)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::multimap<int32_t, ObjectRegistryEntry*> object_to_entry_ GUARDED_BY(lock_);
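
The ObjectRegistry changes illustrate the convention used throughout: public
entry points that take the lock themselves are annotated REQUIRES(!lock_),
while private helpers that run inside the critical section (Demote, Promote,
ContainsLocked) are annotated REQUIRES(lock_). A simplified sketch of the
shape (body and helper name invented, ART's MutexLock assumed):

    class RegistryLike {
     public:
      void DisableCollection(JDWP::ObjectId id) REQUIRES(!lock_) {
        MutexLock mu(Thread::Current(), lock_);
        PromoteLocked(id);  // OK: lock_ is held in this scope
      }
     private:
      void PromoteLocked(JDWP::ObjectId id) REQUIRES(lock_);
      Mutex lock_;
    };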
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index dbd8977..ca6e7ea 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -48,7 +48,7 @@
   virtual ~Jit();
   static Jit* Create(JitOptions* options, std::string* error_msg);
   bool CompileMethod(ArtMethod* method, Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void CreateInstrumentationCache(size_t compile_threshold);
   void CreateThreadPool();
   CompilerCallbacks* GetCompilerCallbacks() {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index c1ea921..9707f6f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -78,27 +78,27 @@
 
   // Return true if the code cache contains the code pointer which is the entrypoint of the method.
   bool ContainsMethod(ArtMethod* method) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Return true if the code cache contains a code ptr.
   bool ContainsCodePtr(const void* ptr) const;
 
   // Reserve a region of code of size at least "size". Returns null if there is no more room.
-  uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
+  uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_);
 
   // Add a data array of size (end - begin) with the associated contents, returns null if there
   // is no more room.
   uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
-      LOCKS_EXCLUDED(lock_);
+      REQUIRES(!lock_);
 
   // Get code for a method, returns null if it is not in the jit cache.
   const void* GetCodeFor(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
   // Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the
   // entrypoint isn't within the cache.
   void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
 
  private:
   // Takes ownership of code_mem_map.
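
As GetCodeFor() and SaveCompiledCode() show, the new clauses compose on a
single declaration: SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_)
means the caller holds the mutator lock at least shared and must not hold the
cache lock. REQUIRES also accepts a mixed list of positive and negative
capabilities, as in the jdwp.h hunks earlier (restated from this change, not
new code):

    void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
                                  std::vector<JdwpEvent*>* match_list)
        REQUIRES(event_list_lock_)              // exclusive hold required
        SHARED_REQUIRES(Locks::mutator_lock_);  // shared hold also required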
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 27894eb..0deaf8a 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -47,9 +47,9 @@
  public:
   explicit JitInstrumentationCache(size_t hot_method_threshold);
   void AddSamples(Thread* self, ArtMethod* method, size_t samples)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
   void SignalCompiled(Thread* self, ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
   void CreateThreadPool();
   void DeleteThreadPool();
 
@@ -68,7 +68,7 @@
 
   virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
                              ArtMethod* method, uint32_t /*dex_pc*/)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     instrumentation_cache_->AddSamples(thread, method, 1);
   }
   virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
@@ -92,7 +92,7 @@
 
   // We only care about how many dex instructions were executed in the Jit.
   virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK_LE(dex_pc_offset, 0);
     instrumentation_cache_->AddSamples(thread, method, 1);
   }
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 84fc404..b18b430 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -63,14 +63,14 @@
 JNIEnvExt::~JNIEnvExt() {
 }
 
-jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
   if (obj == nullptr) {
     return nullptr;
   }
   return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
 }
 
-void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_) {
   if (obj != nullptr) {
     locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
   }
@@ -86,14 +86,14 @@
   monitors.Dump(os);
 }
 
-void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void JNIEnvExt::PushFrame(int capacity) SHARED_REQUIRES(Locks::mutator_lock_) {
   UNUSED(capacity);  // cpplint gets confused with (int) and thinks it's a cast.
   // TODO: take 'capacity' into account.
   stacked_local_ref_cookies.push_back(local_ref_cookie);
   local_ref_cookie = locals.GetSegmentState();
 }
 
-void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void JNIEnvExt::PopFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
   locals.SetSegmentState(local_ref_cookie);
   local_ref_cookie = stacked_local_ref_cookies.back();
   stacked_local_ref_cookies.pop_back();
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index 29d912c..9b55536 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -39,7 +39,7 @@
   ~JNIEnvExt();
 
   void DumpReferenceTables(std::ostream& os)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetCheckJniEnabled(bool enabled);
 
@@ -48,7 +48,7 @@
 
   template<typename T>
   T AddLocalReference(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static Offset SegmentStateOffset();
 
@@ -60,8 +60,8 @@
     return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
   }
 
-  jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  jobject NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+  void DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
 
   Thread* const self;
   JavaVMExt* const vm;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index cc176b7..6a716b5 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -89,7 +89,7 @@
 
 static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c,
                                    const char* name, const char* sig, const char* kind)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::string temp;
   soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
                                  "no %s method \"%s.%s%s\"",
@@ -98,7 +98,7 @@
 
 static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::Class* c,
                                          const char* kind, jint idx, bool return_errors)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   LOG(return_errors ? ERROR : FATAL) << "Failed to register native method in "
       << PrettyDescriptor(c) << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
       << ": " << kind << " is null at index " << idx;
@@ -107,7 +107,7 @@
 }
 
 static mirror::Class* EnsureInitialized(Thread* self, mirror::Class* klass)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (LIKELY(klass->IsInitialized())) {
     return klass;
   }
@@ -121,7 +121,7 @@
 
 static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
                               const char* name, const char* sig, bool is_static)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class));
   if (c == nullptr) {
     return nullptr;
@@ -148,7 +148,7 @@
 }
 
 static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
   // If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
   if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
@@ -179,7 +179,7 @@
 
 static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name,
                             const char* sig, bool is_static)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> c(
       hs.NewHandle(EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class))));
@@ -227,7 +227,7 @@
 
 static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start,
                         jsize length, const char* identifier)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::string type(PrettyTypeOf(array));
   soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
                                  "%s offset=%d length=%d %s.length=%d",
@@ -236,14 +236,14 @@
 
 static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length,
                         jsize array_length)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
                                  "offset=%d length=%d string.length()=%d", start, length,
                                  array_length);
 }
 
 int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause)
-    LOCKS_EXCLUDED(Locks::mutator_lock_) {
+    REQUIRES(!Locks::mutator_lock_) {
   // Turn the const char* into a java.lang.String.
   ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg));
   if (msg != nullptr && s.get() == nullptr) {
@@ -314,7 +314,7 @@
 
 template <bool kNative>
 static ArtMethod* FindMethod(mirror::Class* c, const StringPiece& name, const StringPiece& sig)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
   for (auto& method : c->GetDirectMethods(pointer_size)) {
     if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) {
@@ -2321,7 +2321,7 @@
  private:
   static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity,
                                           const char* caller)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // TODO: we should try to expand the table if necessary.
     if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) {
       LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity;
@@ -2350,7 +2350,7 @@
   template <typename JArrayT, typename ElementT, typename ArtArrayT>
   static ArtArrayT* DecodeAndCheckArrayType(ScopedObjectAccess& soa, JArrayT java_array,
                                            const char* fn_name, const char* operation)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array);
     if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) {
       soa.Vm()->JniAbortF(fn_name,
@@ -2407,7 +2407,7 @@
 
   static void ReleasePrimitiveArray(ScopedObjectAccess& soa, mirror::Array* array,
                                     size_t component_size, void* elements, jint mode)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     void* array_data = array->GetRawData(component_size, 0);
     gc::Heap* heap = Runtime::Current()->GetHeap();
     bool is_copy = array_data != elements;
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index 12d3ff3..312d811 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -48,30 +48,28 @@
 
   // Boxes a closure into an object. Returns null and throws an exception on failure.
   mirror::Object* BoxLambda(const ClosureType& closure)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
 
   // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
   bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Sweep weak references to lambda boxes. Update the addresses if the objects have been
   // moved, and delete them from the table if the objects have been cleaned up.
   void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
 
   // GC callback: Temporarily block anyone from touching the map.
   void DisallowNewWeakBoxedLambdas()
-      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+      REQUIRES(!Locks::lambda_table_lock_);
 
   // GC callback: Unblock any readers who have been queued waiting to touch the map.
   void AllowNewWeakBoxedLambdas()
-      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+      REQUIRES(!Locks::lambda_table_lock_);
 
   // GC callback: Verify that the state is now blocking anyone from touching the map.
   void EnsureNewWeakBoxedLambdasDisallowed()
-      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+      REQUIRES(!Locks::lambda_table_lock_);
 
   BoxTable();
   ~BoxTable() = default;
@@ -93,11 +91,11 @@
 
   // Attempt to look up the lambda in the map, or return null if it's not there yet.
   ValueType FindBoxedLambda(const ClosureType& closure) const
-      SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);
+      SHARED_REQUIRES(Locks::lambda_table_lock_);
 
   // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
   void BlockUntilWeaksAllowed()
-      SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);
+      SHARED_REQUIRES(Locks::lambda_table_lock_);
 
   // EmptyFn implementation for art::HashMap
   struct EmptyFn {
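
BoxTable puts SHARED_REQUIRES(Locks::lambda_table_lock_) on the read-only
lookup and REQUIRES(!Locks::lambda_table_lock_) on the public entry points
that take the lock themselves; since an exclusive hold satisfies a shared
requirement, callers that grab the lock outright may still call the lookup.
A hedged sketch of one GC callback (flag name assumed, lock assumed to be a
plain Mutex):

    void BoxTable::DisallowNewWeakBoxedLambdas() REQUIRES(!Locks::lambda_table_lock_) {
      MutexLock mu(Thread::Current(), *Locks::lambda_table_lock_);
      allow_new_weak_lambdas_ = false;  // assumed flag, GUARDED_BY(lambda_table_lock_)
    }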
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index c10ddfd..743ee77 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -28,24 +28,24 @@
  public:
   explicit LinearAlloc(ArenaPool* pool);
 
-  void* Alloc(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
+  void* Alloc(Thread* self, size_t size) REQUIRES(!lock_);
 
   // Realloc never frees the input pointer, it is the caller's job to do this if necessary.
-  void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) LOCKS_EXCLUDED(lock_);
+  void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) REQUIRES(!lock_);
 
   // Allocate and construct an array of structs of type T.
   template<class T>
-  T* AllocArray(Thread* self, size_t elements) {
+  T* AllocArray(Thread* self, size_t elements) REQUIRES(!lock_) {
     return reinterpret_cast<T*>(Alloc(self, elements * sizeof(T)));
   }
 
   // Return the number of bytes used in the allocator.
-  size_t GetUsedMemory() const LOCKS_EXCLUDED(lock_);
+  size_t GetUsedMemory() const REQUIRES(!lock_);
 
-  ArenaPool* GetArenaPool() LOCKS_EXCLUDED(lock_);
+  ArenaPool* GetArenaPool() REQUIRES(!lock_);
 
   // Return true if the linear alloc contains an address.
-  bool Contains(void* ptr) const;
+  bool Contains(void* ptr) const REQUIRES(!lock_);
 
  private:
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
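
LinearAlloc shows why the annotations must be repeated on inline code:
AllocArray() is defined in the header and calls Alloc(), which now carries
REQUIRES(!lock_), so the template needs the same clause for its own body to
check cleanly. Even the const accessors (GetUsedMemory, Contains) need
REQUIRES(!lock_) because they acquire the mutable lock_ internally. Sketch of
the propagation (ART macros assumed):

    void* Alloc(Thread* self, size_t size) REQUIRES(!lock_);

    template <class T>
    T* AllocArray(Thread* self, size_t elements) REQUIRES(!lock_) {
      // Without REQUIRES(!lock_) here, -Wthread-safety-negative would flag the
      // call below: the body cannot prove that lock_ is not already held.
      return reinterpret_cast<T*>(Alloc(self, elements * sizeof(T)));
    }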
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 01e29c9..196a7f6 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -92,7 +92,7 @@
                                   std::string* error_msg);
 
   // Releases the memory mapping.
-  ~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+  ~MemMap() REQUIRES(!Locks::mem_maps_lock_);
 
   const std::string& GetName() const {
     return name_;
@@ -142,25 +142,25 @@
                      std::string* error_msg);
 
   static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
-      LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+      REQUIRES(!Locks::mem_maps_lock_);
   static void DumpMaps(std::ostream& os, bool terse = false)
-      LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+      REQUIRES(!Locks::mem_maps_lock_);
 
   typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
 
-  static void Init() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
-  static void Shutdown() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+  static void Init() REQUIRES(!Locks::mem_maps_lock_);
+  static void Shutdown() REQUIRES(!Locks::mem_maps_lock_);
 
  private:
   MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
-         int prot, bool reuse, size_t redzone_size = 0) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+         int prot, bool reuse, size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_);
 
   static void DumpMapsLocked(std::ostream& os, bool terse)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
+      REQUIRES(Locks::mem_maps_lock_);
   static bool HasMemMap(MemMap* map)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
+      REQUIRES(Locks::mem_maps_lock_);
   static MemMap* GetLargestMemMapAt(void* address)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
+      REQUIRES(Locks::mem_maps_lock_);
 
   const std::string name_;
   uint8_t* const begin_;  // Start of data.
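
The substitutions in this file and throughout the change follow one fixed mapping, visible in the hunks themselves: EXCLUSIVE_LOCKS_REQUIRED(x) becomes REQUIRES(x), SHARED_LOCKS_REQUIRED(x) becomes SHARED_REQUIRES(x), and LOCKS_EXCLUDED(x) becomes the negative form REQUIRES(!x). The underlying clang attribute spellings are the documented ones; the exact macro bodies in ART's headers may differ from this sketch:

// EXCLUSIVE_LOCKS_REQUIRED(x)  ->  REQUIRES(x)
// SHARED_LOCKS_REQUIRED(x)     ->  SHARED_REQUIRES(x)
// LOCKS_EXCLUDED(x)            ->  REQUIRES(!x)
#define REQUIRES(...) \
  __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) \
  __attribute__((requires_shared_capability(__VA_ARGS__)))

So DumpMaps() above may be called only with mem_maps_lock_ free, while DumpMapsLocked() may be called only with it held exclusively.
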
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 6240b3b..845d156 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -34,12 +34,12 @@
 class MANAGED AbstractMethod : public AccessibleObject {
  public:
   // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
-  bool CreateFromArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
   // Only used by the image writer.
-  void SetArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   static MemberOffset ArtMethodOffset() {
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index 6d4c0f6..dcf5118 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,12 +36,12 @@
   }
 
   template<bool kTransactionActive>
-  void SetAccessible(bool value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetAccessible(bool value) SHARED_REQUIRES(Locks::mutator_lock_) {
     UNUSED(padding_);
     return SetFieldBoolean<kTransactionActive>(FlagOffset(), value ? 1u : 0u);
   }
 
-  bool IsAccessible() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsAccessible() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldBoolean(FlagOffset());
   }
 
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 88d75ab..3d54029 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -101,7 +101,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     UNUSED(usable_size);
     // Avoid AsArray as object is not yet in live bitmap or allocation stack.
     Array* array = down_cast<Array*>(obj);
@@ -126,7 +126,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Avoid AsArray as object is not yet in live bitmap or allocation stack.
     Array* array = down_cast<Array*>(obj);
     // DCHECK(array->IsArrayInstance());
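
The functors above show that operator() carries annotations like any other member function, so the shared requirement flows to whichever allocation path invokes the visitor. A toy version with a stand-in reader-writer lock (all names here are illustrative, not ART's):

#define CAPABILITY(x)        __attribute__((capability(x)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") ReaderWriterMutex {
  void SharedLock() ACQUIRE_SHARED();
  void SharedUnlock() RELEASE_SHARED();
};

ReaderWriterMutex heap_lock;  // stand-in for Locks::mutator_lock_

struct InitVisitor {
  // Annotated like any member function; callers must hold heap_lock shared.
  void operator()(int* obj) const SHARED_REQUIRES(heap_lock) {
    *obj = 0;
  }
};

void VisitOne(int* obj) {
  heap_lock.SharedLock();
  InitVisitor()(obj);  // ok: shared capability held across the call
  heap_lock.SharedUnlock();
}
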
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index d72c03f..4128689 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -43,7 +43,7 @@
 static Array* RecursiveCreateMultiArray(Thread* self,
                                         Handle<Class> array_class, int current_dimension,
                                         Handle<mirror::IntArray> dimensions)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   int32_t array_length = dimensions->Get(current_dimension);
   StackHandleScope<1> hs(self);
   Handle<Array> new_array(
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index e65611d..08c12e9 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -39,21 +39,21 @@
   template <bool kIsInstrumented, bool kFillUsable = false>
   ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
                                     size_t component_size_shift, gc::AllocatorType allocator_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
                                  Handle<IntArray> dimensions)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_));
   }
 
-  void SetLength(int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetLength(int32_t length) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK_GE(length, 0);
     // We use the non-transactional version since we can't undo this write. We also disable checking
     // since it would fail during a transaction.
@@ -67,7 +67,7 @@
   static MemberOffset DataOffset(size_t component_size);
 
   void* GetRawData(size_t component_size, int32_t index)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() +
         (index * component_size);
     return reinterpret_cast<void*>(data);
@@ -82,16 +82,16 @@
   // Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
   // returns false.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  Array* CopyOf(Thread* self, int32_t new_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Array* CopyOf(Thread* self, int32_t new_length) SHARED_REQUIRES(Locks::mutator_lock_);
 
  protected:
-  void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ThrowArrayStoreException(Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   void ThrowArrayIndexOutOfBoundsException(int32_t index)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // The number of array elements.
   int32_t length_;
@@ -107,32 +107,32 @@
   typedef T ElementType;
 
   static PrimitiveArray<T>* Alloc(Thread* self, size_t length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const T* GetData() const ALWAYS_INLINE  SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const T* GetData() const ALWAYS_INLINE  SHARED_REQUIRES(Locks::mutator_lock_) {
     return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
   }
 
-  T* GetData() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  T* GetData() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
     return reinterpret_cast<T*>(GetRawData(sizeof(T), 0));
   }
 
-  T Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  T Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
 
-  T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(CheckIsValidIndex(i));
     return GetData()[i];
   }
 
-  void Set(int32_t i, T value) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Set(int32_t i, T value) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
 
   // TODO fix thread safety analysis broken by the use of template. This should be
-  // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+  // SHARED_REQUIRES(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true>
   void Set(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
 
   // TODO fix thread safety analysis broken by the use of template. This should be
-  // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+  // SHARED_REQUIRES(Locks::mutator_lock_).
   template<bool kTransactionActive,
            bool kCheckTransaction = true,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -144,7 +144,7 @@
    * and the arrays non-null.
    */
   void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Works like memcpy(), except we guarantee not to allow tearing of array values (ie using
@@ -152,7 +152,7 @@
    * and the arrays non-null.
    */
   void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void SetArrayClass(Class* array_class) {
     CHECK(array_class_.IsNull());
@@ -160,7 +160,7 @@
     array_class_ = GcRoot<Class>(array_class);
   }
 
-  static Class* GetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static Class* GetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!array_class_.IsNull());
     return array_class_.Read();
   }
@@ -170,7 +170,7 @@
     array_class_ = GcRoot<Class>(nullptr);
   }
 
-  static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   static GcRoot<Class> array_class_;
@@ -183,11 +183,11 @@
  public:
   template<typename T>
   T GetElementPtrSize(uint32_t idx, size_t ptr_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive = false, bool kUnchecked = false, typename T>
   void SetElementPtrSize(uint32_t idx, T element, size_t ptr_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 }  // namespace mirror
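
The mirror::Class changes that follow convert hundreds of accessors wholesale to SHARED_REQUIRES(Locks::mutator_lock_). When a caller knows the lock is held but the analysis cannot see it (e.g. code reached from a runtime callback), the usual escape hatch is an assertion method, which clang models via assert_shared_capability. A minimal sketch, assuming an AssertSharedHeld-style helper like the ones on ART's mutex classes:

#define CAPABILITY(x)               __attribute__((capability(x)))
#define SHARED_REQUIRES(...)        __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ASSERT_SHARED_CAPABILITY(x) __attribute__((assert_shared_capability(x)))

struct CAPABILITY("mutex") ReaderWriterMutex {
  // Tells the analysis the shared capability is held from this point on.
  void AssertSharedHeld() ASSERT_SHARED_CAPABILITY(this);
};

ReaderWriterMutex mutator_lock;  // stand-in for *Locks::mutator_lock_

int GetAccessFlags() SHARED_REQUIRES(mutator_lock) { return 0; }

int FlagsFromCallback() {
  mutator_lock.AssertSharedHeld();  // discharge the obligation at runtime
  return GetAccessFlags();          // no warning after the assertion
}
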
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 5bd6583..701ba4a 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -835,7 +835,7 @@
   }
 
   void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self_);
     Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
     mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index ba0a9fc..be87dee 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -127,7 +127,7 @@
   };
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Status GetStatus() SHARED_REQUIRES(Locks::mutator_lock_) {
     static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
     return static_cast<Status>(
         GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
@@ -135,7 +135,7 @@
 
   // This is static because 'this' may be moved by GC.
   static void SetStatus(Handle<Class> h_this, Status new_status, Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static MemberOffset StatusOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, status_);
@@ -143,146 +143,146 @@
 
   // Returns true if the class has been retired.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsRetired() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsRetired() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() == kStatusRetired;
   }
 
   // Returns true if the class has failed to link.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsErroneous() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsErroneous() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() == kStatusError;
   }
 
   // Returns true if the class has been loaded.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsIdxLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsIdxLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusIdx;
   }
 
   // Returns true if the class has been loaded.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusLoaded;
   }
 
   // Returns true if the class has been linked.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsResolved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsResolved() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusResolved;
   }
 
   // Returns true if the class was compile-time verified.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsCompileTimeVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsCompileTimeVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime;
   }
 
   // Returns true if the class has been verified.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusVerified;
   }
 
   // Returns true if the class is initializing.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsInitializing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsInitializing() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() >= kStatusInitializing;
   }
 
   // Returns true if the class is initialized.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsInitialized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStatus<kVerifyFlags>() == kStatusInitialized;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
   static MemberOffset AccessFlagsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
   }
 
-  void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if the class is an interface.
-  ALWAYS_INLINE bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsInterface() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccInterface) != 0;
   }
 
   // Returns true if the class is declared public.
-  ALWAYS_INLINE bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
   // Returns true if the class is declared final.
-  ALWAYS_INLINE bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  ALWAYS_INLINE bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
   }
 
-  ALWAYS_INLINE void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
     SetAccessFlags(flags | kAccClassIsFinalizable);
   }
 
-  ALWAYS_INLINE bool IsStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetField32(AccessFlagsOffset()) & kAccClassIsStringClass) != 0;
   }
 
-  ALWAYS_INLINE void SetStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
     SetAccessFlags(flags | kAccClassIsStringClass);
   }
 
   // Returns true if the class is abstract.
-  ALWAYS_INLINE bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAbstract) != 0;
   }
 
   // Returns true if the class is an annotation.
-  ALWAYS_INLINE bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsAnnotation() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAnnotation) != 0;
   }
 
   // Returns true if the class is synthetic.
-  ALWAYS_INLINE bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
   // Returns true if the class can avoid access checks.
-  bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPreverified) != 0;
   }
 
-  void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
     SetAccessFlags(flags | kAccPreverified);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsTypeOfReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsWeakReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags<kVerifyFlags>() & kAccClassIsWeakReference) != 0;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsSoftReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags<kVerifyFlags>() & kAccReferenceFlagsMask) == kAccClassIsReference;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsFinalizerReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags<kVerifyFlags>() & kAccClassIsFinalizerReference) != 0;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPhantomReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags<kVerifyFlags>() & kAccClassIsPhantomReference) != 0;
   }
 
@@ -291,7 +291,7 @@
   // For array classes, where all the classes are final due to there being no sub-classes, an
   // Object[] may be assigned to by a String[] but a String[] may not be assigned to by other
   // types as the component is final.
-  bool CannotBeAssignedFromOtherTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool CannotBeAssignedFromOtherTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!IsArrayClass()) {
       return IsFinal();
     } else {
@@ -306,18 +306,18 @@
 
   // Returns true if this class is the placeholder and should retire and
   // be replaced with a class with the right size for embedded imt/vtable.
-  bool IsTemp() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) {
     Status s = GetStatus();
     return s < Status::kStatusResolving && ShouldHaveEmbeddedImtAndVTable();
   }
 
-  String* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);  // Returns the cached name.
-  void SetName(String* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);  // Sets the cached name.
+  String* GetName() SHARED_REQUIRES(Locks::mutator_lock_);  // Returns the cached name.
+  void SetName(String* name) SHARED_REQUIRES(Locks::mutator_lock_);  // Sets the cached name.
   // Computes the name, then sets the cached value.
-  static String* ComputeName(Handle<Class> h_this) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsProxyClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     // Read access flags without using the getter, as whether something is a proxy can be checked in
     // any loaded state.
     // TODO: switch to a check if the super class is java.lang.reflect.Proxy?
@@ -326,9 +326,9 @@
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetPrimitiveType(Primitive::Type new_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
     int32_t v32 = static_cast<int32_t>(new_type);
     DCHECK_EQ(v32 & 0xFFFF, v32) << "upper 16 bits aren't zero";
@@ -338,83 +338,83 @@
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if the class is a primitive type.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitive() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() != Primitive::kPrimNot;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveBoolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveBoolean() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimBoolean;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveByte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveByte() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimByte;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveChar() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveChar() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimChar;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveShort() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveShort() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimShort;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveInt() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveInt() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType() == Primitive::kPrimInt;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveLong() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveLong() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimLong;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveFloat() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveFloat() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimFloat;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveDouble() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveDouble() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimDouble;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveVoid() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveVoid() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPrimitiveArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsPrimitiveArray() SHARED_REQUIRES(Locks::mutator_lock_) {
     return IsArrayClass<kVerifyFlags>() &&
         GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()->
         IsPrimitive();
   }
 
   // Depth of class from java.lang.Object
-  uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t Depth() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsClassClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsClassClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsThrowableClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsReferenceClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsReferenceClass() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   static MemberOffset ComponentTypeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
@@ -422,11 +422,11 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Class* GetComponentType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(ComponentTypeOffset());
   }
 
-  void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetComponentType(Class* new_component_type) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(GetComponentType() == nullptr);
     DCHECK(new_component_type != nullptr);
     // Component type is invariant: use non-transactional mode without check.
@@ -434,43 +434,43 @@
   }
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  size_t GetComponentSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) {
     return 1U << GetComponentSizeShift();
   }
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  size_t GetComponentSizeShift() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t GetComponentSizeShift() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetComponentType<kDefaultVerifyFlags, kReadBarrierOption>()->GetPrimitiveTypeSizeShift();
   }
 
-  bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsObjectClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return !IsPrimitive() && GetSuperClass() == nullptr;
   }
 
-  bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsInstantiableNonArray() SHARED_REQUIRES(Locks::mutator_lock_) {
     return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
   }
 
-  bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsInstantiable() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
         (IsAbstract() && IsArrayClass());
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsObjectArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsObjectArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetComponentType<kVerifyFlags>() != nullptr &&
         !GetComponentType<kVerifyFlags>()->IsPrimitive();
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsIntArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsIntArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
     auto* component_type = GetComponentType<kVerifyFlags>();
     return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsLongArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsLongArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
     auto* component_type = GetComponentType<kVerifyFlags>();
     return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
@@ -479,16 +479,16 @@
   // Creates a raw object instance but does not invoke the default constructor.
   template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
   ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   Object* AllocObject(Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   Object* AllocNonMovableObject(Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsVariableSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsVariableSize() SHARED_REQUIRES(Locks::mutator_lock_) {
     // Classes, arrays, and strings vary in size, and so the object_size_ field cannot
     // be used to Get their instance size
     return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
@@ -497,17 +497,17 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  uint32_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  uint32_t GetClassSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t GetClassSize() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
   }
 
   void SetClassSize(uint32_t new_class_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Compute how many bytes would be used by a class with the given elements.
   static uint32_t ComputeClassSize(bool has_embedded_tables,
@@ -533,31 +533,31 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  uint32_t GetObjectSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t GetObjectSize() SHARED_REQUIRES(Locks::mutator_lock_);
   static MemberOffset ObjectSizeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
   }
 
-  void SetObjectSize(uint32_t new_object_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetObjectSize(uint32_t new_object_size) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!IsVariableSize());
     // Not called within a transaction.
     return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
   }
 
   void SetObjectSizeWithoutChecks(uint32_t new_object_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Not called within a transaction.
     return SetField32<false, false, kVerifyNone>(
         OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
   }
 
   // Returns true if this class is in the same package as that class.
-  bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsInSamePackage(Class* that) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2);
 
   // Returns true if this class can access that class.
-  bool CanAccess(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool CanAccess(Class* that) SHARED_REQUIRES(Locks::mutator_lock_) {
     return that->IsPublic() || this->IsInSamePackage(that);
   }
 
@@ -565,7 +565,7 @@
   // Note that access to the class isn't checked in case the declaring class is protected and the
   // method has been exposed by a public sub-class
   bool CanAccessMember(Class* access_to, uint32_t member_flags)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Classes can access all of their own members
     if (this == access_to) {
       return true;
@@ -593,30 +593,30 @@
   // referenced by the FieldId in the DexFile in case the declaring class is inaccessible.
   bool CanAccessResolvedField(Class* access_to, ArtField* field,
                               DexCache* dex_cache, uint32_t field_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool CheckResolvedFieldAccess(Class* access_to, ArtField* field,
                                 uint32_t field_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can this class access a resolved method?
   // Note that access to the method's class is checked and this may require looking up the class
   // referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
   bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
                                DexCache* dex_cache, uint32_t method_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   template <InvokeType throw_invoke_type>
   bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method,
                                  uint32_t method_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsSubClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsSubClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can src be assigned to this class? For example, String can be assigned to Object (by an
   // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing
   // downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
   // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
   // to themselves. Classes for primitive types may not assign to each other.
-  ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(src != nullptr);
     if (this == src) {
       // Can always assign to things of the same type.
@@ -633,9 +633,9 @@
     }
   }
 
-  ALWAYS_INLINE Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE Class* GetSuperClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetSuperClass(Class *new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Super class is assigned once, except during class linker initialization.
     Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
     DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
@@ -643,7 +643,7 @@
     SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
   }
 
-  bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HasSuperClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetSuperClass() != nullptr;
   }
 
@@ -651,9 +651,9 @@
     return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
   }
 
-  ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetClassLoader(ClassLoader* new_cl) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static MemberOffset DexCacheOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
@@ -665,83 +665,83 @@
     kDumpClassInitialized = (1 << 2),
   };
 
-  void DumpClass(std::ostream& os, int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DumpClass(std::ostream& os, int flags) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Also updates the dex_cache_strings_ variable from new_dex_cache.
-  void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsBegin(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsEnd(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ArtMethod* GetDirectMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);\
+  ArtMethod* GetDirectMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetDirectMethodsPtr(ArtMethod* new_direct_methods)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Used by image writer.
   void SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Use only when we are allocating and populating the method arrays.
   ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, size_t pointer_size)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_);
   ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, size_t pointer_size)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns the number of static, private, and constructor methods.
-  ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_));
   }
-  void SetNumDirectMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetNumDirectMethods(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
     return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_), num);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsBegin(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsEnd(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetVirtualMethodsPtr(ArtMethod* new_virtual_methods)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns the number of non-inherited virtual methods.
-  ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_));
   }
-  void SetNumVirtualMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetNumVirtualMethods(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
     return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_), num);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* GetVirtualMethodDuringLinking(size_t i, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ALWAYS_INLINE PointerArray* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE PointerArray* GetVTable() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetVTable(PointerArray* new_vtable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetVTable(PointerArray* new_vtable) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static MemberOffset VTableOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
@@ -751,362 +751,362 @@
     return MemberOffset(sizeof(Class));
   }
 
-  bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool ShouldHaveEmbeddedImtAndVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
     return IsInstantiable();
   }
 
-  bool HasVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
 
   static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size);
 
   static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
 
   ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int32_t GetVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int32_t GetEmbeddedVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetEmbeddedVTableLength(int32_t len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Given a method implemented by this class but potentially from a super class, return the
   // specific implementation method for this class.
   ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Given a method implemented by this class' super class, return the specific implementation
   // method for this class.
   ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Given a method implemented by this class, but potentially from a
   // super class or interface, return the specific implementation
   // method for this class.
   ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE;
+      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE;
 
   ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
                                  size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
                                  size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                  size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
                                       size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
                                       size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                       size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
                               size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
                               size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                               size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
                                        size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
                                        size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                        size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
                                size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
                                size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ALWAYS_INLINE int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ALWAYS_INLINE IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get instance fields of the class (See also GetSFields).
-  ArtField* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtField* GetIFields() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetIFields(ArtField* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetIFields(ArtField* new_ifields) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Unchecked edition has no verification flags.
-  void SetIFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetIFieldsUnchecked(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint32_t NumInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_));
   }
 
-  void SetNumInstanceFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetNumInstanceFields(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
     return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_), num);
   }
 
-  ArtField* GetInstanceField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns the number of instance fields containing reference types.
-  uint32_t NumReferenceInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(IsResolved() || IsErroneous());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
   }
 
-  uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(IsLoaded() || IsErroneous());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
   }
 
-  void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Not called within a transaction.
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the offset of the first reference instance field. Other reference instance fields follow.
   MemberOffset GetFirstReferenceInstanceFieldOffset()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns the number of static fields containing reference types.
-  uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t NumReferenceStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(IsResolved() || IsErroneous());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
   }
 
-  uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(IsLoaded() || IsErroneous() || IsRetired());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
   }
 
-  void SetNumReferenceStaticFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetNumReferenceStaticFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Not called within a transaction.
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num);
   }
 
   // Get the offset of the first reference static field. Other reference static fields follow.
   MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the offset of the first reference static field. Other reference static fields follow.
   MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Gets the static fields of the class.
-  ArtField* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtField* GetSFields() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetSFields(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetSFields(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Unchecked edition has no verification flags.
-  void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint32_t NumStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t NumStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_));
   }
 
-  void SetNumStaticFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetNumStaticFields(uint32_t num) SHARED_REQUIRES(Locks::mutator_lock_) {
     return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_), num);
   }
 
   // TODO: uint16_t
-  ArtField* GetStaticField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtField* GetStaticField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Find a static or instance field using the JLS resolution order
   static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
                              const StringPiece& type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Finds the given instance field in this class or a superclass.
   ArtField* FindInstanceField(const StringPiece& name, const StringPiece& type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Finds the given instance field in this class or a superclass; only searches classes that
   // have the same dex cache.
   ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Finds the given static field in this class or a superclass.
   static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
                                    const StringPiece& type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Finds the given static field in this class or a superclass; only searches classes that
   // have the same dex cache.
   static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const DexCache* dex_cache,
                                    uint32_t dex_field_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  pid_t GetClinitThreadId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  pid_t GetClinitThreadId() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(IsIdxLoaded() || IsErroneous());
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_));
   }
 
-  void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  Class* GetVerifyErrorClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Class* GetVerifyErrorClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     // DCHECK(IsErroneous());
     return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_));
   }
 
-  uint16_t GetDexClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint16_t GetDexClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
   }
 
-  void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Not called within a transaction.
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx);
   }
 
-  uint16_t GetDexTypeIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint16_t GetDexTypeIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_));
   }
 
-  void SetDexTypeIndex(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetDexTypeIndex(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Not called within a transaction.
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx);
   }
 
-  static Class* GetJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static Class* GetJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(HasJavaLangClass());
     return java_lang_Class_.Read();
   }
 
-  static bool HasJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static bool HasJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return !java_lang_Class_.IsNull();
   }
 
   // Can't call this SetClass or else it gets called instead of Object::SetClass in places.
-  static void SetClassClass(Class* java_lang_Class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void SetClassClass(Class* java_lang_Class) SHARED_REQUIRES(Locks::mutator_lock_);
   static void ResetClass();
   static void VisitRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // VisitNativeRoots visits roots that are keyed off the native pointers, such as ArtFields
   // and ArtMethods.
   template<class Visitor>
   void VisitNativeRoots(Visitor& visitor, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // When the class is verified, set the kAccPreverified flag on each method.
   void SetPreverifiedFlagOnAllMethods(size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kVisitClass, typename Visitor>
   void VisitReferences(mirror::Class* klass, const Visitor& visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the descriptor of the class. In a few cases a std::string is required; rather than
   // always creating one, the storage argument is populated and its internal c_str() returned.
   // We do this to avoid memory allocation in the common case.
-  const char* GetDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const char* GetDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const char* GetArrayDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const char* GetArrayDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool DescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
                                            uint32_t idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const char* GetSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const char* GetSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  std::string GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string GetLocation() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const DexFile& GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const DexFile::TypeList* GetInterfaceTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const DexFile::TypeList* GetInterfaceTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Asserts we are initialized or initializing in the given thread.
   void AssertInitializedOrInitializingInThread(Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize],
-                size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // For proxy class only.
-  ObjectArray<Class>* GetInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // For proxy class only.
-  ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // For reference class only.
-  MemberOffset GetDisableIntrinsicFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  MemberOffset GetSlowPathFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool GetSlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetSlowPath(bool enabled) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+  MemberOffset GetSlowPathFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+  bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ObjectArray<String>* GetDexCacheStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ObjectArray<String>* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_);
   void SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static MemberOffset DexCacheStringsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
   }
@@ -1114,7 +1114,7 @@
   // May cause thread suspension due to EqualParameters.
   ArtMethod* GetDeclaredConstructor(
       Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
   // fence.
@@ -1124,7 +1124,7 @@
     }
 
     void operator()(mirror::Object* obj, size_t usable_size) const
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_);
 
    private:
     const uint32_t class_size_;
@@ -1133,7 +1133,7 @@
   };
 
   // Returns true if the class loader is null, i.e. the class loader is the bootstrap class loader.
-  bool IsBootStrapClassLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsBootStrapClassLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetClassLoader() == nullptr;
   }
 
@@ -1146,34 +1146,34 @@
   }
 
   ALWAYS_INLINE ArtMethod* GetDirectMethodsPtrUnchecked()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtrUnchecked()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetVerifyErrorClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool throw_on_failure, bool use_referrers_cache>
   bool ResolvedFieldAccessTest(Class* access_to, ArtField* field,
                                uint32_t field_idx, DexCache* dex_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
   bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method,
                                 uint32_t method_idx, DexCache* dex_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool Implements(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsArrayAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool Implements(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsArrayAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckObjectAlloc() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Unchecked editions are for root visiting.
-  ArtField* GetSFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  ArtField* GetIFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtField* GetSFieldsUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
+  ArtField* GetIFieldsUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Check that the pointer size matches the one in the class linker.
   ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
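
The class.h changes above are the pattern for the whole migration: SHARED_LOCKS_REQUIRED
becomes the capability form SHARED_REQUIRES, and LOCKS_EXCLUDED becomes the negative
capability REQUIRES(!...), which -Wthread-safety-negative can check at call sites instead
of merely documenting. A minimal self-contained sketch of the semantics, using simplified
stand-ins for ART's macros (the real definitions live in ART's base headers):

    // sketch.cc: check with clang++ -fsyntax-only -Wthread-safety -Wthread-safety-negative sketch.cc
    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

    struct CAPABILITY("mutex") Mutex {
      void Lock() ACQUIRE();
      void Unlock() RELEASE();
    };

    Mutex mutator_lock_;
    int guarded_value;

    // Callers must hold mutator_lock_, shared or exclusive (exclusive implies shared).
    int Read() SHARED_REQUIRES(mutator_lock_) { return guarded_value; }

    // Callers must provably NOT hold mutator_lock_: the negative capability that
    // replaces LOCKS_EXCLUDED and is enforced by -Wthread-safety-negative.
    void MightSuspend() REQUIRES(!mutator_lock_) {}

    void Caller() REQUIRES(!mutator_lock_) {
      MightSuspend();        // OK: our own annotation proves the lock is not held.
      mutator_lock_.Lock();
      Read();                // OK: an exclusive hold satisfies the shared requirement.
      // MightSuspend();     // Would now warn: the negative capability no longer holds.
      mutator_lock_.Unlock();
    }
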
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index b10a296..134f1cd 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -32,7 +32,7 @@
   static constexpr uint32_t InstanceSize() {
     return sizeof(ClassLoader);
   }
-  ClassLoader* GetParent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
   }
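
ClassLoader only mirrors the parent_ field here; a null loader denotes the bootstrap
class loader (see IsBootStrapClassLoaded above). A hypothetical caller-side walk of the
parent chain, not an ART API, illustrating how GetParent composes with the new annotation:

    // Hypothetical helper: true if maybe_ancestor appears on loader's parent chain.
    bool IsAncestorLoader(mirror::ClassLoader* loader, mirror::ClassLoader* maybe_ancestor)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      for (mirror::ClassLoader* cur = loader; cur != nullptr; cur = cur->GetParent()) {
        if (cur == maybe_ancestor) {
          return true;
        }
      }
      return false;  // reached the bootstrap loader (nullptr) without a match
    }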
 
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 0ce83ec..ba49a15 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -48,12 +48,12 @@
 
   void Init(const DexFile* dex_file, String* location, ObjectArray<String>* strings,
             ObjectArray<Class>* types, PointerArray* methods, PointerArray* fields,
-            size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+            size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void Fixup(ArtMethod* trampoline, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  String* GetLocation() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
   }
 
@@ -73,76 +73,76 @@
     return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
   }
 
-  size_t NumStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumStrings() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStrings()->GetLength();
   }
 
-  size_t NumResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetResolvedTypes()->GetLength();
   }
 
-  size_t NumResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetResolvedMethods()->GetLength();
   }
 
-  size_t NumResolvedFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumResolvedFields() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetResolvedFields()->GetLength();
   }
 
-  String* GetResolvedString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  String* GetResolvedString(uint32_t string_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetStrings()->Get(string_idx);
   }
 
   void SetResolvedString(uint32_t string_idx, String* resolved) ALWAYS_INLINE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // TODO: default transaction support.
     GetStrings()->Set(string_idx, resolved);
   }
 
   Class* GetResolvedType(uint32_t type_idx) ALWAYS_INLINE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetResolvedTypes()->Get(type_idx);
   }
 
   void SetResolvedType(uint32_t type_idx, Class* resolved)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, size_t ptr_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved, size_t ptr_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Pointer-sized variant, used for patching.
   ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, size_t ptr_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Pointer-sized variant, used for patching.
   ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<ObjectArray<String>>(StringsOffset());
   }
 
-  ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<ObjectArray<Class>>(
         OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_));
   }
 
-  PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<PointerArray>(ResolvedMethodsOffset());
   }
 
-  PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<PointerArray>(ResolvedFieldsOffset());
   }
 
-  const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const DexFile* GetDexFile() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
   }
 
-  void SetDexFile(const DexFile* dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  void SetDexFile(const DexFile* dex_file) SHARED_REQUIRES(Locks::mutator_lock_)
       ALWAYS_INLINE {
     return SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
   }
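
DexCache is four parallel index-to-entry caches (strings, types, methods, fields); the
method and field caches are PointerArrays read at an explicit pointer size so the same
image layout works for 32- and 64-bit targets. A hypothetical probe-then-fill idiom built
on the accessors above (ResolveTypeSlowPath is an invented stand-in for the real
resolution path, not an ART API):

    mirror::Class* ResolveTypeSlowPath(mirror::DexCache*, uint32_t);  // invented stand-in

    mirror::Class* LookupOrResolveType(mirror::DexCache* dex_cache, uint32_t type_idx)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      mirror::Class* klass = dex_cache->GetResolvedType(type_idx);  // fast path: cache hit
      if (klass == nullptr) {
        klass = ResolveTypeSlowPath(dex_cache, type_idx);  // miss: resolve for real
        dex_cache->SetResolvedType(type_idx, klass);       // fill the cache for next time
      }
      return klass;
    }
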
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index d927f0c..b5a954f 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -36,66 +36,66 @@
 // C++ mirror of java.lang.reflect.Field.
 class MANAGED Field : public AccessibleObject {
  public:
-  static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return static_class_.Read();
   }
 
-  static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return array_class_.Read();
   }
 
-  ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_));
   }
 
-  mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_));
   }
 
-  uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_));
   }
 
-  bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccStatic) != 0;
   }
 
-  bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccVolatile) != 0;
   }
 
   ALWAYS_INLINE Primitive::Type GetTypeAsPrimitiveType()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetType()->GetPrimitiveType();
   }
 
-  mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<mirror::Class>(OFFSET_OF_OBJECT_MEMBER(Field, type_));
   }
 
-  int32_t GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  int32_t GetOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
   }
 
-  static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Slow; try to use only for PrettyField and such.
-  ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kTransactionActive = false>
   static mirror::Field* CreateFromArtField(Thread* self, ArtField* field,
                                            bool force_resolve)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   HeapReference<mirror::Class> declaring_class_;
@@ -105,27 +105,27 @@
   int32_t offset_;
 
   template<bool kTransactionActive>
-  void SetDeclaringClass(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetDeclaringClass(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
     SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
   }
 
   template<bool kTransactionActive>
-  void SetType(mirror::Class* type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetType(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
     SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
   }
 
   template<bool kTransactionActive>
-  void SetAccessFlags(uint32_t flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetAccessFlags(uint32_t flags) SHARED_REQUIRES(Locks::mutator_lock_) {
     SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), flags);
   }
 
   template<bool kTransactionActive>
-  void SetDexFieldIndex(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetDexFieldIndex(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_) {
     SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx);
   }
 
   template<bool kTransactionActive>
-  void SetOffset(uint32_t offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetOffset(uint32_t offset) SHARED_REQUIRES(Locks::mutator_lock_) {
     SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset);
   }
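
The private setters above are all templated on kTransactionActive so that, when the flag
is set, each store can first be recorded for rollback; the branch disappears at compile
time in the common non-transactional instantiation. A standalone sketch of the pattern
(RecordWriteForRollback is an invented stand-in for ART's transaction log):

    #include <cstdint>

    void RecordWriteForRollback(uint32_t* addr, uint32_t old_value);  // invented log hook

    template <bool kTransactionActive>
    void SetWord(uint32_t* addr, uint32_t new_value) {
      if (kTransactionActive) {                // resolved at compile time
        RecordWriteForRollback(addr, *addr);   // remember the old value for rollback
      }
      *addr = new_value;
    }

    // Usage: SetWord<true>(&word, v) inside a transaction, SetWord<false>(&word, v) otherwise.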
 
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 1ea5bee..b21ecdf 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -25,34 +25,34 @@
 
 class MANAGED IfTable FINAL : public ObjectArray<Object> {
  public:
-  ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
     Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
     DCHECK(interface != nullptr);
     return interface;
   }
 
   ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  PointerArray* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  PointerArray* GetMethodArray(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
     auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray));
     DCHECK(method_array != nullptr);
     return method_array;
   }
 
-  size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t GetMethodArrayCount(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
     auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray));
     return method_array == nullptr ? 0u : method_array->GetLength();
   }
 
-  void SetMethodArray(int32_t i, PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetMethodArray(int32_t i, PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(arr != nullptr);
     auto idx = i * kMax + kMethodArray;
     DCHECK(Get(idx) == nullptr);
     Set<false>(idx, arr);
   }
 
-  size_t Count() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t Count() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetLength() / kMax;
   }
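
IfTable is a flat ObjectArray in which each implemented interface occupies kMax adjacent
slots: one for the interface Class and one for the array of resolved implementations,
hence the (i * kMax) + k... indexing above and Count() == GetLength() / kMax. A sketch of
the layout arithmetic, with slot values assumed from the accessors:

    #include <cstdint>

    // Assumed layout: [iface0, methods0, iface1, methods1, ...]
    enum IfTableSlot { kInterface = 0, kMethodArray = 1, kMax = 2 };  // values assumed

    constexpr int32_t InterfaceIndex(int32_t i)   { return i * kMax + kInterface; }
    constexpr int32_t MethodArrayIndex(int32_t i) { return i * kMax + kMethodArray; }
    constexpr int32_t InterfaceCount(int32_t len) { return len / kMax; }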
 
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 42c76c0..fe021db 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -29,25 +29,25 @@
 class MANAGED Method : public AbstractMethod {
  public:
   static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return static_class_.Read();
   }
 
-  static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return array_class_.Read();
   }
 
-  static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   static GcRoot<Class> static_class_;  // java.lang.reflect.Method.class.
@@ -60,25 +60,25 @@
 class MANAGED Constructor: public AbstractMethod {
  public:
   static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return static_class_.Read();
   }
 
-  static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return array_class_.Read();
   }
 
-  static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   static GcRoot<Class> static_class_;  // java.lang.reflect.Constructor.class.
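
Method and Constructor each pin their java.lang.reflect class (and the corresponding
array class) in a static GcRoot, which is why every class accessor above is paired with
a VisitRoots hook: a moving collector must be able to relocate the cached references.
A simplified holder sketch (assumption: the real GcRoot also routes Read() through a
read barrier):

    template <typename T>
    class GcRootSketch {
     public:
      T* Read() const { return ptr_; }                               // real GcRoot adds a read barrier
      void VisitRoot(void (*visitor)(T** root)) { visitor(&ptr_); }  // GC may rewrite ptr_
     private:
      T* ptr_ = nullptr;
    };
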
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index e019d5a..c5610b5 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -477,7 +477,7 @@
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
     bool kIsVolatile>
 inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
   }
@@ -495,7 +495,7 @@
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
     bool kIsVolatile>
 inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (kCheckTransaction) {
     DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
   }
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index b177e2f..27dd743 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -47,7 +47,7 @@
       : dest_obj_(dest_obj) {}
 
   void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
-      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
     // GetFieldObject() contains a read barrier (RB).
     Object* ref = obj->GetFieldObject<Object>(offset);
     // No write barrier (WB) here, as a large object space does not have a card table.
@@ -56,7 +56,7 @@
   }
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
     // Copy java.lang.ref.Reference.referent which isn't visited in
     // Object::VisitReferences().
     DCHECK(klass->IsTypeOfReferenceClass());
@@ -107,7 +107,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
   }
 
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f1c96b5..8b541fd 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -84,40 +84,39 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ALWAYS_INLINE Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE Class* GetClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  Object* GetReadBarrierPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
 #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
   NO_RETURN
 #endif
-  void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetReadBarrierPointer(Object* rb_ptr) SHARED_REQUIRES(Locks::mutator_lock_);
 #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
   NO_RETURN
 #endif
   bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void AssertReadBarrierPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
   // invoke-interface to detect incompatible interface types.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool VerifierInstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool VerifierInstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Object* Clone(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
 
   int32_t IdentityHashCode() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   static MemberOffset MonitorOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
@@ -126,298 +125,298 @@
   // The as_volatile parameter can be false if the mutators are suspended; this is an
   // optimization since it avoids the barriers.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  LockWord GetLockWord(bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  LockWord GetLockWord(bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void SetLockWord(LockWord new_val, bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetLockWord(LockWord new_val, bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
   bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   uint32_t GetLockOwnerThreadId();
 
-  mirror::Object* MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  mirror::Object* MonitorEnter(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
       EXCLUSIVE_LOCK_FUNCTION();
-  bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  bool MonitorExit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
       UNLOCK_FUNCTION();
-  void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+  void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+  void Wait(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+  void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsClass() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Class* AsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Class* AsClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ObjectArray<T>* AsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsArrayInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsArrayInstance() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Array* AsArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Array* AsArray() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  BooleanArray* AsBooleanArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  BooleanArray* AsBooleanArray() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ByteArray* AsByteArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ByteArray* AsByteArray() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ByteArray* AsByteSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ByteArray* AsByteSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  CharArray* AsCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  CharArray* AsCharArray() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ShortArray* AsShortArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ShortArray* AsShortArray() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ShortArray* AsShortSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  IntArray* AsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  LongArray* AsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  FloatArray* AsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  FloatArray* AsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  DoubleArray* AsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  bool IsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsString() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  String* AsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  String* AsString() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Throwable* AsThrowable() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  Reference* AsReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Reference* AsReference() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsWeakReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsSoftReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsFinalizerReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  FinalizerReference* AsFinalizerReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  FinalizerReference* AsFinalizerReference() SHARED_REQUIRES(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsPhantomReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Accessors for Java type fields.
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
       ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false>
   ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
       ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
                                                 Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                                    Object* old_value,
                                                                    Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
                                                   Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                                      Object* old_value,
                                                                      Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
                                                           int32_t old_value, int32_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value,
                              int32_t new_value) ALWAYS_INLINE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
                                               int32_t new_value) ALWAYS_INLINE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
                                             int64_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
                                               int64_t new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
   void SetFieldPtr(MemberOffset field_offset, T new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
         field_offset, new_value, sizeof(void*));
   }
@@ -426,7 +425,7 @@
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
   ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
                                          size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
     if (pointer_size == 4) {
       intptr_t ptr  = reinterpret_cast<intptr_t>(new_value);
@@ -439,13 +438,13 @@
     }
   }
   // TODO fix thread safety analysis broken by the use of template. This should be
-  // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+  // SHARED_REQUIRES(Locks::mutator_lock_).
   template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
       typename Visitor, typename JavaLangRefVisitor = VoidFunctor>
   void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
       NO_THREAD_SAFETY_ANALYSIS;
 
-  ArtField* FindFieldByOffset(MemberOffset offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtField* FindFieldByOffset(MemberOffset offset) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Used by object_test.
   static void SetHashCodeSeed(uint32_t new_seed);
@@ -456,13 +455,13 @@
   // Accessors for non-Java type fields
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   T GetFieldPtr(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
   }
 
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
     if (pointer_size == 4) {
       return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
@@ -480,25 +479,25 @@
       NO_THREAD_SAFETY_ANALYSIS;
   template<bool kVisitClass, typename Visitor>
   void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   template<bool kVisitClass, typename Visitor>
   void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   template<typename kSize, bool kIsVolatile>
   ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   template<typename kSize, bool kIsVolatile>
   ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Verify the type correctness of stores to fields.
   // TODO: This can cause thread suspension and isn't moving GC safe.
   void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (kCheckFieldAssignments) {
       CheckFieldAssignmentImpl(field_offset, new_value);
     }
@@ -509,7 +508,7 @@
   // Class::CopyOf().
   static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
                             size_t num_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static Atomic<uint32_t> hash_code_seed;
 
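For reference, the renamed macros are thin wrappers over clang's capability
attributes; SHARED_LOCKS_REQUIRED and SHARED_REQUIRES express the same
contract ("caller holds this lock at least shared"), only the spelling is
new. A minimal sketch of the mapping, assuming the usual
THREAD_ANNOTATION_ATTRIBUTE__ indirection (ART's real definitions live in
base/macros.h and may differ in detail):

    #if defined(__clang__)
    #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
    #else
    #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op for non-clang builds
    #endif

    // REQUIRES accepts positive ("must hold") and negative ("must not
    // hold") capability expressions; SHARED_REQUIRES is the shared variant.
    #define REQUIRES(...) \
        THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
    #define SHARED_REQUIRES(...) \
        THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
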
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 5eddc18..cee17e9 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -32,21 +32,21 @@
 
   static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
                                gc::AllocatorType allocator_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  T* Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  T* Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if the object can be stored into the array. If not, throws
   // an ArrayStoreException and returns false.
-  // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+  // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
 
-  ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+  ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_REQUIRES(Locks::mutator_lock_);
+  // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
@@ -54,37 +54,37 @@
   // Set element without bound and element type checks, to be used in limited
   // circumstances, such as during boot image writing.
   // TODO fix thread safety analysis broken by the use of template. This should be
-  // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+  // SHARED_REQUIRES(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
   // TODO fix thread safety analysis broken by the use of template. This should be
-  // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+  // SHARED_REQUIRES(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
       NO_THREAD_SAFETY_ANALYSIS;
 
-  ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
   void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
-                         int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                         int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Copy src into this array assuming no overlap and without assignability checks.
   void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
-                        int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                        int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Copy src into this array with assignability checks.
   void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
                                 int32_t count, bool throw_exception)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // TODO fix thread safety analysis broken by the use of template. This should be
-  // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+  // SHARED_REQUIRES(Locks::mutator_lock_).
   template<const bool kVisitClass, typename Visitor>
   void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
 
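A note on the recurring TODOs above: NO_THREAD_SAFETY_ANALYSIS suppresses
checking of the function body and leaves its call sites unconstrained, so
wherever the analysis mishandles a templated member the real precondition
survives only as a comment. Side by side, using declarations from this
header:

    // Checked: callers must hold mutator_lock_ at least shared, or clang warns.
    ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);

    // Unchecked: same intended contract, recorded only in the TODO until the
    // analysis copes with the template instantiation.
    template<bool kTransactionActive, bool kCheckTransaction = true,
        VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
    ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
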
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 055be85..2a5c88e 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -33,11 +33,11 @@
 template<bool kPoisonReferences, class MirrorType>
 class MANAGED ObjectReference {
  public:
-  MirrorType* AsMirrorPtr() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  MirrorType* AsMirrorPtr() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return UnCompress();
   }
 
-  void Assign(MirrorType* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Assign(MirrorType* other) SHARED_REQUIRES(Locks::mutator_lock_) {
     reference_ = Compress(other);
   }
 
@@ -56,18 +56,18 @@
 
  protected:
   ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : reference_(Compress(mirror_ptr)) {
   }
 
   // Compress reference to its bit representation.
-  static uint32_t Compress(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static uint32_t Compress(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
     uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
     return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
   }
 
   // Uncompress an encoded reference from its bit representation.
-  MirrorType* UnCompress() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  MirrorType* UnCompress() const SHARED_REQUIRES(Locks::mutator_lock_) {
     uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_;
     return reinterpret_cast<MirrorType*>(as_bits);
   }
@@ -83,11 +83,11 @@
 class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, MirrorType> {
  public:
   static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return HeapReference<MirrorType>(mirror_ptr);
   }
  private:
-  HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_)
       : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
 };
 
@@ -95,16 +95,16 @@
 template<class MirrorType>
 class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
  public:
-  CompressedReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  CompressedReference<MirrorType>() SHARED_REQUIRES(Locks::mutator_lock_)
       : mirror::ObjectReference<false, MirrorType>(nullptr) {}
 
   static CompressedReference<MirrorType> FromMirrorPtr(MirrorType* p)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     return CompressedReference<MirrorType>(p);
   }
 
  private:
-  CompressedReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  CompressedReference<MirrorType>(MirrorType* p) SHARED_REQUIRES(Locks::mutator_lock_)
       : mirror::ObjectReference<false, MirrorType>(p) {}
 };
 
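The unary minus in Compress/UnCompress is the entire reference-poisoning
scheme: with kPoisonReferences set, the stored 32-bit word is the
two's-complement negation of the address, so a stray load that skips
UnCompress yields a non-dereferenceable value instead of a valid pointer.
A self-contained round-trip check (illustrative address only):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t addr = 0x12345678;                     // pretend heap address
      uint32_t stored = static_cast<uint32_t>(-addr);  // Compress, poisoned
      uintptr_t back =
          static_cast<uintptr_t>(static_cast<uint32_t>(-stored));  // UnCompress
      assert(back == addr);                            // lossless round trip
      return 0;
    }
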
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 85ea28f..f5a0445 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -48,7 +48,7 @@
                     const char* utf8_in,
                     const char* utf16_expected_le,
                     int32_t expected_hash)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     std::unique_ptr<uint16_t[]> utf16_expected(new uint16_t[expected_utf16_length]);
     for (int32_t i = 0; i < expected_utf16_length; i++) {
       uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) |
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 4bbdb99..51ae760 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -62,49 +62,49 @@
     return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
   }
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
         ReferentOffset());
   }
   template<bool kTransactionActive>
-  void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) {
     SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
   }
   template<bool kTransactionActive>
-  void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
     SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
   }
   // Volatile read/write is not necessary since the java pending next is only accessed from
   // the java threads for cleared references. Once these cleared references have a null referent,
   // we never end up reading their pending next from the GC again.
-  Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<Reference>(PendingNextOffset());
   }
   template<bool kTransactionActive>
-  void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetPendingNext(Reference* pending_next) SHARED_REQUIRES(Locks::mutator_lock_) {
     SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next);
   }
 
-  bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool IsEnqueued() SHARED_REQUIRES(Locks::mutator_lock_) {
     // Since the references are stored as cyclic lists it means that once enqueued, the pending
     // next is always non-null.
     return GetPendingNext() != nullptr;
   }
 
-  bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsEnqueuable() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  static Class* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!java_lang_ref_Reference_.IsNull());
     return java_lang_ref_Reference_.Read<kReadBarrierOption>();
   }
   static void SetClass(Class* klass);
   static void ResetClass();
-  static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   // Note: This avoids a read barrier, it should only be used by the GC.
-  HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
   }
 
@@ -130,10 +130,10 @@
   }
 
   template<bool kTransactionActive>
-  void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) {
     return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
   }
-  Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object>(ZombieOffset());
   }
 
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index dc7131e..d58c0b6 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -31,32 +31,32 @@
 // C++ mirror of java.lang.StackTraceElement
 class MANAGED StackTraceElement FINAL : public Object {
  public:
-  String* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  String* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
   }
 
-  String* GetMethodName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  String* GetMethodName() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_));
   }
 
-  String* GetFileName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  String* GetFileName() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_));
   }
 
-  int32_t GetLineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  int32_t GetLineNumber() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
   }
 
   static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
                                   Handle<String> method_name, Handle<String> file_name,
                                   int32_t line_number)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void SetClass(Class* java_lang_StackTraceElement);
   static void ResetClass();
   static void VisitRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static Class* GetStackTraceElement() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  static Class* GetStackTraceElement() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!java_lang_StackTraceElement_.IsNull());
     return java_lang_StackTraceElement_.Read();
   }
@@ -71,7 +71,7 @@
   template<bool kTransactionActive>
   void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
             int32_t line_number)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static GcRoot<Class> java_lang_StackTraceElement_;
 
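The SetClass/ResetClass/VisitRoots triple with a GcRoot<Class> member is the
standard pattern for caching a class on the native side: the cached pointer
must be reported as a root so a moving collector can update it. A hedged
sketch of what the .cc side presumably looks like (visitor call and root
category assumed, not taken from this change):

    void StackTraceElement::VisitRoots(RootVisitor* visitor) {
      // Report the cached class so the GC can relocate it.
      java_lang_StackTraceElement_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
    }
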
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index b689057..3a39f58 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -42,7 +42,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
@@ -61,7 +61,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
@@ -88,7 +88,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
@@ -111,7 +111,7 @@
   }
 
   void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     String* string = down_cast<String*>(obj);
     string->SetCount(count_);
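
The operator() bodies above are pre-fence visitors: String::Alloc (see
string.h below) invokes them on the raw allocation before it is published,
which is why they must not call AsString; the object is in neither the live
bitmap nor the allocation stack yet. A hedged sketch of the call shape, with
visitor and allocator names assumed from context:

    SetStringCountVisitor visitor(utf16_length);  // fills in count_ pre-fence
    String* s = String::Alloc<true>(
        self, utf16_length,
        Runtime::Current()->GetHeap()->GetCurrentAllocator(), visitor);
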
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index af06385..b4a2d1d 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -49,87 +49,87 @@
     return OFFSET_OF_OBJECT_MEMBER(String, value_);
   }
 
-  uint16_t* GetValue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint16_t* GetValue() SHARED_REQUIRES(Locks::mutator_lock_) {
     return &value_[0];
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(String, count_));
   }
 
-  void SetCount(int32_t new_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetCount(int32_t new_count) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Count is invariant so use non-transactional mode. Also disable check as we may run inside
     // a transaction.
     DCHECK_LE(0, new_count);
     SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
   }
 
-  int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t GetHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Computes, stores, and returns the hash code.
-  int32_t ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t ComputeHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int32_t GetUtfLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t GetUtfLength() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint16_t CharAt(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint16_t CharAt(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetCharAt(int32_t index, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetCharAt(int32_t index, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  String* Intern() SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kIsInstrumented, typename PreFenceVisitor>
   ALWAYS_INLINE static String* Alloc(Thread* self, int32_t utf16_length,
                                      gc::AllocatorType allocator_type,
                                      const PreFenceVisitor& pre_fence_visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kIsInstrumented>
   ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length,
                                                   Handle<ByteArray> array, int32_t offset,
                                                   int32_t high_byte,
                                                   gc::AllocatorType allocator_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kIsInstrumented>
   ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t count,
                                                   Handle<CharArray> array, int32_t offset,
                                                   gc::AllocatorType allocator_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <bool kIsInstrumented>
   ALWAYS_INLINE static String* AllocFromString(Thread* self, int32_t string_length,
                                                Handle<String> string, int32_t offset,
                                                gc::AllocatorType allocator_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static String* AllocFromStrings(Thread* self, Handle<String> string, Handle<String> string2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static String* AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static String* AllocFromModifiedUtf8(Thread* self, const char* utf)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // TODO: This is only used in the interpreter to compare against
   // entries from a dex files constant pool (ArtField names). Should
   // we unify this with Equals(const StringPiece&); ?
-  bool Equals(const char* modified_utf8) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool Equals(const char* modified_utf8) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // TODO: This is only used to compare DexCache.location with
   // a dex_file's location (which is an std::string). Do we really
   // need this in mirror::String just for that one usage ?
   bool Equals(const StringPiece& modified_utf8)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool Equals(String* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool Equals(String* that) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Compare UTF-16 code point values not in a locale-sensitive manner
   int Compare(int32_t utf16_length, const char* utf8_data_in);
@@ -137,21 +137,21 @@
   // TODO: do we need this overload? give it a more intention-revealing name.
   bool Equals(const uint16_t* that_chars, int32_t that_offset,
               int32_t that_length)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Create a modified UTF-8 encoded std::string from a java/lang/String object.
-  std::string ToModifiedUtf8() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string ToModifiedUtf8() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t CompareTo(String* other) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  CharArray* ToCharArray(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  CharArray* ToCharArray(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static Class* GetJavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static Class* GetJavaLangString() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!java_lang_String_.IsNull());
     return java_lang_String_.Read();
   }
@@ -159,10 +159,10 @@
   static void SetClass(Class* java_lang_String);
   static void ResetClass();
   static void VisitRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  void SetHashCode(int32_t new_hash_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) {
     // Hash code is invariant so use non-transactional mode. Also disable check as we may run inside
     // a transaction.
     DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_)));
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 1c21edb..e8633de 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -53,7 +53,7 @@
   }
 }
 
-void Throwable::SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void Throwable::SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_) {
   CHECK(state != nullptr);
   if (Runtime::Current()->IsActiveTransaction()) {
     SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), state);
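
SetStackState is the transaction-aware write pattern in miniature: the bool
template parameter selects whether the volatile store is recorded for
rollback, chosen at runtime via Runtime::IsActiveTransaction(). The hunk's
context ends before the matching else branch, which presumably mirrors the
call with the template argument false:

    if (Runtime::Current()->IsActiveTransaction()) {
      SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), state);
    } else {  // assumed mirror of the branch above
      SetFieldObjectVolatile<false>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), state);
    }
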
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 9cc0b6f..0f488dc 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -31,38 +31,38 @@
 // C++ mirror of java.lang.Throwable
 class MANAGED Throwable : public Object {
  public:
-  void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetDetailMessage(String* new_detail_message) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  String* GetDetailMessage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  String* GetDetailMessage() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
   }
 
-  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // This is a runtime version of initCause, you shouldn't use it if initCause may have been
   // overridden. Also it asserts rather than throwing exceptions. Currently this is only used
   // in cases like the verifier where the checks cannot fail and initCause isn't overridden.
-  void SetCause(Throwable* cause) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsCheckedException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetCause(Throwable* cause) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsCheckedException() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static Class* GetJavaLangThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static Class* GetJavaLangThrowable() SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!java_lang_Throwable_.IsNull());
     return java_lang_Throwable_.Read();
   }
 
-  int32_t GetStackDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  int32_t GetStackDepth() SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void SetClass(Class* java_lang_Throwable);
   static void ResetClass();
   static void VisitRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  Object* GetStackState() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Object* GetStackState() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_));
   }
-  Object* GetStackTrace() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Object* GetStackTrace() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_trace_));
   }
 
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index fd9c1b1..da6ee25 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -298,7 +298,7 @@
                                               __attribute__((format(printf, 1, 2)));
 
 static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   va_list args;
   va_start(args, fmt);
   Thread* self = Thread::Current();
@@ -667,11 +667,9 @@
     // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
     self->SetMonitorEnterObject(obj.Get());
     bool timed_out;
-    Thread* owner;
-    {
-      ScopedThreadStateChange tsc(self, kBlocked);
-      owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
-    }
+    self->TransitionFromRunnableToSuspended(kBlocked);
+    Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
+    self->TransitionFromSuspendedToRunnable();
     if (owner != nullptr) {
       // We succeeded in suspending the thread, check the lock's status didn't change.
       lock_word = obj->GetLockWord(true);
@@ -1083,7 +1081,7 @@
   }
 }
 
-bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) {
   MutexLock mu(Thread::Current(), monitor_lock_);
   return owner_ != nullptr;
 }
@@ -1189,7 +1187,7 @@
   MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
 
   virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (Monitor::Deflate(self_, object)) {
       DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
       ++deflate_count_;
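
The inflation hunk above swaps ScopedThreadStateChange for explicit
transitions, plausibly because the RAII helper releases and reacquires
mutator_lock_ inside its constructor and destructor, where the analysis
cannot follow it; the paired calls carry the annotations directly. Assumed
shape of the declarations in thread.h (a sketch, not verified against this
change):

    // Leaving runnable gives up the shared mutator lock...
    void TransitionFromRunnableToSuspended(ThreadState new_state)
        UNLOCK_FUNCTION(Locks::mutator_lock_);
    // ...and returning to runnable takes it back, so the analysis sees a
    // balanced release/acquire around the blocking SuspendThreadByThreadId.
    void TransitionFromSuspendedToRunnable()
        SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
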
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 09a6cb6..3ca8954 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -62,34 +62,36 @@
   static uint32_t GetLockOwnerThreadId(mirror::Object* obj)
       NO_THREAD_SAFETY_ANALYSIS;  // TODO: Reading lock owner without holding lock is racy.
 
+  // NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
   static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj)
       EXCLUSIVE_LOCK_FUNCTION(obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static bool MonitorExit(Thread* thread, mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      UNLOCK_FUNCTION(obj);
+      SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
 
-  static void Notify(Thread* self, mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
+  static bool MonitorExit(Thread* thread, mirror::Object* obj)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS;
+
+  static void Notify(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
     DoNotify(self, obj, false);
   }
-  static void NotifyAll(Thread* self, mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static void NotifyAll(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
     DoNotify(self, obj, true);
   }
 
   // Object.wait().  Also called for class init.
+  // NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
   static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
                    bool interruptShouldThrow, ThreadState why)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
 
   static void DescribeWait(std::ostream& os, const Thread* thread)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
   static mirror::Object* GetContendedMonitor(Thread* thread)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Calls 'callback' once for each lock held in the single stack frame represented by
   // the current state of 'stack_visitor'.
@@ -97,12 +99,12 @@
   // is necessary when we have already aborted but want to dump the stack as much as we can.
   static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
                          void* callback_context, bool abort_on_failure = true)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool IsValidLockWord(LockWord lock_word);
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* GetObject() SHARED_REQUIRES(Locks::mutator_lock_) {
     return obj_.Read<kReadBarrierOption>();
   }
 
@@ -114,7 +116,7 @@
 
   int32_t GetHashCode();
 
-  bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_lock_);
 
   bool HasHashCode() const {
     return hash_code_.LoadRelaxed() != 0;
@@ -126,12 +128,13 @@
 
   // Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
   static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
-                                uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
+                                uint32_t hash_code) SHARED_REQUIRES(Locks::mutator_lock_);
 
+  // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
+  // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
+  // NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_.
   static bool Deflate(Thread* self, mirror::Object* obj)
-      // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
-      // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
 
 #ifndef __LP64__
   void* operator new(size_t size) {
@@ -149,57 +152,58 @@
 
  private:
   explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_);
   explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
-                   MonitorId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                   MonitorId id) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Install the monitor into its object, may fail if another thread installs a different monitor
   // first.
   bool Install(Thread* self)
-      LOCKS_EXCLUDED(monitor_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!monitor_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Links a thread into a monitor's wait set.  The monitor lock must be held by the caller of this
   // routine.
-  void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
+  void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_);
 
   // Unlinks a thread from a monitor's wait set.  The monitor lock must be held by the caller of
   // this routine.
-  void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
+  void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);
 
   // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
   // calling thread must own the lock or the owner must be suspended. There's a race with other
   // threads inflating the lock, installing hash codes and spurious failures. The caller should
   // re-read the lock word following the call.
   static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      NO_THREAD_SAFETY_ANALYSIS;  // For m->Install(self).
 
   void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
                           const char* owner_filename, uint32_t owner_line_number)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner,
                            Monitor* mon)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void Lock(Thread* self)
-      LOCKS_EXCLUDED(monitor_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!monitor_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool Unlock(Thread* thread)
-      LOCKS_EXCLUDED(monitor_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!monitor_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;  // For mon->Notify.
 
   void Notify(Thread* self)
-      LOCKS_EXCLUDED(monitor_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!monitor_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void NotifyAll(Thread* self)
-      LOCKS_EXCLUDED(monitor_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!monitor_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
 
   // Wait on a monitor until timeout, interrupt, or notification.  Used for Object.wait() and
@@ -222,15 +226,15 @@
   // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
   // of the 32-bit time epoch.
   void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
-      LOCKS_EXCLUDED(monitor_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!monitor_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Translates the provided method and pc into its declaring class' source file and line number.
   void TranslateLocation(ArtMethod* method, uint32_t pc,
                          const char** source_file, uint32_t* line_number) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uint32_t GetOwnerThreadId();
+  uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
 
   static bool (*is_sensitive_thread_hook_)();
   static uint32_t lock_profiling_threshold_;
@@ -285,17 +289,16 @@
   MonitorList();
   ~MonitorList();
 
-  void Add(Monitor* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Add(Monitor* m) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);
 
   void SweepMonitorList(IsMarkedVisitor* visitor)
-      LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
-  void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
-  void EnsureNewMonitorsDisallowed() LOCKS_EXCLUDED(monitor_list_lock_);
-  void BroadcastForNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
+      REQUIRES(!monitor_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+  void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
+  void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
+  void EnsureNewMonitorsDisallowed() REQUIRES(!monitor_list_lock_);
+  void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
   // Returns how many monitors were deflated.
-  size_t DeflateMonitors() LOCKS_EXCLUDED(monitor_list_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
 
   typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;
 
@@ -318,7 +321,7 @@
 // For use only by the JDWP implementation.
 class MonitorInfo {
  public:
-  explicit MonitorInfo(mirror::Object* o) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  explicit MonitorInfo(mirror::Object* o) REQUIRES(Locks::mutator_lock_);
 
   Thread* owner_;
   size_t entry_count_;
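
The MonitorList rewrites above are where -Wthread-safety-negative earns its
keep: REQUIRES(!monitor_list_lock_) is a checked claim that the caller does
not already hold the lock, so a would-be recursive acquisition of the
internal MutexLock becomes a compile-time diagnostic. A minimal sketch with
hypothetical names:

    Mutex list_lock_;
    std::list<Monitor*> list_ GUARDED_BY(list_lock_);

    void Add(Monitor* m) REQUIRES(!list_lock_) {
      MutexLock mu(Thread::Current(), list_lock_);  // OK: proven not held
      list_.push_back(m);
    }

    void Caller(Monitor* m) REQUIRES(!list_lock_) {
      Add(m);  // OK; without the annotation on Caller, the negative
    }          // requirement fails to propagate and clang warns here
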
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index 4a364ca..2832e32 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -90,7 +90,7 @@
 
 Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
                                           int32_t hash_code)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // We are gonna allocate, so acquire the writer lock.
   MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
 
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 4ab4e86..240ca61 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -43,7 +43,7 @@
   }
 
   static Monitor* CreateMonitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
 #ifndef __LP64__
     Monitor* mon = new Monitor(self, owner, obj, hash_code);
     DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment);
@@ -110,10 +110,10 @@
   // analysis.
   MonitorPool() NO_THREAD_SAFETY_ANALYSIS;
 
-  void AllocateChunk() EXCLUSIVE_LOCKS_REQUIRED(Locks::allocated_monitor_ids_lock_);
+  void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_);
 
   Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
   void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);
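
CreateMonitor's #ifndef __LP64__ split (context above) exists because the
lock word can hold a suitably aligned Monitor* directly on 32-bit targets,
while on 64-bit targets monitors are pool-allocated and referenced through a
compact MonitorId. A sketch of the assumed 64-bit side, with the pool
accessor name hypothetical:

    static Monitor* CreateMonitor(Thread* self, Thread* owner,
                                  mirror::Object* obj, int32_t hash_code)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      // 64-bit: allocate from the pool so a small id fits in the lock word.
      return GetMonitorPool()->CreateMonitorInPool(self, owner, obj, hash_code);
    }
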
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 2a29c60..1be637c 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -60,7 +60,7 @@
 static void FillHeap(Thread* self, ClassLinker* class_linker,
                      std::unique_ptr<StackHandleScope<kMaxHandles>>* hsp,
                      std::vector<MutableHandle<mirror::Object>>* handles)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
 
   hsp->reset(new StackHandleScope<kMaxHandles>(self));
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 5dd354d..7abc546 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -262,7 +262,7 @@
   explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::String* string = root->AsString();
     table_->operator[](string->ToModifiedUtf8()) = string;
   }
@@ -274,7 +274,7 @@
 // Based on ClassLinker::ResolveString.
 static void PreloadDexCachesResolveString(
     Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   mirror::String* string = dex_cache->GetResolvedString(string_idx);
   if (string != nullptr) {
     return;
@@ -292,7 +292,7 @@
 // Based on ClassLinker::ResolveType.
 static void PreloadDexCachesResolveType(
     Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
   if (klass != nullptr) {
     return;
@@ -321,7 +321,7 @@
 // Based on ClassLinker::ResolveField.
 static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
                                          bool is_static)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ArtField* field = dex_cache->GetResolvedField(field_idx, sizeof(void*));
   if (field != nullptr) {
     return;
@@ -349,7 +349,7 @@
 // Based on ClassLinker::ResolveMethod.
 static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
                                           InvokeType invoke_type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, sizeof(void*));
   if (method != nullptr) {
     return;
@@ -423,7 +423,7 @@
 }
 
 static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (!kPreloadDexCachesCollectStats) {
       return;
   }
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index ee62755..541eeb1 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -29,7 +29,7 @@
 namespace art {
 
 static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   jobject trace = nullptr;
   if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
     trace = soa.Self()->CreateInternalStackTrace<false>(soa);
@@ -87,7 +87,7 @@
       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         class_loader(nullptr) {}
 
-    bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
       DCHECK(class_loader == nullptr);
       mirror::Class* c = GetMethod()->GetDeclaringClass();
       // c is null for runtime methods.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index a41aed6..eddb2d1 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -41,7 +41,7 @@
 
 ALWAYS_INLINE static inline mirror::Class* DecodeClass(
     const ScopedFastNativeObjectAccess& soa, jobject java_class)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
   DCHECK(c != nullptr);
   DCHECK(c->IsClass());
@@ -108,7 +108,7 @@
 
 static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
     Thread* self, mirror::Class* klass, bool public_only, bool force_resolve)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
   StackHandleScope<1> hs(self);
   auto* ifields = klass->GetIFields();
   auto* sfields = klass->GetSFields();
@@ -189,7 +189,7 @@
 // fast.
 ALWAYS_INLINE static inline ArtField* FindFieldByName(
     Thread* self ATTRIBUTE_UNUSED, mirror::String* name, ArtField* fields, size_t num_fields)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   size_t low = 0;
   size_t high = num_fields;
   const uint16_t* const data = name->GetValue();
@@ -218,7 +218,7 @@
 
 ALWAYS_INLINE static inline mirror::Field* GetDeclaredField(
     Thread* self, mirror::Class* c, mirror::String* name)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   auto* instance_fields = c->GetIFields();
   auto* art_field = FindFieldByName(self, name, instance_fields, c->NumInstanceFields());
   if (art_field != nullptr) {
@@ -274,7 +274,7 @@
 }
 
 static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(m != nullptr);
   return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor();
 }
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 97aae67..d9863c5 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -36,7 +36,7 @@
  */
 
 static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   std::string actualType(PrettyTypeOf(array));
   Thread* self = Thread::Current();
   self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index ba898c6..01faf3c 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -32,7 +32,7 @@
 template<bool kIsSet>
 ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* field,
                                                    mirror::Object* obj)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (kIsSet && field->IsFinal()) {
     ThrowIllegalAccessException(
             StringPrintf("Cannot set %s field %s of class %s",
@@ -60,7 +60,7 @@
 template<bool kAllowReferences>
 ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* f,
                                                Primitive::Type field_type, JValue* value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK_EQ(value->GetJ(), INT64_C(0));
   MemberOffset offset(f->GetOffset());
   const bool is_volatile = f->IsVolatile();
@@ -105,7 +105,7 @@
 ALWAYS_INLINE inline static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa,
                                                jobject j_rcvr, mirror::Field** f,
                                                mirror::Object** class_or_rcvr)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   soa.Self()->AssertThreadSuspensionIsAllowable();
   mirror::Class* declaringClass = (*f)->GetDeclaringClass();
   if ((*f)->IsStatic()) {
@@ -232,7 +232,7 @@
 ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* f,
                                                Primitive::Type field_type, bool allow_references,
                                                const JValue& new_value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(f->GetDeclaringClass()->IsInitialized());
   MemberOffset offset(f->GetOffset());
   const bool is_volatile = f->IsVolatile();
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index 57b873b..c4a33df 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -27,7 +27,7 @@
 class ScopedFastNativeObjectAccess : public ScopedObjectAccessAlreadyRunnable {
  public:
   explicit ScopedFastNativeObjectAccess(JNIEnv* env)
-    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+    REQUIRES(!Locks::thread_suspend_count_lock_)
     SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
      : ScopedObjectAccessAlreadyRunnable(env) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
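
This constructor mixes both annotation families: REQUIRES(!thread_suspend_count_lock_)
is an entry condition, while SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
tells the analysis the capability is shared-held once the object exists
(here merely asserted, since fast-native calls arrive already runnable).
Typical composition with the SHARED_REQUIRES helpers elsewhere in this
change, sketched with an assumed native method:

    static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) {
      ScopedFastNativeObjectAccess soa(env);  // mutator_lock_ shared-held from here
      mirror::Class* c = DecodeClass(soa, javaThis);  // SHARED_REQUIRES satisfied
      return soa.AddLocalReference<jstring>(c->GetName());
    }
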
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index 7fe3130..2295cb4 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -33,7 +33,7 @@
         count(0),
         caller(nullptr) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     bool do_count = false;
     if (m == nullptr || m->IsRuntimeMethod()) {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 7c4ef8b..27f8677 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -215,7 +215,7 @@
   const OatDexFile* GetOatDexFile(const char* dex_location,
                                   const uint32_t* const dex_location_checksum,
                                   bool exception_if_not_found = true) const
-      LOCKS_EXCLUDED(secondary_lookup_lock_);
+      REQUIRES(!secondary_lookup_lock_);
 
   const std::vector<const OatDexFile*>& GetOatDexFiles() const {
     return oat_dex_files_storage_;
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index acddc03..eb7cbd8 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -28,15 +28,15 @@
 template <typename T>
 class ObjectLock {
  public:
-  ObjectLock(Thread* self, Handle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ObjectLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ~ObjectLock() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void WaitIgnoringInterrupts() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void WaitIgnoringInterrupts() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Notify() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void NotifyAll() SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   Thread* const self_;
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index 87b0d43..3db3265 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -59,13 +59,13 @@
  public:
   BoundedStackVisitor(std::vector<std::pair<ArtMethod*, uint32_t>>* stack,
       Thread* thread, uint32_t max_depth)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         stack_(stack),
         max_depth_(max_depth),
         depth_(0) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
       return true;
@@ -88,7 +88,7 @@
 
 // This is called from either a thread list traversal or from a checkpoint.  Regardless
 // of which caller, the mutator lock must be held.
-static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
   BackgroundMethodSamplingProfiler* profiler =
       reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
   const ProfilerOptions profile_options = profiler->GetProfilerOptions();
diff --git a/runtime/profiler.h b/runtime/profiler.h
index 7611487..30babe3 100644
--- a/runtime/profiler.h
+++ b/runtime/profiler.h
@@ -104,8 +104,8 @@
   explicit ProfileSampleResults(Mutex& lock);
   ~ProfileSampleResults();
 
-  void Put(ArtMethod* method);
-  void PutStack(const std::vector<InstructionLocation>& stack_dump);
+  void Put(ArtMethod* method) REQUIRES(!lock_);
+  void PutStack(const std::vector<InstructionLocation>& stack_dump) REQUIRES(!lock_);
   uint32_t Write(std::ostream &os, ProfileDataType type);
   void ReadPrevious(int fd, ProfileDataType type);
   void Clear();
@@ -168,17 +168,19 @@
   // Start a profile thread with the user-supplied arguments.
   // Returns true if the profile was started or if it was already running. Returns false otherwise.
   static bool Start(const std::string& output_filename, const ProfilerOptions& options)
-  LOCKS_EXCLUDED(Locks::mutator_lock_,
-                 Locks::thread_list_lock_,
-                 Locks::thread_suspend_count_lock_,
-                 Locks::profiler_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
+               !Locks::profiler_lock_);
 
-  static void Stop() LOCKS_EXCLUDED(Locks::profiler_lock_, wait_lock_);
-  static void Shutdown() LOCKS_EXCLUDED(Locks::profiler_lock_);
+  // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function which excludes the lock.
+  static void Stop() REQUIRES(!Locks::profiler_lock_, !wait_lock_)
+      NO_THREAD_SAFETY_ANALYSIS;
+  // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function which excludes the lock.
+  static void Shutdown() REQUIRES(!Locks::profiler_lock_) NO_THREAD_SAFETY_ANALYSIS;
 
-  void RecordMethod(ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void RecordStack(const std::vector<InstructionLocation>& stack) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool ProcessMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void RecordMethod(ArtMethod *method) SHARED_REQUIRES(Locks::mutator_lock_);
+  void RecordStack(const std::vector<InstructionLocation>& stack)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool ProcessMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
   const ProfilerOptions& GetProfilerOptions() const { return options_; }
 
   Barrier& GetBarrier() {
@@ -190,13 +192,15 @@
     const std::string& output_filename, const ProfilerOptions& options);
 
   // The sampling interval in microseconds is passed as an argument.
-  static void* RunProfilerThread(void* arg) LOCKS_EXCLUDED(Locks::profiler_lock_);
+  // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function which excludes the lock.
+  static void* RunProfilerThread(void* arg) REQUIRES(!Locks::profiler_lock_)
+      NO_THREAD_SAFETY_ANALYSIS;
 
-  uint32_t WriteProfile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t WriteProfile() SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CleanProfile();
-  uint32_t DumpProfile(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static bool ShuttingDown(Thread* self) LOCKS_EXCLUDED(Locks::profiler_lock_);
+  uint32_t DumpProfile(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+  static bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_);
 
   static BackgroundMethodSamplingProfiler* profiler_ GUARDED_BY(Locks::profiler_lock_);
 
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index f40c0f1..c33b126 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -34,7 +34,7 @@
   mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, jobject jclass_loader,
                                     const char* className,
                                     const std::vector<mirror::Class*>& interfaces)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::Class* javaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
     CHECK(javaLangObject != nullptr);
 
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 0d39e22..65bbcbe 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -157,7 +157,7 @@
    * @return true if the method is a candidate for inlining, false otherwise.
    */
   static bool AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static constexpr bool IsInstructionIGet(Instruction::Code opcode) {
     return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT;
@@ -182,16 +182,16 @@
   static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
   static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
   static bool AnalyseIGetMethod(verifier::MethodVerifier* verifier, InlineMethod* result)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static bool AnalyseIPutMethod(verifier::MethodVerifier* verifier, InlineMethod* result)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can we fast path instance field access in a verified accessor?
   // If yes, computes field's offset and volatility and whether the method is static or not.
   static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
                                          verifier::MethodVerifier* verifier,
                                          InlineIGetIPutData* result)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 }  // namespace art
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 02baad7..d1a4081 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -45,14 +45,14 @@
  public:
   CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
                          QuickExceptionHandler* exception_handler)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         self_(self),
         exception_(exception),
         exception_handler_(exception_handler) {
   }
 
-  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* method = GetMethod();
     exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
     if (method == nullptr) {
@@ -83,7 +83,7 @@
 
  private:
   bool HandleTryItems(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t dex_pc = DexFile::kDexNoIndex;
     if (!method->IsNative()) {
       dex_pc = GetDexPc();
@@ -159,7 +159,7 @@
 class DeoptimizeStackVisitor FINAL : public StackVisitor {
  public:
   DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         self_(self),
         exception_handler_(exception_handler),
@@ -167,7 +167,7 @@
         stacked_shadow_frame_pushed_(false) {
   }
 
-  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
     ArtMethod* method = GetMethod();
     if (method == nullptr) {
@@ -196,7 +196,7 @@
     return static_cast<VRegKind>(kinds.at(reg * 2));
   }
 
-  bool HandleDeoptimization(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool HandleDeoptimization(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
     CHECK(code_item != nullptr);
     uint16_t num_regs = code_item->registers_size_;
@@ -350,14 +350,14 @@
 class InstrumentationStackVisitor : public StackVisitor {
  public:
   InstrumentationStackVisitor(Thread* self, size_t frame_depth)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         frame_depth_(frame_depth),
         instrumentation_frames_to_pop_(0) {
     CHECK_NE(frame_depth_, kInvalidFrameDepth);
   }
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     size_t current_frame_depth = GetFrameDepth();
     if (current_frame_depth < frame_depth_) {
       CHECK(GetMethod() != nullptr);
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 8d7cd12..ce9085d 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -36,17 +36,17 @@
 class QuickExceptionHandler {
  public:
   QuickExceptionHandler(Thread* self, bool is_deoptimization)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   NO_RETURN ~QuickExceptionHandler() {
     LOG(FATAL) << "UNREACHABLE";  // Expected to take long jump.
     UNREACHABLE();
   }
 
-  void FindCatch(mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DeoptimizeStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  NO_RETURN void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FindCatch(mirror::Throwable* exception) SHARED_REQUIRES(Locks::mutator_lock_);
+  void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_);
+  void UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_);
+  NO_RETURN void DoLongJump() SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
     handler_quick_frame_ = handler_quick_frame;
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 55cef68..e7ad731 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -49,7 +49,7 @@
             bool kMaybeDuringStartup = false>
   ALWAYS_INLINE static MirrorType* Barrier(
       mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // It's up to the implementation whether the given root gets updated;
   // the return value, however, must be an updated reference.
@@ -57,7 +57,7 @@
             bool kMaybeDuringStartup = false>
   ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root,
                                                   GcRootSource* gc_root_source = nullptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // It's up to the implementation whether the given root gets updated;
   // the return value, however, must be an updated reference.
@@ -65,24 +65,24 @@
             bool kMaybeDuringStartup = false>
   ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                                   GcRootSource* gc_root_source = nullptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static bool IsDuringStartup();
 
   // Without the holder object.
   static void AssertToSpaceInvariant(mirror::Object* ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
   }
   // With the holder object.
   static void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                      mirror::Object* ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // With GcRootSource.
   static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static mirror::Object* Mark(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static mirror::Object* Mark(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static mirror::Object* WhitePtr() {
     return reinterpret_cast<mirror::Object*>(white_ptr_);
@@ -96,7 +96,7 @@
 
   ALWAYS_INLINE static bool HasGrayReadBarrierPointer(mirror::Object* obj,
                                                       uintptr_t* out_rb_ptr_high_bits)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them.
   static constexpr uintptr_t white_ptr_ = 0x0;    // Not marked.
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index a31d8ac..49b6a38 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -62,7 +62,7 @@
 
 // If "obj" is an array, return the number of elements in the array.
 // Otherwise, return zero.
-static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static size_t GetElementCount(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
   // We assume the special cleared value isn't an array in the if statement below.
   DCHECK(!Runtime::Current()->GetClearedJniWeakGlobal()->IsArrayInstance());
   if (obj == nullptr || !obj->IsArrayInstance()) {
@@ -78,7 +78,7 @@
 // or equivalent to the original.
 static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count,
                             int identical, int equiv)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (obj == nullptr) {
     os << "    null reference (count=" << equiv << ")\n";
     return;
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 94f16b6..f90ccd1 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -41,22 +41,22 @@
   ReferenceTable(const char* name, size_t initial_size, size_t max_size);
   ~ReferenceTable();
 
-  void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void Remove(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Remove(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
 
   size_t Size() const;
 
-  void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   typedef std::vector<GcRoot<mirror::Object>,
                       TrackingAllocator<GcRoot<mirror::Object>, kAllocatorTagReferenceTable>> Table;
   static void Dump(std::ostream& os, Table& entries)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   friend class IndirectReferenceTable;  // For Dump.
 
   std::string name_;
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 11522d9..ee2e2c5 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -72,7 +72,7 @@
     num_bytes_ += 4;
   }
 
-  void Append(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void Append(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
     Append(StackReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue());
   }
 
@@ -96,7 +96,7 @@
 
   void BuildArgArrayFromVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
                                 mirror::Object* receiver, va_list ap)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Set receiver if non-null (method is not static)
     if (receiver != nullptr) {
       Append(receiver);
@@ -132,7 +132,7 @@
 
   void BuildArgArrayFromJValues(const ScopedObjectAccessAlreadyRunnable& soa,
                                 mirror::Object* receiver, jvalue* args)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Set receiver if non-null (method is not static)
     if (receiver != nullptr) {
       Append(receiver);
@@ -171,7 +171,7 @@
   }
 
   void BuildArgArrayFromFrame(ShadowFrame* shadow_frame, uint32_t arg_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Set receiver if non-null (method is not static)
     size_t cur_arg = arg_offset;
     if (!shadow_frame->GetMethod()->IsStatic()) {
@@ -206,7 +206,7 @@
 
   static void ThrowIllegalPrimitiveArgumentException(const char* expected,
                                                      const char* found_descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ThrowIllegalArgumentException(
         StringPrintf("Invalid primitive conversion from %s to %s", expected,
                      PrettyDescriptor(found_descriptor).c_str()).c_str());
@@ -214,7 +214,7 @@
 
   bool BuildArgArrayFromObjectArray(mirror::Object* receiver,
                                     mirror::ObjectArray<mirror::Object>* args, ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const DexFile::TypeList* classes = m->GetParameterTypeList();
     // Set receiver if non-null (method is not static)
     if (receiver != nullptr) {
@@ -343,7 +343,7 @@
 };
 
 static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   const DexFile::TypeList* params = m->GetParameterTypeList();
   if (params == nullptr) {
     return;  // No arguments so nothing to check.
@@ -418,7 +418,7 @@
 }
 
 static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, sizeof(void*));
 }
 
@@ -426,7 +426,7 @@
 static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
                                ArtMethod* method, ArgArray* arg_array, JValue* result,
                                const char* shorty)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   uint32_t* args = arg_array->GetArray();
   if (UNLIKELY(soa.Env()->check_jni)) {
     CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(sizeof(void*)), args);
@@ -436,7 +436,7 @@
 
 JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
                          va_list args)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   // We want to make sure that the stack is not within a small distance from the
   // protected region in case we are calling into a leaf function whose stack
   // check has been elided.
@@ -730,7 +730,7 @@
 }
 
 static std::string UnboxingFailureKind(ArtField* f)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (f != nullptr) {
     return "field " + PrettyField(f, false);
   }
@@ -740,7 +740,7 @@
 static bool UnboxPrimitive(mirror::Object* o,
                            mirror::Class* dst_class, ArtField* f,
                            JValue* unboxed_value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   bool unbox_for_result = (f == nullptr);
   if (!dst_class->IsPrimitive()) {
     if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) {
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 825a721..d9c38c1 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -33,60 +33,60 @@
 class ShadowFrame;
 
 mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, ArtField* f,
                             JValue* unboxed_value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 ALWAYS_INLINE bool ConvertPrimitiveValue(bool unbox_for_result,
                                          Primitive::Type src_class, Primitive::Type dst_class,
                                          const JValue& src, JValue* dst)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
                          va_list args)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
                          jvalue* args)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
                                            jobject obj, jmethodID mid, jvalue* args)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
                                            jobject obj, jmethodID mid, va_list args)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // num_frames is number of frames we look up for access check.
 jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver,
                      jobject args, size_t num_frames = 1)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 ALWAYS_INLINE bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
                   uint32_t access_flags, mirror::Class** calling_class, size_t num_frames)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // This version takes a known calling class.
 bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
                   uint32_t access_flags, mirror::Class* calling_class)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Get the calling class by using a stack visitor, may return null for unattached native threads.
 mirror::Class* GetCallingClass(Thread* self, size_t num_frames)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 void InvalidReceiverError(mirror::Object* o, mirror::Class* c)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 void UpdateReference(Thread* self, jobject obj, mirror::Object* result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 }  // namespace art
 
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 9707fb8..bd89be5 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -85,7 +85,7 @@
                                     mirror::Object** receiver,
                                     bool is_static, const char* method_name,
                                     const char* method_signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
     jobject jclass_loader(LoadDex(class_name));
     Thread* self = Thread::Current();
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 68d5ad2..380e72b 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -66,13 +66,13 @@
 }
 
 inline ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(HasCalleeSaveMethod(type));
   return GetCalleeSaveMethodUnchecked(type);
 }
 
 inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return reinterpret_cast<ArtMethod*>(callee_save_methods_[type]);
 }
 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index cc8b215..c92f08f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -590,7 +590,7 @@
   return true;
 }
 
-void Runtime::EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
   DCHECK_GT(threads_being_born_, 0U);
   threads_being_born_--;
   if (shutting_down_started_ && threads_being_born_ == 0) {
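
For exclusively-held locks the conversion is mechanical: EXCLUSIVE_LOCKS_REQUIRED(x) becomes
REQUIRES(x), still meaning the caller already holds x when it calls. A standalone sketch of
the EndThreadBirth() shape under that reading (illustrative declarations, not the ART ones):

  #define CAPABILITY(x) __attribute__((capability(x)))
  #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
  #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
  #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
  #define GUARDED_BY(x) __attribute__((guarded_by(x)))

  class CAPABILITY("mutex") Mutex {
   public:
    void Lock() ACQUIRE() {}
    void Unlock() RELEASE() {}
  };

  Mutex shutdown_lock;
  unsigned threads_being_born GUARDED_BY(shutdown_lock) = 0;

  // Caller must already hold shutdown_lock (old spelling: EXCLUSIVE_LOCKS_REQUIRED).
  void EndThreadBirth() REQUIRES(shutdown_lock) {
    --threads_being_born;  // OK: guarded access, the lock is held by contract.
  }

  void Detach() REQUIRES(!shutdown_lock) {
    shutdown_lock.Lock();
    EndThreadBirth();      // OK: the lock is held at this call site.
    shutdown_lock.Unlock();
  }
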
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 55adaf1..8aed768 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -184,19 +184,19 @@
   bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
 
   bool IsShuttingDown(Thread* self);
-  bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
     return shutting_down_;
   }
 
-  size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
     return threads_being_born_;
   }
 
-  void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
     threads_being_born_++;
   }
 
-  void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);
 
   bool IsStarted() const {
     return started_;
@@ -212,7 +212,7 @@
 
   // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
   // callers should prefer.
-  NO_RETURN static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);
+  NO_RETURN static void Abort() REQUIRES(!Locks::abort_lock_);
 
   // Returns the "main" ThreadGroup, used when attaching user threads.
   jobject GetMainThreadGroup() const;
@@ -230,7 +230,7 @@
   void CallExitHook(jint status);
 
   // Detaches the current native thread from the runtime.
-  void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);
+  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);
 
   void DumpForSigQuit(std::ostream& os);
   void DumpLockHolders(std::ostream& os);
@@ -279,15 +279,15 @@
   }
 
   // Is the given object the special object used to mark a cleared JNI weak global?
-  bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the special object used to mark a cleared JNI weak global.
-  mirror::Object* GetClearedJniWeakGlobal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* GetClearedJniWeakGlobal() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   const std::vector<std::string>& GetProperties() const {
     return properties_;
@@ -301,77 +301,77 @@
     return "2.1.0";
   }
 
-  void DisallowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void EnsureNewSystemWeaksDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void BroadcastForNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
+  void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
+  void EnsureNewSystemWeaksDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
+  void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
   // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Visit image roots, only used for hprof since the GC uses the image space mod union table
   // instead.
-  void VisitImageRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VisitImageRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Visit all of the roots we can safely do concurrently.
   void VisitConcurrentRoots(RootVisitor* visitor,
                             VisitRootFlags flags = kVisitRootFlagAllRoots)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Visit all of the non-thread roots; we can do this with mutators unpaused.
   void VisitNonThreadRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void VisitTransactionRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Visit all of the thread roots.
-  void VisitThreadRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VisitThreadRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Flip thread roots from from-space refs to to-space refs.
   size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
                          gc::collector::GarbageCollector* collector)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   // Visit all other roots which must be done with mutators suspended.
   void VisitNonConcurrentRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the
   // system weak is updated to be the visitor's returned value.
   void SweepSystemWeaks(IsMarkedVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Constant roots are the roots which never change after the runtime is initialized; they only
   // need to be visited once per GC cycle.
   void VisitConstantRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns a special method that calls into a trampoline for runtime method resolution
-  ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* GetResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool HasResolutionMethod() const {
     return resolution_method_ != nullptr;
   }
 
-  void SetResolutionMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetResolutionMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* CreateResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns a special method that calls into a trampoline for runtime imt conflicts.
-  ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* GetImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+  ArtMethod* GetImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool HasImtConflictMethod() const {
     return imt_conflict_method_ != nullptr;
   }
 
-  void SetImtConflictMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetImtUnimplementedMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* CreateImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns a special method that describes all callee saves being spilled to the stack.
   enum CalleeSaveType {
@@ -386,17 +386,17 @@
   }
 
   ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
     return callee_save_method_frame_infos_[type];
   }
 
   QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
     return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
@@ -410,7 +410,7 @@
 
   void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
 
-  ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* CreateCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
   int32_t GetStat(int kind);
 
@@ -424,8 +424,8 @@
 
   void ResetStats(int kinds);
 
-  void SetStatsEnabled(bool new_state) LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_,
-                                                      Locks::mutator_lock_);
+  void SetStatsEnabled(bool new_state)
+      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);
 
   enum class NativeBridgeAction {  // private
     kUnload,
@@ -463,9 +463,9 @@
   bool IsTransactionAborted() const;
 
   void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void ThrowTransactionAbortError(Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                                bool is_volatile) const;
@@ -482,17 +482,17 @@
   void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
                                  mirror::Object* value, bool is_volatile) const;
   void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void RecordStrongStringInsertion(mirror::String* s) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      REQUIRES(Locks::intern_table_lock_);
   void RecordWeakStringInsertion(mirror::String* s) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      REQUIRES(Locks::intern_table_lock_);
   void RecordStrongStringRemoval(mirror::String* s) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      REQUIRES(Locks::intern_table_lock_);
   void RecordWeakStringRemoval(mirror::String* s) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+      REQUIRES(Locks::intern_table_lock_);
 
-  void SetFaultMessage(const std::string& message);
+  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
   // Only read by the signal handler; NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
   // with the unexpected_signal_lock_.
   const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
@@ -572,7 +572,7 @@
 
   bool Init(const RuntimeOptions& options, bool ignore_unrecognized)
       SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
-  void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
+  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
   void InitThreadGroups(Thread* self);
   void RegisterRuntimeNativeMethods(JNIEnv* env);
 
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 1cc2df6..b90aa0e 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -34,7 +34,7 @@
 class ScopedThreadStateChange {
  public:
   ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+      REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
       : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
     if (UNLIKELY(self_ == nullptr)) {
       // Value chosen arbitrarily and won't be used in the destructor since thread_ == null.
@@ -59,7 +59,7 @@
     }
   }
 
-  ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
+  ~ScopedThreadStateChange() REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
     if (UNLIKELY(self_ == nullptr)) {
       if (!expected_has_no_thread_) {
         Runtime* runtime = Runtime::Current();
@@ -130,7 +130,7 @@
    * it's best if we don't grab a mutex.
    */
   template<typename T>
-  T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  T AddLocalReference(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     DCHECK_NE(obj, Runtime::Current()->GetClearedJniWeakGlobal());
@@ -139,32 +139,32 @@
 
   template<typename T>
   T Decode(jobject obj) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     return down_cast<T>(Self()->DecodeJObject(obj));
   }
 
   ArtField* DecodeField(jfieldID fid) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     return reinterpret_cast<ArtField*>(fid);
   }
 
-  jfieldID EncodeField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  jfieldID EncodeField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     return reinterpret_cast<jfieldID>(field);
   }
 
-  ArtMethod* DecodeMethod(jmethodID mid) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ArtMethod* DecodeMethod(jmethodID mid) const SHARED_REQUIRES(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     return reinterpret_cast<ArtMethod*>(mid);
   }
 
-  jmethodID EncodeMethod(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  jmethodID EncodeMethod(ArtMethod* method) const SHARED_REQUIRES(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
     return reinterpret_cast<jmethodID>(method);
@@ -176,12 +176,12 @@
 
  protected:
   explicit ScopedObjectAccessAlreadyRunnable(JNIEnv* env)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+      REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
       : self_(ThreadForEnv(env)), env_(down_cast<JNIEnvExt*>(env)), vm_(env_->vm) {
   }
 
   explicit ScopedObjectAccessAlreadyRunnable(Thread* self)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+      REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
       : self_(self), env_(down_cast<JNIEnvExt*>(self->GetJniEnv())),
         vm_(env_ != nullptr ? env_->vm : nullptr) {
   }
@@ -220,14 +220,14 @@
 class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable {
  public:
   explicit ScopedObjectAccessUnchecked(JNIEnv* env)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+      REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
       : ScopedObjectAccessAlreadyRunnable(env), tsc_(Self(), kRunnable) {
     Self()->VerifyStack();
     Locks::mutator_lock_->AssertSharedHeld(Self());
   }
 
   explicit ScopedObjectAccessUnchecked(Thread* self)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
+      REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
       : ScopedObjectAccessAlreadyRunnable(self), tsc_(self, kRunnable) {
     Self()->VerifyStack();
     Locks::mutator_lock_->AssertSharedHeld(Self());
@@ -250,13 +250,13 @@
 class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
  public:
   explicit ScopedObjectAccess(JNIEnv* env)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+      REQUIRES(!Locks::thread_suspend_count_lock_)
       SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
       : ScopedObjectAccessUnchecked(env) {
   }
 
   explicit ScopedObjectAccess(Thread* self)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+      REQUIRES(!Locks::thread_suspend_count_lock_)
       SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
       : ScopedObjectAccessUnchecked(self) {
   }
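
The scoped types here keep SHARED_LOCK_FUNCTION on their constructors; only the LOCKS_EXCLUDED
clauses become REQUIRES(!...), and SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES on the
accessors. A reduction of that RAII pattern to clang's documented attributes (illustrative
names, not the ART Mutex API):

  #define CAPABILITY(x) __attribute__((capability(x)))
  #define SCOPED_CAPABILITY __attribute__((scoped_lockable))
  #define ACQUIRE_SHARED(...) __attribute__((acquire_shared_capability(__VA_ARGS__)))
  #define RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__)))
  #define RELEASE_GENERIC(...) __attribute__((release_generic_capability(__VA_ARGS__)))
  #define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

  class CAPABILITY("mutex") SharedMutex {
   public:
    void ReaderLock() ACQUIRE_SHARED() {}
    void ReaderUnlock() RELEASE_SHARED() {}
  };

  SharedMutex mutator_lock;

  // RAII reader scope: for the analysis, construction counts as a shared acquire
  // of mutator_lock and destruction as the matching release.
  class SCOPED_CAPABILITY ReaderScope {
   public:
    ReaderScope() ACQUIRE_SHARED(mutator_lock) { mutator_lock.ReaderLock(); }
    ~ReaderScope() RELEASE_GENERIC() { mutator_lock.ReaderUnlock(); }
  };

  // SHARED_REQUIRES(x) in this change is the capability spelling of the old
  // SHARED_LOCKS_REQUIRED(x): the caller must hold x at least in shared mode.
  void TouchObject() REQUIRES_SHARED(mutator_lock) {}

  void Example() {
    ReaderScope soa;  // like ScopedObjectAccess: shared-acquires mutator_lock
    TouchObject();    // OK: the shared capability is held here
  }                   // released when soa goes out of scope
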
diff --git a/runtime/signal_catcher.h b/runtime/signal_catcher.h
index 43bbef4..de6a212 100644
--- a/runtime/signal_catcher.h
+++ b/runtime/signal_catcher.h
@@ -35,19 +35,19 @@
   explicit SignalCatcher(const std::string& stack_trace_file);
   ~SignalCatcher();
 
-  void HandleSigQuit() LOCKS_EXCLUDED(Locks::mutator_lock_,
-                                      Locks::thread_list_lock_,
-                                      Locks::thread_suspend_count_lock_);
+  void HandleSigQuit() REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+                                !Locks::thread_suspend_count_lock_);
 
 
  private:
-  static void* Run(void* arg);
+  // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function which excludes the lock.
+  static void* Run(void* arg) NO_THREAD_SAFETY_ANALYSIS;
 
   void HandleSigUsr1();
   void Output(const std::string& s);
-  void SetHaltFlag(bool new_value);
-  bool ShouldHalt();
-  int WaitForSignal(Thread* self, SignalSet& signals);
+  void SetHaltFlag(bool new_value) REQUIRES(!lock_);
+  bool ShouldHalt() REQUIRES(!lock_);
+  int WaitForSignal(Thread* self, SignalSet& signals) REQUIRES(!lock_);
 
   std::string stack_trace_file_;
 
diff --git a/runtime/stack.cc b/runtime/stack.cc
index fede91c..b07b244 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -150,7 +150,7 @@
 }
 
 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 mirror::Object* StackVisitor::GetThisObject() const {
   DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
@@ -655,7 +655,7 @@
           next_dex_pc_(0) {
     }
 
-    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
       if (found_frame_) {
         ArtMethod* method = GetMethod();
         if (method != nullptr && !method->IsRuntimeMethod()) {
@@ -688,7 +688,7 @@
     explicit DescribeStackVisitor(Thread* thread_in)
         : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
       LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
       return true;
     }
diff --git a/runtime/stack.h b/runtime/stack.h
index d60714f..8023de1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -155,7 +155,7 @@
   // A non-null result does not guarantee that the vreg is currently a reference
   // on non-moving collectors. Check that the raw reg from GetVReg is equal to this if not certain.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* GetVRegReference(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK_LT(i, NumberOfVRegs());
     mirror::Object* ref;
     if (HasReferenceArray()) {
@@ -229,7 +229,7 @@
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetVRegReference(size_t i, mirror::Object* val) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK_LT(i, NumberOfVRegs());
     if (kVerifyFlags & kVerifyWrites) {
       VerifyObject(val);
@@ -244,14 +244,14 @@
     }
   }
 
-  ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(method_ != nullptr);
     return method_;
   }
 
-  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
     if (HasReferenceArray()) {
@@ -333,7 +333,7 @@
      : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
   }
   virtual void Describe(std::ostream& os) const OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   const StackVisitor* const stack_visitor_;
@@ -410,7 +410,7 @@
     return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
   }
 
-  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
 
@@ -431,31 +431,31 @@
 
  protected:
   StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  public:
   virtual ~StackVisitor() {}
 
   // Return 'true' if we should continue to visit more frames, 'false' to stop.
-  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   void WalkStack(bool include_transitions = false)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool IsShadowFrame() const {
     return cur_shadow_frame_ != nullptr;
   }
 
-  uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Callee saves are held at the top of the frame
     DCHECK(GetMethod() != nullptr);
     uint8_t* save_addr =
@@ -467,46 +467,46 @@
   }
 
   // Returns the height of the stack in the managed stack frames, including transitions.
-  size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetNumFrames() - cur_depth_ - 1;
   }
 
   // Returns a frame ID for JDWP use, starting from 1.
-  size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t GetFrameId() SHARED_REQUIRES(Locks::mutator_lock_) {
     return GetFrameHeight() + 1;
   }
 
-  size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t GetNumFrames() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (num_frames_ == 0) {
       num_frames_ = ComputeNumFrames(thread_, walk_kind_);
     }
     return num_frames_;
   }
 
-  size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t GetFrameDepth() SHARED_REQUIRES(Locks::mutator_lock_) {
     return cur_depth_;
   }
 
   // Get the method and dex pc immediately after the one that's currently being visited.
   bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool IsReferenceVReg(ArtMethod* m, uint16_t vreg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                    uint64_t* val) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
                    VRegKind kind_lo, VRegKind kind_hi)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   uintptr_t* GetGPRAddress(uint32_t reg) const;
 
@@ -522,9 +522,9 @@
     return reinterpret_cast<uint32_t*>(vreg_addr);
   }
 
-  uintptr_t GetReturnPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uintptr_t GetReturnPc() const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SetReturnPc(uintptr_t new_ret_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetReturnPc(uintptr_t new_ret_pc) SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Return sp-relative offset for a Dalvik virtual register, compiler
@@ -606,17 +606,17 @@
     return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
   }
 
-  std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string DescribeLocation() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   // Private constructor known in the case that num_frames_ has already been computed.
   StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
     return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
@@ -644,40 +644,40 @@
 
   bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                             uint32_t* val) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                 uint32_t* val) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                                 VRegKind kind_hi, uint64_t* val) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                     VRegKind kind_lo, VRegKind kind_hi,
                                     uint64_t* val) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
                                    uint64_t* val) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
                             VRegKind kind)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool SetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, uint64_t new_value,
                                 VRegKind kind_lo, VRegKind kind_hi)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value,
                                    bool is_float)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SanityCheckFrame() const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  InlineInfo GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  InlineInfo GetCurrentInlineInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   Thread* const thread_;
   const StackWalkKind walk_kind_;
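
For reference, every hunk in this change follows the same mapping, which a
minimal standalone sketch makes concrete. This is not ART code: the macro
expansions below follow Clang's documented thread-safety attributes, and
SketchMutex/gLock/gCount are invented for illustration.

#define CAPABILITY(x)        __attribute__((capability(x)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") SketchMutex {
  void Lock() ACQUIRE() {}    // with no argument the attribute applies to 'this'
  void Unlock() RELEASE() {}
};

SketchMutex gLock;
int gCount GUARDED_BY(gLock);

// EXCLUSIVE_LOCKS_REQUIRED(x) becomes REQUIRES(x) and SHARED_LOCKS_REQUIRED(x)
// becomes SHARED_REQUIRES(x): the caller must hold the capability.
void Bump() REQUIRES(gLock) { ++gCount; }

// LOCKS_EXCLUDED(x) becomes the negative capability REQUIRES(!x). Unlike
// LOCKS_EXCLUDED, -Wthread-safety-negative checks this transitively:
// acquiring gLock in a function that does not declare !gLock warns, and so
// does calling BumpSlow() from a function that cannot show gLock is unheld.
void BumpSlow() REQUIRES(!gLock) {
  gLock.Lock();
  Bump();
  gLock.Unlock();
}

void Caller() REQUIRES(!gLock) {  // dropping this annotation triggers the new warning
  BumpSlow();
}
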
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a2edfa3..2af31cc 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1159,7 +1159,7 @@
 
 struct StackDumpVisitor : public StackVisitor {
   StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         os(os_in),
         thread(thread_in),
@@ -1175,7 +1175,7 @@
     }
   }
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
       return true;
@@ -1223,7 +1223,7 @@
   }
 
   static void DumpLockedObject(mirror::Object* o, void* context)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     std::ostream& os = *reinterpret_cast<std::ostream*>(context);
     os << "  - locked ";
     if (o == nullptr) {
@@ -1255,7 +1255,7 @@
 };
 
 static bool ShouldShowNativeStack(const Thread* thread)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ThreadState state = thread->GetState();
 
   // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
@@ -1760,11 +1760,11 @@
 class CountStackDepthVisitor : public StackVisitor {
  public:
   explicit CountStackDepthVisitor(Thread* thread)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         depth_(0), skip_depth_(0), skipping_(true) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     // We want to skip frames up to and including the exception's constructor.
     // Note that we also skip the frame if it doesn't have a method (namely the
     // callee save frame).
@@ -1809,7 +1809,7 @@
         pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
 
   bool Init(int depth)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Allocate method trace with format [method pointers][pcs].
     auto* cl = Runtime::Current()->GetClassLinker();
     trace_ = cl->AllocPointerArray(self_, depth * 2);
@@ -1830,7 +1830,7 @@
     }
   }
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (trace_ == nullptr) {
       return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
     }
@@ -2012,7 +2012,7 @@
 }
 
 static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   ArtMethod* method = self->GetCurrentMethod(nullptr);
   return method != nullptr
       ? method->GetDeclaringClass()->GetClassLoader()
@@ -2345,13 +2345,13 @@
 //       so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
 struct CurrentMethodVisitor FINAL : public StackVisitor {
   CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         this_object_(nullptr),
         method_(nullptr),
         dex_pc_(0),
         abort_on_error_(abort_on_error) {}
-  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
       // Continue if this is a runtime method.
@@ -2391,13 +2391,13 @@
 class ReferenceMapVisitor : public StackVisitor {
  public:
   ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
         // We are visiting the references in compiled frames, so we do not need
         // to know the inlined frames.
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
         visitor_(visitor) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (false) {
       LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
                 << StringPrintf("@ PC:%04x", GetDexPc());
@@ -2411,7 +2411,7 @@
     return true;
   }
 
-  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = shadow_frame->GetMethod();
     DCHECK(m != nullptr);
     size_t num_regs = shadow_frame->NumberOfVRegs();
@@ -2453,7 +2453,7 @@
   }
 
  private:
-  void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     auto* cur_quick_frame = GetCurrentQuickFrame();
     DCHECK(cur_quick_frame != nullptr);
     auto* m = *cur_quick_frame;
@@ -2557,7 +2557,7 @@
   RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
 
   void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
   }
 
@@ -2620,7 +2620,7 @@
 class VerifyRootVisitor : public SingleRootVisitor {
  public:
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     VerifyObject(root);
   }
 };
diff --git a/runtime/thread.h b/runtime/thread.h
index cf87f22..06934ea 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -162,20 +162,18 @@
   static Thread* Current();
 
   // On a runnable thread, check for pending thread suspension request and handle if pending.
-  void AllowThreadSuspension() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AllowThreadSuspension() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Process pending thread suspension request and handle if pending.
-  void CheckSuspend() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckSuspend() SHARED_REQUIRES(Locks::mutator_lock_);
 
   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                    mirror::Object* thread_peer)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Translates 172 to pAllocArrayFromCode and so on.
   template<size_t size_of_pointers>
@@ -186,18 +184,18 @@
 
   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
   void Dump(std::ostream& os) const
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpJavaStack(std::ostream& os) const
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ThreadState GetState() const {
     DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
@@ -207,11 +205,11 @@
 
   ThreadState SetState(ThreadState new_state);
 
-  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
+  int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
     return tls32_.suspend_count;
   }
 
-  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
+  int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
     return tls32_.debug_suspend_count;
   }
 
@@ -223,10 +221,10 @@
   }
 
   bool ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier, bool for_debugger)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
+      REQUIRES(Locks::thread_suspend_count_lock_);
 
   bool RequestCheckpoint(Closure* function)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
+      REQUIRES(Locks::thread_suspend_count_lock_);
 
   void SetFlipFunction(Closure* function);
   Closure* GetFlipFunction();
@@ -243,19 +241,19 @@
   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
   // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
   void FullSuspendCheck()
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
   ThreadState TransitionFromSuspendedToRunnable()
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+      REQUIRES(!Locks::thread_suspend_count_lock_)
       SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
       ALWAYS_INLINE;
 
   // Transition from runnable into a state where mutator privileges are denied. Releases share of
   // mutator lock.
   void TransitionFromRunnableToSuspended(ThreadState new_state)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+      REQUIRES(!Locks::thread_suspend_count_lock_)
       UNLOCK_FUNCTION(Locks::mutator_lock_)
       ALWAYS_INLINE;
 
@@ -290,7 +288,7 @@
 
   size_t NumberOfHeldMutexes() const;
 
-  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool HoldsLock(mirror::Object*) const SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Changes the priority of this thread to match that of the java.lang.Thread object.
@@ -318,19 +316,19 @@
 
   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
   mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
   // allocation, or locking.
   void GetThreadName(std::string& name) const;
 
   // Sets the thread's name.
-  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetThreadName(const char* name) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
   uint64_t GetCpuMicroTime() const;
 
-  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* GetPeer() const SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK(tlsPtr_.jpeer == nullptr);
     return tlsPtr_.opeer;
   }
@@ -349,28 +347,28 @@
     return tlsPtr_.exception != nullptr;
   }
 
-  mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Throwable* GetException() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return tlsPtr_.exception;
   }
 
   void AssertPendingException() const;
-  void AssertPendingOOMException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AssertPendingOOMException() const SHARED_REQUIRES(Locks::mutator_lock_);
   void AssertNoPendingException() const;
   void AssertNoPendingExceptionForNewException(const char* msg) const;
 
   void SetException(mirror::Throwable* new_exception)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK(new_exception != nullptr);
     // TODO: DCHECK(!IsExceptionPending());
     tlsPtr_.exception = new_exception;
   }
 
-  void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) {
     tlsPtr_.exception = nullptr;
   }
 
   // Find the catch block and perform a long jump to the appropriate exception handler.
-  NO_RETURN void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  NO_RETURN void QuickDeliverException() SHARED_REQUIRES(Locks::mutator_lock_);
 
   Context* GetLongJumpContext();
   void ReleaseLongJumpContext(Context* context) {
@@ -392,12 +390,12 @@
   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
   // abort the runtime iff abort_on_error is true.
   ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns whether the given exception was thrown by the current Java method being executed
   // (Note that this includes native Java methods).
   bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetTopOfStack(ArtMethod** top_method) {
     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
@@ -414,23 +412,23 @@
 
   // If 'msg' is null, no detail message is set.
   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
   // used as the new exception's cause.
   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
       __attribute__((format(printf, 3, 4)))
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // OutOfMemoryError is special, because we need to pre-allocate an instance.
   // Only the GC should call this.
-  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ThrowOutOfMemoryError(const char* msg) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void Startup();
   static void FinishStartup();
@@ -442,50 +440,49 @@
   }
 
   // Convert a jobject into an Object*.
-  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* DecodeJObject(jobject obj) const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* GetMonitorEnterObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return tlsPtr_.monitor_enter_object;
   }
 
-  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  void SetMonitorEnterObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
     tlsPtr_.monitor_enter_object = obj;
   }
 
   // Implements java.lang.Thread.interrupted.
-  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
+  bool Interrupted() REQUIRES(!*wait_mutex_);
   // Implements java.lang.Thread.isInterrupted.
-  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
-  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+  bool IsInterrupted() REQUIRES(!*wait_mutex_);
+  bool IsInterruptedLocked() REQUIRES(wait_mutex_) {
     return interrupted_;
   }
-  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
-  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+  void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
+  void SetInterruptedLocked(bool i) REQUIRES(wait_mutex_) {
     interrupted_ = i;
   }
-  void Notify() LOCKS_EXCLUDED(wait_mutex_);
+  void Notify() REQUIRES(!*wait_mutex_);
 
  private:
-  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);
+  void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
 
  public:
   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
     return wait_mutex_;
   }
 
-  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+  ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
     return wait_cond_;
   }
 
-  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+  Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
     return wait_monitor_;
   }
 
-  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
+  void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
     wait_monitor_ = mon;
   }
 
-
   // Waiter linked-list support.
   Thread* GetWaitNext() const {
     return tlsPtr_.wait_next;
@@ -505,7 +502,7 @@
   // and space efficient to compute than the StackTraceElement[].
   template<bool kTransactionActive>
   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
@@ -514,11 +511,11 @@
   static jobjectArray InternalStackTraceToStackTraceElementArray(
       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE void VerifyStack() SHARED_REQUIRES(Locks::mutator_lock_);
 
   //
   // Offsets of various members of native Thread class, used by compiled code.
@@ -649,7 +646,7 @@
   }
 
   // Set the stack end to the value to be used during a stack overflow.
-  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetStackEndForStackOverflow() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Set the stack end to the value to be used during regular execution.
   void ResetDefaultStackEnd() {
@@ -712,7 +709,7 @@
   }
 
   // Number of references allocated in JNI ShadowFrames on this thread.
-  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
   }
 
@@ -720,7 +717,7 @@
   size_t NumHandleReferences();
 
   // Number of references allocated in handle scopes & JNI shadow frames on this thread.
-  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  size_t NumStackReferences() SHARED_REQUIRES(Locks::mutator_lock_) {
     return NumHandleReferences() + NumJniShadowFrameReferences();
   }
 
@@ -728,7 +725,7 @@
   bool HandleScopeContains(jobject obj) const;
 
   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   HandleScope* GetTopHandleScope() {
     return tlsPtr_.top_handle_scope;
@@ -867,10 +864,10 @@
   void RunCheckpointFunction();
 
   bool PassActiveSuspendBarriers(Thread* self)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_);
 
   void ClearSuspendBarrier(AtomicInteger* target)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
+      REQUIRES(Locks::thread_suspend_count_lock_);
 
   bool ReadFlag(ThreadFlag flag) const {
     return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
@@ -920,7 +917,7 @@
 
   // Push an object onto the allocation stack.
   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Set the thread local allocation pointers to the given pointers.
   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
@@ -974,8 +971,7 @@
 
  private:
   explicit Thread(bool daemon);
-  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
-                           Locks::thread_suspend_count_lock_);
+  ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
   void Destroy();
 
   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
@@ -983,7 +979,7 @@
   template<bool kTransactionActive>
   void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
                 jobject thread_name, jint thread_priority)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
   // Dbg::Disconnected.
@@ -998,23 +994,23 @@
     return old_state;
   }
 
-  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
   void DumpStack(std::ostream& os) const
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Out-of-line conveniences for debugging in gdb.
   static Thread* CurrentFromGdb();  // Like Thread::Current.
   // Like Thread::Dump(std::cerr).
-  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DumpFromGdb() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void* CreateCallback(void* arg);
 
   void HandleUncaughtExceptions(ScopedObjectAccess& soa)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Initialize a thread.
   //
@@ -1024,7 +1020,7 @@
   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
   // of false).
   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+      REQUIRES(Locks::runtime_shutdown_lock_);
   void InitCardTable();
   void InitCpu();
   void CleanupCpu();
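
The wait_mutex_ hunks above show the one syntactic wrinkle in the new scheme:
the capability sits behind a Mutex* member, so the negative form dereferences
the pointer (REQUIRES(!*wait_mutex_)) while the positive form can name the
pointer directly. A small sketch of the pattern, reusing the illustrative
macros and SketchMutex from the earlier sketch (SketchWaiter is hypothetical):

class SketchWaiter {
 public:
  // Was LOCKS_EXCLUDED(wait_mutex_): callers must not already hold the lock.
  void Notify() REQUIRES(!*wait_mutex_) {
    wait_mutex_->Lock();
    NotifyLocked();
    wait_mutex_->Unlock();
  }

 private:
  // Was EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_): for positive requirements
  // Clang dereferences the pointer implicitly.
  void NotifyLocked() REQUIRES(wait_mutex_) {}

  SketchMutex* wait_mutex_ = nullptr;
};
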
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index edd1e05..4c50181 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -46,27 +46,25 @@
   ~ThreadList();
 
   void DumpForSigQuit(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::mutator_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_);
   // For thread suspend timeout dumps.
   void Dump(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
   pid_t GetLockOwner();  // For SignalCatcher.
 
   // Thread suspension support.
   void ResumeAll()
       UNLOCK_FUNCTION(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
   void Resume(Thread* thread, bool for_debugger = false)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_);
 
   // Suspends all threads and gets exclusive access to the mutator_lock_.
   // If long suspend is true, then other threads that try to suspend will never time out. Long
   // suspend is currently used for hprof since large heaps take a long time.
   void SuspendAll(const char* cause, bool long_suspend = false)
       EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
 
   // Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
@@ -76,18 +74,16 @@
   // is set to true.
   Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
                               bool* timed_out)
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_);
 
   // Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
   // thread on success, else null. The thread id is used to identify the thread to avoid races with
   // the thread terminating. Note that as thread ids are recycled this may not suspend the expected
   // thread, which may be terminating. If the suspension times out then *timed_out is set to true.
   Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_);
 
   // Find an already suspended thread (or self) by its id.
   Thread* FindThreadByThreadId(uint32_t thin_lock_id);
@@ -95,87 +91,78 @@
   // Run a checkpoint on threads; running threads are not suspended but run the checkpoint inside
   // the suspend check. Returns how many checkpoints we should expect to run.
   size_t RunCheckpoint(Closure* checkpoint_function)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
-  LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                 Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   // Flip thread roots from from-space refs to to-space refs. Used by
   // the concurrent copying collector.
   size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
                          gc::collector::GarbageCollector* collector)
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_);
 
   // Suspends all threads
   void SuspendAllForDebugger()
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_);
 
   void SuspendSelfForDebugger()
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_suspend_count_lock_);
 
   // Resume all threads
   void ResumeAllForDebugger()
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   void UndoDebuggerSuspensions()
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   // Iterates over all the threads.
   void ForEach(void (*callback)(Thread*, void*), void* context)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
+      REQUIRES(Locks::thread_list_lock_);
 
   // Add/remove current thread from list.
   void Register(Thread* self)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_)
-      LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_);
-  void Unregister(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_);
+      REQUIRES(Locks::runtime_shutdown_lock_, !Locks::mutator_lock_, !Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_);
+  void Unregister(Thread* self) REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+                                         !Locks::thread_suspend_count_lock_);
 
   void VisitRoots(RootVisitor* visitor) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Return a copy of the thread list.
-  std::list<Thread*> GetList() EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) {
+  std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
     return list_;
   }
 
   void DumpNativeStacks(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      REQUIRES(!Locks::thread_list_lock_);
 
  private:
   uint32_t AllocThreadId(Thread* self);
-  void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(Locks::allocated_thread_ids_lock_);
+  void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_);
 
-  bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
-  bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
+  bool Contains(Thread* thread) REQUIRES(Locks::thread_list_lock_);
+  bool Contains(pid_t tid) REQUIRES(Locks::thread_list_lock_);
   size_t RunCheckpoint(Closure* checkpoint_function, bool includeSuspended)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   void DumpUnattachedThreads(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      REQUIRES(!Locks::thread_list_lock_);
 
   void SuspendAllDaemonThreads()
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
   void WaitForOtherNonDaemonThreadsToExit()
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   void SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr,
                           bool debug_suspend = false)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_);
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_);
 
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 88700e6..1ca0a21 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -61,7 +61,7 @@
 
  protected:
   ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size);
-  static void* Callback(void* arg) LOCKS_EXCLUDED(Locks::mutator_lock_);
+  static void* Callback(void* arg) REQUIRES(!Locks::mutator_lock_);
   virtual void Run();
 
   ThreadPool* const thread_pool_;
@@ -82,22 +82,22 @@
   }
 
   // Broadcast to the workers and tell them to empty out the work queue.
-  void StartWorkers(Thread* self);
+  void StartWorkers(Thread* self) REQUIRES(!task_queue_lock_);
 
   // Do not allow workers to grab any new tasks.
-  void StopWorkers(Thread* self);
+  void StopWorkers(Thread* self) REQUIRES(!task_queue_lock_);
 
   // Add a new task; the first available started worker will process it. Does not delete the task
   // after running it; that is the caller's responsibility.
-  void AddTask(Thread* self, Task* task);
+  void AddTask(Thread* self, Task* task) REQUIRES(!task_queue_lock_);
 
   explicit ThreadPool(const char* name, size_t num_threads);
   virtual ~ThreadPool();
 
   // Wait for all tasks currently on the queue to complete.
-  void Wait(Thread* self, bool do_work, bool may_hold_locks);
+  void Wait(Thread* self, bool do_work, bool may_hold_locks) REQUIRES(!task_queue_lock_);
 
-  size_t GetTaskCount(Thread* self);
+  size_t GetTaskCount(Thread* self) REQUIRES(!task_queue_lock_);
 
   // Returns the total amount of time workers have waited for tasks.
   uint64_t GetWaitTime() const {
@@ -106,18 +106,18 @@
 
   // Provides a way to bound the maximum number of worker threads; 'threads' must be less than the
   // thread count of the thread pool.
-  void SetMaxActiveWorkers(size_t threads);
+  void SetMaxActiveWorkers(size_t threads) REQUIRES(!task_queue_lock_);
 
  protected:
   // Get a task to run; blocks if there are no tasks left.
-  virtual Task* GetTask(Thread* self);
+  virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_);
 
   // Try to get a task, returning null if there is none available.
-  Task* TryGetTask(Thread* self);
-  Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
+  Task* TryGetTask(Thread* self) REQUIRES(!task_queue_lock_);
+  Task* TryGetTaskLocked() REQUIRES(task_queue_lock_);
 
   // Are we shutting down?
-  bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_) {
+  bool IsShuttingDown() const REQUIRES(task_queue_lock_) {
     return shutting_down_;
   }
 
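
The thread_pool.h hunks also add annotations the old macros never carried: the
public entry points now declare the negative capability on task_queue_lock_,
while the *Locked helpers keep the positive requirement. A minimal sketch of
that public/locked pairing, again with the illustrative macros from above
(SketchPool is hypothetical):

class SketchPool {
 public:
  // The public API takes the lock itself, so callers must not hold it.
  void AddTask(int task) REQUIRES(!lock_) {
    lock_.Lock();
    AddTaskLocked(task);
    lock_.Unlock();
  }

 private:
  // *Locked helpers run under the lock and say so.
  void AddTaskLocked(int task) REQUIRES(lock_) { tasks_[count_++] = task; }

  SketchMutex lock_;
  int tasks_[16] GUARDED_BY(lock_);
  int count_ GUARDED_BY(lock_) = 0;
};
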
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 487baed..4393430 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -57,7 +57,7 @@
       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         method_trace_(Trace::AllocStackTrace()) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     // Ignore runtime frames (in particular callee save).
     if (!m->IsRuntimeMethod()) {
@@ -218,7 +218,7 @@
   *buf++ = static_cast<uint8_t>(val >> 56);
 }
 
-static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
   BuildStackTraceVisitor build_trace_visitor(thread);
   build_trace_visitor.WalkStack();
   std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
@@ -636,7 +636,7 @@
 
 static void GetVisitedMethodsFromBitSets(
     const std::map<const DexFile*, DexIndexBitSet*>& seen_methods,
-    std::set<ArtMethod*>* visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    std::set<ArtMethod*>* visited_methods) SHARED_REQUIRES(Locks::mutator_lock_) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   for (auto& e : seen_methods) {
     DexIndexBitSet* bit_set = e.second;
@@ -749,7 +749,7 @@
 
 void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
                        ArtMethod* method, uint32_t dex_pc, ArtField* field)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   UNUSED(thread, this_object, method, dex_pc, field);
   // We're not registered to listen to this kind of event, so complain.
   LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
@@ -758,7 +758,7 @@
 void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
                           ArtMethod* method, uint32_t dex_pc, ArtField* field,
                           const JValue& field_value)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   UNUSED(thread, this_object, method, dex_pc, field, field_value);
   // We're not registered to listen to this kind of event, so complain.
   LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
@@ -793,14 +793,14 @@
 }
 
 void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   UNUSED(thread, exception_object);
   LOG(ERROR) << "Unexpected exception caught event in tracing";
 }
 
 void Trace::BackwardBranch(Thread* /*thread*/, ArtMethod* method,
                            int32_t /*dex_pc_offset*/)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
   LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method);
 }
 
diff --git a/runtime/trace.h b/runtime/trace.h
index 69e6acc..04be3dd 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -114,28 +114,20 @@
 
   static void Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags,
                     TraceOutputMode output_mode, TraceMode trace_mode, int interval_us)
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::thread_suspend_count_lock_,
-                     Locks::trace_lock_);
-  static void Pause() LOCKS_EXCLUDED(Locks::trace_lock_, Locks::thread_list_lock_);
-  static void Resume() LOCKS_EXCLUDED(Locks::trace_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
+               !Locks::trace_lock_);
+  static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_);
+  static void Resume() REQUIRES(!Locks::trace_lock_);
 
   // Stop tracing. This will finish the trace and write it to file/send it via DDMS.
   static void Stop()
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::trace_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
   // Abort tracing. This will just stop tracing and *not* write/send the collected data.
   static void Abort()
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::trace_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
   static void Shutdown()
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::trace_lock_);
-  static TracingMode GetMethodTracingMode() LOCKS_EXCLUDED(Locks::trace_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
+  static TracingMode GetMethodTracingMode() REQUIRES(!Locks::trace_lock_);
 
   bool UseWallClock();
   bool UseThreadCpuClock();
@@ -143,33 +135,37 @@
   uint32_t GetClockOverheadNanoSeconds();
 
   void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
 
   // InstrumentationListener implementation.
   void MethodEntered(Thread* thread, mirror::Object* this_object,
                      ArtMethod* method, uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      OVERRIDE;
   void MethodExited(Thread* thread, mirror::Object* this_object,
                     ArtMethod* method, uint32_t dex_pc,
                     const JValue& return_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      OVERRIDE;
   void MethodUnwind(Thread* thread, mirror::Object* this_object,
                     ArtMethod* method, uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      OVERRIDE;
   void DexPcMoved(Thread* thread, mirror::Object* this_object,
                   ArtMethod* method, uint32_t new_dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      OVERRIDE;
   void FieldRead(Thread* thread, mirror::Object* this_object,
                  ArtMethod* method, uint32_t dex_pc, ArtField* field)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
   void FieldWritten(Thread* thread, mirror::Object* this_object,
                     ArtMethod* method, uint32_t dex_pc, ArtField* field,
                     const JValue& field_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
   void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
   void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
   // Reuse an old stack trace if it exists, otherwise allocate a new one.
   static std::vector<ArtMethod*>* AllocStackTrace();
   // Clear and store an old stack trace for later use.
@@ -177,57 +173,61 @@
   // Save id and name of a thread before it exits.
   static void StoreExitingThreadInfo(Thread* thread);
 
-  static TraceOutputMode GetOutputMode() LOCKS_EXCLUDED(Locks::trace_lock_);
-  static TraceMode GetMode() LOCKS_EXCLUDED(Locks::trace_lock_);
-  static size_t GetBufferSize() LOCKS_EXCLUDED(Locks::trace_lock_);
+  static TraceOutputMode GetOutputMode() REQUIRES(!Locks::trace_lock_);
+  static TraceMode GetMode() REQUIRES(!Locks::trace_lock_);
+  static size_t GetBufferSize() REQUIRES(!Locks::trace_lock_);
 
  private:
   Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags,
         TraceOutputMode output_mode, TraceMode trace_mode);
 
   // The sampling interval in microseconds is passed as an argument.
-  static void* RunSamplingThread(void* arg) LOCKS_EXCLUDED(Locks::trace_lock_);
+  static void* RunSamplingThread(void* arg) REQUIRES(!Locks::trace_lock_);
 
   static void StopTracing(bool finish_tracing, bool flush_file)
-      LOCKS_EXCLUDED(Locks::mutator_lock_,
-                     Locks::thread_list_lock_,
-                     Locks::trace_lock_);
-  void FinishTracing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_)
+      // There is an annoying issue with static functions that create a new object and then call
+      // into that object: the analysis cannot tell that we don't currently hold the lock, so the
+      // negative annotations incorrectly produce a false positive here. TODO: Figure out how to
+      // annotate this.
+      NO_THREAD_SAFETY_ANALYSIS;
+  void FinishTracing() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
 
   void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
 
   void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
                            instrumentation::Instrumentation::InstrumentationEvent event,
                            uint32_t thread_clock_diff, uint32_t wall_clock_diff)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
 
   // Methods to output traced methods and threads.
-  void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods);
+  void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods)
+      REQUIRES(!*unique_methods_lock_);
   void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+  void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_);
 
   // Methods to register seen entities in streaming mode. The methods return true if the entity
   // is newly discovered.
   bool RegisterMethod(ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(streaming_lock_);
   bool RegisterThread(Thread* thread)
-      EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+      REQUIRES(streaming_lock_);
 
   // Copy a temporary buffer to the main buffer. Used for streaming. Exposed here for lock
   // annotation.
   void WriteToBuf(const uint8_t* src, size_t src_size)
-      EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+      REQUIRES(streaming_lock_);
 
-  uint32_t EncodeTraceMethod(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_);
+  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!*unique_methods_lock_);
   uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action)
-      LOCKS_EXCLUDED(unique_methods_lock_);
-  ArtMethod* DecodeTraceMethod(uint32_t tmid) LOCKS_EXCLUDED(unique_methods_lock_);
-  std::string GetMethodLine(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!*unique_methods_lock_);
+  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_);
+  std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
 
   // Singleton instance of the Trace or null when no method tracing is active.
   static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
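
The StopTracing() hunk above is the one place this change had to switch the
analysis off. A contrived reconstruction of that false positive (not the real
Trace code; SketchTrace is invented, reusing SketchMutex and the macros from
the first sketch):

#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

class SketchTrace {
 public:
  // A static function reaches the instance through a global, so its own
  // declaration cannot name !the_trace_->lock_ as a negative requirement.
  // Without the opt-out, the call to Flush() warns even though the lock is
  // not actually held.
  static void Stop() NO_THREAD_SAFETY_ANALYSIS {
    the_trace_->Flush();
  }

 private:
  void Flush() REQUIRES(!lock_) {
    lock_.Lock();
    lock_.Unlock();
  }

  SketchMutex lock_;
  static SketchTrace* the_trace_;
};

SketchTrace* SketchTrace::the_trace_ = nullptr;
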
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 030478c..8ff0614 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -46,63 +46,63 @@
   ~Transaction();
 
   void Abort(const std::string& abort_message)
-      LOCKS_EXCLUDED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void ThrowAbortError(Thread* self, const std::string* abort_message)
-      LOCKS_EXCLUDED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsAborted() LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(!log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsAborted() REQUIRES(!log_lock_);
 
   // Record object field changes.
   void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                                bool is_volatile)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(!log_lock_);
   void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                                bool is_volatile)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(!log_lock_);
   void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                             bool is_volatile)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(!log_lock_);
   void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                              bool is_volatile)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(!log_lock_);
   void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                           bool is_volatile)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(!log_lock_);
   void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                           bool is_volatile)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(!log_lock_);
   void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
                                  mirror::Object* value, bool is_volatile)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(!log_lock_);
 
   // Record array change.
   void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
-      LOCKS_EXCLUDED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Record intern string table changes.
   void RecordStrongStringInsertion(mirror::String* s)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(Locks::intern_table_lock_)
+      REQUIRES(!log_lock_);
   void RecordWeakStringInsertion(mirror::String* s)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(Locks::intern_table_lock_)
+      REQUIRES(!log_lock_);
   void RecordStrongStringRemoval(mirror::String* s)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(Locks::intern_table_lock_)
+      REQUIRES(!log_lock_);
   void RecordWeakStringRemoval(mirror::String* s)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(Locks::intern_table_lock_)
+      REQUIRES(!log_lock_);
 
   // Abort transaction by undoing all recorded changes.
   void Rollback()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(log_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!log_lock_);
 
   void VisitRoots(RootVisitor* visitor)
-      LOCKS_EXCLUDED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(!log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   class ObjectLog : public ValueObject {
@@ -115,8 +115,8 @@
     void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
     void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
 
-    void Undo(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-    void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    void Undo(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+    void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
     size_t Size() const {
       return field_values_.size();
@@ -141,7 +141,7 @@
 
     void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
     void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
-                        const FieldValue& field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                        const FieldValue& field_value) SHARED_REQUIRES(Locks::mutator_lock_);
 
     // Maps field's offset to its value.
     std::map<uint32_t, FieldValue> field_values_;
@@ -151,7 +151,7 @@
    public:
     void LogValue(size_t index, uint64_t value);
 
-    void Undo(mirror::Array* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    void Undo(mirror::Array* obj) SHARED_REQUIRES(Locks::mutator_lock_);
 
     size_t Size() const {
       return array_values_.size();
@@ -159,7 +159,7 @@
 
    private:
     void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index,
-                        uint64_t value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                        uint64_t value) SHARED_REQUIRES(Locks::mutator_lock_);
 
     // Maps index to value.
     // TODO: use JValue instead?
@@ -182,9 +182,9 @@
     }
 
     void Undo(InternTable* intern_table)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
-    void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+        SHARED_REQUIRES(Locks::mutator_lock_)
+        REQUIRES(Locks::intern_table_lock_);
+    void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
 
    private:
     mirror::String* str_;
@@ -193,31 +193,31 @@
   };
 
   void LogInternedString(const InternStringLog& log)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-      LOCKS_EXCLUDED(log_lock_);
+      REQUIRES(Locks::intern_table_lock_)
+      REQUIRES(!log_lock_);
 
   void UndoObjectModifications()
-      EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void UndoArrayModifications()
-      EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void UndoInternStringTableModifications()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(Locks::intern_table_lock_)
+      REQUIRES(log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void VisitObjectLogs(RootVisitor* visitor)
-      EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void VisitArrayLogs(RootVisitor* visitor)
-      EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void VisitStringLogs(RootVisitor* visitor)
-      EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      REQUIRES(log_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const std::string& GetAbortMessage() LOCKS_EXCLUDED(log_lock_);
+  const std::string& GetAbortMessage() REQUIRES(!log_lock_);
 
   Mutex log_lock_ ACQUIRED_AFTER(Locks::intern_table_lock_);
   std::map<mirror::Object*, ObjectLog> object_logs_ GUARDED_BY(log_lock_);
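
Aside: the hunk above, around the intern-table logging methods, shows both halves of the annotation migration side by side. REQUIRES(Locks::intern_table_lock_) is a positive requirement (the caller must already hold the capability), while REQUIRES(!log_lock_) is a negative one (the caller must provably not hold it, because the function acquires it itself). Below is a minimal standalone sketch of that pattern; it is not ART code, the Mutex and TransactionSketch names are hypothetical, and the attribute spellings follow the clang thread-safety documentation.

// Hypothetical stand-ins; the no-op Lock()/Unlock() bodies exist only so the
// sketch compiles cleanly under -Wthread-safety -Wthread-safety-negative.
#define CAPABILITY(x)  __attribute__((capability(x)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x)  __attribute__((guarded_by(x)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() {}
  void Unlock() RELEASE() {}
};

class TransactionSketch {
 public:
  // Negative requirement: callers must not hold log_lock_, since Record()
  // acquires it itself. Declaring !log_lock_ is also what lets Record()
  // take the lock without a warning under -Wthread-safety-negative.
  void Record(int value) REQUIRES(!log_lock_) {
    log_lock_.Lock();
    last_ = value;
    log_lock_.Unlock();
  }

  // Positive requirement: only callable with log_lock_ already held, so the
  // guarded write below is accepted.
  void Flush() REQUIRES(log_lock_) { last_ = 0; }

 private:
  Mutex log_lock_;
  int last_ GUARDED_BY(log_lock_) = 0;
};

The old LOCKS_EXCLUDED only warned when the analysis could already see the lock held at a call site; a negative requirement instead becomes part of the function's contract, so every caller of Record() must itself establish !log_lock_, usually by restating REQUIRES(!log_lock_) in its own declaration, which is exactly what the conversions above do.
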
diff --git a/runtime/utf.h b/runtime/utf.h
index 7f05248..1193d29 100644
--- a/runtime/utf.h
+++ b/runtime/utf.h
@@ -77,7 +77,7 @@
  * The java.lang.String hashCode() algorithm.
  */
 int32_t ComputeUtf16Hash(mirror::CharArray* chars, int32_t offset, size_t char_count)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count);
 
 // Compute a hash code of a modified UTF-8 string. Not the standard java hash since it returns a
diff --git a/runtime/utils.h b/runtime/utils.h
index 1ef98e7..4fa5f5a 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -111,22 +111,22 @@
 // "[[I" would be "int[][]", "[Ljava/lang/String;" would be
 // "java.lang.String[]", and so forth.
 std::string PrettyDescriptor(mirror::String* descriptor)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 std::string PrettyDescriptor(const char* descriptor);
 std::string PrettyDescriptor(mirror::Class* klass)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 std::string PrettyDescriptor(Primitive::Type type);
 
 // Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
 // "int a.b.C.f" (depending on the value of 'with_type').
 std::string PrettyField(ArtField* f, bool with_type = true)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true);
 
 // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or
 // "a.b.C.m(II)V" (depending on the value of 'with_signature').
 std::string PrettyMethod(ArtMethod* m, bool with_signature = true)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true);
 
 // Returns a human-readable form of the name of the *class* of the given object.
@@ -134,7 +134,7 @@
 // be "java.lang.String". Given an array of int, the output would be "int[]".
 // Given String.class, the output would be "java.lang.Class<java.lang.String>".
 std::string PrettyTypeOf(mirror::Object* obj)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Returns a human-readable form of the type at an index in the specified dex file.
 // Example outputs: char[], java.lang.String.
@@ -143,11 +143,11 @@
 // Returns a human-readable form of the name of the given class.
 // Given String.class, the output would be "java.lang.Class<java.lang.String>".
 std::string PrettyClass(mirror::Class* c)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Returns a human-readable form of the name of the given class with its class loader.
 std::string PrettyClassAndClassLoader(mirror::Class* c)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 // Returns a human-readable version of the Java part of the access flags, e.g., "private static "
 // (note the trailing whitespace).
@@ -182,10 +182,10 @@
 
 // Returns the JNI native function name for the non-overloaded method 'm'.
 std::string JniShortName(ArtMethod* m)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 // Returns the JNI native function name for the overloaded method 'm'.
 std::string JniLongName(ArtMethod* m)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 bool ReadFileToString(const std::string& file_name, std::string* result);
 bool PrintFileToLog(const std::string& file_name, LogSeverity level);
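
All of the utils.h helpers above read managed-heap objects, so they carry SHARED_REQUIRES(Locks::mutator_lock_): holding the capability in shared (reader) mode is sufficient, and an exclusive holder satisfies it as well. Here is a minimal sketch of how a shared requirement interacts with a reader-writer capability; ReaderWriterMutex, mutator_lock, and PrettyThing are hypothetical stand-ins (not ART's Locks class), with attribute spellings from the clang docs.

#define CAPABILITY(x)         __attribute__((capability(x)))
#define REQUIRES(...)         __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...)  __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE_SHARED(...)   __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)   __attribute__((release_shared_capability(__VA_ARGS__)))

class CAPABILITY("mutex") ReaderWriterMutex {
 public:
  void ReaderLock() ACQUIRE_SHARED() {}    // no-op bodies, illustration only
  void ReaderUnlock() RELEASE_SHARED() {}
};

ReaderWriterMutex mutator_lock;  // hypothetical stand-in

// Shared or exclusive ownership of mutator_lock satisfies this.
const char* PrettyThing() SHARED_REQUIRES(mutator_lock) {
  return "thing";
}

// With -Wthread-safety-negative enabled, even the function that takes the
// lock states the negative requirement, since acquiring needs !mutator_lock.
void Caller() REQUIRES(!mutator_lock) {
  mutator_lock.ReaderLock();   // shared acquisition...
  PrettyThing();               // ...is enough for SHARED_REQUIRES: no warning
  mutator_lock.ReaderUnlock();
}
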
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 8c950a0..d63b455 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3590,7 +3590,7 @@
     ++pos_;
   }
 
-  const char* GetDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const char* GetDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
     return res_method_->GetTypeDescriptorFromTypeIdx(params_->GetTypeItem(pos_).type_idx_);
   }
 
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index a2835f5..3b59bba 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -150,13 +150,13 @@
   /* Verify a class. Returns "kNoFailure" on success. */
   static FailureKind VerifyClass(Thread* self, mirror::Class* klass, bool allow_soft_failures,
                                  std::string* error)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static FailureKind VerifyClass(Thread* self, const DexFile* dex_file,
                                  Handle<mirror::DexCache> dex_cache,
                                  Handle<mirror::ClassLoader> class_loader,
                                  const DexFile::ClassDef* class_def,
                                  bool allow_soft_failures, std::string* error)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static MethodVerifier* VerifyMethodAndDump(Thread* self,
                                              VariableIndentationOutputStream* vios,
@@ -167,10 +167,10 @@
                                              const DexFile::ClassDef* class_def,
                                              const DexFile::CodeItem* code_item, ArtMethod* method,
                                              uint32_t method_access_flags)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static FailureKind VerifyMethod(ArtMethod* method, bool allow_soft_failures,
-                                  std::string* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                                  std::string* error) SHARED_REQUIRES(Locks::mutator_lock_);
 
   uint8_t EncodePcToReferenceMapData() const;
 
@@ -193,29 +193,29 @@
 
   // Dump the state of the verifier, namely each instruction, what flags are set on it, register
   // information.
-  void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void Dump(VariableIndentationOutputStream* vios) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+  void Dump(VariableIndentationOutputStream* vios) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
   // to the locks held at 'dex_pc' in method 'm'.
   static void FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc,
                                std::vector<uint32_t>* monitor_enter_dex_pcs)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns the accessed field corresponding to the quick instruction's field
   // offset at 'dex_pc' in method 'm'.
   static ArtField* FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns the invoked method corresponding to the quick instruction's vtable
   // index at 'dex_pc' in method 'm'.
   static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void Init() SHARED_REQUIRES(Locks::mutator_lock_);
   static void Shutdown();
 
   bool CanLoadClasses() const {
@@ -228,7 +228,7 @@
                  ArtMethod* method,
                  uint32_t access_flags, bool can_load_classes, bool allow_soft_failures,
                  bool need_precise_constants, bool allow_thread_suspension)
-          SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+          SHARED_REQUIRES(Locks::mutator_lock_)
       : MethodVerifier(self, dex_file, dex_cache, class_loader, class_def, code_item, method_idx,
                        method, access_flags, can_load_classes, allow_soft_failures,
                        need_precise_constants, false, allow_thread_suspension) {}
@@ -237,22 +237,22 @@
 
   // Run verification on the method. Returns true if verification completes and false if the input
   // has an irrecoverable corruption.
-  bool Verify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool Verify() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Describe VRegs at the given dex pc.
   std::vector<int32_t> DescribeVRegs(uint32_t dex_pc);
 
   static void VisitStaticRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void VisitRoots(RootVisitor* visitor, const RootInfo& roots)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Accessors used by the compiler via CompilerCallback
   const DexFile::CodeItem* CodeItem() const;
   RegisterLine* GetRegLine(uint32_t dex_pc);
   const InstructionFlags& GetInstructionFlags(size_t index) const;
-  mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
   MethodReference GetMethodReference() const;
   uint32_t GetAccessFlags() const;
   bool HasCheckCasts() const;
@@ -263,15 +263,15 @@
   }
 
   const RegType& ResolveCheckedClass(uint32_t class_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Returns the method of a quick invoke or null if it cannot be found.
   ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
                                            bool is_range, bool allow_failure)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Returns the accessed field of a quick field access (iget/iput-quick) or null
   // if it cannot be found.
   ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Is the method being verified a constructor?
   bool IsConstructor() const {
@@ -295,7 +295,7 @@
                  ArtMethod* method, uint32_t access_flags,
                  bool can_load_classes, bool allow_soft_failures, bool need_precise_constants,
                  bool verify_to_dump, bool allow_thread_suspension)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Adds the given string to the beginning of the last failure message.
   void PrependToLastFailMessage(std::string);
@@ -321,18 +321,18 @@
                                   const DexFile::CodeItem* code_item,
                                   ArtMethod* method, uint32_t method_access_flags,
                                   bool allow_soft_failures, bool need_precise_constants)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FindLocksAtDexPc() SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   SafeMap<uint32_t, std::set<uint32_t>>& FindStringInitMap()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Compute the width of the instruction at each address in the instruction stream, and store it in
@@ -360,7 +360,7 @@
    * Returns "false" if something in the exception table looks fishy, but we're expecting the
    * exception table to be somewhat sane.
    */
-  bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool ScanTryCatchBlocks() SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Perform static verification on all instructions in a method.
@@ -466,11 +466,11 @@
                        bool* selfOkay);
 
   /* Perform detailed code-flow analysis on a single method. */
-  bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool VerifyCodeFlow() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Set the register types for the first instruction in the method based on the method signature.
   // This has the side-effect of validating the signature.
-  bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool SetTypesFromSignature() SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Perform code flow on a method.
@@ -518,7 +518,7 @@
    * reordering by specifying that you can't execute the new-instance instruction if a register
    * contains an uninitialized instance created by that same instruction.
    */
-  bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool CodeFlowVerifyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Perform verification for a single instruction.
@@ -530,33 +530,33 @@
    * addresses. Does not set or clear any other flags in "insn_flags_".
    */
   bool CodeFlowVerifyInstruction(uint32_t* start_guess)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Perform verification of a new-array instruction.
   void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Helper to perform verification on puts of primitive type.
   void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
-                          const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                          const uint32_t vregA) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Perform verification of an aget instruction. The destination register's type will be set to
   // be that of the component type of the array unless the array type is unknown, in which case a
   // bottom type inferred from the type of the instruction is used. is_primitive is false for an
   // aget-object.
   void VerifyAGet(const Instruction* inst, const RegType& insn_type,
-                  bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                  bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Perform verification of an aput instruction.
   void VerifyAPut(const Instruction* inst, const RegType& insn_type,
-                  bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                  bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Lookup instance field and fail for resolution violations
   ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Lookup static field and fail for resolution violations
-  ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ArtField* GetStaticField(int field_idx) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Perform verification of an iget/sget/iput/sput instruction.
   enum class FieldAccessType {  // private
@@ -566,16 +566,16 @@
   template <FieldAccessType kAccType>
   void VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type,
                            bool is_primitive, bool is_static)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <FieldAccessType kAccType>
   void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolves a class based on an index and performs access checks to ensure the referrer can
   // access the resolved class.
   const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler
@@ -583,7 +583,7 @@
    * exception handler can be found or if the Join of exception types fails.
    */
   const RegType& GetCaughtExceptionType()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Resolves a method based on an index and performs access checks to ensure
@@ -591,7 +591,7 @@
    * Does not throw exceptions.
    */
   ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Verify the arguments to a method. We're executing in "method", making
@@ -618,22 +618,22 @@
   ArtMethod* VerifyInvocationArgs(const Instruction* inst,
                                           MethodType method_type,
                                           bool is_range, bool is_super)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Similar checks to the above, but on the proto. Will be used when the method cannot be
   // resolved.
   void VerifyInvocationArgsUnresolvedMethod(const Instruction* inst, MethodType method_type,
                                             bool is_range)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   template <class T>
   ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst,
                                                       MethodType method_type, bool is_range,
                                                       ArtMethod* res_method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range)
-  SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Verify that the target instruction is not "move-exception". It's important that the only way
@@ -665,18 +665,18 @@
   * Returns "false" if an error is encountered.
   */
   bool UpdateRegisters(uint32_t next_insn, RegisterLine* merge_line, bool update_merge_line)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Return the register type for the method.
-  const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const RegType& GetMethodReturnType() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get a type representing the declaring class of the method.
-  const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const RegType& GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
 
   InstructionFlags* CurrentInsnFlags();
 
   const RegType& DetermineCat1Constant(int32_t value, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Try to create a register type from the given class. In case a precise type is requested, but
   // the class is not instantiable, a soft error (of type NO_CLASS) will be enqueued and a
@@ -684,7 +684,7 @@
   // Note: we reuse NO_CLASS as this will throw an exception at runtime, when the failing class is
   //       actually touched.
   const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // The thread we're verifying on.
   Thread* const self_;
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index 3994536..2ab6b4a 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -30,7 +30,7 @@
 class MethodVerifierTest : public CommonRuntimeTest {
  protected:
   void VerifyClass(const std::string& descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ASSERT_FALSE(descriptor.empty());
     Thread* self = Thread::Current();
     mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
@@ -42,7 +42,7 @@
   }
 
   void VerifyDexFile(const DexFile& dex)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // Verify all the classes defined in this file
     for (size_t i = 0; i < dex.NumClassDefs(); i++) {
       const DexFile::ClassDef& class_def = dex.GetClassDef(i);
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 6e23234..9319cc2 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -46,19 +46,19 @@
 const IntegerType* IntegerType::instance_ = nullptr;
 
 PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+    SHARED_REQUIRES(Locks::mutator_lock_)
     : RegType(klass, descriptor, cache_id) {
   CHECK(klass != nullptr);
   CHECK(!descriptor.empty());
 }
 
 Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+    SHARED_REQUIRES(Locks::mutator_lock_)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
 Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+    SHARED_REQUIRES(Locks::mutator_lock_)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
@@ -280,7 +280,7 @@
   }
 }
 
-std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() const SHARED_REQUIRES(Locks::mutator_lock_) {
   return "Undefined";
 }
 
@@ -538,7 +538,7 @@
   }
 }
 
-bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) {
   if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
     // Primitive arrays will always resolve
     DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '[');
@@ -551,11 +551,11 @@
   }
 }
 
-bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
   return IsReference() && GetClass()->IsObjectClass();
 }
 
-bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) {
   if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
     return descriptor_[0] == '[';
   } else if (HasClass()) {
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index d08c937..4893088 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -112,7 +112,7 @@
   }
   // The high half that corresponds to this low half
   const RegType& HighHalf(RegTypeCache* cache) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool IsConstantBoolean() const;
   virtual bool IsConstantChar() const { return false; }
@@ -165,20 +165,20 @@
     return result;
   }
   virtual bool HasClassVirtual() const { return false; }
-  bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
   Primitive::Type GetPrimitiveType() const;
   bool IsJavaLangObjectArray() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
   const std::string& GetDescriptor() const {
     DCHECK(HasClass() ||
            (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
             !IsUnresolvedSuperClass()));
     return descriptor_;
   }
-  mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(!IsUnresolvedReference());
     DCHECK(!klass_.IsNull()) << Dump();
     DCHECK(HasClass());
@@ -186,25 +186,25 @@
   }
   uint16_t GetId() const { return cache_id_; }
   const RegType& GetSuperClass(RegTypeCache* cache) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   virtual std::string Dump() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   // Can this type access other?
   bool CanAccess(const RegType& other) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can this type access a member with the given properties?
   bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can this type be assigned by src?
   // Note: Object and interface types may always be assigned to one another, see
   // comment on ClassJoin.
   bool IsAssignableFrom(const RegType& src) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can this array type potentially be assigned by src?
   // This function is necessary as array types are valid even if their component types are not,
@@ -215,13 +215,13 @@
   // (both are reference types).
   bool CanAssignArray(const RegType& src, RegTypeCache& reg_types,
                       Handle<mirror::ClassLoader> class_loader, bool* soft_error) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't
   // allow assignment to an interface from an Object.
   bool IsStrictlyAssignableFrom(const RegType& src) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Are these RegTypes the same?
   bool Equals(const RegType& other) const { return GetId() == other.GetId(); }
@@ -229,7 +229,7 @@
   // Compute the merge of this register from one edge (path) with incoming_type
   // from another.
   const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * A basic Join operation on classes. For a pair of types S and T the Join,
@@ -258,23 +258,23 @@
    * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
    */
   static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   virtual ~RegType() {}
 
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  protected:
   RegType(mirror::Class* klass, const std::string& descriptor,
-          uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+          uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
   }
 
-  void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   const std::string descriptor_;
   mutable GcRoot<mirror::Class>
@@ -285,7 +285,7 @@
 
  private:
   static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(RegType);
 };
@@ -295,7 +295,7 @@
  public:
   bool IsConflict() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the singleton Conflict instance.
   static const ConflictType* GetInstance() PURE;
@@ -304,14 +304,14 @@
   static const ConflictType* CreateInstance(mirror::Class* klass,
                                             const std::string& descriptor,
                                             uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Destroy the singleton instance.
   static void Destroy();
 
  private:
   ConflictType(mirror::Class* klass, const std::string& descriptor,
-               uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+               uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
   static const ConflictType* instance_;
@@ -324,7 +324,7 @@
  public:
   bool IsUndefined() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the singleton Undefined instance.
   static const UndefinedType* GetInstance() PURE;
@@ -333,14 +333,14 @@
   static const UndefinedType* CreateInstance(mirror::Class* klass,
                                              const std::string& descriptor,
                                              uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Destroy the singleton instance.
   static void Destroy();
 
  private:
   UndefinedType(mirror::Class* klass, const std::string& descriptor,
-                uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
   static const UndefinedType* instance_;
@@ -349,7 +349,7 @@
 class PrimitiveType : public RegType {
  public:
   PrimitiveType(mirror::Class* klass, const std::string& descriptor,
-                uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool HasClassVirtual() const OVERRIDE { return true; }
 };
@@ -357,23 +357,23 @@
 class Cat1Type : public PrimitiveType {
  public:
   Cat1Type(mirror::Class* klass, const std::string& descriptor,
-           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+           uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 class IntegerType : public Cat1Type {
  public:
   bool IsInteger() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const IntegerType* CreateInstance(mirror::Class* klass,
                                            const std::string& descriptor,
                                            uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const IntegerType* GetInstance() PURE;
   static void Destroy();
 
  private:
   IntegerType(mirror::Class* klass, const std::string& descriptor,
-              uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+              uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const IntegerType* instance_;
 };
@@ -381,17 +381,17 @@
 class BooleanType FINAL : public Cat1Type {
  public:
   bool IsBoolean() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const BooleanType* CreateInstance(mirror::Class* klass,
                                            const std::string& descriptor,
                                            uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const BooleanType* GetInstance() PURE;
   static void Destroy();
 
  private:
   BooleanType(mirror::Class* klass, const std::string& descriptor,
-              uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+              uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
 
   static const BooleanType* instance_;
@@ -400,17 +400,17 @@
 class ByteType FINAL : public Cat1Type {
  public:
   bool IsByte() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const ByteType* CreateInstance(mirror::Class* klass,
                                         const std::string& descriptor,
                                         uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const ByteType* GetInstance() PURE;
   static void Destroy();
 
  private:
   ByteType(mirror::Class* klass, const std::string& descriptor,
-           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+           uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const ByteType* instance_;
 };
@@ -418,17 +418,17 @@
 class ShortType FINAL : public Cat1Type {
  public:
   bool IsShort() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const ShortType* CreateInstance(mirror::Class* klass,
                                          const std::string& descriptor,
                                          uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const ShortType* GetInstance() PURE;
   static void Destroy();
 
  private:
   ShortType(mirror::Class* klass, const std::string& descriptor,
-            uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const ShortType* instance_;
 };
@@ -436,17 +436,17 @@
 class CharType FINAL : public Cat1Type {
  public:
   bool IsChar() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const CharType* CreateInstance(mirror::Class* klass,
                                         const std::string& descriptor,
                                         uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const CharType* GetInstance() PURE;
   static void Destroy();
 
  private:
   CharType(mirror::Class* klass, const std::string& descriptor,
-           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+           uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const CharType* instance_;
 };
@@ -454,17 +454,17 @@
 class FloatType FINAL : public Cat1Type {
  public:
   bool IsFloat() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const FloatType* CreateInstance(mirror::Class* klass,
                                          const std::string& descriptor,
                                          uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const FloatType* GetInstance() PURE;
   static void Destroy();
 
  private:
   FloatType(mirror::Class* klass, const std::string& descriptor,
-            uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const FloatType* instance_;
 };
@@ -472,86 +472,86 @@
 class Cat2Type : public PrimitiveType {
  public:
   Cat2Type(mirror::Class* klass, const std::string& descriptor,
-           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+           uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 class LongLoType FINAL : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsLongLo() const OVERRIDE { return true; }
   bool IsLong() const OVERRIDE { return true; }
   static const LongLoType* CreateInstance(mirror::Class* klass,
                                           const std::string& descriptor,
                                           uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const LongLoType* GetInstance() PURE;
   static void Destroy();
 
  private:
   LongLoType(mirror::Class* klass, const std::string& descriptor,
-             uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+             uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const LongLoType* instance_;
 };
 
 class LongHiType FINAL : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsLongHi() const OVERRIDE { return true; }
   static const LongHiType* CreateInstance(mirror::Class* klass,
                                           const std::string& descriptor,
                                           uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const LongHiType* GetInstance() PURE;
   static void Destroy();
 
  private:
   LongHiType(mirror::Class* klass, const std::string& descriptor,
-             uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+             uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const LongHiType* instance_;
 };
 
 class DoubleLoType FINAL : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsDoubleLo() const OVERRIDE { return true; }
   bool IsDouble() const OVERRIDE { return true; }
   static const DoubleLoType* CreateInstance(mirror::Class* klass,
                                             const std::string& descriptor,
                                             uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const DoubleLoType* GetInstance() PURE;
   static void Destroy();
 
  private:
   DoubleLoType(mirror::Class* klass, const std::string& descriptor,
-               uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+               uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const DoubleLoType* instance_;
 };
 
 class DoubleHiType FINAL : public Cat2Type {
  public:
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsDoubleHi() const OVERRIDE { return true; }
   static const DoubleHiType* CreateInstance(mirror::Class* klass,
                                             const std::string& descriptor,
                                             uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static const DoubleHiType* GetInstance() PURE;
   static void Destroy();
 
  private:
   DoubleHiType(mirror::Class* klass, const std::string& descriptor,
-               uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+               uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const DoubleHiType* instance_;
 };
 
 class ConstantType : public RegType {
  public:
-  ConstantType(uint32_t constant, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  ConstantType(uint32_t constant, uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(nullptr, "", cache_id), constant_(constant) {
   }
 
@@ -609,58 +609,58 @@
 class PreciseConstType FINAL : public ConstantType {
  public:
   PreciseConstType(uint32_t constant, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {}
 
   bool IsPreciseConstant() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 class PreciseConstLoType FINAL : public ConstantType {
  public:
   PreciseConstLoType(uint32_t constant, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {}
   bool IsPreciseConstantLo() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 class PreciseConstHiType FINAL : public ConstantType {
  public:
   PreciseConstHiType(uint32_t constant, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {}
   bool IsPreciseConstantHi() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 class ImpreciseConstType FINAL : public ConstantType {
  public:
   ImpreciseConstType(uint32_t constant, uint16_t cache_id)
-       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+       SHARED_REQUIRES(Locks::mutator_lock_)
        : ConstantType(constant, cache_id) {
   }
   bool IsImpreciseConstant() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 class ImpreciseConstLoType FINAL : public ConstantType {
  public:
   ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {}
   bool IsImpreciseConstantLo() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 class ImpreciseConstHiType FINAL : public ConstantType {
  public:
   ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : ConstantType(constant, cache_id) {}
   bool IsImpreciseConstantHi() const OVERRIDE { return true; }
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 // Common parent of all uninitialized types. Uninitialized types are created by
@@ -690,14 +690,14 @@
   UninitializedReferenceType(mirror::Class* klass,
                              const std::string& descriptor,
                              uint32_t allocation_pc, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
 
   bool IsUninitializedReference() const OVERRIDE { return true; }
 
   bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 // Similar to UnresolvedReferenceType but not yet having been passed to a
@@ -706,7 +706,7 @@
  public:
   UnresolvedUninitializedRefType(const std::string& descriptor,
                                  uint32_t allocation_pc, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
@@ -717,10 +717,10 @@
 
   bool IsUnresolvedTypes() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 // Similar to UninitializedReferenceType but special case for the this argument
@@ -730,7 +730,7 @@
   UninitializedThisReferenceType(mirror::Class* klass,
                                  const std::string& descriptor,
                                  uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, 0, cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
@@ -741,17 +741,17 @@
 
   bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
  public:
   UnresolvedUninitializedThisRefType(const std::string& descriptor,
                                      uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, 0, cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
@@ -762,10 +762,10 @@
 
   bool IsUnresolvedTypes() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 // A type of register holding a reference to an Object of type GetClass or a
@@ -773,7 +773,7 @@
 class ReferenceType FINAL : public RegType {
  public:
   ReferenceType(mirror::Class* klass, const std::string& descriptor,
-                uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
   bool IsReference() const OVERRIDE { return true; }
@@ -782,7 +782,7 @@
 
   bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 // A type of register holding a reference to an Object of type GetClass and only
@@ -792,7 +792,7 @@
  public:
   PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
                        uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool IsPreciseReference() const OVERRIDE { return true; }
 
@@ -800,14 +800,14 @@
 
   bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 // Common parent of unresolved types.
 class UnresolvedType : public RegType {
  public:
   UnresolvedType(const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(nullptr, descriptor, cache_id) {}
 
   bool IsNonZeroReferenceTypes() const OVERRIDE;
@@ -819,7 +819,7 @@
 class UnresolvedReferenceType FINAL : public UnresolvedType {
  public:
   UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : UnresolvedType(descriptor, cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
@@ -830,10 +830,10 @@
 
   bool IsUnresolvedTypes() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
 // Type representing the super-class of an unresolved type.
@@ -841,7 +841,7 @@
  public:
   UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
                        uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : UnresolvedType("", cache_id),
         unresolved_child_id_(child_id),
         reg_type_cache_(reg_type_cache) {
@@ -859,10 +859,10 @@
     return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
   }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   const uint16_t unresolved_child_id_;
   const RegTypeCache* const reg_type_cache_;
@@ -875,7 +875,7 @@
  public:
   UnresolvedMergedType(uint16_t left_id, uint16_t right_id,
                        const RegTypeCache* reg_type_cache, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : UnresolvedType("", cache_id),
         reg_type_cache_(reg_type_cache),
         merged_types_(left_id, right_id) {
@@ -897,17 +897,17 @@
 
   bool IsUnresolvedTypes() const OVERRIDE { return true; }
 
-  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 
   const RegTypeCache* const reg_type_cache_;
   const std::pair<uint16_t, uint16_t> merged_types_;
 };
 
 std::ostream& operator<<(std::ostream& os, const RegType& rhs)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+    SHARED_REQUIRES(Locks::mutator_lock_);
 
 }  // namespace verifier
 }  // namespace art
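
Every change in reg_type.h above is the same one-token rename, so the behavioral difference introduced by this migration still comes down to the negative form. A compact sketch of what -Wthread-safety-negative flags at a call site; mu, Helper, and the macros here are hypothetical, and the warning text in the comment is approximate.

#define CAPABILITY(x)  __attribute__((capability(x)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {};
Mutex mu;

void Helper() REQUIRES(!mu) {}

// Clean: the !mu obligation is restated, so the proof is pushed out to
// Declared()'s own callers.
void Declared() REQUIRES(!mu) {
  Helper();
}

void Undeclared() {
  Helper();  // warning: calling function 'Helper' requires negative
             // capability '!mu'
}
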
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index b371d7e..d656500 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -31,7 +31,7 @@
 const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
 
 static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (entry->IsPreciseReference() == precise) {
     // We were or weren't looking for a precise reference and we found what we need.
     return true;
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 4b3105c..8319de6 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -42,7 +42,7 @@
  public:
   explicit RegTypeCache(bool can_load_classes);
   ~RegTypeCache();
-  static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!RegTypeCache::primitive_initialized_) {
       CHECK_EQ(RegTypeCache::primitive_count_, 0);
       CreatePrimitiveAndSmallConstantTypes();
@@ -53,110 +53,110 @@
   static void ShutDown();
   const art::verifier::RegType& GetFromId(uint16_t id) const;
   const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const ConstantType& FromCat1Const(int32_t value, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const RegType& FromUnresolvedSuperClass(const RegType& child)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  const ConstantType& Zero() SHARED_REQUIRES(Locks::mutator_lock_) {
     return FromCat1Const(0, true);
   }
-  const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const ConstantType& One() SHARED_REQUIRES(Locks::mutator_lock_) {
     return FromCat1Const(1, true);
   }
   size_t GetCacheSize() {
     return entries_.size();
   }
-  const BooleanType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const BooleanType& Boolean() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *BooleanType::GetInstance();
   }
-  const ByteType& Byte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const ByteType& Byte() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *ByteType::GetInstance();
   }
-  const CharType& Char() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const CharType& Char() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *CharType::GetInstance();
   }
-  const ShortType& Short() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const ShortType& Short() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *ShortType::GetInstance();
   }
-  const IntegerType& Integer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const IntegerType& Integer() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *IntegerType::GetInstance();
   }
-  const FloatType& Float() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const FloatType& Float() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *FloatType::GetInstance();
   }
-  const LongLoType& LongLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const LongLoType& LongLo() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *LongLoType::GetInstance();
   }
-  const LongHiType& LongHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const LongHiType& LongHi() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *LongHiType::GetInstance();
   }
-  const DoubleLoType& DoubleLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const DoubleLoType& DoubleLo() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *DoubleLoType::GetInstance();
   }
-  const DoubleHiType& DoubleHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const DoubleHiType& DoubleHi() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *DoubleHiType::GetInstance();
   }
-  const UndefinedType& Undefined() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const UndefinedType& Undefined() SHARED_REQUIRES(Locks::mutator_lock_) {
     return *UndefinedType::GetInstance();
   }
   const ConflictType& Conflict() {
     return *ConflictType::GetInstance();
   }
 
-  const PreciseReferenceType& JavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const PreciseReferenceType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& JavaLangThrowable(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const PreciseReferenceType& JavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_);
+  const PreciseReferenceType& JavaLangString() SHARED_REQUIRES(Locks::mutator_lock_);
+  const RegType& JavaLangThrowable(bool precise) SHARED_REQUIRES(Locks::mutator_lock_);
+  const RegType& JavaLangObject(bool precise) SHARED_REQUIRES(Locks::mutator_lock_);
 
   const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   // Create an uninitialized 'this' argument for the given type.
   const UninitializedType& UninitializedThisArgument(const RegType& type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const RegType& FromUninitialized(const RegType& uninit_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  const ImpreciseConstType& ByteConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+  const ImpreciseConstType& CharConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+  const ImpreciseConstType& ShortConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+  const ImpreciseConstType& IntConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+  const ImpreciseConstType& PosByteConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+  const ImpreciseConstType& PosShortConstant() SHARED_REQUIRES(Locks::mutator_lock_);
   const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
   const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
 
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   static void VisitStaticRoots(RootVisitor* visitor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  void FillPrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FillPrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void AddEntry(RegType* new_entry);
 
   template <class Type>
   static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void CreatePrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  static void CreatePrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // A quick lookup for popular small constants.
   static constexpr int32_t kMinSmallConstant = -1;
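
The small-constant table declared in reg_type_cache.cc above is a dense
array covering [kMinSmallConstant, kMaxSmallConstant]. A hypothetical
sketch of the fast path it enables, inferred from the declarations in
this header rather than copied from the implementation:

    const ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
      // Precise constants in the small range are served from the static
      // table; everything else takes the allocating slow path.
      if (precise && value >= kMinSmallConstant && value <= kMaxSmallConstant) {
        return *small_precise_constants_[value - kMinSmallConstant];
      }
      return FromCat1NonSmallConstant(value, precise);
    }
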
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 0de0d9c..4fb3a2c 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -60,54 +60,54 @@
 
   // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
   void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This
   // copies both halves of the register.
   void CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Implement "move-result". Copy the category-1 value from the result register to another
   // register, and reset the result register.
   void CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Implement "move-result-wide". Copy the category-2 value from the result register to another
   // register, and reset the result register.
   void CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Set the invisible result register to unknown
-  void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Set the type of register N, verifying that the register is valid.  If "newType" is the "Lo"
   // part of a 64-bit value, register N+1 will be set to "newType+1".
   // The register index was validated during the static pass, so we don't need to check it here.
   ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
                                      const RegType& new_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool SetRegisterTypeWide(MethodVerifier* verifier, uint32_t vdst, const RegType& new_type1,
                            const RegType& new_type2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /* Set the type of the "result" register. */
   void SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the type of register vsrc.
   const RegType& GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const;
 
   ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc,
                                         const RegType& check_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsrc, const RegType& check_type1,
                               const RegType& check_type2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CopyFromLine(const RegisterLine* src) {
     DCHECK_EQ(num_regs_, src->num_regs_);
@@ -116,7 +116,7 @@
     reg_to_lock_depths_ = src->reg_to_lock_depths_;
   }
 
-  std::string Dump(MethodVerifier* verifier) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump(MethodVerifier* verifier) const SHARED_REQUIRES(Locks::mutator_lock_);
 
   void FillWithGarbage() {
     memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -131,7 +131,7 @@
    * the new ones at the same time).
    */
   void MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Update all registers holding "uninit_type" to instead hold the corresponding initialized
@@ -140,7 +140,7 @@
    */
   void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type,
                              uint32_t this_reg, uint32_t dex_pc)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Update all registers to be Conflict except vsrc.
@@ -194,7 +194,7 @@
    */
   const RegType& GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
                                    bool is_range, bool allow_failure = false)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Verify types for a simple two-register instruction (e.g. "neg-int").
@@ -202,22 +202,22 @@
    */
   void CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type,
                     const RegType& src_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CheckUnaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                         const RegType& dst_type1, const RegType& dst_type2,
                         const RegType& src_type1, const RegType& src_type2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CheckUnaryOpToWide(MethodVerifier* verifier, const Instruction* inst,
                           const RegType& dst_type1, const RegType& dst_type2,
                           const RegType& src_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CheckUnaryOpFromWide(MethodVerifier* verifier, const Instruction* inst,
                             const RegType& dst_type,
                             const RegType& src_type1, const RegType& src_type2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Verify types for a simple three-register instruction (e.g. "add-int").
@@ -227,18 +227,18 @@
   void CheckBinaryOp(MethodVerifier* verifier, const Instruction* inst,
                      const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
                      bool check_boolean_op)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CheckBinaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                          const RegType& dst_type1, const RegType& dst_type2,
                          const RegType& src_type1_1, const RegType& src_type1_2,
                          const RegType& src_type2_1, const RegType& src_type2_2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CheckBinaryOpWideShift(MethodVerifier* verifier, const Instruction* inst,
                               const RegType& long_lo_type, const RegType& long_hi_type,
                               const RegType& int_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
    * Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
@@ -248,18 +248,18 @@
                           const RegType& dst_type,
                           const RegType& src_type1, const RegType& src_type2,
                           bool check_boolean_op)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CheckBinaryOp2addrWide(MethodVerifier* verifier, const Instruction* inst,
                               const RegType& dst_type1, const RegType& dst_type2,
                               const RegType& src_type1_1, const RegType& src_type1_2,
                               const RegType& src_type2_1, const RegType& src_type2_2)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CheckBinaryOp2addrWideShift(MethodVerifier* verifier, const Instruction* inst,
                                    const RegType& long_lo_type, const RegType& long_hi_type,
                                    const RegType& int_type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
   * Verify types for a two-register instruction with a literal constant (e.g. "add-int/lit8").
@@ -270,15 +270,15 @@
   void CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst,
                       const RegType& dst_type, const RegType& src_type,
                       bool check_boolean_op, bool is_lit16)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
   void PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Verify/pop monitor from the monitor stack, ensuring that we believe the monitor is locked.
   void PopMonitor(MethodVerifier* verifier, uint32_t reg_idx)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Stack of currently held monitors and where they were locked
   size_t MonitorStackDepth() const {
@@ -290,7 +290,7 @@
   bool VerifyMonitorStackEmpty(MethodVerifier* verifier) const;
 
   bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) const;
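
PushMonitor and PopMonitor above implement the structured-locking rule
the verifier enforces: every monitor-exit must balance a pending
monitor-enter, and the stack must be empty on return (cf.
VerifyMonitorStackEmpty). The bookkeeping, reduced to a standalone
hypothetical sketch:

    #include <cstdint>
    #include <vector>

    class MonitorStack {
     public:
      // monitor-enter at instruction insn_idx: record where it was locked.
      void Push(int32_t insn_idx) { stack_.push_back(insn_idx); }

      // monitor-exit: must match a pending monitor-enter.
      bool Pop() {
        if (stack_.empty()) return false;  // exit without matching enter
        stack_.pop_back();
        return true;
      }

      bool IsEmpty() const { return stack_.empty(); }  // checked on return
      size_t Depth() const { return stack_.size(); }   // cf. MonitorStackDepth

     private:
      std::vector<int32_t> stack_;  // held monitors and where they were locked
    };
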
 
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 66b9abe..6dd8168 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -38,7 +38,7 @@
   static jmethodID StringInitToStringFactoryMethodID(jmethodID string_init);
 
   static mirror::Class* ToClass(jclass global_jclass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static jclass com_android_dex_Dex;
   static jclass dalvik_system_DexFile;
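
ToClass returns a raw mirror::Class*, which is only safe to use while
the shared mutator lock pins the calling thread's interaction with the
GC; that is exactly what the SHARED_REQUIRES annotation now encodes. A
hypothetical call site (ScopedObjectAccess is ART's usual way of
entering such a region; the surrounding function is made up):

    void UseDexFileClass(JNIEnv* env) {
      // Acquires the shared mutator_lock_, satisfying the
      // SHARED_REQUIRES(Locks::mutator_lock_) precondition on ToClass().
      art::ScopedObjectAccess soa(env);
      art::mirror::Class* klass =
          art::WellKnownClasses::ToClass(art::WellKnownClasses::dalvik_system_DexFile);
      // klass may only be used while soa is in scope.
    }
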
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index e626e48..767e1de 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -29,10 +29,10 @@
 } while (false);
 
 struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
-  explicit ReferenceMap2Visitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  explicit ReferenceMap2Visitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
       : CheckReferenceMapVisitor(thread) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (CheckReferenceMapVisitor::VisitFrame()) {
       return true;
     }
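
The remaining test updates all share the shape shown above: a
StackVisitor subclass whose constructor and VisitFrame() both carry
SHARED_REQUIRES(Locks::mutator_lock_), because walking frames
dereferences GC-managed method metadata. Stripped to a hypothetical
skeleton:

    class FrameCounter : public art::StackVisitor {
     public:
      FrameCounter(art::Thread* thread, art::Context* context)
          SHARED_REQUIRES(art::Locks::mutator_lock_)
          : StackVisitor(thread, context,
                         StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

      bool VisitFrame() SHARED_REQUIRES(art::Locks::mutator_lock_) {
        ++count_;     // GetMethod() etc. are safe here: the lock is held
        return true;  // keep walking to the next frame
      }

      size_t count_ = 0;
    };
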
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 6b15514..3a5854b 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -29,10 +29,10 @@
 
 class TestReferenceMapVisitor : public CheckReferenceMapVisitor {
  public:
-  explicit TestReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  explicit TestReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
       : CheckReferenceMapVisitor(thread) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (CheckReferenceMapVisitor::VisitFrame()) {
       return true;
     }
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 33bdc20..9facfdb 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -28,12 +28,12 @@
 class TestVisitor : public StackVisitor {
  public:
   TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         this_value_(this_value),
         found_method_index_(0) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     std::string m_name(m->GetName());
 
diff --git a/test/455-set-vreg/set_vreg_jni.cc b/test/455-set-vreg/set_vreg_jni.cc
index 7541189..21149f6 100644
--- a/test/455-set-vreg/set_vreg_jni.cc
+++ b/test/455-set-vreg/set_vreg_jni.cc
@@ -28,11 +28,11 @@
 class TestVisitor : public StackVisitor {
  public:
   TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         this_value_(this_value) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     std::string m_name(m->GetName());
 
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index 96f0e52..c21168b 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -28,10 +28,10 @@
 class TestVisitor : public StackVisitor {
  public:
   TestVisitor(Thread* thread, Context* context)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     std::string m_name(m->GetName());
 
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index 23fe43d..8108c97 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -28,12 +28,12 @@
 class TestVisitor : public StackVisitor {
  public:
   TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
         this_value_(this_value),
         found_method_index_(0) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     std::string m_name(m->GetName());
 
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index c4f415b..9b32fc3 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -27,10 +27,10 @@
 
 class TestVisitor : public StackVisitor {
  public:
-  TestVisitor(Thread* thread, Context* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  TestVisitor(Thread* thread, Context* context) SHARED_REQUIRES(Locks::mutator_lock_)
       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
 
-  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
     std::string m_name(m->GetName());