Merge "ART: cleanup exit_block_ in graph if exit block is removed"
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index e041a42..e0380bd 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -789,23 +789,38 @@
 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                               ArtMethod* method,
                                               const std::vector<uint32_t>& entries,
-                                              bool retry_allocation) {
-  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);
+                                              bool retry_allocation)
+    // No thread safety analysis as we are using TryLock/Unlock explicitly.
+    NO_THREAD_SAFETY_ANALYSIS {
+  ProfilingInfo* info = nullptr;
+  if (!retry_allocation) {
+    // If we are allocating for the interpreter, just try to lock, to avoid
+    // lock contention with the JIT.
+    if (lock_.ExclusiveTryLock(self)) {
+      info = AddProfilingInfoInternal(self, method, entries);
+      lock_.ExclusiveUnlock(self);
+    }
+  } else {
+    {
+      MutexLock mu(self, lock_);
+      info = AddProfilingInfoInternal(self, method, entries);
+    }
 
-  if (info == nullptr && retry_allocation) {
-    GarbageCollectCache(self);
-    info = AddProfilingInfoInternal(self, method, entries);
+    if (info == nullptr) {
+      GarbageCollectCache(self);
+      MutexLock mu(self, lock_);
+      info = AddProfilingInfoInternal(self, method, entries);
+    }
   }
   return info;
 }
 
-ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
+ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
                                                       ArtMethod* method,
                                                       const std::vector<uint32_t>& entries) {
   size_t profile_info_size = RoundUp(
       sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
       sizeof(void*));
-  MutexLock mu(self, lock_);
 
   // Check whether some other thread has concurrently created it.
   ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 7b33b92..aa1b139 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -213,7 +213,7 @@
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& entries)
-      REQUIRES(!lock_)
+      REQUIRES(lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // If a collection is in progress, wait for it to finish. Return
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 67efccd..e05d4f5 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -551,6 +551,7 @@
   $(TEST_ART_BROKEN_INTERPRETER_RUN_TESTS)
 
 # Tests that should fail in the read barrier configuration with the Optimizing compiler (AOT).
+# 145: Test sometimes times out in read barrier configuration (b/27467554).
 # 484: Baker's fast path based read barrier compiler instrumentation generates code containing
 #      more parallel moves on x86, thus some Checker assertions may fail.
 # 527: On ARM64, the read barrier instrumentation does not support the HArm64IntermediateAddress
@@ -558,6 +559,7 @@
 # 537: Expects an array copy to be intrinsified on x86-64, but calling-on-slowpath intrinsics are
 #      not yet handled in the read barrier configuration.
 TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \
+  145-alloc-tracking-stress \
   484-checker-register-hints \
   527-checker-array-access-split \
   537-checker-arraycopy