More SMP fixes.

Converted some ANDROID_MEMBAR_FULL uses into equivalent atomic ops.  A
couple of "bool" fields had to be converted to "int", since we don't
have atomic ops for bools.

Replaced a hand-rolled compare-and-swap loop for atomic increment (in
dvmSetClassSerialNumber) with a call to android_atomic_inc().
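
For readers less familiar with the cutils primitives involved:
android_atomic_release_store(v, addr) makes all earlier writes visible
before the store, android_atomic_acquire_load(addr) orders the load before
later reads, android_atomic_inc(addr) atomically increments and returns the
previous value, and android_atomic_release_cas(old, new, addr) returns 0 on
success.  A minimal sketch of the publish/consume idiom the patch moves to
(illustrative only, not code from this change; the Shared/publish/consume
names are made up):

    #include <cutils/atomic.h>

    /* Writer fills in the payload, then release-stores a flag; readers
     * acquire-load the flag before touching the payload.  This replaces
     * a plain store bracketed by ANDROID_MEMBAR_FULL(). */
    typedef struct {
        int payload;
        volatile int32_t ready;    /* int32_t, not bool: the atomic ops need it */
    } Shared;

    static void publish(Shared* s, int value)
    {
        s->payload = value;                          /* ordinary store */
        android_atomic_release_store(1, &s->ready);  /* earlier stores can't sink below this */
    }

    static int consume(Shared* s, int* out)
    {
        if (android_atomic_acquire_load(&s->ready) == 0)
            return 0;                                /* not published yet */
        *out = s->payload;                           /* ordered after the acquire load */
        return 1;
    }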
Change-Id: I948b8080d743552bde014d3a6e716ed2c30ebef8
diff --git a/vm/AtomicCache.c b/vm/AtomicCache.c
index bf8639e..a6f48c2 100644
--- a/vm/AtomicCache.c
+++ b/vm/AtomicCache.c
@@ -135,24 +135,25 @@
* while we work. We use memory barriers to ensure that the state
* is always consistent when the version number is even.
*/
- pEntry->version++;
- ANDROID_MEMBAR_FULL();
- pEntry->key1 = key1;
+ u4 newVersion = (firstVersion | ATOMIC_LOCK_FLAG) + 1;
+ assert((newVersion & 0x01) == 1);
+ pEntry->version = newVersion;
+
+ android_atomic_release_store(key1, (int32_t*) &pEntry->key1);
pEntry->key2 = key2;
pEntry->value = value;
- ANDROID_MEMBAR_FULL();
- pEntry->version++;
+ newVersion++;
+ android_atomic_release_store(newVersion, (int32_t*) &pEntry->version);
/*
* Clear the lock flag. Nobody else should have been able to modify
* pEntry->version, so if this fails the world is broken.
*/
- firstVersion += 2;
- assert((firstVersion & 0x01) == 0);
+ assert(newVersion == ((firstVersion + 2) | ATOMIC_LOCK_FLAG));
if (android_atomic_release_cas(
- firstVersion | ATOMIC_LOCK_FLAG, firstVersion,
+ newVersion, newVersion & ~ATOMIC_LOCK_FLAG,
(volatile s4*) &pEntry->version) != 0)
{
//LOGE("unable to reset the instanceof cache ownership\n");
diff --git a/vm/AtomicCache.h b/vm/AtomicCache.h
index 66d222e..686eac5 100644
--- a/vm/AtomicCache.h
+++ b/vm/AtomicCache.h
@@ -95,28 +95,26 @@
#define ATOMIC_CACHE_LOOKUP(_cache, _cacheSize, _key1, _key2) ({ \
AtomicCacheEntry* pEntry; \
int hash; \
- u4 firstVersion; \
+ u4 firstVersion, secondVersion; \
u4 value; \
\
/* simple hash function */ \
hash = (((u4)(_key1) >> 2) ^ (u4)(_key2)) & ((_cacheSize)-1); \
pEntry = (_cache)->entries + hash; \
\
- firstVersion = pEntry->version; \
- ANDROID_MEMBAR_FULL(); \
+ firstVersion = android_atomic_acquire_load((int32_t*)&pEntry->version); \
\
if (pEntry->key1 == (u4)(_key1) && pEntry->key2 == (u4)(_key2)) { \
/* \
* The fields match. Get the value, then read the version a \
* second time to verify that we didn't catch a partial update. \
* We're also hosed if "firstVersion" was odd, indicating that \
- * an update was in progress before we got here. \
+ * an update was in progress before we got here (unlikely). \
*/ \
- value = pEntry->value; \
- ANDROID_MEMBAR_FULL(); \
+ value = android_atomic_acquire_load((int32_t*) &pEntry->value); \
+ secondVersion = pEntry->version; \
\
- if ((firstVersion & 0x01) != 0 || firstVersion != pEntry->version) \
- { \
+ if ((firstVersion & 0x01) != 0 || firstVersion != secondVersion) { \
/* \
* We clashed with another thread. Instead of sitting and \
* spinning, which might not complete if we're a high priority \
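
The lookup macro above is the matching reader half: acquire-load the
version, read the fields, re-read the version, and fall back to the slow
path if the version was odd or changed.  The acquire load on value is what
keeps the second version read from being hoisted above it.  A matching
sketch, reusing the Entry type from the writer sketch earlier (computeSlow
stands in for the ATOMIC_CACHE_CALC fallback):

    static uint32_t lookupEntry(Entry* e, uint32_t key1, uint32_t key2,
            uint32_t (*computeSlow)(uint32_t, uint32_t))
    {
        uint32_t firstVersion =
                android_atomic_acquire_load((int32_t*) &e->version);

        if (e->key1 == key1 && e->key2 == key2) {
            /* Acquire load: the re-read of version below cannot be
             * reordered ahead of this load of value. */
            uint32_t value = android_atomic_acquire_load((int32_t*) &e->value);
            uint32_t secondVersion = e->version;

            if ((firstVersion & 0x01) == 0 && firstVersion == secondVersion)
                return value;              /* consistent snapshot */
        }

        /* Miss, torn read, or writer in progress: recompute instead of spinning. */
        return computeSlow(key1, key2);
    }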
diff --git a/vm/Globals.h b/vm/Globals.h
index 90e9e88..954bb5d 100644
--- a/vm/Globals.h
+++ b/vm/Globals.h
@@ -296,7 +296,7 @@
//int offJavaNioDirectByteBufferImpl_pointer;
/* method pointers - java.security.AccessController */
- volatile bool javaSecurityAccessControllerReady;
+ volatile int javaSecurityAccessControllerReady;
Method* methJavaSecurityAccessController_doPrivileged[4];
/* constructor method pointers; no vtable involved, so use Method* */
diff --git a/vm/Profile.c b/vm/Profile.c
index 57c96df..92d4001 100644
--- a/vm/Profile.c
+++ b/vm/Profile.c
@@ -407,13 +407,11 @@
storeLongLE(state->buf + 8, state->startWhen);
state->curOffset = TRACE_HEADER_LEN;
- ANDROID_MEMBAR_FULL();
-
/*
* Set the "enabled" flag. Once we do this, threads will wait to be
* signaled before exiting, so we have to make sure we wake them up.
*/
- state->traceEnabled = true;
+ android_atomic_release_store(true, &state->traceEnabled);
dvmUnlockMutex(&state->startStopLock);
return;
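
The release store only covers the writer's side of the handoff.  A consumer
of traceEnabled that wants to see the header and buffer initialization above
would pair it with an acquire load (hypothetical reader sketch; the real
tracing code may rely on startStopLock or other existing barriers instead):

    if (android_atomic_acquire_load((int32_t*) &state->traceEnabled)) {
        /* state->buf, state->curOffset, etc. initialized above are visible here */
    }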
diff --git a/vm/Profile.h b/vm/Profile.h
index a294f83..afa529d 100644
--- a/vm/Profile.h
+++ b/vm/Profile.h
@@ -53,7 +53,7 @@
int bufferSize;
int flags;
- bool traceEnabled;
+ int traceEnabled;
u1* buf;
volatile int curOffset;
u8 startWhen;
diff --git a/vm/compiler/codegen/arm/Assemble.c b/vm/compiler/codegen/arm/Assemble.c
index 5f54ebe..94e2d1c 100644
--- a/vm/compiler/codegen/arm/Assemble.c
+++ b/vm/compiler/codegen/arm/Assemble.c
@@ -1490,8 +1490,8 @@
* The update order matters - make sure clazz is updated last since it
* will bring the uninitialized chaining cell to life.
*/
- ANDROID_MEMBAR_FULL();
- cellAddr->clazz = newContent->clazz;
+ android_atomic_release_store((int32_t)newContent->clazz,
+ (void*) &cellAddr->clazz);
cacheflush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0);
UPDATE_CODE_CACHE_PATCHES();
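
Same publish-last idiom as elsewhere in this change: the other cell fields
are written first with plain stores (just above this hunk), and clazz, the
field whose non-NULL value marks the cell as initialized, is release-stored
last so a core that sees it also sees the rest of the cell.  In generic form
(illustrative struct and names, not the real PredictedChainingCell layout;
a 32-bit target is assumed, as in this ARM backend):

    #include <cutils/atomic.h>

    struct Cell {
        int32_t branch;
        const void* method;
        const void* clazz;        /* non-NULL clazz == cell is live */
    };

    static void publishCell(struct Cell* cell, int32_t branch,
            const void* method, const void* clazz)
    {
        cell->branch = branch;                        /* payload first, plain stores */
        cell->method = method;
        android_atomic_release_store((int32_t) clazz,  /* clazz last: */
                (int32_t*) (void*) &cell->clazz);      /* cell comes alive */
    }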
diff --git a/vm/interp/Interp.c b/vm/interp/Interp.c
index ab61882..d3de730 100644
--- a/vm/interp/Interp.c
+++ b/vm/interp/Interp.c
@@ -450,6 +450,14 @@
#ifdef WITH_DEBUGGER
/*
* Get the original opcode from under a breakpoint.
+ *
+ * On SMP hardware it's possible one core might try to execute a breakpoint
+ * after another core has cleared it. We need to handle the case where
+ * there's no entry in the breakpoint set. (The memory barriers in the
+ * locks and in the breakpoint update code should ensure that, once we've
+ * observed the absence of a breakpoint entry, we will also observe
+ * the restoration of the original opcode. The fact that we're holding
+ * the lock prevents other threads from confusing things further.)
*/
u1 dvmGetOriginalOpCode(const u2* addr)
{
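
A sketch of what the missing-entry handling described in the new comment
looks like; the helper names here are hypothetical, not Dalvik's actual
breakpoint-set API:

    /* Hypothetical sketch; lockBreakpointTable()/findBreakpointEntry() are
     * made-up names.  With the table locked, a missing entry means another
     * core already cleared the breakpoint, so the original opcode has been
     * restored in the bytecode stream and can simply be read from there. */
    static u1 getOriginalOpCodeSketch(const u2* addr)
    {
        u1 origOpCode;

        lockBreakpointTable();
        if (!findBreakpointEntry(addr, &origOpCode)) {
            origOpCode = *(const u1*) addr;   /* breakpoint already cleared */
        }
        unlockBreakpointTable();

        return origOpCode;
    }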
diff --git a/vm/jdwp/JdwpMain.c b/vm/jdwp/JdwpMain.c
index 1688e5e..24e5c6c 100644
--- a/vm/jdwp/JdwpMain.c
+++ b/vm/jdwp/JdwpMain.c
@@ -231,8 +231,7 @@
*/
state->debugThreadHandle = dvmThreadSelf()->handle;
state->run = true;
- ANDROID_MEMBAR_FULL();
- state->debugThreadStarted = true; // touch this last
+ android_atomic_release_store(true, &state->debugThreadStarted);
dvmDbgLockMutex(&state->threadStartLock);
dvmDbgCondBroadcast(&state->threadStartCond);
diff --git a/vm/jdwp/JdwpPriv.h b/vm/jdwp/JdwpPriv.h
index 85f9ec2..bc249f1 100644
--- a/vm/jdwp/JdwpPriv.h
+++ b/vm/jdwp/JdwpPriv.h
@@ -79,7 +79,7 @@
pthread_mutex_t threadStartLock;
pthread_cond_t threadStartCond;
- bool debugThreadStarted;
+ int debugThreadStarted;
pthread_t debugThreadHandle;
ObjectId debugThreadId;
bool run;
diff --git a/vm/native/InternalNative.c b/vm/native/InternalNative.c
index 700c8af..62d4d04 100644
--- a/vm/native/InternalNative.c
+++ b/vm/native/InternalNative.c
@@ -335,7 +335,8 @@
}
/* all good, raise volatile readiness flag */
- gDvm.javaSecurityAccessControllerReady = true;
+ android_atomic_release_store(true,
+ &gDvm.javaSecurityAccessControllerReady);
}
for (i = 0; i < NUM_DOPRIV_FUNCS; i++) {
diff --git a/vm/oo/Class.c b/vm/oo/Class.c
index 1f6e685..e0d002d 100644
--- a/vm/oo/Class.c
+++ b/vm/oo/Class.c
@@ -1123,17 +1123,8 @@
*/
void dvmSetClassSerialNumber(ClassObject* clazz)
{
- u4 oldValue, newValue;
-
assert(clazz->serialNumber == 0);
-
- do {
- oldValue = gDvm.classSerialNumber;
- newValue = oldValue + 1;
- } while (android_atomic_release_cas(oldValue, newValue,
- &gDvm.classSerialNumber) != 0);
-
- clazz->serialNumber = (u4) oldValue;
+ clazz->serialNumber = android_atomic_inc(&gDvm.classSerialNumber);
}
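
android_atomic_inc() returns the value the counter held before the
increment, so the one-liner preserves the old behavior of assigning
oldValue to the serial number.  Side by side, with a standalone counter
for illustration:

    #include <cutils/atomic.h>

    static volatile int32_t counter = 0;

    /* The removed hand-rolled loop, reduced to its essence. */
    static int32_t incWithCasLoop(void)
    {
        int32_t oldValue, newValue;
        do {
            oldValue = counter;
            newValue = oldValue + 1;
        } while (android_atomic_release_cas(oldValue, newValue, &counter) != 0);
        return oldValue;                      /* value before the increment */
    }

    /* The replacement: android_atomic_inc() also returns the pre-increment value. */
    static int32_t incWithHelper(void)
    {
        return android_atomic_inc(&counter);
    }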