resolved conflicts for merge of a5d99892 to dalvik-dev
Change-Id: I3c030a6b19416a2ac3ca709cdbcbc6b41e6379d3
diff --git a/libdex/InstrUtils.c b/libdex/InstrUtils.c
index b98f002..fb0328a 100644
--- a/libdex/InstrUtils.c
+++ b/libdex/InstrUtils.c
@@ -291,6 +291,9 @@
* Optimized instructions. We return negative size values for these
* to distinguish them.
*/
+ case OP_RETURN_VOID_BARRIER:
+ width = -1;
+ break;
case OP_IGET_QUICK:
case OP_IGET_WIDE_QUICK:
case OP_IGET_OBJECT_QUICK:
@@ -333,7 +336,6 @@
case OP_UNUSED_79:
case OP_UNUSED_7A:
case OP_BREAKPOINT:
- case OP_UNUSED_F1:
case OP_UNUSED_FF:
assert(width == 0);
break;
@@ -642,7 +644,6 @@
case OP_SPUT_WIDE_VOLATILE:
flags = kInstrCanContinue | kInstrCanThrow;
break;
-
case OP_INVOKE_VIRTUAL_QUICK:
case OP_INVOKE_VIRTUAL_QUICK_RANGE:
case OP_INVOKE_SUPER_QUICK:
@@ -650,6 +651,9 @@
case OP_INVOKE_DIRECT_EMPTY:
flags = kInstrCanContinue | kInstrCanThrow | kInstrInvoke;
break;
+ case OP_RETURN_VOID_BARRIER:
+ flags = kInstrCanReturn;
+ break;
/* these should never appear when scanning code */
case OP_UNUSED_3E:
@@ -662,7 +666,6 @@
case OP_UNUSED_79:
case OP_UNUSED_7A:
case OP_BREAKPOINT:
- case OP_UNUSED_F1:
case OP_UNUSED_FF:
break;
@@ -1010,6 +1013,9 @@
case OP_INVOKE_DIRECT_EMPTY:
fmt = kFmt35c;
break;
+ case OP_RETURN_VOID_BARRIER:
+ fmt = kFmt10x;
+ break;
/* these should never appear when scanning code */
case OP_UNUSED_3E:
@@ -1022,7 +1028,6 @@
case OP_UNUSED_79:
case OP_UNUSED_7A:
case OP_BREAKPOINT:
- case OP_UNUSED_F1:
case OP_UNUSED_FF:
fmt = kFmtUnknown;
break;
diff --git a/libdex/OpCode.h b/libdex/OpCode.h
index 312ff01..b0886af 100644
--- a/libdex/OpCode.h
+++ b/libdex/OpCode.h
@@ -34,6 +34,7 @@
* - update the instruction info table generators and (if you changed an
* instruction format) instruction decoder in InstrUtils.c
* - update the instruction format list in InstrUtils.h, if necessary
+ * - update the gOpNames table in OpCodeNames.c
* - update the parallel definitions in the class dalvik.bytecode.Opcodes
*
* Interpreter:
@@ -47,12 +48,11 @@
* (see notes in mterp/ReadMe.txt for rebuilding instructions)
*
* Verifier / optimizer:
- * - update some stuff in analysis/DexOptimize.c, analysis/DexVerify.c,
+ * - update some stuff in analysis/Optimize.c, analysis/DexVerify.c,
* and/or analysis/CodeVerify.c as needed
* - verify by running with verifier enabled (it's on by default)
*
* Tools:
- * - update the OpCodeNames table in dexdump/OpCodeNames.c
* - update dexdump/DexDump.c if an instruction format has changed
*
* Note: The Dalvik VM tests (in the tests subdirectory) provide a convenient
@@ -347,7 +347,7 @@
OP_EXECUTE_INLINE_RANGE = 0xef,
OP_INVOKE_DIRECT_EMPTY = 0xf0,
- OP_UNUSED_F1 = 0xf1, /* OP_INVOKE_DIRECT_EMPTY_RANGE? */
+ OP_RETURN_VOID_BARRIER = 0xf1,
OP_IGET_QUICK = 0xf2,
OP_IGET_WIDE_QUICK = 0xf3,
OP_IGET_OBJECT_QUICK = 0xf4,
@@ -644,7 +644,7 @@
H(OP_EXECUTE_INLINE_RANGE), \
/* f0..ff */ \
H(OP_INVOKE_DIRECT_EMPTY), \
- H(OP_UNUSED_F1), \
+ H(OP_RETURN_VOID_BARRIER), \
H(OP_IGET_QUICK), \
H(OP_IGET_WIDE_QUICK), \
H(OP_IGET_OBJECT_QUICK), \
diff --git a/libdex/OpCodeNames.c b/libdex/OpCodeNames.c
index c182d4e..fca249f 100644
--- a/libdex/OpCodeNames.c
+++ b/libdex/OpCodeNames.c
@@ -306,7 +306,7 @@
/* 0xf0 */
"+invoke-direct-empty",
- "UNUSED",
+ "+return-void-barrier",
"+iget-quick",
"+iget-wide-quick",
"+iget-object-quick",
diff --git a/tests/046-reflect/expected.txt b/tests/046-reflect/expected.txt
index 3be8d1c..55b0eca 100644
--- a/tests/046-reflect/expected.txt
+++ b/tests/046-reflect/expected.txt
@@ -82,8 +82,8 @@
Access flags are 0x11
cantTouchThis is 77
got expected set-final failure
-
cantTouchThis is now 77
+ cantTouchThis is now 88
cons modifiers=1
SuperTarget constructor ()V
Target constructor (IF)V : ii=7 ff=3.3333
diff --git a/tests/046-reflect/src/Main.java b/tests/046-reflect/src/Main.java
index 399a417..e604979 100644
--- a/tests/046-reflect/src/Main.java
+++ b/tests/046-reflect/src/Main.java
@@ -272,13 +272,19 @@
System.out.println(" cantTouchThis is " + intVal);
try {
field.setInt(instance, 99);
- System.out.println("ERROR: set-final succeeded\n");
+ System.out.println("ERROR: set-final succeeded");
} catch (IllegalAccessException iae) {
- System.out.println(" got expected set-final failure\n");
+ System.out.println(" got expected set-final failure");
}
intVal = field.getInt(instance);
System.out.println(" cantTouchThis is now " + intVal);
+ field.setAccessible(true);
+ field.setInt(instance, 87); // exercise int version
+ field.set(instance, 88); // exercise Object version
+ intVal = field.getInt(instance);
+ System.out.println(" cantTouchThis is now " + intVal);
+
Constructor<Target> cons;
Target targ;
Object[] args;
diff --git a/tests/084-old-style-inner-class/build b/tests/085-old-style-inner-class/build
similarity index 100%
rename from tests/084-old-style-inner-class/build
rename to tests/085-old-style-inner-class/build
diff --git a/tests/084-old-style-inner-class/expected.txt b/tests/085-old-style-inner-class/expected.txt
similarity index 100%
rename from tests/084-old-style-inner-class/expected.txt
rename to tests/085-old-style-inner-class/expected.txt
diff --git a/tests/084-old-style-inner-class/info.txt b/tests/085-old-style-inner-class/info.txt
similarity index 100%
rename from tests/084-old-style-inner-class/info.txt
rename to tests/085-old-style-inner-class/info.txt
diff --git a/tests/084-old-style-inner-class/src/Main.java b/tests/085-old-style-inner-class/src/Main.java
similarity index 100%
rename from tests/084-old-style-inner-class/src/Main.java
rename to tests/085-old-style-inner-class/src/Main.java
diff --git a/vm/Android.mk b/vm/Android.mk
index 98f004c..9b1e5a2 100644
--- a/vm/Android.mk
+++ b/vm/Android.mk
@@ -77,7 +77,7 @@
LOCAL_MODULE := libdvm_sv
include $(BUILD_SHARED_LIBRARY)
- # Devivation #3
+ # Derivation #3
# Compile out the JIT
WITH_JIT := false
include $(LOCAL_PATH)/ReconfigureDvm.mk
@@ -86,6 +86,13 @@
LOCAL_MODULE := libdvm_interp
include $(BUILD_SHARED_LIBRARY)
+ # Derivation #4
+ WITH_JIT := true
+ include $(LOCAL_PATH)/ReconfigureDvm.mk
+
+ LOCAL_CFLAGS += $(target_smp_flag) -DWITH_INLINE_PROFILING
+ LOCAL_MODULE := libdvm_traceview
+ include $(BUILD_SHARED_LIBRARY)
endif
#
@@ -103,6 +110,7 @@
dvm_arch_variant := $(HOST_ARCH)
dvm_simulator := false
+ WITH_JIT := false
include $(LOCAL_PATH)/Dvm.mk
LOCAL_SHARED_LIBRARIES += libcrypto libssl libicuuc libicui18n
diff --git a/vm/CheckJni.c b/vm/CheckJni.c
index 59d34eb..e111187 100644
--- a/vm/CheckJni.c
+++ b/vm/CheckJni.c
@@ -184,8 +184,6 @@
#define BASE_ENV(_env) (((JNIEnvExt*)_env)->baseFuncTable)
#define BASE_VM(_vm) (((JavaVMExt*)_vm)->baseFuncTable)
-#define kRedundantDirectBufferTest false
-
/*
* Flags passed into checkThread().
*/
@@ -2374,59 +2372,6 @@
CHECK_OBJECT(env, buf);
void* result = BASE_ENV(env)->GetDirectBufferAddress(env, buf);
CHECK_EXIT(env);
-
- /* optional - check result vs. "safe" implementation */
- if (kRedundantDirectBufferTest) {
- jobject platformAddr = NULL;
- void* checkResult = NULL;
-
- /*
- * Start by determining if the object supports the DirectBuffer
- * interfaces. Note this does not guarantee that it's a direct buffer.
- */
- if (JNI_FALSE == (*env)->IsInstanceOf(env, buf,
- gDvm.jclassOrgApacheHarmonyNioInternalDirectBuffer))
- {
- goto bail;
- }
-
- /*
- * Get the PlatformAddress object.
- *
- * If this isn't a direct buffer, platformAddr will be NULL and/or an
- * exception will have been thrown.
- */
- platformAddr = (*env)->CallObjectMethod(env, buf,
- (jmethodID) gDvm.methOrgApacheHarmonyNioInternalDirectBuffer_getEffectiveAddress);
-
- if ((*env)->ExceptionCheck(env)) {
- (*env)->ExceptionClear(env);
- platformAddr = NULL;
- }
- if (platformAddr == NULL) {
- LOGV("Got request for address of non-direct buffer\n");
- goto bail;
- }
-
- jclass platformAddrClass = (*env)->FindClass(env,
- "org/apache/harmony/luni/platform/PlatformAddress");
- jmethodID toLongMethod = (*env)->GetMethodID(env, platformAddrClass,
- "toLong", "()J");
- checkResult = (void*)(u4)(*env)->CallLongMethod(env, platformAddr,
- toLongMethod);
-
- bail:
- if (platformAddr != NULL)
- (*env)->DeleteLocalRef(env, platformAddr);
-
- if (result != checkResult) {
- LOGW("JNI WARNING: direct buffer result mismatch (%p vs %p)\n",
- result, checkResult);
- abortMaybe();
- /* keep going */
- }
- }
-
return result;
}
diff --git a/vm/Ddm.c b/vm/Ddm.c
index 774ed3f..3cd3a80 100644
--- a/vm/Ddm.c
+++ b/vm/Ddm.c
@@ -364,95 +364,6 @@
}
/*
- * Get some per-thread stats.
- *
- * This is currently generated by opening the appropriate "stat" file
- * in /proc and reading the pile of stuff that comes out.
- */
-static bool getThreadStats(pid_t pid, pid_t tid, unsigned long* pUtime,
- unsigned long* pStime)
-{
- /*
- int pid;
- char comm[128];
- char state;
- int ppid, pgrp, session, tty_nr, tpgid;
- unsigned long flags, minflt, cminflt, majflt, cmajflt, utime, stime;
- long cutime, cstime, priority, nice, zero, itrealvalue;
- unsigned long starttime, vsize;
- long rss;
- unsigned long rlim, startcode, endcode, startstack, kstkesp, kstkeip;
- unsigned long signal, blocked, sigignore, sigcatch, wchan, nswap, cnswap;
- int exit_signal, processor;
- unsigned long rt_priority, policy;
-
- scanf("%d %s %c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld "
- "%ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu "
- "%lu %lu %lu %d %d %lu %lu",
- &pid, comm, &state, &ppid, &pgrp, &session, &tty_nr, &tpgid,
- &flags, &minflt, &cminflt, &majflt, &cmajflt, &utime, &stime,
- &cutime, &cstime, &priority, &nice, &zero, &itrealvalue,
- &starttime, &vsize, &rss, &rlim, &startcode, &endcode,
- &startstack, &kstkesp, &kstkeip, &signal, &blocked, &sigignore,
- &sigcatch, &wchan, &nswap, &cnswap, &exit_signal, &processor,
- &rt_priority, &policy);
- */
-
- char nameBuf[64];
- int i, fd;
-
- /*
- * Open and read the appropriate file. This is expected to work on
- * Linux but will fail on other platforms (e.g. Mac sim).
- */
- sprintf(nameBuf, "/proc/%d/task/%d/stat", (int) pid, (int) tid);
- fd = open(nameBuf, O_RDONLY);
- if (fd < 0) {
- LOGV("Unable to open '%s': %s\n", nameBuf, strerror(errno));
- return false;
- }
-
- char lineBuf[512]; // > 2x typical
- int cc;
- cc = read(fd, lineBuf, sizeof(lineBuf)-1);
- if (cc <= 0) {
- const char* msg = (cc == 0) ? "unexpected EOF" : strerror(errno);
- LOGI("Unable to read '%s': %s\n", nameBuf, msg);
- close(fd);
- return false;
- }
- lineBuf[cc] = '\0';
-
- /*
- * Skip whitespace-separated tokens.
- */
- static const char* kWhitespace = " ";
- char* cp = lineBuf;
- for (i = 0; i < 13; i++) {
- cp += strcspn(cp, kWhitespace); // skip token
- cp += strspn(cp, kWhitespace); // skip whitespace
- }
-
- /*
- * Grab the values we want.
- */
- char* endp;
- *pUtime = strtoul(cp, &endp, 10);
- if (endp == cp)
- LOGI("Warning: strtoul failed on utime ('%.30s...')\n", cp);
-
- cp += strcspn(cp, kWhitespace);
- cp += strspn(cp, kWhitespace);
-
- *pStime = strtoul(cp, &endp, 10);
- if (endp == cp)
- LOGI("Warning: strtoul failed on stime ('%.30s...')\n", cp);
-
- close(fd);
- return true;
-}
-
-/*
* Generate the contents of a THST chunk. The data encompasses all known
* threads.
*
@@ -502,14 +413,13 @@
set2BE(buf+2, (u2) threadCount);
buf += kHeaderLen;
- pid_t pid = getpid();
for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
- unsigned long utime, stime;
bool isDaemon = false;
- if (!getThreadStats(pid, thread->systemTid, &utime, &stime)) {
- // failed; drop in empty values
- utime = stime = 0;
+ ProcStatData procStatData;
+ if (!dvmGetThreadStats(&procStatData, thread->systemTid)) {
+ /* failed; show zero */
+ memset(&procStatData, 0, sizeof(procStatData));
}
Object* threadObj = thread->threadObj;
@@ -521,8 +431,8 @@
set4BE(buf+0, thread->threadId);
set1(buf+4, thread->status);
set4BE(buf+5, thread->systemTid);
- set4BE(buf+9, utime);
- set4BE(buf+13, stime);
+ set4BE(buf+9, procStatData.utime);
+ set4BE(buf+13, procStatData.stime);
set1(buf+17, isDaemon);
buf += kBytesPerEntry;
diff --git a/vm/Globals.h b/vm/Globals.h
index 6ba6d5a..3c98872 100644
--- a/vm/Globals.h
+++ b/vm/Globals.h
@@ -227,8 +227,6 @@
ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationFactory;
ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMember;
ClassObject* classOrgApacheHarmonyLangAnnotationAnnotationMemberArray;
- ClassObject* classOrgApacheHarmonyNioInternalDirectBuffer;
- jclass jclassOrgApacheHarmonyNioInternalDirectBuffer;
/* synthetic classes for arrays of primitives */
ClassObject* classArrayBoolean;
@@ -328,12 +326,8 @@
/* assorted direct buffer helpers */
Method* methJavaNioReadWriteDirectByteBuffer_init;
- Method* methOrgApacheHarmonyLuniPlatformPlatformAddress_on;
- Method* methOrgApacheHarmonyNioInternalDirectBuffer_getEffectiveAddress;
int offJavaNioBuffer_capacity;
int offJavaNioBuffer_effectiveDirectAddress;
- int offOrgApacheHarmonyLuniPlatformPlatformAddress_osaddr;
- int voffOrgApacheHarmonyLuniPlatformPlatformAddress_toLong;
/*
* VM-synthesized primitive classes, for arrays.
diff --git a/vm/Init.c b/vm/Init.c
index 72f6218..0c7f736 100644
--- a/vm/Init.c
+++ b/vm/Init.c
@@ -190,6 +190,9 @@
#if ANDROID_SMP != 0
" smp"
#endif
+#ifdef WITH_INLINE_PROFILING
+ " inline_profiling"
+#endif
);
#ifdef DVM_SHOW_EXCEPTION
dvmFprintf(stderr, " show_exception=%d", DVM_SHOW_EXCEPTION);
diff --git a/vm/Jni.c b/vm/Jni.c
index ef0749a..71be49c 100644
--- a/vm/Jni.c
+++ b/vm/Jni.c
@@ -342,78 +342,26 @@
* Look up and cache pointers to some direct buffer classes, fields,
* and methods.
*/
- ClassObject* platformAddressClass =
- dvmFindSystemClassNoInit("Lorg/apache/harmony/luni/platform/PlatformAddress;");
- ClassObject* platformAddressFactoryClass =
- dvmFindSystemClassNoInit("Lorg/apache/harmony/luni/platform/PlatformAddressFactory;");
- ClassObject* directBufferClass =
- dvmFindSystemClassNoInit("Lorg/apache/harmony/nio/internal/DirectBuffer;");
ClassObject* readWriteBufferClass =
dvmFindSystemClassNoInit("Ljava/nio/ReadWriteDirectByteBuffer;");
ClassObject* bufferClass =
dvmFindSystemClassNoInit("Ljava/nio/Buffer;");
- if (platformAddressClass == NULL || platformAddressFactoryClass == NULL ||
- directBufferClass == NULL || readWriteBufferClass == NULL ||
- bufferClass == NULL)
- {
+ if (readWriteBufferClass == NULL || bufferClass == NULL) {
LOGE("Unable to find internal direct buffer classes\n");
return false;
}
gDvm.classJavaNioReadWriteDirectByteBuffer = readWriteBufferClass;
- gDvm.classOrgApacheHarmonyNioInternalDirectBuffer = directBufferClass;
- /* need a global reference for extended CheckJNI tests */
- gDvm.jclassOrgApacheHarmonyNioInternalDirectBuffer =
- addGlobalReference((Object*) directBufferClass);
-
- /*
- * We need a Method* here rather than a vtable offset, because
- * DirectBuffer is an interface class.
- */
- meth = dvmFindVirtualMethodByDescriptor(
- gDvm.classOrgApacheHarmonyNioInternalDirectBuffer,
- "getEffectiveAddress",
- "()Lorg/apache/harmony/luni/platform/PlatformAddress;");
- if (meth == NULL) {
- LOGE("Unable to find PlatformAddress.getEffectiveAddress\n");
- return false;
- }
- gDvm.methOrgApacheHarmonyNioInternalDirectBuffer_getEffectiveAddress = meth;
-
- meth = dvmFindVirtualMethodByDescriptor(platformAddressClass,
- "toLong", "()J");
- if (meth == NULL) {
- LOGE("Unable to find PlatformAddress.toLong\n");
- return false;
- }
- gDvm.voffOrgApacheHarmonyLuniPlatformPlatformAddress_toLong =
- meth->methodIndex;
-
- meth = dvmFindDirectMethodByDescriptor(platformAddressFactoryClass,
- "on",
- "(I)Lorg/apache/harmony/luni/platform/PlatformAddress;");
- if (meth == NULL) {
- LOGE("Unable to find PlatformAddressFactory.on\n");
- return false;
- }
- gDvm.methOrgApacheHarmonyLuniPlatformPlatformAddress_on = meth;
meth = dvmFindDirectMethodByDescriptor(readWriteBufferClass,
"<init>",
- "(Lorg/apache/harmony/luni/platform/PlatformAddress;II)V");
+ "(II)V");
if (meth == NULL) {
LOGE("Unable to find ReadWriteDirectByteBuffer.<init>\n");
return false;
}
gDvm.methJavaNioReadWriteDirectByteBuffer_init = meth;
- gDvm.offOrgApacheHarmonyLuniPlatformPlatformAddress_osaddr =
- dvmFindFieldOffset(platformAddressClass, "osaddr", "I");
- if (gDvm.offOrgApacheHarmonyLuniPlatformPlatformAddress_osaddr < 0) {
- LOGE("Unable to find PlatformAddress.osaddr\n");
- return false;
- }
-
gDvm.offJavaNioBuffer_capacity =
dvmFindFieldOffset(bufferClass, "capacity", "I");
if (gDvm.offJavaNioBuffer_capacity < 0) {
@@ -3640,37 +3588,19 @@
JNI_ENTER();
Thread* self = _self /*dvmThreadSelf()*/;
- Object* platformAddress = NULL;
JValue callResult;
jobject result = NULL;
- ClassObject* tmpClazz;
-
- tmpClazz = gDvm.methOrgApacheHarmonyLuniPlatformPlatformAddress_on->clazz;
- if (!dvmIsClassInitialized(tmpClazz) && !dvmInitClass(tmpClazz))
- goto bail;
-
- /* get an instance of PlatformAddress that wraps the provided address */
- dvmCallMethod(self,
- gDvm.methOrgApacheHarmonyLuniPlatformPlatformAddress_on,
- NULL, &callResult, address);
- if (dvmGetException(self) != NULL || callResult.l == NULL)
- goto bail;
-
- /* don't let the GC discard it */
- platformAddress = (Object*) callResult.l;
- dvmAddTrackedAlloc(platformAddress, self);
- LOGV("tracking %p for address=%p\n", platformAddress, address);
+ ClassObject* bufferClazz = gDvm.classJavaNioReadWriteDirectByteBuffer;
/* create an instance of java.nio.ReadWriteDirectByteBuffer */
- tmpClazz = gDvm.classJavaNioReadWriteDirectByteBuffer;
- if (!dvmIsClassInitialized(tmpClazz) && !dvmInitClass(tmpClazz))
+ if (!dvmIsClassInitialized(bufferClazz) && !dvmInitClass(bufferClazz))
goto bail;
- Object* newObj = dvmAllocObject(tmpClazz, ALLOC_DONT_TRACK);
+ Object* newObj = dvmAllocObject(bufferClazz, ALLOC_DONT_TRACK);
if (newObj != NULL) {
- /* call the (PlatformAddress, int, int) constructor */
+ /* call the constructor */
result = addLocalReference(env, newObj);
dvmCallMethod(self, gDvm.methJavaNioReadWriteDirectByteBuffer_init,
- newObj, &callResult, platformAddress, (jint) capacity, (jint) 0);
+ newObj, &callResult, (jint) address, (jint) capacity);
if (dvmGetException(self) != NULL) {
deleteLocalReference(env, result);
result = NULL;
@@ -3679,8 +3609,6 @@
}
bail:
- if (platformAddress != NULL)
- dvmReleaseTrackedAlloc(platformAddress, self);
JNI_EXIT();
return result;
}
@@ -3695,65 +3623,13 @@
JNI_ENTER();
Object* bufObj = dvmDecodeIndirectRef(env, jbuf);
- Thread* self = _self /*dvmThreadSelf()*/;
- void* result;
/*
- * All Buffer objects have an effectiveDirectAddress field. If it's
- * nonzero, we can just return that value. If not, we have to call
- * through DirectBuffer.getEffectiveAddress(), which as a side-effect
- * will set the effectiveDirectAddress field for direct buffers (and
- * things that wrap direct buffers).
+ * All Buffer objects have an effectiveDirectAddress field.
*/
- result = (void*) dvmGetFieldInt(bufObj,
+ void* result = (void*) dvmGetFieldInt(bufObj,
gDvm.offJavaNioBuffer_effectiveDirectAddress);
- if (result != NULL) {
- //LOGI("fast path for %p\n", buf);
- goto bail;
- }
- /*
- * Start by determining if the object supports the DirectBuffer
- * interfaces. Note this does not guarantee that it's a direct buffer.
- */
- if (!dvmInstanceof(bufObj->clazz,
- gDvm.classOrgApacheHarmonyNioInternalDirectBuffer))
- {
- goto bail;
- }
-
- /*
- * Get a PlatformAddress object with the effective address.
- *
- * If this isn't a direct buffer, the result will be NULL and/or an
- * exception will have been thrown.
- */
- JValue callResult;
- const Method* meth = dvmGetVirtualizedMethod(bufObj->clazz,
- gDvm.methOrgApacheHarmonyNioInternalDirectBuffer_getEffectiveAddress);
- dvmCallMethodA(self, meth, bufObj, false, &callResult, NULL);
- if (dvmGetException(self) != NULL) {
- dvmClearException(self);
- callResult.l = NULL;
- }
-
- Object* platformAddr = callResult.l;
- if (platformAddr == NULL) {
- LOGV("Got request for address of non-direct buffer\n");
- goto bail;
- }
-
- /*
- * Extract the address from the PlatformAddress object. Instead of
- * calling the toLong() method, just grab the field directly. This
- * is faster but more fragile.
- */
- result = (void*) dvmGetFieldInt(platformAddr,
- gDvm.offOrgApacheHarmonyLuniPlatformPlatformAddress_osaddr);
-
- //LOGI("slow path for %p --> %p\n", buf, result);
-
-bail:
JNI_EXIT();
return result;
}
diff --git a/vm/Misc.c b/vm/Misc.c
index 93bc7f4..c8736bb 100644
--- a/vm/Misc.c
+++ b/vm/Misc.c
@@ -716,6 +716,122 @@
return base;
}
+/*
+ * Get some per-thread stats.
+ *
+ * This is currently generated by opening the appropriate "stat" file
+ * in /proc and reading the pile of stuff that comes out.
+ */
+bool dvmGetThreadStats(ProcStatData* pData, pid_t tid)
+{
+ /*
+ int pid;
+ char comm[128];
+ char state;
+ int ppid, pgrp, session, tty_nr, tpgid;
+ unsigned long flags, minflt, cminflt, majflt, cmajflt, utime, stime;
+ long cutime, cstime, priority, nice, zero, itrealvalue;
+ unsigned long starttime, vsize;
+ long rss;
+ unsigned long rlim, startcode, endcode, startstack, kstkesp, kstkeip;
+ unsigned long signal, blocked, sigignore, sigcatch, wchan, nswap, cnswap;
+ int exit_signal, processor;
+ unsigned long rt_priority, policy;
+
+ scanf("%d %s %c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld "
+ "%ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu "
+ "%lu %lu %lu %d %d %lu %lu",
+ &pid, comm, &state, &ppid, &pgrp, &session, &tty_nr, &tpgid,
+ &flags, &minflt, &cminflt, &majflt, &cmajflt, &utime, &stime,
+ &cutime, &cstime, &priority, &nice, &zero, &itrealvalue,
+ &starttime, &vsize, &rss, &rlim, &startcode, &endcode,
+ &startstack, &kstkesp, &kstkeip, &signal, &blocked, &sigignore,
+ &sigcatch, &wchan, &nswap, &cnswap, &exit_signal, &processor,
+ &rt_priority, &policy);
+
+ (new: delayacct_blkio_ticks %llu (since Linux 2.6.18))
+ */
+
+ char nameBuf[64];
+ int i, fd;
+
+ /*
+ * Open and read the appropriate file. This is expected to work on
+ * Linux but will fail on other platforms (e.g. Mac sim).
+ */
+ sprintf(nameBuf, "/proc/self/task/%d/stat", (int) tid);
+ fd = open(nameBuf, O_RDONLY);
+ if (fd < 0) {
+ LOGV("Unable to open '%s': %s\n", nameBuf, strerror(errno));
+ return false;
+ }
+
+ char lineBuf[512]; /* > 2x typical */
+ int cc = read(fd, lineBuf, sizeof(lineBuf)-1);
+ if (cc <= 0) {
+ const char* msg = (cc == 0) ? "unexpected EOF" : strerror(errno);
+ LOGI("Unable to read '%s': %s\n", nameBuf, msg);
+ close(fd);
+ return false;
+ }
+ close(fd);
+ lineBuf[cc] = '\0';
+
+ /*
+ * Skip whitespace-separated tokens. For the most part we can assume
+ * that tokens do not contain spaces, and are separated by exactly one
+ * space character. The only exception is the second field ("comm")
+ * which may contain spaces but is surrounded by parenthesis.
+ */
+ char* cp = strchr(lineBuf, ')');
+ if (cp == NULL)
+ goto parse_fail;
+ cp++;
+ for (i = 2; i < 13; i++) {
+ cp = strchr(cp+1, ' ');
+ if (cp == NULL)
+ goto parse_fail;
+ }
+
+ /*
+ * Grab utime/stime.
+ */
+ char* endp;
+ pData->utime = strtoul(cp+1, &endp, 10);
+ if (endp == cp+1)
+ LOGI("Warning: strtoul failed on utime ('%.30s...')\n", cp);
+
+ cp = strchr(cp+1, ' ');
+ if (cp == NULL)
+ goto parse_fail;
+
+ pData->stime = strtoul(cp+1, &endp, 10);
+ if (endp == cp+1)
+ LOGI("Warning: strtoul failed on stime ('%.30s...')\n", cp);
+
+ /*
+ * Skip more stuff we don't care about.
+ */
+ for (i = 14; i < 38; i++) {
+ cp = strchr(cp+1, ' ');
+ if (cp == NULL)
+ goto parse_fail;
+ }
+
+ /*
+ * Grab processor number.
+ */
+ pData->processor = strtol(cp+1, &endp, 10);
+ if (endp == cp+1)
+        LOGI("Warning: strtol failed on processor ('%.30s...')\n", cp);
+
+ return true;
+
+parse_fail:
+ LOGI("stat parse failed (%s)\n", lineBuf);
+ return false;
+}
+
/* documented in header file */
const char* dvmPathToAbsolutePortion(const char* path) {
if (path == NULL) {
diff --git a/vm/Misc.h b/vm/Misc.h
index fd3c531..ec364d7 100644
--- a/vm/Misc.h
+++ b/vm/Misc.h
@@ -307,6 +307,16 @@
void *dvmAllocRegion(size_t size, int prot, const char *name);
/*
+ * Get some per-thread stats from /proc/self/task/N/stat.
+ */
+typedef struct {
+ unsigned long utime; /* number of jiffies scheduled in user mode */
+ unsigned long stime; /* number of jiffies scheduled in kernel mode */
+ int processor; /* number of CPU that last executed thread */
+} ProcStatData;
+bool dvmGetThreadStats(ProcStatData* pData, pid_t tid);
+
+/*
* Returns the pointer to the "absolute path" part of the given path
* string, treating first (if any) instance of "/./" as a sentinel
* indicating the start of the absolute path. If the path isn't absolute
diff --git a/vm/Profile.c b/vm/Profile.c
index d5dcc36..957ef0d 100644
--- a/vm/Profile.c
+++ b/vm/Profile.c
@@ -665,7 +665,6 @@
dvmUnlockMutex(&state->startStopLock);
}
-
/*
* We just did something with a method. Emit a record.
*
@@ -727,6 +726,48 @@
*ptr++ = (u1) (clockDiff >> 24);
}
+#if defined(WITH_INLINE_PROFILING)
+#include <interp/InterpDefs.h>
+
+/*
+ * Register the METHOD_TRACE_ENTER action for the fast interpreter and
+ * JIT'ed code.
+ */
+void dvmFastMethodTraceEnter(const Method* method,
+ const struct InterpState* interpState)
+{
+ if (gDvm.activeProfilers) {
+ dvmMethodTraceAdd(interpState->self, method, METHOD_TRACE_ENTER);
+ }
+}
+
+/*
+ * Register the METHOD_TRACE_EXIT action for the fast interpreter and
+ * JIT'ed code for Java methods. The about-to-return callee method can be
+ * retrieved from interpState->method.
+ */
+void dvmFastJavaMethodTraceExit(const struct InterpState* interpState)
+{
+ if (gDvm.activeProfilers) {
+ dvmMethodTraceAdd(interpState->self, interpState->method,
+ METHOD_TRACE_EXIT);
+ }
+}
+
+/*
+ * Register the METHOD_TRACE_EXIT action for the fast interpreter and
+ * JIT'ed code for JNI methods. The about-to-return JNI callee method is passed
+ * in explicitly.
+ */
+void dvmFastNativeMethodTraceExit(const Method* method,
+ const struct InterpState* interpState)
+{
+ if (gDvm.activeProfilers) {
+ dvmMethodTraceAdd(interpState->self, method, METHOD_TRACE_EXIT);
+ }
+}
+#endif
+
/*
* We just did something with a method. Emit a record by setting a value
* in a magic memory location.
@@ -858,6 +899,9 @@
*/
void dvmStartInstructionCounting()
{
+#if defined(WITH_INLINE_PROFILING)
+    LOGW("Instruction counting not supported with inline profiling\n");
+#endif
updateActiveProfilers(1);
/* in theory we should make this an atomic inc; in practice not important */
gDvm.instructionCountEnableCount++;
diff --git a/vm/Profile.h b/vm/Profile.h
index 08bbf61..e2be2e0 100644
--- a/vm/Profile.h
+++ b/vm/Profile.h
@@ -163,6 +163,15 @@
void dvmMethodTraceClassPrepBegin(void);
void dvmMethodTraceClassPrepEnd(void);
+#if defined(WITH_INLINE_PROFILING)
+struct InterpState; // extern
+void dvmFastMethodTraceEnter(const Method* method,
+ const struct InterpState* interpState);
+void dvmFastJavaMethodTraceExit(const struct InterpState* interpState);
+void dvmFastNativeMethodTraceExit(const Method* method,
+ const struct InterpState* interpState);
+#endif
+
/*
* Start/stop alloc counting.
*/
diff --git a/vm/Thread.c b/vm/Thread.c
index f27cc75..50ecdc0 100644
--- a/vm/Thread.c
+++ b/vm/Thread.c
@@ -3505,7 +3505,6 @@
int policy; // pthread policy
struct sched_param sp; // pthread scheduling parameters
char schedstatBuf[64]; // contents of /proc/[pid]/task/[tid]/schedstat
- int schedstatFd;
/*
* Get the java.lang.Thread object. This function gets called from
@@ -3580,19 +3579,33 @@
thread->systemTid, getpriority(PRIO_PROCESS, thread->systemTid),
policy, sp.sched_priority, schedulerGroupBuf, (int)thread->handle);
- snprintf(schedstatBuf, sizeof(schedstatBuf), "/proc/%d/task/%d/schedstat",
- getpid(), thread->systemTid);
- schedstatFd = open(schedstatBuf, O_RDONLY);
+ /* get some bits from /proc/self/stat */
+ ProcStatData procStatData;
+ if (!dvmGetThreadStats(&procStatData, thread->systemTid)) {
+ /* failed, use zeroed values */
+ memset(&procStatData, 0, sizeof(procStatData));
+ }
+
+ /* grab the scheduler stats for this thread */
+ snprintf(schedstatBuf, sizeof(schedstatBuf), "/proc/self/task/%d/schedstat",
+ thread->systemTid);
+ int schedstatFd = open(schedstatBuf, O_RDONLY);
+ strcpy(schedstatBuf, "0 0 0"); /* show this if open/read fails */
if (schedstatFd >= 0) {
- int bytes;
+ ssize_t bytes;
bytes = read(schedstatFd, schedstatBuf, sizeof(schedstatBuf) - 1);
close(schedstatFd);
- if (bytes > 1) {
- schedstatBuf[bytes-1] = 0; // trailing newline
- dvmPrintDebugMessage(target, " | schedstat=( %s )\n", schedstatBuf);
+ if (bytes >= 1) {
+ schedstatBuf[bytes-1] = '\0'; /* remove trailing newline */
}
}
+ /* show what we got */
+ dvmPrintDebugMessage(target,
+ " | schedstat=( %s ) utm=%lu stm=%lu core=%d\n",
+ schedstatBuf, procStatData.utime, procStatData.stime,
+ procStatData.processor);
+
#ifdef WITH_MONITOR_TRACKING
if (!isRunning) {
LockedObjectData* lod = thread->pLockedObjects;
diff --git a/vm/alloc/Copying.c b/vm/alloc/Copying.c
index 940d9f5..01c238a 100644
--- a/vm/alloc/Copying.c
+++ b/vm/alloc/Copying.c
@@ -2154,7 +2154,6 @@
scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationFactory);
scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMember);
scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyLangAnnotationAnnotationMemberArray);
- scavengeReference((Object **)(void *)&gDvm.classOrgApacheHarmonyNioInternalDirectBuffer);
scavengeReference((Object **)(void *)&gDvm.classArrayBoolean);
scavengeReference((Object **)(void *)&gDvm.classArrayChar);
scavengeReference((Object **)(void *)&gDvm.classArrayFloat);
diff --git a/vm/analysis/CodeVerify.c b/vm/analysis/CodeVerify.c
index 3c7be11..042b4c0 100644
--- a/vm/analysis/CodeVerify.c
+++ b/vm/analysis/CodeVerify.c
@@ -5423,6 +5423,7 @@
case OP_INVOKE_VIRTUAL_QUICK_RANGE:
case OP_INVOKE_SUPER_QUICK:
case OP_INVOKE_SUPER_QUICK_RANGE:
+ case OP_RETURN_VOID_BARRIER:
failure = VERIFY_ERROR_GENERIC;
break;
@@ -5437,7 +5438,6 @@
case OP_UNUSED_79:
case OP_UNUSED_7A:
case OP_BREAKPOINT:
- case OP_UNUSED_F1:
case OP_UNUSED_FF:
failure = VERIFY_ERROR_GENERIC;
break;
diff --git a/vm/analysis/DexPrepare.c b/vm/analysis/DexPrepare.c
index 747672d..99aeabe 100644
--- a/vm/analysis/DexPrepare.c
+++ b/vm/analysis/DexPrepare.c
@@ -925,30 +925,38 @@
const char* classDescriptor;
bool verified = false;
+ if (clazz->pDvmDex->pDexFile != pDexFile) {
+ /*
+ * The current DEX file defined a class that is also present in the
+ * bootstrap class path. The class loader favored the bootstrap
+ * version, which means that we have a pointer to a class that is
+ * (a) not the one we want to examine, and (b) mapped read-only,
+ * so we will seg fault if we try to rewrite instructions inside it.
+ */
+ LOGD("DexOpt: not verifying/optimizing '%s': multiple definitions\n",
+ clazz->descriptor);
+ return;
+ }
+
classDescriptor = dexStringByTypeIdx(pDexFile, pClassDef->classIdx);
/*
* First, try to verify it.
*/
if (doVerify) {
- if (clazz->pDvmDex->pDexFile != pDexFile) {
- LOGD("DexOpt: not verifying '%s': multiple definitions\n",
- classDescriptor);
+ if (dvmVerifyClass(clazz)) {
+ /*
+ * Set the "is preverified" flag in the DexClassDef. We
+ * do it here, rather than in the ClassObject structure,
+ * because the DexClassDef is part of the odex file.
+ */
+ assert((clazz->accessFlags & JAVA_FLAGS_MASK) ==
+ pClassDef->accessFlags);
+ ((DexClassDef*)pClassDef)->accessFlags |= CLASS_ISPREVERIFIED;
+ verified = true;
} else {
- if (dvmVerifyClass(clazz)) {
- /*
- * Set the "is preverified" flag in the DexClassDef. We
- * do it here, rather than in the ClassObject structure,
- * because the DexClassDef is part of the odex file.
- */
- assert((clazz->accessFlags & JAVA_FLAGS_MASK) ==
- pClassDef->accessFlags);
- ((DexClassDef*)pClassDef)->accessFlags |= CLASS_ISPREVERIFIED;
- verified = true;
- } else {
- // TODO: log when in verbose mode
- LOGV("DexOpt: '%s' failed verification\n", classDescriptor);
- }
+ // TODO: log when in verbose mode
+ LOGV("DexOpt: '%s' failed verification\n", classDescriptor);
}
}
diff --git a/vm/analysis/Optimize.c b/vm/analysis/Optimize.c
index 7ad4e45..f07addf 100644
--- a/vm/analysis/Optimize.c
+++ b/vm/analysis/Optimize.c
@@ -46,6 +46,8 @@
MethodType methodType);
static bool rewriteExecuteInlineRange(Method* method, u2* insns,
MethodType methodType);
+static void rewriteReturnVoid(Method* method, u2* insns);
+static bool needsReturnBarrier(Method* method);
/*
@@ -180,6 +182,9 @@
if (dvmIsNativeMethod(method) || dvmIsAbstractMethod(method))
return;
+ /* compute this once per method */
+ bool needRetBar = needsReturnBarrier(method);
+
insns = (u2*) method->insns;
assert(insns != NULL);
insnsSize = dvmGetMethodInsnsSize(method);
@@ -274,6 +279,11 @@
rewriteStaticField(method, insns, volatileOpc);
notMatched = false;
break;
+ case OP_RETURN_VOID:
+ if (needRetBar)
+ rewriteReturnVoid(method, insns);
+ notMatched = false;
+ break;
default:
assert(notMatched);
break;
@@ -337,7 +347,7 @@
/*
* Update a 16-bit code unit in "meth".
*/
-static inline void updateCode(const Method* meth, u2* ptr, u2 newVal)
+static inline void updateCodeUnit(const Method* meth, u2* ptr, u2 newVal)
{
if (gDvm.optimizing) {
/* dexopt time, alter the output directly */
@@ -349,6 +359,14 @@
}
/*
+ * Update the 8-bit opcode portion of a 16-bit code unit in "meth".
+ */
+static inline void updateOpCode(const Method* meth, u2* ptr, OpCode opCode)
+{
+ updateCodeUnit(meth, ptr, (ptr[0] & 0xff00) | (u2) opCode);
+}
+
+/*
* If "referrer" and "resClass" don't come from the same DEX file, and
* the DEX we're working on is not destined for the bootstrap class path,
* tweak the class loader so package-access checks work correctly.
@@ -655,12 +673,12 @@
}
if (volatileOpc != OP_NOP && dvmIsVolatileField(&instField->field)) {
- updateCode(method, insns, (insns[0] & 0xff00) | (u2) volatileOpc);
+ updateOpCode(method, insns, volatileOpc);
LOGV("DexOpt: rewrote ifield access %s.%s --> volatile\n",
instField->field.clazz->descriptor, instField->field.name);
} else if (quickOpc != OP_NOP) {
- updateCode(method, insns, (insns[0] & 0xff00) | (u2) quickOpc);
- updateCode(method, insns+1, (u2) instField->byteOffset);
+ updateOpCode(method, insns, quickOpc);
+ updateCodeUnit(method, insns+1, (u2) instField->byteOffset);
LOGV("DexOpt: rewrote ifield access %s.%s --> %d\n",
instField->field.clazz->descriptor, instField->field.name,
instField->byteOffset);
@@ -700,7 +718,7 @@
}
if (dvmIsVolatileField(&staticField->field)) {
- updateCode(method, insns, (insns[0] & 0xff00) | (u2) volatileOpc);
+ updateOpCode(method, insns, volatileOpc);
LOGV("DexOpt: rewrote sfield access %s.%s --> volatile\n",
staticField->field.clazz->descriptor, staticField->field.name);
}
@@ -873,8 +891,8 @@
* Note: Method->methodIndex is a u2 and is range checked during the
* initial load.
*/
- updateCode(method, insns, (insns[0] & 0xff00) | (u2) newOpc);
- updateCode(method, insns+1, baseMethod->methodIndex);
+ updateOpCode(method, insns, newOpc);
+ updateCodeUnit(method, insns+1, baseMethod->methodIndex);
//LOGI("DexOpt: rewrote call to %s.%s --> %s.%s\n",
// method->clazz->descriptor, method->name,
@@ -918,8 +936,7 @@
* OP_INVOKE_DIRECT when debugging is enabled.
*/
assert((insns[0] & 0xff) == OP_INVOKE_DIRECT);
- updateCode(method, insns,
- (insns[0] & 0xff00) | (u2) OP_INVOKE_DIRECT_EMPTY);
+ updateOpCode(method, insns, OP_INVOKE_DIRECT_EMPTY);
//LOGI("DexOpt: marked-empty call to %s.%s --> %s.%s\n",
// method->clazz->descriptor, method->name,
@@ -1050,9 +1067,8 @@
assert((insns[0] & 0xff) == OP_INVOKE_DIRECT ||
(insns[0] & 0xff) == OP_INVOKE_STATIC ||
(insns[0] & 0xff) == OP_INVOKE_VIRTUAL);
- updateCode(method, insns,
- (insns[0] & 0xff00) | (u2) OP_EXECUTE_INLINE);
- updateCode(method, insns+1, (u2) inlineSubs->inlineIdx);
+ updateOpCode(method, insns, OP_EXECUTE_INLINE);
+ updateCodeUnit(method, insns+1, (u2) inlineSubs->inlineIdx);
//LOGI("DexOpt: execute-inline %s.%s --> %s.%s\n",
// method->clazz->descriptor, method->name,
@@ -1091,9 +1107,8 @@
assert((insns[0] & 0xff) == OP_INVOKE_DIRECT_RANGE ||
(insns[0] & 0xff) == OP_INVOKE_STATIC_RANGE ||
(insns[0] & 0xff) == OP_INVOKE_VIRTUAL_RANGE);
- updateCode(method, insns,
- (insns[0] & 0xff00) | (u2) OP_EXECUTE_INLINE_RANGE);
- updateCode(method, insns+1, (u2) inlineSubs->inlineIdx);
+ updateOpCode(method, insns, OP_EXECUTE_INLINE_RANGE);
+ updateCodeUnit(method, insns+1, (u2) inlineSubs->inlineIdx);
//LOGI("DexOpt: execute-inline/range %s.%s --> %s.%s\n",
// method->clazz->descriptor, method->name,
@@ -1106,3 +1121,54 @@
return false;
}
+
+/*
+ * Returns "true" if the return-void instructions in this method should
+ * be converted to return-void-barrier.
+ *
+ * This is needed to satisfy a Java Memory Model requirement regarding
+ * the construction of objects with final fields. (This does not apply
+ * to <clinit> or static fields, since appropriate barriers are guaranteed
+ * by the class initialization process.)
+ */
+static bool needsReturnBarrier(Method* method)
+{
+ if (!gDvm.dexOptForSmp)
+ return false;
+ if (strcmp(method->name, "<init>") != 0)
+ return false;
+
+ /*
+ * Check to see if the class has any final fields. If not, we don't
+ * need to generate a barrier instruction.
+ */
+ const ClassObject* clazz = method->clazz;
+ int idx = clazz->ifieldCount;
+ while (--idx >= 0) {
+ if (dvmIsFinalField(&clazz->ifields[idx].field))
+ break;
+ }
+ if (idx < 0)
+ return false;
+
+ /*
+ * In theory, we only need to do this if the method actually modifies
+ * a final field. In practice, non-constructor methods are allowed
+ * to modify final fields by the VM, and there are tools that rely on
+ * this behavior. (The compiler does not allow it.)
+ *
+ * If we alter the verifier to restrict final-field updates to
+ * constructors, we can tighten this up as well.
+ */
+
+ return true;
+}
+
+/*
+ * Convert a return-void to a return-void-barrier.
+ */
+static void rewriteReturnVoid(Method* method, u2* insns)
+{
+ assert((insns[0] & 0xff) == OP_RETURN_VOID);
+ updateOpCode(method, insns, OP_RETURN_VOID_BARRIER);
+}
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index 60f060c..8c26989 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -741,7 +741,11 @@
dvmLockMutex(&gDvmJit.tableLock);
jitActive = gDvmJit.pProfTable != NULL;
- jitActivate = !(gDvm.debuggerActive || (gDvm.activeProfilers > 0));
+ bool disableJit = gDvm.debuggerActive;
+#if !defined(WITH_INLINE_PROFILING)
+ disableJit = disableJit || (gDvm.activeProfilers > 0);
+#endif
+ jitActivate = !disableJit;
if (jitActivate && !jitActive) {
gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
diff --git a/vm/compiler/Dataflow.c b/vm/compiler/Dataflow.c
index 89c5b35..cd1761c 100644
--- a/vm/compiler/Dataflow.c
+++ b/vm/compiler/Dataflow.c
@@ -752,7 +752,7 @@
// F0 OP_INVOKE_DIRECT_EMPTY
DF_NOP,
- // F1 OP_UNUSED_F1
+ // F1 OP_RETURN_VOID_BARRIER
DF_NOP,
// F2 OP_IGET_QUICK
diff --git a/vm/compiler/codegen/arm/CodegenDriver.c b/vm/compiler/codegen/arm/CodegenDriver.c
index 0b6e433..177b1c6 100644
--- a/vm/compiler/codegen/arm/CodegenDriver.c
+++ b/vm/compiler/codegen/arm/CodegenDriver.c
@@ -31,8 +31,7 @@
{
int regCardBase = dvmCompilerAllocTemp(cUnit);
int regCardNo = dvmCompilerAllocTemp(cUnit);
- opRegImm(cUnit, kOpCmp, valReg, 0); /* storing null? */
- ArmLIR *branchOver = opCondBranch(cUnit, kArmCondEq);
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondEq, valReg, 0);
loadWordDisp(cUnit, rGLUE, offsetof(InterpState, cardTable),
regCardBase);
opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
@@ -541,8 +540,7 @@
LOAD_FUNC_ADDR(cUnit, r2, (int)dvmCanPutArrayElement);
/* Are we storing null? If so, avoid check */
- opRegImm(cUnit, kOpCmp, r0, 0);
- ArmLIR *branchOver = opCondBranch(cUnit, kArmCondEq);
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondEq, r0, 0);
/* Make sure the types are compatible */
loadWordDisp(cUnit, regArray, offsetof(Object, clazz), r1);
@@ -1175,9 +1173,7 @@
loadWordDisp(cUnit, r7, methodIndex * 4, r0);
/* Check if rechain limit is reached */
- opRegImm(cUnit, kOpCmp, r1, 0);
-
- ArmLIR *bypassRechaining = opCondBranch(cUnit, kArmCondGt);
+ ArmLIR *bypassRechaining = genCmpImmBranch(cUnit, kArmCondGt, r1, 0);
loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
jitToInterpEntries.dvmJitToPatchPredictedChain), r7);
@@ -1291,8 +1287,8 @@
LOAD_FUNC_ADDR(cUnit, r2, (int)dvmUnlockObject);
/* Do the call */
opReg(cUnit, kOpBlx, r2);
- opRegImm(cUnit, kOpCmp, r0, 0); /* Did we throw? */
- ArmLIR *branchOver = opCondBranch(cUnit, kArmCondNe);
+ /* Did we throw? */
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
loadConstant(cUnit, r0,
(int) (cUnit->method->insns + mir->offset +
dexGetInstrWidthAbs(gDvm.instrWidth, OP_MONITOR_EXIT)));
@@ -1328,12 +1324,12 @@
}
switch (dalvikOpCode) {
case OP_RETURN_VOID:
+ case OP_RETURN_VOID_BARRIER:
genReturnCommon(cUnit,mir);
break;
case OP_UNUSED_73:
case OP_UNUSED_79:
case OP_UNUSED_7A:
- case OP_UNUSED_F1:
case OP_UNUSED_FF:
LOGE("Codegen: got unused opcode 0x%x\n",dalvikOpCode);
return true;
@@ -1608,8 +1604,7 @@
opReg(cUnit, kOpBlx, r2);
dvmCompilerClobberCallRegs(cUnit);
/* generate a branch over if allocation is successful */
- opRegImm(cUnit, kOpCmp, r0, 0); /* NULL? */
- ArmLIR *branchOver = opCondBranch(cUnit, kArmCondNe);
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
/*
* OOM exception needs to be thrown here and cannot re-execute
*/
@@ -1650,8 +1645,9 @@
loadConstant(cUnit, r1, (int) classPtr );
rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- opRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0); /* Null? */
- ArmLIR *branch1 = opCondBranch(cUnit, kArmCondEq);
+ /* Null? */
+ ArmLIR *branch1 = genCmpImmBranch(cUnit, kArmCondEq,
+ rlSrc.lowReg, 0);
/*
* rlSrc.lowReg now contains object->clazz. Note that
* it could have been allocated r0, but we're okay so long
@@ -2255,8 +2251,7 @@
opReg(cUnit, kOpBlx, r3);
dvmCompilerClobberCallRegs(cUnit);
/* generate a branch over if allocation is successful */
- opRegImm(cUnit, kOpCmp, r0, 0); /* NULL? */
- ArmLIR *branchOver = opCondBranch(cUnit, kArmCondNe);
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
/*
* OOM exception needs to be thrown here and cannot re-execute
*/
@@ -2295,10 +2290,8 @@
dvmCompilerFlushAllRegs(cUnit); /* Everything to home location */
loadValueDirectFixed(cUnit, rlSrc, r0); /* Ref */
loadConstant(cUnit, r2, (int) classPtr );
-//TUNING: compare to 0 primative to allow use of CB[N]Z
- opRegImm(cUnit, kOpCmp, r0, 0); /* NULL? */
/* When taken r0 has NULL which can be used for store directly */
- ArmLIR *branch1 = opCondBranch(cUnit, kArmCondEq);
+ ArmLIR *branch1 = genCmpImmBranch(cUnit, kArmCondEq, r0, 0);
/* r1 now contains object->clazz */
loadWordDisp(cUnit, r0, offsetof(Object, clazz), r1);
/* r1 now contains object->clazz */
@@ -2696,8 +2689,7 @@
opReg(cUnit, kOpBlx, r2);
dvmCompilerClobberCallRegs(cUnit);
/* generate a branch over if successful */
- opRegImm(cUnit, kOpCmp, r0, 0); /* NULL? */
- ArmLIR *branchOver = opCondBranch(cUnit, kArmCondNe);
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
loadConstant(cUnit, r0,
(int) (cUnit->method->insns + mir->offset));
genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
@@ -3043,8 +3035,7 @@
dvmCompilerClobberCallRegs(cUnit);
/* generate a branch over if the interface method is resolved */
- opRegImm(cUnit, kOpCmp, r0, 0); /* NULL? */
- ArmLIR *branchOver = opCondBranch(cUnit, kArmCondNe);
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
/*
* calleeMethod == NULL -> throw
*/
@@ -3060,9 +3051,8 @@
genRegCopy(cUnit, r1, r8);
/* Check if rechain limit is reached */
- opRegImm(cUnit, kOpCmp, r1, 0);
-
- ArmLIR *bypassRechaining = opCondBranch(cUnit, kArmCondGt);
+ ArmLIR *bypassRechaining = genCmpImmBranch(cUnit, kArmCondGt,
+ r1, 0);
loadWordDisp(cUnit, rGLUE, offsetof(InterpState,
jitToInterpEntries.dvmJitToPatchPredictedChain), r7);
@@ -3436,8 +3426,8 @@
}
opReg(cUnit, kOpBlx, r4PC);
opRegImm(cUnit, kOpAdd, r13, 8);
- opRegImm(cUnit, kOpCmp, r0, 0); /* NULL? */
- ArmLIR *branchOver = opCondBranch(cUnit, kArmCondNe);
+ /* NULL? */
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
loadConstant(cUnit, r0,
(int) (cUnit->method->insns + mir->offset));
genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
diff --git a/vm/compiler/codegen/arm/CodegenFactory.c b/vm/compiler/codegen/arm/CodegenFactory.c
index 157bd1f..1de9f90 100644
--- a/vm/compiler/codegen/arm/CodegenFactory.c
+++ b/vm/compiler/codegen/arm/CodegenFactory.c
@@ -257,6 +257,20 @@
}
}
}
+
+/*
+ * Perform a "reg cmp imm" operation and jump to the PCR region if condition
+ * satisfies.
+ */
+static ArmLIR *genRegImmCheck(CompilationUnit *cUnit,
+ ArmConditionCode cond, int reg,
+ int checkValue, int dOffset,
+ ArmLIR *pcrLabel)
+{
+ ArmLIR *branch = genCmpImmBranch(cUnit, cond, reg, checkValue);
+ return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
+}
+
/*
* Perform null-check on a register. sReg is the ssa register being checked,
* and mReg is the machine register holding the actual value. If internal state
diff --git a/vm/compiler/codegen/arm/Thumb/Factory.c b/vm/compiler/codegen/arm/Thumb/Factory.c
index 85f612e..e18aa75 100644
--- a/vm/compiler/codegen/arm/Thumb/Factory.c
+++ b/vm/compiler/codegen/arm/Thumb/Factory.c
@@ -859,21 +859,18 @@
}
}
-static inline ArmLIR *genRegImmCheck(CompilationUnit *cUnit,
+static ArmLIR *genCmpImmBranch(CompilationUnit *cUnit,
ArmConditionCode cond, int reg,
- int checkValue, int dOffset,
- ArmLIR *pcrLabel)
+ int checkValue)
{
- int tReg;
- ArmLIR *res;
if ((checkValue & 0xff) != checkValue) {
- tReg = dvmCompilerAllocTemp(cUnit);
+ int tReg = dvmCompilerAllocTemp(cUnit);
loadConstant(cUnit, tReg, checkValue);
- res = genRegRegCheck(cUnit, cond, reg, tReg, dOffset, pcrLabel);
+ newLIR2(cUnit, kThumbCmpRR, reg, tReg);
dvmCompilerFreeTemp(cUnit, tReg);
- return res;
+ } else {
+ newLIR2(cUnit, kThumbCmpRI8, reg, checkValue);
}
- newLIR2(cUnit, kThumbCmpRI8, reg, checkValue);
ArmLIR *branch = newLIR2(cUnit, kThumbBCond, 0, cond);
- return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
+ return branch;
}
diff --git a/vm/compiler/codegen/arm/Thumb2/Factory.c b/vm/compiler/codegen/arm/Thumb2/Factory.c
index 2bf2940..5074e42 100644
--- a/vm/compiler/codegen/arm/Thumb2/Factory.c
+++ b/vm/compiler/codegen/arm/Thumb2/Factory.c
@@ -1088,15 +1088,13 @@
loadBaseDispWide(cUnit, NULL, base, 0, lowReg, highReg, INVALID_SREG);
}
-
/*
- * Perform a "reg cmp imm" operation and jump to the PCR region if condition
- * satisfies.
+ * Generate a register comparison to an immediate and branch. Caller
+ * is responsible for setting branch target field.
*/
-static ArmLIR *genRegImmCheck(CompilationUnit *cUnit,
+static ArmLIR *genCmpImmBranch(CompilationUnit *cUnit,
ArmConditionCode cond, int reg,
- int checkValue, int dOffset,
- ArmLIR *pcrLabel)
+ int checkValue)
{
ArmLIR *branch;
int modImm;
@@ -1118,7 +1116,7 @@
}
branch = newLIR2(cUnit, kThumbBCond, 0, cond);
}
- return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
+ return branch;
}
static ArmLIR *fpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
diff --git a/vm/compiler/codegen/arm/Thumb2/Gen.c b/vm/compiler/codegen/arm/Thumb2/Gen.c
index 3632388..8858bb0 100644
--- a/vm/compiler/codegen/arm/Thumb2/Gen.c
+++ b/vm/compiler/codegen/arm/Thumb2/Gen.c
@@ -220,11 +220,6 @@
hopTarget->defMask = ENCODE_ALL;
hopBranch->generic.target = (LIR *)hopTarget;
- // Clear the lock
- ArmLIR *inst = newLIR0(cUnit, kThumb2Clrex);
- // ...and make it a scheduling barrier
- inst->defMask = ENCODE_ALL;
-
// Export PC (part 1)
loadConstant(cUnit, r3, (int) (cUnit->method->insns + mir->offset));
@@ -290,8 +285,8 @@
sizeof(StackSaveArea) -
offsetof(StackSaveArea, xtra.currentPc));
opReg(cUnit, kOpBlx, r7);
- opRegImm(cUnit, kOpCmp, r0, 0); /* Did we throw? */
- ArmLIR *branchOver = opCondBranch(cUnit, kArmCondNe);
+ /* Did we throw? */
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
loadConstant(cUnit, r0,
(int) (cUnit->method->insns + mir->offset +
dexGetInstrWidthAbs(gDvm.instrWidth, OP_MONITOR_EXIT)));
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
index a137d22..aaadc00 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
@@ -41,5 +41,12 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
bx lr @ return to the callee-chaining cell
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
index 2557863..eeac2b0 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -41,9 +41,24 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rGLUE
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
blx r8 @ off to the native code
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ restore r2 and r6
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ native return; r9=self, r10=newSaveArea
@ equivalent to dvmPopJniLocals
ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
index 5be6978..044d0ee 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -46,6 +46,13 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
@ Start executing the callee
#if defined(WITH_JIT_TUNING)
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index b7ab971..b2e71ee 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -5,6 +5,13 @@
* address in the code cache following the invoke instruction. Otherwise
* return to the special dvmJitToInterpNoChain entry point.
*/
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rGlue
+ LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldr r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
diff --git a/vm/compiler/template/armv5te/footer.S b/vm/compiler/template/armv5te/footer.S
index 73fc3d7..a391dbe 100644
--- a/vm/compiler/template/armv5te/footer.S
+++ b/vm/compiler/template/armv5te/footer.S
@@ -22,9 +22,22 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2: methodToCall, r6: rGLUE
+ stmfd sp!, {r2,r6}
+ stmfd sp!, {r0-r3}
+ mov r0, r2
+ mov r1, r6
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3}
+#endif
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1}
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ Refresh Jit's on/off status
ldr r3, [rGLUE, #offGlue_ppJitProfTable]
@@ -96,6 +109,14 @@
.LdvmSelfVerificationMemOpDecode:
.word dvmSelfVerificationMemOpDecode
#endif
+#if defined(WITH_INLINE_PROFILING)
+.LdvmFastMethodTraceEnter:
+ .word dvmFastMethodTraceEnter
+.LdvmFastNativeMethodTraceExit:
+ .word dvmFastNativeMethodTraceExit
+.LdvmFastJavaMethodTraceExit:
+ .word dvmFastJavaMethodTraceExit
+#endif
.L__aeabi_cdcmple:
.word __aeabi_cdcmple
.L__aeabi_cfcmple:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index 60664fa..655bc54 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -177,6 +177,13 @@
* address in the code cache following the invoke instruction. Otherwise
* return to the special dvmJitToInterpNoChain entry point.
*/
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rGlue
+ LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldr r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
@@ -274,6 +281,13 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
@ Start executing the callee
#if defined(WITH_JIT_TUNING)
@@ -329,6 +343,13 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
bx lr @ return to the callee-chaining cell
@@ -436,9 +457,24 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rGLUE
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
blx r8 @ off to the native code
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ restore r2 and r6
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ native return; r9=self, r10=newSaveArea
@ equivalent to dvmPopJniLocals
ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
@@ -1458,9 +1494,22 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2: methodToCall, r6: rGLUE
+ stmfd sp!, {r2,r6}
+ stmfd sp!, {r0-r3}
+ mov r0, r2
+ mov r1, r6
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3}
+#endif
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1}
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ Refresh Jit's on/off status
ldr r3, [rGLUE, #offGlue_ppJitProfTable]
@@ -1532,6 +1581,14 @@
.LdvmSelfVerificationMemOpDecode:
.word dvmSelfVerificationMemOpDecode
#endif
+#if defined(WITH_INLINE_PROFILING)
+.LdvmFastMethodTraceEnter:
+ .word dvmFastMethodTraceEnter
+.LdvmFastNativeMethodTraceExit:
+ .word dvmFastNativeMethodTraceExit
+.LdvmFastJavaMethodTraceExit:
+ .word dvmFastJavaMethodTraceExit
+#endif
.L__aeabi_cdcmple:
.word __aeabi_cdcmple
.L__aeabi_cfcmple:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index ccdbcca..ff552bb 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -177,6 +177,13 @@
* address in the code cache following the invoke instruction. Otherwise
* return to the special dvmJitToInterpNoChain entry point.
*/
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rGlue
+ LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldr r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
@@ -274,6 +281,13 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
@ Start executing the callee
#if defined(WITH_JIT_TUNING)
@@ -329,6 +343,13 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
bx lr @ return to the callee-chaining cell
@@ -436,9 +457,24 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rGLUE
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
blx r8 @ off to the native code
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ restore r2 and r6
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ native return; r9=self, r10=newSaveArea
@ equivalent to dvmPopJniLocals
ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
@@ -1181,9 +1217,22 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2: methodToCall, r6: rGLUE
+ stmfd sp!, {r2,r6}
+ stmfd sp!, {r0-r3}
+ mov r0, r2
+ mov r1, r6
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3}
+#endif
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1}
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ Refresh Jit's on/off status
ldr r3, [rGLUE, #offGlue_ppJitProfTable]
@@ -1255,6 +1304,14 @@
.LdvmSelfVerificationMemOpDecode:
.word dvmSelfVerificationMemOpDecode
#endif
+#if defined(WITH_INLINE_PROFILING)
+.LdvmFastMethodTraceEnter:
+ .word dvmFastMethodTraceEnter
+.LdvmFastNativeMethodTraceExit:
+ .word dvmFastNativeMethodTraceExit
+.LdvmFastJavaMethodTraceExit:
+ .word dvmFastJavaMethodTraceExit
+#endif
.L__aeabi_cdcmple:
.word __aeabi_cdcmple
.L__aeabi_cfcmple:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index e520056..34931f8 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -177,6 +177,13 @@
* address in the code cache following the invoke instruction. Otherwise
* return to the special dvmJitToInterpNoChain entry point.
*/
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rGlue
+ LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldr r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
@@ -274,6 +281,13 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
@ Start executing the callee
#if defined(WITH_JIT_TUNING)
@@ -329,6 +343,13 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
bx lr @ return to the callee-chaining cell
@@ -436,9 +457,24 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rGLUE
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
blx r8 @ off to the native code
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ restore r2 and r6
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ native return; r9=self, r10=newSaveArea
@ equivalent to dvmPopJniLocals
ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
@@ -1458,9 +1494,22 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2: methodToCall, r6: rGLUE
+ stmfd sp!, {r2,r6}
+ stmfd sp!, {r0-r3}
+ mov r0, r2
+ mov r1, r6
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3}
+#endif
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1}
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ Refresh Jit's on/off status
ldr r3, [rGLUE, #offGlue_ppJitProfTable]
@@ -1532,6 +1581,14 @@
.LdvmSelfVerificationMemOpDecode:
.word dvmSelfVerificationMemOpDecode
#endif
+#if defined(WITH_INLINE_PROFILING)
+.LdvmFastMethodTraceEnter:
+ .word dvmFastMethodTraceEnter
+.LdvmFastNativeMethodTraceExit:
+ .word dvmFastNativeMethodTraceExit
+.LdvmFastJavaMethodTraceExit:
+ .word dvmFastJavaMethodTraceExit
+#endif
.L__aeabi_cdcmple:
.word __aeabi_cdcmple
.L__aeabi_cfcmple:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index 87a0691..b10beef 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -177,6 +177,13 @@
* address in the code cache following the invoke instruction. Otherwise
* return to the special dvmJitToInterpNoChain entry point.
*/
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rGlue
+ LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
ldr r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
@@ -274,6 +281,13 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
@ Start executing the callee
#if defined(WITH_JIT_TUNING)
@@ -329,6 +343,13 @@
str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
mov rFP, r1 @ fp = newFp
str rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
bx lr @ return to the callee-chaining cell
@@ -436,9 +457,24 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rGLUE
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
blx r8 @ off to the native code
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ pop saved r2/r6 values into r0/r1
+ @ r0=JNIMethod, r1=rGlue
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ native return; r9=self, r10=newSaveArea
@ equivalent to dvmPopJniLocals
ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
@@ -1458,9 +1494,22 @@
mov r2, r0 @ r2<- methodToCall
mov r0, r1 @ r0<- newFP
add r1, rGLUE, #offGlue_retval @ r1<- &retval
+#if defined(WITH_INLINE_PROFILING)
+ @ r2: methodToCall, r6: rGLUE
+ stmfd sp!, {r2,r6}
+ stmfd sp!, {r0-r3}
+ mov r0, r2
+ mov r1, r6
+ LDR_PC_LR ".LdvmFastMethodTraceEnter"
+ ldmfd sp!, {r0-r3}
+#endif
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1}
+ LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+#endif
@ Refresh Jit's on/off status
ldr r3, [rGLUE, #offGlue_ppJitProfTable]
@@ -1532,6 +1581,14 @@
.LdvmSelfVerificationMemOpDecode:
.word dvmSelfVerificationMemOpDecode
#endif
+#if defined(WITH_INLINE_PROFILING)
+.LdvmFastMethodTraceEnter:
+ .word dvmFastMethodTraceEnter
+.LdvmFastNativeMethodTraceExit:
+ .word dvmFastNativeMethodTraceExit
+.LdvmFastJavaMethodTraceExit:
+ .word dvmFastJavaMethodTraceExit
+#endif
.L__aeabi_cdcmple:
.word __aeabi_cdcmple
.L__aeabi_cfcmple:
diff --git a/vm/interp/InterpDefs.h b/vm/interp/InterpDefs.h
index a01ee68..ec1175f 100644
--- a/vm/interp/InterpDefs.h
+++ b/vm/interp/InterpDefs.h
@@ -233,7 +233,11 @@
*/
static inline bool dvmDebuggerOrProfilerActive(void)
{
- return gDvm.debuggerActive || gDvm.activeProfilers != 0;
+ bool result = gDvm.debuggerActive;
+#if !defined(WITH_INLINE_PROFILING)
+ result = result || (gDvm.activeProfilers != 0);
+#endif
+ return result;
}
#if defined(WITH_JIT)
@@ -243,9 +247,11 @@
*/
static inline bool dvmJitDebuggerOrProfilerActive()
{
- return gDvmJit.pProfTable != NULL
- || gDvm.activeProfilers != 0
- || gDvm.debuggerActive;
+ bool result = (gDvmJit.pProfTable != NULL) || gDvm.debuggerActive;
+#if !defined(WITH_INLINE_PROFILING)
+ result = result || (gDvm.activeProfilers != 0);
+#endif
+ return result;
}
#endif
diff --git a/vm/mterp/Mterp.c b/vm/mterp/Mterp.c
index dbf5003..4a3fc34 100644
--- a/vm/mterp/Mterp.c
+++ b/vm/mterp/Mterp.c
@@ -82,6 +82,17 @@
glue->ppJitProfTable = &gDvmJit.pProfTable;
glue->jitThreshold = gDvmJit.threshold;
#endif
+#if defined(WITH_INLINE_PROFILING)
+ /*
+ * If WITH_INLINE_PROFILING is defined, we won't switch to the debug
+ * interpreter when a new method is entered. So we need to register the
+ * METHOD_ENTER action here.
+ */
+ if (glue->debugIsMethodEntry) {
+ glue->debugIsMethodEntry = false;
+ TRACE_METHOD_ENTER(self, glue->method);
+ }
+#endif
if (gDvm.jdwpConfigured) {
glue->pDebuggerActive = &gDvm.debuggerActive;
} else {
diff --git a/vm/mterp/armv5te/OP_RETURN_VOID_BARRIER.S b/vm/mterp/armv5te/OP_RETURN_VOID_BARRIER.S
new file mode 100644
index 0000000..3358941
--- /dev/null
+++ b/vm/mterp/armv5te/OP_RETURN_VOID_BARRIER.S
@@ -0,0 +1,3 @@
+%verify "executed"
+ SMP_DMB @ TODO: want "DMB ST"
+ b common_returnFromMethod
diff --git a/vm/mterp/armv5te/OP_UNUSED_F1.S b/vm/mterp/armv5te/OP_UNUSED_F1.S
deleted file mode 100644
index faa7246..0000000
--- a/vm/mterp/armv5te/OP_UNUSED_F1.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "armv5te/unused.S"
diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S
index 2ba5357..41bcb24 100644
--- a/vm/mterp/armv5te/footer.S
+++ b/vm/mterp/armv5te/footer.S
@@ -446,8 +446,18 @@
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
ldr r2, [r2] @ r2<- activeProfilers (int)
- orrne ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ orrnes ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ /*
+ * Don't switch the interpreter in the libdvm_traceview build even if the
+ * profiler is active.
+ * The code here opts for less intrusion rather than performance.
+ * That is, *pActiveProfilers is still loaded into r2 even though it is not
+ * used when WITH_INLINE_PROFILING is defined.
+ */
+#if !defined(WITH_INLINE_PROFILING)
orrs ip, ip, r2 @ ip<- suspend|debugger|profiler; set Z
+#endif
+
bxeq lr @ all zero, return
@@ -484,16 +494,21 @@
* Reload the debugger/profiler enable flags. We're checking to see
* if either of these got set while we were suspended.
*
- * We can't really avoid the #ifdefs here, because the fields don't
- * exist when the feature is disabled.
+ * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
+ * is enabled or not as the profiling will be done inline.
*/
ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
+
+#if !defined(WITH_INLINE_PROFILING)
ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers
ldr r2, [r2] @ r2<- activeProfilers (int)
-
orrs r1, r1, r2
+#else
+ cmp r1, #0 @ only consult the debuggerActive flag
+#endif
+
beq 2f
1: @ debugger/profiler enabled, bail out; glue->entryPoint was set above
@@ -620,6 +635,13 @@
mov r9, #0
str r9, [r10, #offStackSaveArea_returnAddr]
#endif
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ bl dvmFastMethodTraceEnter
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
str r0, [r10, #offStackSaveArea_method]
tst r3, #ACC_NATIVE
bne .LinvokeNative
@@ -689,10 +711,21 @@
.Lskip:
#endif
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=JNIMethod, r6=rGLUE
+ stmfd sp!, {r2,r6}
+#endif
+
@mov lr, pc @ set return addr
@ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ @ r0=JNIMethod, r1=rGLUE
+ ldmfd sp!, {r0-r1}
+ bl dvmFastNativeMethodTraceExit
+#endif
+
#if defined(WITH_JIT)
ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif
@@ -765,6 +798,13 @@
mov r9, #0
bl common_periodicChecks
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r6
+ @ r0=rGlue
+ bl dvmFastJavaMethodTraceExit
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
diff --git a/vm/mterp/c/OP_RETURN_VOID_BARRIER.c b/vm/mterp/c/OP_RETURN_VOID_BARRIER.c
new file mode 100644
index 0000000..2c031eb
--- /dev/null
+++ b/vm/mterp/c/OP_RETURN_VOID_BARRIER.c
@@ -0,0 +1,8 @@
+HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
+ ILOGV("|return-void");
+#ifndef NDEBUG
+ retval.j = 0xababababULL; /* placate valgrind */
+#endif
+ ANDROID_MEMBAR_FULL(); /* TODO: use a store/store barrier */
+ GOTO_returnFromMethod();
+OP_END
diff --git a/vm/mterp/c/OP_UNUSED_F1.c b/vm/mterp/c/OP_UNUSED_F1.c
deleted file mode 100644
index af26195..0000000
--- a/vm/mterp/c/OP_UNUSED_F1.c
+++ /dev/null
@@ -1,2 +0,0 @@
-HANDLE_OPCODE(OP_UNUSED_F1)
-OP_END
diff --git a/vm/mterp/config-x86 b/vm/mterp/config-x86
index 627e06c..f137198 100644
--- a/vm/mterp/config-x86
+++ b/vm/mterp/config-x86
@@ -39,6 +39,7 @@
op OP_IPUT_WIDE_VOLATILE c
op OP_SGET_WIDE_VOLATILE c
op OP_SPUT_WIDE_VOLATILE c
+ op OP_RETURN_VOID_BARRIER c
op-end
# arch-specific entry point to interpreter
diff --git a/vm/mterp/config-x86-atom b/vm/mterp/config-x86-atom
index ca880fd..b46663d 100644
--- a/vm/mterp/config-x86-atom
+++ b/vm/mterp/config-x86-atom
@@ -297,6 +297,7 @@
op OP_IPUT_WIDE_VOLATILE c
op OP_SGET_WIDE_VOLATILE c
op OP_SPUT_WIDE_VOLATILE c
+op OP_RETURN_VOID_BARRIER c
op-end
# arch-specific entry point to interpreter
diff --git a/vm/mterp/out/InterpAsm-armv4t.S b/vm/mterp/out/InterpAsm-armv4t.S
index 3593a6e..e1218d1 100644
--- a/vm/mterp/out/InterpAsm-armv4t.S
+++ b/vm/mterp/out/InterpAsm-armv4t.S
@@ -7772,11 +7772,10 @@
/* ------------------------------ */
.balign 64
-.L_OP_UNUSED_F1: /* 0xf1 */
-/* File: armv5te/OP_UNUSED_F1.S */
-/* File: armv5te/unused.S */
- bl common_abort
-
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/OP_RETURN_VOID_BARRIER.S */
+ SMP_DMB @ TODO: want "DMB ST"
+ b common_returnFromMethod
/* ------------------------------ */
.balign 64
@@ -10294,8 +10293,18 @@
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
ldr r2, [r2] @ r2<- activeProfilers (int)
- orrne ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ orrnes ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ /*
+ * Don't switch the interpreter in the libdvm_traceview build even if the
+ * profiler is active.
+ * The code here opts for less intrusion rather than performance.
+ * That is, *pActiveProfilers is still loaded into r2 even though it is not
+ * used when WITH_INLINE_PROFILING is defined.
+ */
+#if !defined(WITH_INLINE_PROFILING)
orrs ip, ip, r2 @ ip<- suspend|debugger|profiler; set Z
+#endif
+
bxeq lr @ all zero, return
@@ -10332,16 +10341,21 @@
* Reload the debugger/profiler enable flags. We're checking to see
* if either of these got set while we were suspended.
*
- * We can't really avoid the #ifdefs here, because the fields don't
- * exist when the feature is disabled.
+ * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
+ * is enabled or not as the profiling will be done inline.
*/
ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
+
+#if !defined(WITH_INLINE_PROFILING)
ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers
ldr r2, [r2] @ r2<- activeProfilers (int)
-
orrs r1, r1, r2
+#else
+ cmp r1, #0 @ only consult the debuggerActive flag
+#endif
+
beq 2f
1: @ debugger/profiler enabled, bail out; glue->entryPoint was set above
@@ -10468,6 +10482,13 @@
mov r9, #0
str r9, [r10, #offStackSaveArea_returnAddr]
#endif
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ bl dvmFastMethodTraceEnter
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
str r0, [r10, #offStackSaveArea_method]
tst r3, #ACC_NATIVE
bne .LinvokeNative
@@ -10537,10 +10558,21 @@
.Lskip:
#endif
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=JNIMethod, r6=rGLUE
+ stmfd sp!, {r2,r6}
+#endif
+
@mov lr, pc @ set return addr
@ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ @ r0=JNIMethod, r1=rGLUE
+ ldmfd sp!, {r0-r1}
+ bl dvmFastNativeMethodTraceExit
+#endif
+
#if defined(WITH_JIT)
ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif
@@ -10613,6 +10645,13 @@
mov r9, #0
bl common_periodicChecks
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r6
+ @ r0=rGlue
+ bl dvmFastJavaMethodTraceExit
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index 1ae9636..0d9cb26 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -7452,11 +7452,10 @@
/* ------------------------------ */
.balign 64
-.L_OP_UNUSED_F1: /* 0xf1 */
-/* File: armv5te/OP_UNUSED_F1.S */
-/* File: armv5te/unused.S */
- bl common_abort
-
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/OP_RETURN_VOID_BARRIER.S */
+ SMP_DMB @ TODO: want "DMB ST"
+ b common_returnFromMethod
/* ------------------------------ */
.balign 64
@@ -9832,8 +9831,18 @@
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
ldr r2, [r2] @ r2<- activeProfilers (int)
- orrne ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ orrnes ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ /*
+ * Don't switch the interpreter in the libdvm_traceview build even if the
+ * profiler is active.
+ * The code here opts for less intrusion rather than performance.
+ * That is, *pActiveProfilers is still loaded into r2 even though it is not
+ * used when WITH_INLINE_PROFILING is defined.
+ */
+#if !defined(WITH_INLINE_PROFILING)
orrs ip, ip, r2 @ ip<- suspend|debugger|profiler; set Z
+#endif
+
bxeq lr @ all zero, return
@@ -9870,16 +9879,21 @@
* Reload the debugger/profiler enable flags. We're checking to see
* if either of these got set while we were suspended.
*
- * We can't really avoid the #ifdefs here, because the fields don't
- * exist when the feature is disabled.
+ * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
+ * is enabled or not as the profiling will be done inline.
*/
ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
+
+#if !defined(WITH_INLINE_PROFILING)
ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers
ldr r2, [r2] @ r2<- activeProfilers (int)
-
orrs r1, r1, r2
+#else
+ cmp r1, #0 @ only consult the debuggerActive flag
+#endif
+
beq 2f
1: @ debugger/profiler enabled, bail out; glue->entryPoint was set above
@@ -10006,6 +10020,13 @@
mov r9, #0
str r9, [r10, #offStackSaveArea_returnAddr]
#endif
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ bl dvmFastMethodTraceEnter
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
str r0, [r10, #offStackSaveArea_method]
tst r3, #ACC_NATIVE
bne .LinvokeNative
@@ -10075,10 +10096,21 @@
.Lskip:
#endif
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=JNIMethod, r6=rGLUE
+ stmfd sp!, {r2,r6}
+#endif
+
@mov lr, pc @ set return addr
@ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ @ r0=JNIMethod, r1=rGLUE
+ ldmfd sp!, {r0-r1}
+ bl dvmFastNativeMethodTraceExit
+#endif
+
#if defined(WITH_JIT)
ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif
@@ -10151,6 +10183,13 @@
mov r9, #0
bl common_periodicChecks
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r6
+ @ r0=rGlue
+ bl dvmFastJavaMethodTraceExit
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index 9b083b2..1b6a104 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -7774,11 +7774,10 @@
/* ------------------------------ */
.balign 64
-.L_OP_UNUSED_F1: /* 0xf1 */
-/* File: armv5te/OP_UNUSED_F1.S */
-/* File: armv5te/unused.S */
- bl common_abort
-
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/OP_RETURN_VOID_BARRIER.S */
+ SMP_DMB @ TODO: want "DMB ST"
+ b common_returnFromMethod
/* ------------------------------ */
.balign 64
@@ -10290,8 +10289,18 @@
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
ldr r2, [r2] @ r2<- activeProfilers (int)
- orrne ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ orrnes ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ /*
+ * Don't switch the interpreter in the libdvm_traceview build even if the
+ * profiler is active.
+ * The code here opts for less intrusion rather than performance.
+ * That is, *pActiveProfilers is still loaded into r2 even though it is not
+ * used when WITH_INLINE_PROFILING is defined.
+ */
+#if !defined(WITH_INLINE_PROFILING)
orrs ip, ip, r2 @ ip<- suspend|debugger|profiler; set Z
+#endif
+
bxeq lr @ all zero, return
@@ -10328,16 +10337,21 @@
* Reload the debugger/profiler enable flags. We're checking to see
* if either of these got set while we were suspended.
*
- * We can't really avoid the #ifdefs here, because the fields don't
- * exist when the feature is disabled.
+ * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
+ * is enabled or not as the profiling will be done inline.
*/
ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
+
+#if !defined(WITH_INLINE_PROFILING)
ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers
ldr r2, [r2] @ r2<- activeProfilers (int)
-
orrs r1, r1, r2
+#else
+ cmp r1, #0 @ only consult the debuggerActive flag
+#endif
+
beq 2f
1: @ debugger/profiler enabled, bail out; glue->entryPoint was set above
@@ -10464,6 +10478,13 @@
mov r9, #0
str r9, [r10, #offStackSaveArea_returnAddr]
#endif
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ bl dvmFastMethodTraceEnter
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
str r0, [r10, #offStackSaveArea_method]
tst r3, #ACC_NATIVE
bne .LinvokeNative
@@ -10533,10 +10554,21 @@
.Lskip:
#endif
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=JNIMethod, r6=rGLUE
+ stmfd sp!, {r2,r6}
+#endif
+
@mov lr, pc @ set return addr
@ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ @ r0=JNIMethod, r1=rGLUE
+ ldmfd sp!, {r0-r1}
+ bl dvmFastNativeMethodTraceExit
+#endif
+
#if defined(WITH_JIT)
ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif
@@ -10609,6 +10641,13 @@
mov r9, #0
bl common_periodicChecks
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r6
+ @ r0=rGlue
+ bl dvmFastJavaMethodTraceExit
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index 173b478..0359279 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -7406,11 +7406,10 @@
/* ------------------------------ */
.balign 64
-.L_OP_UNUSED_F1: /* 0xf1 */
-/* File: armv5te/OP_UNUSED_F1.S */
-/* File: armv5te/unused.S */
- bl common_abort
-
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/OP_RETURN_VOID_BARRIER.S */
+ SMP_DMB @ TODO: want "DMB ST"
+ b common_returnFromMethod
/* ------------------------------ */
.balign 64
@@ -9766,8 +9765,18 @@
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
ldr r2, [r2] @ r2<- activeProfilers (int)
- orrne ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ orrnes ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ /*
+ * Don't switch the interpreter in the libdvm_traceview build even if the
+ * profiler is active.
+ * The code here opts for less intrusion rather than performance.
+ * That is, *pActiveProfilers is still loaded into r2 even though it is not
+ * used when WITH_INLINE_PROFILING is defined.
+ */
+#if !defined(WITH_INLINE_PROFILING)
orrs ip, ip, r2 @ ip<- suspend|debugger|profiler; set Z
+#endif
+
bxeq lr @ all zero, return
@@ -9804,16 +9813,21 @@
* Reload the debugger/profiler enable flags. We're checking to see
* if either of these got set while we were suspended.
*
- * We can't really avoid the #ifdefs here, because the fields don't
- * exist when the feature is disabled.
+ * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
+ * is enabled or not as the profiling will be done inline.
*/
ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
+
+#if !defined(WITH_INLINE_PROFILING)
ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers
ldr r2, [r2] @ r2<- activeProfilers (int)
-
orrs r1, r1, r2
+#else
+ cmp r1, #0 @ only consult the debuggerActive flag
+#endif
+
beq 2f
1: @ debugger/profiler enabled, bail out; glue->entryPoint was set above
@@ -9940,6 +9954,13 @@
mov r9, #0
str r9, [r10, #offStackSaveArea_returnAddr]
#endif
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ bl dvmFastMethodTraceEnter
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
str r0, [r10, #offStackSaveArea_method]
tst r3, #ACC_NATIVE
bne .LinvokeNative
@@ -10009,10 +10030,21 @@
.Lskip:
#endif
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=JNIMethod, r6=rGLUE
+ stmfd sp!, {r2,r6}
+#endif
+
@mov lr, pc @ set return addr
@ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ @ r0=JNIMethod, r1=rGLUE
+ ldmfd sp!, {r0-r1}
+ bl dvmFastNativeMethodTraceExit
+#endif
+
#if defined(WITH_JIT)
ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif
@@ -10085,6 +10117,13 @@
mov r9, #0
bl common_periodicChecks
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r6
+ @ r0=rGlue
+ bl dvmFastJavaMethodTraceExit
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index 15e48a4..744e29b 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -7406,11 +7406,10 @@
/* ------------------------------ */
.balign 64
-.L_OP_UNUSED_F1: /* 0xf1 */
-/* File: armv5te/OP_UNUSED_F1.S */
-/* File: armv5te/unused.S */
- bl common_abort
-
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/OP_RETURN_VOID_BARRIER.S */
+ SMP_DMB @ TODO: want "DMB ST"
+ b common_returnFromMethod
/* ------------------------------ */
.balign 64
@@ -9766,8 +9765,18 @@
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
ldr r2, [r2] @ r2<- activeProfilers (int)
- orrne ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ orrnes ip, ip, r1 @ ip<- suspendCount | debuggerActive
+ /*
+ * Don't switch the interpreter in the libdvm_traceview build even if the
+ * profiler is active.
+ * The code here opts for less intrusion rather than performance.
+ * That is, *pActiveProfilers is still loaded into r2 even though it is not
+ * used when WITH_INLINE_PROFILING is defined.
+ */
+#if !defined(WITH_INLINE_PROFILING)
orrs ip, ip, r2 @ ip<- suspend|debugger|profiler; set Z
+#endif
+
bxeq lr @ all zero, return
@@ -9804,16 +9813,21 @@
* Reload the debugger/profiler enable flags. We're checking to see
* if either of these got set while we were suspended.
*
- * We can't really avoid the #ifdefs here, because the fields don't
- * exist when the feature is disabled.
+ * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
+ * is enabled or not as the profiling will be done inline.
*/
ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive
cmp r1, #0 @ debugger enabled?
ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean)
+
+#if !defined(WITH_INLINE_PROFILING)
ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers
ldr r2, [r2] @ r2<- activeProfilers (int)
-
orrs r1, r1, r2
+#else
+ cmp r1, #0 @ only consult the debuggerActive flag
+#endif
+
beq 2f
1: @ debugger/profiler enabled, bail out; glue->entryPoint was set above
@@ -9940,6 +9954,13 @@
mov r9, #0
str r9, [r10, #offStackSaveArea_returnAddr]
#endif
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rGlue
+ bl dvmFastMethodTraceEnter
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
str r0, [r10, #offStackSaveArea_method]
tst r3, #ACC_NATIVE
bne .LinvokeNative
@@ -10009,10 +10030,21 @@
.Lskip:
#endif
+#if defined(WITH_INLINE_PROFILING)
+ @ r2=JNIMethod, r6=rGLUE
+ stmfd sp!, {r2,r6}
+#endif
+
@mov lr, pc @ set return addr
@ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+#if defined(WITH_INLINE_PROFILING)
+ @ r0=JNIMethod, r1=rGLUE
+ ldmfd sp!, {r0-r1}
+ bl dvmFastNativeMethodTraceExit
+#endif
+
#if defined(WITH_JIT)
ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif
@@ -10085,6 +10117,13 @@
mov r9, #0
bl common_periodicChecks
+#if defined(WITH_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r6
+ @ r0=rGlue
+ bl dvmFastJavaMethodTraceExit
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
diff --git a/vm/mterp/out/InterpAsm-x86-atom.S b/vm/mterp/out/InterpAsm-x86-atom.S
index 4d9942d..e3ea070 100644
--- a/vm/mterp/out/InterpAsm-x86-atom.S
+++ b/vm/mterp/out/InterpAsm-x86-atom.S
@@ -14701,8 +14701,7 @@
FINISH 3
/* ------------------------------ */
.balign 64
-.L_OP_UNUSED_F1: /* 0xf1 */
-/* File: x86-atom/OP_UNUSED_F1.S */
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
/* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -14719,42 +14718,15 @@
*/
/*
- * File: OP_UNUSED_F1.S
+ * File: stub.S
*/
-/* File: x86-atom/unused.S */
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: unused.S
- *
- * Code: Common code for unused bytecodes. Uses no subtitutions.
- *
- * For: all unused bytecodes
- *
- * Description: aborts if executed.
- *
- * Format: ØØ|op (10x)
- *
- * Syntax: op
- */
-
- call common_abort
-
-
+ SAVE_PC_FP_TO_GLUE %edx # save program counter and frame pointer
+ pushl rGLUE # push parameter glue
+ call dvmMterp_OP_RETURN_VOID_BARRIER # call c-based implementation
+ lea 4(%esp), %esp
+ LOAD_PC_FP_FROM_GLUE # restore program counter and frame pointer
+ FINISH_A # jump to next instruction
/* ------------------------------ */
.balign 64
.L_OP_IGET_QUICK: /* 0xf2 */
diff --git a/vm/mterp/out/InterpAsm-x86.S b/vm/mterp/out/InterpAsm-x86.S
index 2f79476..82fd5fb 100644
--- a/vm/mterp/out/InterpAsm-x86.S
+++ b/vm/mterp/out/InterpAsm-x86.S
@@ -5955,12 +5955,18 @@
/* ------------------------------ */
.balign 64
-.L_OP_UNUSED_F1: /* 0xf1 */
-/* File: x86/OP_UNUSED_F1.S */
-/* File: x86/unused.S */
- jmp common_abort
-
-
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+ /* (stub) */
+ GET_GLUE(%ecx)
+ SAVE_PC_TO_GLUE(%ecx) # only need to export these two
+ SAVE_FP_TO_GLUE(%ecx) # only need to export these two
+ movl %ecx,OUT_ARG0(%esp) # glue is first arg to function
+ call dvmMterp_OP_RETURN_VOID_BARRIER # do the real work
+ GET_GLUE(%ecx)
+ LOAD_PC_FROM_GLUE(%ecx) # retrieve updated values
+ LOAD_FP_FROM_GLUE(%ecx) # retrieve updated values
+ FETCH_INST()
+ GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IGET_QUICK: /* 0xf2 */
diff --git a/vm/mterp/out/InterpC-allstubs.c b/vm/mterp/out/InterpC-allstubs.c
index ce6192c..419c265 100644
--- a/vm/mterp/out/InterpC-allstubs.c
+++ b/vm/mterp/out/InterpC-allstubs.c
@@ -2973,8 +2973,14 @@
#endif
OP_END
-/* File: c/OP_UNUSED_F1.c */
-HANDLE_OPCODE(OP_UNUSED_F1)
+/* File: c/OP_RETURN_VOID_BARRIER.c */
+HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
+ ILOGV("|return-void");
+#ifndef NDEBUG
+ retval.j = 0xababababULL; /* placate valgrind */
+#endif
+ ANDROID_MEMBAR_FULL(); /* TODO: use a store/store barrier */
+ GOTO_returnFromMethod();
OP_END
/* File: c/OP_IGET_QUICK.c */
diff --git a/vm/mterp/out/InterpC-portdbg.c b/vm/mterp/out/InterpC-portdbg.c
index e909db4..39bb057 100644
--- a/vm/mterp/out/InterpC-portdbg.c
+++ b/vm/mterp/out/InterpC-portdbg.c
@@ -3334,8 +3334,14 @@
#endif
OP_END
-/* File: c/OP_UNUSED_F1.c */
-HANDLE_OPCODE(OP_UNUSED_F1)
+/* File: c/OP_RETURN_VOID_BARRIER.c */
+HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
+ ILOGV("|return-void");
+#ifndef NDEBUG
+ retval.j = 0xababababULL; /* placate valgrind */
+#endif
+ ANDROID_MEMBAR_FULL(); /* TODO: use a store/store barrier */
+ GOTO_returnFromMethod();
OP_END
/* File: c/OP_IGET_QUICK.c */
diff --git a/vm/mterp/out/InterpC-portstd.c b/vm/mterp/out/InterpC-portstd.c
index f82c97d..7a9ef41 100644
--- a/vm/mterp/out/InterpC-portstd.c
+++ b/vm/mterp/out/InterpC-portstd.c
@@ -3084,8 +3084,14 @@
#endif
OP_END
-/* File: c/OP_UNUSED_F1.c */
-HANDLE_OPCODE(OP_UNUSED_F1)
+/* File: c/OP_RETURN_VOID_BARRIER.c */
+HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
+ ILOGV("|return-void");
+#ifndef NDEBUG
+ retval.j = 0xababababULL; /* placate valgrind */
+#endif
+ ANDROID_MEMBAR_FULL(); /* TODO: use a store/store barrier */
+ GOTO_returnFromMethod();
OP_END
/* File: c/OP_IGET_QUICK.c */
diff --git a/vm/mterp/out/InterpC-x86-atom.c b/vm/mterp/out/InterpC-x86-atom.c
index ec431df..5165bc4 100644
--- a/vm/mterp/out/InterpC-x86-atom.c
+++ b/vm/mterp/out/InterpC-x86-atom.c
@@ -1231,6 +1231,16 @@
HANDLE_SPUT_X(OP_SPUT_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
OP_END
+/* File: c/OP_RETURN_VOID_BARRIER.c */
+HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
+ ILOGV("|return-void");
+#ifndef NDEBUG
+ retval.j = 0xababababULL; /* placate valgrind */
+#endif
+ ANDROID_MEMBAR_FULL(); /* TODO: use a store/store barrier */
+ GOTO_returnFromMethod();
+OP_END
+
/* File: c/OP_IPUT_OBJECT_VOLATILE.c */
HANDLE_IPUT_X(OP_IPUT_OBJECT_VOLATILE, "-object-volatile", ObjectVolatile, _AS_OBJECT)
OP_END
diff --git a/vm/mterp/out/InterpC-x86.c b/vm/mterp/out/InterpC-x86.c
index 4882184..4e3effc 100644
--- a/vm/mterp/out/InterpC-x86.c
+++ b/vm/mterp/out/InterpC-x86.c
@@ -1256,6 +1256,16 @@
FINISH(3);
OP_END
+/* File: c/OP_RETURN_VOID_BARRIER.c */
+HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
+ ILOGV("|return-void");
+#ifndef NDEBUG
+ retval.j = 0xababababULL; /* placate valgrind */
+#endif
+ ANDROID_MEMBAR_FULL(); /* TODO: use a store/store barrier */
+ GOTO_returnFromMethod();
+OP_END
+
/* File: c/gotoTargets.c */
/*
* C footer. This has some common code shared by the various targets.
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E3.S b/vm/mterp/x86-atom/OP_UNUSED_E3.S
deleted file mode 100644
index 2921274..0000000
--- a/vm/mterp/x86-atom/OP_UNUSED_E3.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: OP_UNUSED_E3.S
- */
-
-%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E4.S b/vm/mterp/x86-atom/OP_UNUSED_E4.S
deleted file mode 100644
index 69eb419..0000000
--- a/vm/mterp/x86-atom/OP_UNUSED_E4.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: OP_UNUSED_E4.S
- */
-
-%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E5.S b/vm/mterp/x86-atom/OP_UNUSED_E5.S
deleted file mode 100644
index 2172369..0000000
--- a/vm/mterp/x86-atom/OP_UNUSED_E5.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: OP_UNUSED_E5.S
- */
-
-%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E6.S b/vm/mterp/x86-atom/OP_UNUSED_E6.S
deleted file mode 100644
index 1464cbd..0000000
--- a/vm/mterp/x86-atom/OP_UNUSED_E6.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: OP_UNUSED_E6.S
- */
-
-%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_E7.S b/vm/mterp/x86-atom/OP_UNUSED_E7.S
deleted file mode 100644
index 67029e4..0000000
--- a/vm/mterp/x86-atom/OP_UNUSED_E7.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: OP_UNUSED_E7.S
- */
-
-%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_F1.S b/vm/mterp/x86-atom/OP_UNUSED_F1.S
deleted file mode 100644
index b6c264a..0000000
--- a/vm/mterp/x86-atom/OP_UNUSED_F1.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: OP_UNUSED_F1.S
- */
-
-%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_FC.S b/vm/mterp/x86-atom/OP_UNUSED_FC.S
deleted file mode 100644
index 24b104c..0000000
--- a/vm/mterp/x86-atom/OP_UNUSED_FC.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: OP_UNUSED_FC.S
- */
-
-%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_FD.S b/vm/mterp/x86-atom/OP_UNUSED_FD.S
deleted file mode 100644
index b3cc6fb..0000000
--- a/vm/mterp/x86-atom/OP_UNUSED_FD.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: OP_UNUSED_FD.S
- */
-
-%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/OP_UNUSED_FE.S b/vm/mterp/x86-atom/OP_UNUSED_FE.S
deleted file mode 100644
index 435624f..0000000
--- a/vm/mterp/x86-atom/OP_UNUSED_FE.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /* Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- /*
- * File: OP_UNUSED_FE.S
- */
-
-%include "x86-atom/unused.S"
diff --git a/vm/mterp/x86-atom/TODO.txt b/vm/mterp/x86-atom/TODO.txt
index c2e9fc4..91a9522 100644
--- a/vm/mterp/x86-atom/TODO.txt
+++ b/vm/mterp/x86-atom/TODO.txt
@@ -15,3 +15,4 @@
(lo) Implement OP_BREAKPOINT
(lo) Implement OP_EXECUTE_INLINE_RANGE
(lo) Implement OP_*_VOLATILE (12 instructions)
+(lo) Implement OP_RETURN_VOID_BARRIER
diff --git a/vm/mterp/x86/OP_UNUSED_E3.S b/vm/mterp/x86/OP_UNUSED_E3.S
deleted file mode 100644
index 31d98c1..0000000
--- a/vm/mterp/x86/OP_UNUSED_E3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E4.S b/vm/mterp/x86/OP_UNUSED_E4.S
deleted file mode 100644
index 31d98c1..0000000
--- a/vm/mterp/x86/OP_UNUSED_E4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E5.S b/vm/mterp/x86/OP_UNUSED_E5.S
deleted file mode 100644
index 31d98c1..0000000
--- a/vm/mterp/x86/OP_UNUSED_E5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E6.S b/vm/mterp/x86/OP_UNUSED_E6.S
deleted file mode 100644
index 31d98c1..0000000
--- a/vm/mterp/x86/OP_UNUSED_E6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_E7.S b/vm/mterp/x86/OP_UNUSED_E7.S
deleted file mode 100644
index 31d98c1..0000000
--- a/vm/mterp/x86/OP_UNUSED_E7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_F1.S b/vm/mterp/x86/OP_UNUSED_F1.S
deleted file mode 100644
index 31d98c1..0000000
--- a/vm/mterp/x86/OP_UNUSED_F1.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_FC.S b/vm/mterp/x86/OP_UNUSED_FC.S
deleted file mode 100644
index 31d98c1..0000000
--- a/vm/mterp/x86/OP_UNUSED_FC.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_FD.S b/vm/mterp/x86/OP_UNUSED_FD.S
deleted file mode 100644
index 31d98c1..0000000
--- a/vm/mterp/x86/OP_UNUSED_FD.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/vm/mterp/x86/OP_UNUSED_FE.S b/vm/mterp/x86/OP_UNUSED_FE.S
deleted file mode 100644
index 31d98c1..0000000
--- a/vm/mterp/x86/OP_UNUSED_FE.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/vm/native/java_lang_reflect_Field.c b/vm/native/java_lang_reflect_Field.c
index cb9f2bf..2c4e8a2 100644
--- a/vm/native/java_lang_reflect_Field.c
+++ b/vm/native/java_lang_reflect_Field.c
@@ -22,8 +22,7 @@
/*
- * Get the address of a field from an object. This can be used with "get"
- * or "set".
+ * Validate access to a field. Returns a pointer to the Field struct.
*
* "declaringClass" is the class in which the field was declared. For an
* instance field, "obj" is the object that holds the field data; for a
@@ -35,12 +34,12 @@
* On failure, throws an exception and returns NULL.
*
* The documentation lists exceptional conditions and the exceptions that
- * should be thrown, but doesn't say which exception previals when two or
+ * should be thrown, but doesn't say which exception prevails when two or
* more exceptional conditions exist at the same time. For example,
* attempting to set a protected field from an unrelated class causes an
* IllegalAccessException, while passing in a data type that doesn't match
* the field causes an IllegalArgumentException. If code does both at the
- * same time, we have to choose one or othe other.
+ * same time, we have to choose one or the other.
*
* The expected order is:
* (1) Check for illegal access. Throw IllegalAccessException.
@@ -56,11 +55,10 @@
* widening conversion until we're actually extracting the value from the
* object (which won't work well if it's a null reference).
*/
-static JValue* getFieldDataAddr(Object* obj, ClassObject* declaringClass,
+static Field* validateFieldAccess(Object* obj, ClassObject* declaringClass,
int slot, bool isSetOperation, bool noAccessCheck)
{
Field* field;
- JValue* result;
field = dvmSlotToField(declaringClass, slot);
assert(field != NULL);
@@ -118,7 +116,6 @@
}
}
- result = dvmStaticFieldPtr((StaticField*) field);
} else {
/*
* Verify object is of correct type (i.e. it actually has the
@@ -128,24 +125,324 @@
if (!dvmVerifyObjectInClass(obj, declaringClass)) {
assert(dvmCheckException(dvmThreadSelf()));
if (obj != NULL) {
- LOGD("Wrong type of object for field lookup: %s %s\n",
+ LOGD("Wrong object type for field access: %s is not a %s\n",
obj->clazz->descriptor, declaringClass->descriptor);
}
return NULL;
}
- result = dvmFieldPtr(obj, ((InstField*) field)->byteOffset);
}
- return result;
+ return field;
}
/*
+ * Extracts the value of a static field. Provides appropriate barriers
+ * for volatile fields.
+ *
+ * Sub-32-bit values are sign- or zero-extended to fill out 32 bits.
+ */
+static void getStaticFieldValue(const StaticField* sfield, JValue* value)
+{
+ if (!dvmIsVolatileField(&sfield->field)) {
+ /* just copy the whole thing */
+ *value = sfield->value;
+ } else {
+ /* need memory barriers and/or 64-bit atomic ops */
+ switch (sfield->field.signature[0]) {
+ case 'Z':
+ value->i = dvmGetStaticFieldBooleanVolatile(sfield);
+ break;
+ case 'B':
+ value->i = dvmGetStaticFieldByteVolatile(sfield);
+ break;
+ case 'S':
+ value->i = dvmGetStaticFieldShortVolatile(sfield);
+ break;
+ case 'C':
+ value->i = dvmGetStaticFieldCharVolatile(sfield);
+ break;
+ case 'I':
+ value->i = dvmGetStaticFieldIntVolatile(sfield);
+ break;
+ case 'F':
+ value->f = dvmGetStaticFieldFloatVolatile(sfield);
+ break;
+ case 'J':
+ value->j = dvmGetStaticFieldLongVolatile(sfield);
+ break;
+ case 'D':
+ value->d = dvmGetStaticFieldDoubleVolatile(sfield);
+ break;
+ case 'L':
+ case '[':
+ value->l = dvmGetStaticFieldObjectVolatile(sfield);
+ break;
+ default:
+ LOGE("Unhandled field signature '%s'\n", sfield->field.signature);
+ dvmAbort();
+ }
+ }
+}
+
+/*
+ * Extracts the value of an instance field. Provides appropriate barriers
+ * for volatile fields.
+ *
+ * Sub-32-bit values are sign- or zero-extended to fill out 32 bits.
+ */
+static void getInstFieldValue(const InstField* ifield, Object* obj,
+ JValue* value)
+{
+ if (!dvmIsVolatileField(&ifield->field)) {
+ /* use type-specific get; really just 32-bit vs. 64-bit */
+ switch (ifield->field.signature[0]) {
+ case 'Z':
+ value->i = dvmGetFieldBoolean(obj, ifield->byteOffset);
+ break;
+ case 'B':
+ value->i = dvmGetFieldByte(obj, ifield->byteOffset);
+ break;
+ case 'S':
+ value->i = dvmGetFieldShort(obj, ifield->byteOffset);
+ break;
+ case 'C':
+ value->i = dvmGetFieldChar(obj, ifield->byteOffset);
+ break;
+ case 'I':
+ value->i = dvmGetFieldInt(obj, ifield->byteOffset);
+ break;
+ case 'F':
+ value->f = dvmGetFieldFloat(obj, ifield->byteOffset);
+ break;
+ case 'J':
+ value->j = dvmGetFieldLong(obj, ifield->byteOffset);
+ break;
+ case 'D':
+ value->d = dvmGetFieldDouble(obj, ifield->byteOffset);
+ break;
+ case 'L':
+ case '[':
+ value->l = dvmGetFieldObject(obj, ifield->byteOffset);
+ break;
+ default:
+ LOGE("Unhandled field signature '%s'\n", ifield->field.signature);
+ dvmAbort();
+ }
+ } else {
+ /* need memory barriers and/or 64-bit atomic ops */
+ switch (ifield->field.signature[0]) {
+ case 'Z':
+ value->i = dvmGetFieldBooleanVolatile(obj, ifield->byteOffset);
+ break;
+ case 'B':
+ value->i = dvmGetFieldByteVolatile(obj, ifield->byteOffset);
+ break;
+ case 'S':
+ value->i = dvmGetFieldShortVolatile(obj, ifield->byteOffset);
+ break;
+ case 'C':
+ value->i = dvmGetFieldCharVolatile(obj, ifield->byteOffset);
+ break;
+ case 'I':
+ value->i = dvmGetFieldIntVolatile(obj, ifield->byteOffset);
+ break;
+ case 'F':
+ value->f = dvmGetFieldFloatVolatile(obj, ifield->byteOffset);
+ break;
+ case 'J':
+ value->j = dvmGetFieldLongVolatile(obj, ifield->byteOffset);
+ break;
+ case 'D':
+ value->d = dvmGetFieldDoubleVolatile(obj, ifield->byteOffset);
+ break;
+ case 'L':
+ case '[':
+ value->l = dvmGetFieldObjectVolatile(obj, ifield->byteOffset);
+ break;
+ default:
+ LOGE("Unhandled field signature '%s'\n", ifield->field.signature);
+ dvmAbort();
+ }
+ }
+}
+
+/*
+ * Copies the value of the static or instance field into "*value".
+ */
+static void getFieldValue(const Field* field, Object* obj, JValue* value)
+{
+ if (dvmIsStaticField(field)) {
+ return getStaticFieldValue((const StaticField*) field, value);
+ } else {
+ return getInstFieldValue((const InstField*) field, obj, value);
+ }
+}
+
+/*
+ * Sets the value of a static field. Provides appropriate barriers
+ * for volatile fields.
+ */
+static void setStaticFieldValue(StaticField* sfield, const JValue* value)
+{
+ if (!dvmIsVolatileField(&sfield->field)) {
+ switch (sfield->field.signature[0]) {
+ case 'L':
+ case '[':
+ dvmSetStaticFieldObject(sfield, value->l);
+ break;
+ default:
+ /* just copy the whole thing */
+ sfield->value = *value;
+ break;
+ }
+ } else {
+ /* need memory barriers and/or 64-bit atomic ops */
+ switch (sfield->field.signature[0]) {
+ case 'Z':
+ dvmSetStaticFieldBooleanVolatile(sfield, value->z);
+ break;
+ case 'B':
+ dvmSetStaticFieldByteVolatile(sfield, value->b);
+ break;
+ case 'S':
+ dvmSetStaticFieldShortVolatile(sfield, value->s);
+ break;
+ case 'C':
+ dvmSetStaticFieldCharVolatile(sfield, value->c);
+ break;
+ case 'I':
+ dvmSetStaticFieldIntVolatile(sfield, value->i);
+ break;
+ case 'F':
+ dvmSetStaticFieldFloatVolatile(sfield, value->f);
+ break;
+ case 'J':
+ dvmSetStaticFieldLongVolatile(sfield, value->j);
+ break;
+ case 'D':
+ dvmSetStaticFieldDoubleVolatile(sfield, value->d);
+ break;
+ case 'L':
+ case '[':
+ dvmSetStaticFieldObjectVolatile(sfield, value->l);
+ break;
+ default:
+ LOGE("Unhandled field signature '%s'\n", sfield->field.signature);
+ dvmAbort();
+ }
+ }
+}
+
+/*
+ * Sets the value of an instance field. Provides appropriate barriers
+ * for volatile fields.
+ */
+static void setInstFieldValue(InstField* ifield, Object* obj,
+ const JValue* value)
+{
+ if (!dvmIsVolatileField(&ifield->field)) {
+ /* use type-specific set; really just 32-bit vs. 64-bit */
+ switch (ifield->field.signature[0]) {
+ case 'Z':
+ dvmSetFieldBoolean(obj, ifield->byteOffset, value->z);
+ break;
+ case 'B':
+ dvmSetFieldByte(obj, ifield->byteOffset, value->b);
+ break;
+ case 'S':
+ dvmSetFieldShort(obj, ifield->byteOffset, value->s);
+ break;
+ case 'C':
+ dvmSetFieldChar(obj, ifield->byteOffset, value->c);
+ break;
+ case 'I':
+ dvmSetFieldInt(obj, ifield->byteOffset, value->i);
+ break;
+ case 'F':
+ dvmSetFieldFloat(obj, ifield->byteOffset, value->f);
+ break;
+ case 'J':
+ dvmSetFieldLong(obj, ifield->byteOffset, value->j);
+ break;
+ case 'D':
+ dvmSetFieldDouble(obj, ifield->byteOffset, value->d);
+ break;
+ case 'L':
+ case '[':
+ dvmSetFieldObject(obj, ifield->byteOffset, value->l);
+ break;
+ default:
+ LOGE("Unhandled field signature '%s'\n", ifield->field.signature);
+ dvmAbort();
+ }
+#if ANDROID_SMP != 0
+ /*
+ * Special handling for final fields on SMP systems. We need a
+ * store/store barrier here (JMM requirement).
+ */
+ if (dvmIsFinalField(&ifield->field)) {
+ ANDROID_MEMBAR_FULL(); /* TODO: replace full bar with store/store */
+ }
+#endif
+ } else {
+ /* need memory barriers and/or 64-bit atomic ops */
+ switch (ifield->field.signature[0]) {
+ case 'Z':
+ dvmSetFieldBooleanVolatile(obj, ifield->byteOffset, value->z);
+ break;
+ case 'B':
+ dvmSetFieldByteVolatile(obj, ifield->byteOffset, value->b);
+ break;
+ case 'S':
+ dvmSetFieldShortVolatile(obj, ifield->byteOffset, value->s);
+ break;
+ case 'C':
+ dvmSetFieldCharVolatile(obj, ifield->byteOffset, value->c);
+ break;
+ case 'I':
+ dvmSetFieldIntVolatile(obj, ifield->byteOffset, value->i);
+ break;
+ case 'F':
+ dvmSetFieldFloatVolatile(obj, ifield->byteOffset, value->f);
+ break;
+ case 'J':
+ dvmSetFieldLongVolatile(obj, ifield->byteOffset, value->j);
+ break;
+ case 'D':
+ dvmSetFieldDoubleVolatile(obj, ifield->byteOffset, value->d);
+ break;
+ case 'L':
+ case '[':
+ dvmSetFieldObjectVolatile(obj, ifield->byteOffset, value->l);
+ break;
+ default:
+ LOGE("Unhandled field signature '%s'\n", ifield->field.signature);
+ dvmAbort();
+ }
+ }
+}
+
+/*
+ * Copy "*value" into the static or instance field.
+ */
+static void setFieldValue(Field* field, Object* obj, const JValue* value)
+{
+ if (dvmIsStaticField(field)) {
+ return setStaticFieldValue((StaticField*) field, value);
+ } else {
+ return setInstFieldValue((InstField*) field, obj, value);
+ }
+}
+
+
+
+/*
* public int getFieldModifiers(Class declaringClass, int slot)
*/
-static void Dalvik_java_lang_reflect_Field_getFieldModifiers(
- const u4* args, JValue* pResult)
+static void Dalvik_java_lang_reflect_Field_getFieldModifiers(const u4* args,
+ JValue* pResult)
{
- // ignore thisPtr in args[0]
+ /* ignore thisPtr in args[0] */
ClassObject* declaringClass = (ClassObject*) args[1];
int slot = args[2];
Field* field;
@@ -163,32 +460,26 @@
static void Dalvik_java_lang_reflect_Field_getField(const u4* args,
JValue* pResult)
{
- // ignore thisPtr in args[0]
+ /* ignore thisPtr in args[0] */
Object* obj = (Object*) args[1];
ClassObject* declaringClass = (ClassObject*) args[2];
ClassObject* fieldType = (ClassObject*) args[3];
int slot = args[4];
bool noAccessCheck = (args[5] != 0);
+ Field* field;
JValue value;
- const JValue* fieldPtr;
DataObject* result;
//dvmDumpClass(obj->clazz, kDumpClassFullDetail);
- /* get a pointer to the field's data; performs access checks */
- fieldPtr = getFieldDataAddr(obj, declaringClass, slot, false,noAccessCheck);
- if (fieldPtr == NULL)
+ /* get a pointer to the Field after validating access */
+ field = validateFieldAccess(obj, declaringClass, slot, false,noAccessCheck);
+ if (field == NULL)
RETURN_VOID();
- /* copy 4 or 8 bytes out */
- if (fieldType->primitiveType == PRIM_LONG ||
- fieldType->primitiveType == PRIM_DOUBLE)
- {
- value.j = fieldPtr->j;
- } else {
- value.i = fieldPtr->i;
- }
+ getFieldValue(field, obj, &value);
+ /* if it's primitive, box it up */
result = dvmWrapPrimitive(value, fieldType);
dvmReleaseTrackedAlloc((Object*) result, NULL);
RETURN_PTR(result);
@@ -204,14 +495,14 @@
static void Dalvik_java_lang_reflect_Field_setField(const u4* args,
JValue* pResult)
{
- // ignore thisPtr in args[0]
+ /* ignore thisPtr in args[0] */
Object* obj = (Object*) args[1];
ClassObject* declaringClass = (ClassObject*) args[2];
ClassObject* fieldType = (ClassObject*) args[3];
int slot = args[4];
bool noAccessCheck = (args[5] != 0);
Object* valueObj = (Object*) args[6];
- JValue* fieldPtr;
+ Field* field;
JValue value;
/* unwrap primitive, or verify object type */
@@ -221,31 +512,12 @@
RETURN_VOID();
}
- /* get a pointer to the field's data; performs access checks */
- fieldPtr = getFieldDataAddr(obj, declaringClass, slot, true, noAccessCheck);
- if (fieldPtr == NULL)
- RETURN_VOID();
+ /* get a pointer to the Field after validating access */
+ field = validateFieldAccess(obj, declaringClass, slot, true, noAccessCheck);
- /* store 4 or 8 bytes */
- if (fieldType->primitiveType == PRIM_LONG ||
- fieldType->primitiveType == PRIM_DOUBLE)
- {
- fieldPtr->j = value.j;
- } else if (fieldType->primitiveType == PRIM_NOT) {
- if (slot < 0) {
- StaticField *sfield;
- sfield = (StaticField *)dvmSlotToField(declaringClass, slot);
- assert(fieldPtr == &sfield->value);
- dvmSetStaticFieldObject(sfield, value.l);
- } else {
- int offset = declaringClass->ifields[slot].byteOffset;
- assert(fieldPtr == (JValue *)BYTE_OFFSET(obj, offset));
- dvmSetFieldObject(obj, offset, value.l);
- }
- } else {
- fieldPtr->i = value.i;
+ if (field != NULL) {
+ setFieldValue(field, obj, &value);
}
-
RETURN_VOID();
}
@@ -274,7 +546,7 @@
static void Dalvik_java_lang_reflect_Field_getPrimitiveField(const u4* args,
JValue* pResult)
{
- // ignore thisPtr in args[0]
+ /* ignore thisPtr in args[0] */
Object* obj = (Object*) args[1];
ClassObject* declaringClass = (ClassObject*) args[2];
ClassObject* fieldType = (ClassObject*) args[3];
@@ -282,7 +554,7 @@
bool noAccessCheck = (args[5] != 0);
int typeNum = args[6];
PrimitiveType targetType = convPrimType(typeNum);
- const JValue* fieldPtr;
+ const Field* field;
JValue value;
if (!dvmIsPrimitiveClass(fieldType)) {
@@ -291,19 +563,12 @@
RETURN_VOID();
}
- /* get a pointer to the field's data; performs access checks */
- fieldPtr = getFieldDataAddr(obj, declaringClass, slot, false,noAccessCheck);
- if (fieldPtr == NULL)
+ /* get a pointer to the Field after validating access */
+ field = validateFieldAccess(obj, declaringClass, slot, false,noAccessCheck);
+ if (field == NULL)
RETURN_VOID();
- /* copy 4 or 8 bytes out */
- if (fieldType->primitiveType == PRIM_LONG ||
- fieldType->primitiveType == PRIM_DOUBLE)
- {
- value.j = fieldPtr->j;
- } else {
- value.i = fieldPtr->i;
- }
+ getFieldValue(field, obj, &value);
/* retrieve value, performing a widening conversion if necessary */
if (dvmConvertPrimitiveValue(fieldType->primitiveType, targetType,
@@ -325,16 +590,16 @@
static void Dalvik_java_lang_reflect_Field_setPrimitiveField(const u4* args,
JValue* pResult)
{
- // ignore thisPtr in args[0]
+ /* ignore thisPtr in args[0] */
Object* obj = (Object*) args[1];
ClassObject* declaringClass = (ClassObject*) args[2];
ClassObject* fieldType = (ClassObject*) args[3];
int slot = args[4];
bool noAccessCheck = (args[5] != 0);
int typeNum = args[6];
- const s4* valuePtr = (s4*) &args[7];
+ const s4* valuePtr = (s4*) &args[7]; /* 64-bit vars spill into args[8] */
PrimitiveType srcType = convPrimType(typeNum);
- JValue* fieldPtr;
+ Field* field;
JValue value;
if (!dvmIsPrimitiveClass(fieldType)) {
@@ -352,20 +617,12 @@
RETURN_VOID();
}
- /* get a pointer to the field's data; performs access checks */
- fieldPtr = getFieldDataAddr(obj, declaringClass, slot, true, noAccessCheck);
- if (fieldPtr == NULL)
- RETURN_VOID();
+ /* get a pointer to the Field after validating access */
+ field = validateFieldAccess(obj, declaringClass, slot, true, noAccessCheck);
- /* store 4 or 8 bytes */
- if (fieldType->primitiveType == PRIM_LONG ||
- fieldType->primitiveType == PRIM_DOUBLE)
- {
- fieldPtr->j = value.j;
- } else {
- fieldPtr->i = value.i;
+ if (field != NULL) {
+ setFieldValue(field, obj, &value);
}
-
RETURN_VOID();
}
@@ -377,7 +634,7 @@
static void Dalvik_java_lang_reflect_Field_getDeclaredAnnotations(
const u4* args, JValue* pResult)
{
- // ignore thisPtr in args[0]
+ /* ignore thisPtr in args[0] */
ClassObject* declaringClass = (ClassObject*) args[1];
int slot = args[2];
Field* field;
@@ -398,7 +655,7 @@
static void Dalvik_java_lang_reflect_Field_getSignatureAnnotation(const u4* args,
JValue* pResult)
{
- // ignore thisPtr in args[0]
+ /* ignore thisPtr in args[0] */
ClassObject* declaringClass = (ClassObject*) args[1];
int slot = args[2];
Field* field;
diff --git a/vm/oo/Array.c b/vm/oo/Array.c
index 9749f77..a2f21e6 100644
--- a/vm/oo/Array.c
+++ b/vm/oo/Array.c
@@ -513,9 +513,9 @@
* Another thread must have loaded the class after we
* started but before we finished. Discard what we've
* done and leave some hints for the GC.
+ *
+ * (Yes, this happens.)
*/
- LOGI("WOW: somebody generated %s simultaneously\n",
- newClass->descriptor);
/* Clean up the class before letting the
* GC get its hands on it.
diff --git a/vm/reflect/Reflect.c b/vm/reflect/Reflect.c
index 7e93f19..912ff47 100644
--- a/vm/reflect/Reflect.c
+++ b/vm/reflect/Reflect.c
@@ -220,8 +220,6 @@
if (clazz == NULL) {
LOGW("Unable to match class for part: '%s'\n", *pSignature);
- dvmClearException(dvmThreadSelf());
- dvmThrowException("Ljava/lang/NoSuchMethodException;", NULL);
}
*pSignature = signature;
return clazz;