/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifdef WITH_JIT

/*
 * Target independent portion of Android's Jit
 */

#include "Dalvik.h"
#include "Jit.h"

#include "libdex/DexOpcodes.h"
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>
#include <signal.h>
#include "compiler/Compiler.h"
#include "compiler/CompilerUtility.h"
#include "compiler/CompilerIR.h"
#include <errno.h>

#if defined(WITH_SELF_VERIFICATION)
/* Allocate space for per-thread ShadowSpace data structures */
void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
{
    self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
    if (self->shadowSpace == NULL)
        return NULL;

    self->shadowSpace->registerSpaceSize = REG_SPACE;
    self->shadowSpace->registerSpace =
        (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));

    return self->shadowSpace->registerSpace;
}

/* Free per-thread ShadowSpace data structures */
void dvmSelfVerificationShadowSpaceFree(Thread* self)
{
    free(self->shadowSpace->registerSpace);
    free(self->shadowSpace);
}

/*
 * Save out PC, FP, thread state, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 *
 * The set of saved state from the Thread structure is:
 *     pc  (Dalvik PC)
 *     fp  (Dalvik FP)
 *     retval
 *     method
 *     methodClassDex
 *     interpStackEnd
 */
void* dvmSelfVerificationSaveState(const u2* pc, u4* fp,
                                   Thread* self, int targetTrace)
{
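    /*
     * preBytes covers the outgoing args and StackSaveArea just below the
     * frame pointer; postBytes covers the method's register frame starting
     * at the frame pointer. Together they bound the region copied below.
     */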
    ShadowSpace *shadowSpace = self->shadowSpace;
    unsigned preBytes = self->interpSave.method->outsSize*4 +
        sizeof(StackSaveArea);
    unsigned postBytes = self->interpSave.method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    // Dynamically grow shadow register space if necessary
    if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->retval = self->retval;
    shadowSpace->interpStackEnd = self->interpStackEnd;

    /*
     * Store the original method here in case the trace ends with a
     * return/invoke, after which the current method would no longer
     * be the one the trace started in.
     */
    shadowSpace->method = self->interpSave.method;
    shadowSpace->methodClassDex = self->interpSave.methodClassDex;

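    /*
     * Place the shadow frame so the method's registers occupy the top of the
     * shadow register space, leaving room below for the outs/save area copy.
     */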
    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;

    self->interpSave.fp = (u4*)shadowSpace->shadowFP;
    self->interpStackEnd = (u1*)shadowSpace->registerSpace;
    self->curFrame = self->interpSave.fp;

    // Create a copy of the stack
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}

/*
 * Save ending PC, FP and compiled code exit point to shadow space.
 * Return a pointer to the shadow space for JIT to restore state.
 */
void* dvmSelfVerificationRestoreState(const u2* pc, u4* fp,
                                      SelfVerificationState exitState,
                                      Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    shadowSpace->endPC = pc;
    shadowSpace->endShadowFP = fp;
    shadowSpace->jitExitState = exitState;

    //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
    //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    //    (int)pc);

    if (shadowSpace->selfVerificationState != kSVSStart) {
        LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
            (int)shadowSpace->endPC);
        LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
        LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
            (int)shadowSpace->endShadowFP);
    }

    // Special case when punting after a single instruction
    if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
        shadowSpace->selfVerificationState = kSVSIdle;
    } else {
        shadowSpace->selfVerificationState = exitState;
    }

    /* Restore state before returning */
    self->interpSave.pc = shadowSpace->startPC;
    self->interpSave.fp = shadowSpace->fp;
    self->curFrame = self->interpSave.fp;
    self->interpSave.method = shadowSpace->method;
    self->interpSave.methodClassDex = shadowSpace->methodClassDex;
    self->retval = shadowSpace->retval;
    self->interpStackEnd = shadowSpace->interpStackEnd;

    return shadowSpace;
}

/* Print contents of virtual registers */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    int i;
    for (i = 0; i < numWords; i++) {
        LOGD("(v%d) 0x%8x%s", i, addr[i], addr[i] != addrRef[i] ? " X" : "");
    }
}

/* Print values maintained in shadowSpace */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
    LOGD("Method: %s", shadowSpace->method->name);
    LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}

/* Print decoded instructions in the current trace */
static void selfVerificationDumpTrace(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int i, addr, offset;
    DecodedInstruction *decInsn;

    LOGD("********** SHADOW TRACE DUMP **********");
    for (i = 0; i < shadowSpace->traceLength; i++) {
        addr = shadowSpace->trace[i].addr;
        offset = (int)((u2*)addr - stackSave->method->insns);
        decInsn = &(shadowSpace->trace[i].decInsn);
        /* Not properly decoding instruction, some registers may be garbage */
        LOGD("0x%x: (0x%04x) %s",
            addr, offset, dexGetOpcodeName(decInsn->opcode));
    }
}

/* Code is forced into this spin loop when a divergence is detected */
static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
{
    const u2 *startPC = shadowSpace->startPC;
    JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
    if (desc) {
        dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
        /*
         * This function effectively terminates the VM right here, so not
         * freeing the desc pointer when the enqueuing fails is acceptable.
         */
    }
    gDvmJit.selfVerificationSpin = true;
    while(gDvmJit.selfVerificationSpin) sleep(10);
}

/*
 * If here, we're re-interpreting an instruction that was included
 * in a trace that was just executed. This routine is called for
 * each instruction in the original trace, and compares state
 * when it reaches the end point.
 *
 * TUNING: the interpretation mechanism now supports a counted
 * single-step mechanism. If we were to associate an instruction
 * count with each trace exit, we could just single-step the right
 * number of cycles and then compare. This would improve detection
 * of control divergences, as well as (slightly) simplify this code.
 */
void dvmCheckSelfVerification(const u2* pc, Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    SelfVerificationState state = shadowSpace->selfVerificationState;

    DecodedInstruction decInsn;
    dexDecodeInstruction(pc, &decInsn);

    //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, dexGetOpcodeName(decInsn.opcode));

    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /*
     * Generalize the self verification state to kSVSDebugInterp unless the
     * entry reason is kSVSBackwardBranch or kSVSSingleStep.
     */
    if (state != kSVSBackwardBranch && state != kSVSSingleStep) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /*
     * Check that the current pc is the end of the trace when at least one
     * instruction is interpreted.
     */
    if ((state == kSVSDebugInterp || state == kSVSSingleStep ||
         state == kSVSBackwardBranch) &&
        shadowSpace->traceLength != 0 &&
        pc == shadowSpace->endPC) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
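        /*
         * The compiled trace ran against the shadow frame while the
         * interpreter re-ran the same instructions against the real frame;
         * any byte difference between the two indicates a divergence.
         */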
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            if (state == kSVSBackwardBranch) {
                /* State mismatch on backward branch - try one more iteration */
                shadowSpace->selfVerificationState = kSVSDebugInterp;
                goto log_and_continue;
            }
            LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp,
                                           (int*)shadowSpace->shadowFP,
                                           frameBytes/4);
            LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                                           (int*)shadowSpace->fp,
                                           frameBytes/4);
            selfVerificationSpinLoop(shadowSpace);
        }
        /* Check new frame if it exists (invokes only) */
        if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
            StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->curFrame - localRegs;
            if (memcmp(((char*)self->curFrame)+localRegs,
                       ((char*)shadowSpace->endShadowFP)+localRegs,
                       frameBytes2)) {
                if (state == kSVSBackwardBranch) {
                    /*
                     * State mismatch on backward branch - try one more
                     * iteration.
                     */
                    shadowSpace->selfVerificationState = kSVSDebugInterp;
                    goto log_and_continue;
                }
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
                    (int)self->curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->curFrame,
                                               (int*)shadowSpace->endShadowFP,
                                               (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                                               (int*)self->curFrame,
                                               (frameBytes2+localRegs)/4);
                selfVerificationSpinLoop(shadowSpace);
            }
        }

        /* Check memory space */
        bool memDiff = false;
        ShadowHeap* heapSpacePtr;
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int memData = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != memData) {
                if (state == kSVSBackwardBranch) {
                    /*
                     * State mismatch on backward branch - try one more
                     * iteration.
                     */
                    shadowSpace->selfVerificationState = kSVSDebugInterp;
                    goto log_and_continue;
                }
                LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
                LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
                    heapSpacePtr->addr, memData, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                memDiff = true;
            }
        }
        if (memDiff) selfVerificationSpinLoop(shadowSpace);

        /*
         * Success. If this shadowed trace included a single-stepped
         * instruction, we need to stay in the interpreter for one
         * more interpretation before resuming.
         */
        if (state == kSVSSingleStep) {
            assert(self->jitResumeNPC != NULL);
            assert(self->singleStepCount == 0);
            self->singleStepCount = 1;
            dvmUpdateInterpBreak(self, kInterpSingleStep, kSubModeNormal,
                                 true /* enable */);
        }

        /*
         * Switch off shadow replay mode. The next shadowed trace
         * execution will turn it back on.
         */
        dvmUpdateInterpBreak(self, kInterpJitBreak, kSubModeJitSV,
                             false /* disable */);
        self->jitState = kJitDone;
        return;
    }
log_and_continue:
    /*
     * If the end of the trace has not been reached, make sure the max
     * trace length is not exceeded.
     */
    if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
        selfVerificationSpinLoop(shadowSpace);
        return;
    }
    /* Log the instruction address and decoded instruction for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    shadowSpace->traceLength++;
}
#endif

/*
 * If one of our fixed tables or the translation buffer fills up,
 * call this routine to avoid wasting cycles on future translation requests.
 */
void dvmJitStopTranslationRequests()
{
    /*
     * Note 1: This won't necessarily stop all translation requests, and
     * operates on a delayed mechanism. Running threads look to the copy
     * of this value in their private thread structures and won't see
     * this change until it is refreshed (which happens on interpreter
     * entry).
     * Note 2: This is a one-shot memory leak on this table. Because this is a
     * permanent off switch for Jit profiling, it is a one-time leak of 1K
     * bytes, and no further attempt will be made to re-allocate it. Can't
     * free it because some thread may be holding a reference.
     */
    gDvmJit.pProfTable = NULL;
    dvmJitUpdateThreadStateAll();
}

#if defined(WITH_JIT_TUNING)
/* Convenience function to increment counter from assembly code */
void dvmBumpNoChain(int from)
{
    gDvmJit.noChainExit[from]++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpNormal()
{
    gDvmJit.normalExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
#endif

/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;
    int not_hit;
    int chains;
    int stubs;
    if (gDvmJit.pJitEntryTable) {
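        /*
         * Walk the whole table: a non-null dPC marks a used entry, an entry
         * whose code address is the interpret-only template counts as a stub,
         * and a chain field not equal to jitTableSize links to another bucket.
         */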
        for (i=0, stubs=chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0) {
                hit++;
                if (gDvmJit.pJitEntryTable[i].codeAddress ==
                      dvmCompilerGetInterpretTemplate())
                    stubs++;
            } else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD("JIT: table size is %d, entries used is %d",
             gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed);
        LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
             hit, not_hit + hit, chains, gDvmJit.threshold,
             gDvmJit.blockingMode ? "Blocking" : "Non-blocking");

#if defined(WITH_JIT_TUNING)
        LOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches);

        LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
             gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
             gDvmJit.normalExit, gDvmJit.puntExit);

        LOGD("JIT: ICHits: %d", gDvmICHitCount);

        LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
             "%d switch overflow",
             gDvmJit.noChainExit[kInlineCacheMiss],
             gDvmJit.noChainExit[kCallsiteInterpreted],
             gDvmJit.noChainExit[kSwitchOverflow]);

        LOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, "
             "%d dropped",
             gDvmJit.icPatchInit, gDvmJit.icPatchRejected,
             gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued,
             gDvmJit.icPatchDropped);

        LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
             gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
             gDvmJit.invokeNative, gDvmJit.returnOp);
        LOGD("JIT: Inline: %d mgetter, %d msetter, %d pgetter, %d psetter",
             gDvmJit.invokeMonoGetterInlined, gDvmJit.invokeMonoSetterInlined,
             gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined);
        LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
        LOGD("JIT: Avg unit compilation time: %llu us",
             gDvmJit.numCompilations == 0 ? 0 :
             gDvmJit.jitTime / gDvmJit.numCompilations);
        LOGD("JIT: Potential GC blocked by compiler: max %llu us / "
             "avg %llu us (%d)",
             gDvmJit.maxCompilerThreadBlockGCTime,
             gDvmJit.numCompilerThreadBlockGC == 0 ?
             0 : gDvmJit.compilerThreadBlockGCTime /
             gDvmJit.numCompilerThreadBlockGC,
             gDvmJit.numCompilerThreadBlockGC);
#endif

        LOGD("JIT: %d Translation chains, %d interp stubs",
             gDvmJit.translationChains, stubs);
        if (gDvmJit.profileMode == kTraceProfilingContinuous) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}

/* End current trace now & don't include current instruction */
void dvmJitEndTraceSelect(Thread* self, const u2* dPC)
{
    if (self->jitState == kJitTSelect) {
        self->jitState = kJitTSelectEnd;
    }
    if (self->jitState == kJitTSelectEnd) {
        // Clean up and finish now.
        dvmCheckJit(dPC, self);
    }
}

/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 */
static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked,
                              bool isMethodEntry)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /*
     * Walk the bucket chain to find an exact match for our PC and trace/method
     * type
     */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           ((gDvmJit.pJitEntryTable[idx].dPC != dPC) ||
            (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry !=
             isMethodEntry))) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC ||
        gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) {
        /*
         * No match. Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        if (!callerLocked)
            dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case). Otherwise we're going
         * to have to find a free slot and chain it.
         */
        ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                        isMethodEntry) {
                    /* Another thread got there first for this dPC */
                    if (!callerLocked)
                        dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0; /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (android_atomic_release_cas(oldValue.infoWord,
                         newValue.infoWord,
                         &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry;
            /*
             * Initialize codeAddress and allocate the slot. Must
             * happen in this order (once dPC is set, the entry is live).
             */
            android_atomic_release_store((int32_t)dPC,
                 (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC);
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        if (!callerLocked)
            dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}

/* Dump a trace description */
void dvmJitDumpTraceDesc(JitTraceDescription *trace)
{
    int i;
    bool done = false;
    const u2* dpc;
    const u2* dpcBase;
    int curFrag = 0;
    LOGD("===========================================");
    LOGD("Trace dump 0x%x, Method %s off 0x%x",(int)trace,
         trace->method->name,trace->trace[curFrag].info.frag.startOffset);
    dpcBase = trace->method->insns;
    while (!done) {
        DecodedInstruction decInsn;
        if (trace->trace[curFrag].isCode) {
            LOGD("Frag[%d]- Insts: %d, start: 0x%x, hint: 0x%x, end: %d",
                 curFrag, trace->trace[curFrag].info.frag.numInsts,
                 trace->trace[curFrag].info.frag.startOffset,
                 trace->trace[curFrag].info.frag.hint,
                 trace->trace[curFrag].info.frag.runEnd);
            dpc = dpcBase + trace->trace[curFrag].info.frag.startOffset;
            for (i=0; i<trace->trace[curFrag].info.frag.numInsts; i++) {
                dexDecodeInstruction(dpc, &decInsn);
                LOGD(" 0x%04x - %s 0x%x",(dpc-dpcBase),
                     dexGetOpcodeName(decInsn.opcode),(int)dpc);
                dpc += dexGetWidthFromOpcode(decInsn.opcode);
            }
            if (trace->trace[curFrag].info.frag.runEnd) {
                done = true;
            }
        } else {
            LOGD("Frag[%d]- META info: 0x%08x", curFrag,
                 (int)trace->trace[curFrag].info.meta);
        }
        curFrag++;
    }
    LOGD("-------------------------------------------");
}

/*
 * Append the class ptr of "this" and the current method ptr to the current
 * trace. That is, the trace runs will contain the following components:
 *  + trace run that ends with an invoke (existing entry)
 *  + thisClass descriptor and classLoader (new)
 *  + calleeMethod (new)
 */
static void insertClassMethodInfo(Thread* self,
                                  const ClassObject* thisClass,
                                  const Method* calleeMethod,
                                  const DecodedInstruction* insn)
{
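    /* Each piece of metadata is recorded as its own non-code trace run */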
    int currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = thisClass ?
        (void *) thisClass->descriptor : NULL;
    self->trace[currTraceRun].isCode = false;

    currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = thisClass ?
        (void *) thisClass->classLoader : NULL;
    self->trace[currTraceRun].isCode = false;

    currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = (void *) calleeMethod;
    self->trace[currTraceRun].isCode = false;
}

/*
 * Check if the next instruction following the invoke is a move-result and if
 * so add it to the trace. That is, this will add the trace run that includes
 * the move-result to the trace list.
 *
 * + trace run that ends with an invoke (existing entry)
 * + thisClass (existing entry)
 * + calleeMethod (existing entry)
 * + move result (new)
 *
 * lastPC, len, offset are all from the preceding invoke instruction
 */
static void insertMoveResult(const u2 *lastPC, int len, int offset,
                             Thread *self)
{
    DecodedInstruction nextDecInsn;
    const u2 *moveResultPC = lastPC + len;

    dexDecodeInstruction(moveResultPC, &nextDecInsn);
    if ((nextDecInsn.opcode != OP_MOVE_RESULT) &&
        (nextDecInsn.opcode != OP_MOVE_RESULT_WIDE) &&
        (nextDecInsn.opcode != OP_MOVE_RESULT_OBJECT))
        return;

    /* We need to start a new trace run */
    int currTraceRun = ++self->currTraceRun;
    self->currRunHead = moveResultPC;
    self->trace[currTraceRun].info.frag.startOffset = offset + len;
    self->trace[currTraceRun].info.frag.numInsts = 1;
    self->trace[currTraceRun].info.frag.runEnd = false;
    self->trace[currTraceRun].info.frag.hint = kJitHintNone;
    self->trace[currTraceRun].isCode = true;
    self->totalTraceLen++;

    self->currRunLen = dexGetWidthFromInstruction(moveResultPC);
}

/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted. This is the primary trace
 * selection function. NOTE: return instructions are handled a little
 * differently. In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation. If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request. This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected. However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes. This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 */
void dvmCheckJit(const u2* pc, Thread* self)
{
    const ClassObject *thisClass = self->callsiteClass;
    const Method* curMethod = self->methodToCall;
    int flags, len;
    int allDone = false;
    /* Stay in break/single-step mode for the next instruction */
    bool stayOneMoreInst = false;

    /* Prepare to handle last PC and stage the current PC & method */
    const u2 *lastPC = self->lastPC;

    self->lastPC = pc;

    switch (self->jitState) {
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(lastPC, &decInsn);

            /*
             * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
             * to the amount of space it takes to generate the chaining
             * cells.
             */
            if (self->totalTraceLen != 0 &&
                (decInsn.opcode == OP_PACKED_SWITCH ||
                 decInsn.opcode == OP_SPARSE_SWITCH)) {
                self->jitState = kJitTSelectEnd;
                break;
            }

#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s. lpc:0x%x, pc:0x%x",
                 dexGetOpcodeName(decInsn.opcode), (int)lastPC, (int)pc);
#endif
            flags = dexGetFlagsFromOpcode(decInsn.opcode);
            len = dexGetWidthFromInstruction(lastPC);
            offset = lastPC - self->traceMethod->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(self->traceMethod));
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 823 | if (lastPC != self->currRunHead + self->currRunLen) { |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 824 | int currTraceRun; |
| 825 | /* We need to start a new trace run */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 826 | currTraceRun = ++self->currTraceRun; |
| 827 | self->currRunLen = 0; |
| 828 | self->currRunHead = (u2*)lastPC; |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame] | 829 | self->trace[currTraceRun].info.frag.startOffset = offset; |
| 830 | self->trace[currTraceRun].info.frag.numInsts = 0; |
| 831 | self->trace[currTraceRun].info.frag.runEnd = false; |
| 832 | self->trace[currTraceRun].info.frag.hint = kJitHintNone; |
| 833 | self->trace[currTraceRun].isCode = true; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 834 | } |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame] | 835 | self->trace[self->currTraceRun].info.frag.numInsts++; |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 836 | self->totalTraceLen++; |
| 837 | self->currRunLen += len; |
| Ben Cheng | 79d173c | 2009-09-29 16:12:51 -0700 | [diff] [blame] | 838 | |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 839 | /* |
| 840 | * If the last instruction is an invoke, we will try to sneak in |
| 841 | * the move-result* (if existent) into a separate trace run. |
| 842 | */ |
| Carl Shapiro | 1813ab2 | 2011-04-15 15:48:54 -0700 | [diff] [blame^] | 843 | { |
| 844 | int needReservedRun = (flags & kInstrInvoke) ? 1 : 0; |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 845 | |
| Carl Shapiro | 1813ab2 | 2011-04-15 15:48:54 -0700 | [diff] [blame^] | 846 | /* Will probably never hit this with the current trace builder */ |
| 847 | if (self->currTraceRun == |
| 848 | (MAX_JIT_RUN_LEN - 1 - needReservedRun)) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 849 | self->jitState = kJitTSelectEnd; |
| Carl Shapiro | 1813ab2 | 2011-04-15 15:48:54 -0700 | [diff] [blame^] | 850 | } |
| Ben Cheng | 79d173c | 2009-09-29 16:12:51 -0700 | [diff] [blame] | 851 | } |
| 852 | |
| Dan Bornstein | c2b486f | 2010-11-12 16:07:16 -0800 | [diff] [blame] | 853 | if (!dexIsGoto(flags) && |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 854 | ((flags & (kInstrCanBranch | |
| 855 | kInstrCanSwitch | |
| 856 | kInstrCanReturn | |
| 857 | kInstrInvoke)) != 0)) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 858 | self->jitState = kJitTSelectEnd; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 859 | #if defined(SHOW_TRACE) |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 860 | LOGD("TraceGen: ending on %s, basic block end", |
| Dan Bornstein | 9a1f816 | 2010-12-01 17:02:26 -0800 | [diff] [blame] | 861 | dexGetOpcodeName(decInsn.opcode)); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 862 | #endif |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 863 | |
| 864 | /* |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 865 | * If the current invoke is a {virtual,interface}, get the |
| 866 | * current class/method pair into the trace as well. |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 867 | * If the next instruction is a variant of move-result, insert |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 868 | * it into the trace too. |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 869 | */ |
| 870 | if (flags & kInstrInvoke) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 871 | insertClassMethodInfo(self, thisClass, curMethod, |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 872 | &decInsn); |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 873 | insertMoveResult(lastPC, len, offset, self); |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 874 | } |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 875 | } |
| Bill Buzbee | 2ce8a6c | 2009-12-03 15:09:32 -0800 | [diff] [blame] | 876 | /* Break on throw or self-loop */ |
| Dan Bornstein | 9a1f816 | 2010-12-01 17:02:26 -0800 | [diff] [blame] | 877 | if ((decInsn.opcode == OP_THROW) || (lastPC == pc)){ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 878 | self->jitState = kJitTSelectEnd; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 879 | } |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 880 | if (self->totalTraceLen >= JIT_MAX_TRACE_LEN) { |
| 881 | self->jitState = kJitTSelectEnd; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 882 | } |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 883 | if ((flags & kInstrCanReturn) != kInstrCanReturn) { |
| 884 | break; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 885 | } |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 886 | else { |
| 887 | /* |
| 888 | * Last instruction is a return - stay in the dbg interpreter |
| 889 | * for one more instruction if it is a non-void return, since |
| 890 | * we don't want to start a trace with move-result as the first |
| 891 | * instruction (which is already included in the trace |
| 892 | * containing the invoke). |
| 893 | */ |
| Dan Bornstein | 9a1f816 | 2010-12-01 17:02:26 -0800 | [diff] [blame] | 894 | if (decInsn.opcode != OP_RETURN_VOID) { |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 895 | stayOneMoreInst = true; |
| 896 | } |
| 897 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 898 | /* NOTE: intentional fallthrough for returns */ |
| 899 | case kJitTSelectEnd: |
| 900 | { |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 901 | /* Empty trace - set to bail to interpreter */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 902 | if (self->totalTraceLen == 0) { |
| 903 | dvmJitSetCodeAddr(self->currTraceHead, |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 904 | dvmCompilerGetInterpretTemplate(), |
| 905 | dvmCompilerGetInterpretTemplateSet(), |
| 906 | false /* Not method entry */, 0); |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 907 | self->jitState = kJitDone; |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 908 | allDone = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 909 | break; |
| 910 | } |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 911 | |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 912 | int lastTraceDesc = self->currTraceRun; |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 913 | |
| 914 | /* Append a new empty desc if the last slot holds meta info */ |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame] | 915 | if (!self->trace[lastTraceDesc].isCode) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 916 | lastTraceDesc = ++self->currTraceRun; |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame] | 917 | self->trace[lastTraceDesc].info.frag.startOffset = 0; |
| 918 | self->trace[lastTraceDesc].info.frag.numInsts = 0; |
| 919 | self->trace[lastTraceDesc].info.frag.hint = kJitHintNone; |
| 920 | self->trace[lastTraceDesc].isCode = true; |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 921 | } |
| 922 | |
| 923 | /* Mark the end of the trace runs */ |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame] | 924 | self->trace[lastTraceDesc].info.frag.runEnd = true; |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 925 | |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 926 | JitTraceDescription* desc = |
| 927 | (JitTraceDescription*)malloc(sizeof(JitTraceDescription) + |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 928 | sizeof(JitTraceRun) * (self->currTraceRun+1)); |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 929 | |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 930 | if (desc == NULL) { |
| 931 | LOGE("Out of memory in trace selection"); |
| 932 | dvmJitStopTranslationRequests(); |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 933 | self->jitState = kJitDone; |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 934 | allDone = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 935 | break; |
| 936 | } |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 937 | |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 938 | desc->method = self->traceMethod; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 939 | memcpy((char*)&(desc->trace[0]), |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 940 | (char*)&(self->trace[0]), |
| 941 | sizeof(JitTraceRun) * (self->currTraceRun+1)); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 942 | #if defined(SHOW_TRACE) |
| 943 | LOGD("TraceGen: trace done, adding to queue"); |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 944 | dvmJitDumpTraceDesc(desc); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 945 | #endif |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 946 | if (dvmCompilerWorkEnqueue( |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 947 | self->currTraceHead,kWorkOrderTrace,desc)) { |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 948 | /* Work order successfully enqueued */ |
| 949 | if (gDvmJit.blockingMode) { |
| 950 | dvmCompilerDrainQueue(); |
| 951 | } |
| Ben Cheng | 1357e94 | 2010-02-10 17:21:39 -0800 | [diff] [blame] | 952 | } else { |
| 953 | /* |
| 954 | * Make sure the descriptor for the abandoned work order is |
| 955 | * freed. |
| 956 | */ |
| 957 | free(desc); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 958 | } |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 959 | self->jitState = kJitDone; |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 960 | allDone = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 961 | } |
| 962 | break; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 963 | case kJitDone: |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 964 | allDone = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 965 | break; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 966 | case kJitNot: |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 967 | allDone = true; |
| Ben Cheng | ed79ff0 | 2009-10-13 13:26:40 -0700 | [diff] [blame] | 968 | break; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 969 | default: |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 970 | LOGE("Unexpected JIT state: %d", self->jitState); |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 971 | dvmAbort(); |
| Ben Cheng | 9c147b8 | 2009-10-07 16:41:46 -0700 | [diff] [blame] | 972 | break; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 973 | } |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 974 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 975 | /* |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 976 | * If we're done with trace selection, switch off the control flags. |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 977 | */ |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 978 | if (allDone) { |
| 979 | dvmUpdateInterpBreak(self, kInterpJitBreak, |
| 980 | kSubModeJitTraceBuild, false); |
| 981 | if (stayOneMoreInst) { |
| 982 | // Keep going in single-step mode for at least one more inst |
| 983 | assert(self->jitResumeNPC == NULL); |
| 984 | self->singleStepCount = MIN(1, self->singleStepCount); |
| 985 | dvmUpdateInterpBreak(self, kInterpSingleStep, kSubModeNormal, |
| 986 | true /* enable */); |
| 987 | } |
| 988 | } |
| 989 | return; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 990 | } |
| 991 | |
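/*
 * Illustrative sketch (hypothetical names, not from the Dalvik sources) of
 * the allocation pattern used when the trace description is built above: a
 * fixed header plus a variable number of trailing run records obtained with
 * a single malloc, so the whole descriptor can be handed to the compiler
 * thread and later freed as one block. DemoRun, DemoDesc and buildDemoDesc
 * are stand-ins for JitTraceRun/JitTraceDescription.
 */
typedef struct { int startOffset; int numInsts; } DemoRun;
typedef struct { const void *method; DemoRun trace[]; } DemoDesc; /* C99 flexible array */

static DemoDesc *buildDemoDesc(const void *method, const DemoRun *runs, int numRuns)
{
    DemoDesc *desc = (DemoDesc*) malloc(sizeof(DemoDesc) +
                                        sizeof(DemoRun) * numRuns);
    if (desc == NULL)
        return NULL;               /* mirrors the out-of-memory bail-out above */
    desc->method = method;
    memcpy(desc->trace, runs, sizeof(DemoRun) * numRuns);
    return desc;
}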
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 992 | JitEntry *dvmJitFindEntry(const u2* pc, bool isMethodEntry) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 993 | { |
| 994 | int idx = dvmJitHash(pc); |
| 995 | |
| 996 | /* Expect a high hit rate on 1st shot */ |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 997 | if ((gDvmJit.pJitEntryTable[idx].dPC == pc) && |
| 998 | (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == isMethodEntry)) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 999 | return &gDvmJit.pJitEntryTable[idx]; |
| 1000 | else { |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1001 | int chainEndMarker = gDvmJit.jitTableSize; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1002 | while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) { |
| 1003 | idx = gDvmJit.pJitEntryTable[idx].u.info.chain; |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1004 | if ((gDvmJit.pJitEntryTable[idx].dPC == pc) && |
| 1005 | (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == |
| 1006 | isMethodEntry)) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1007 | return &gDvmJit.pJitEntryTable[idx]; |
| 1008 | } |
| 1009 | } |
| 1010 | return NULL; |
| 1011 | } |
| 1012 | |
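/*
 * Illustrative sketch (hypothetical code, not from the Dalvik sources) of
 * the lookup pattern below: an open hash table whose collision chains are
 * embedded index fields rather than pointers, with the table size itself
 * serving as the end-of-chain marker. DemoEntry, DEMO_SIZE and demoFind are
 * made-up names.
 */
#define DEMO_SIZE 8u                      /* must be a power of 2 */

typedef struct {
    u4 key;                               /* 0 means "slot unused" */
    u4 chain;                             /* index of next entry; DEMO_SIZE terminates */
} DemoEntry;

static DemoEntry *demoFind(DemoEntry *table, u4 key)
{
    u4 idx = key & (DEMO_SIZE - 1);       /* trivial hash for illustration */
    if (table[idx].key == key)
        return &table[idx];               /* expect a high hit rate on the 1st probe */
    while (table[idx].chain != DEMO_SIZE) {
        idx = table[idx].chain;           /* follow the embedded chain index */
        if (table[idx].key == key)
            return &table[idx];
    }
    return NULL;                          /* not present */
}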
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1013 | /* |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1014 | * Walk through the JIT profile table and find the corresponding JIT code, in |
| 1015 | * the specified format (i.e. trace vs method). This routine needs to be fast. |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1016 | */ |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1017 | void* getCodeAddrCommon(const u2* dPC, bool methodEntry) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1018 | { |
| 1019 | int idx = dvmJitHash(dPC); |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1020 | const u2* pc = gDvmJit.pJitEntryTable[idx].dPC; |
| 1021 | if (pc != NULL) { |
| Ben Cheng | 1a7b9d7 | 2010-09-20 22:20:31 -0700 | [diff] [blame] | 1022 | bool hideTranslation = dvmJitHideTranslation(); |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1023 | if (pc == dPC && |
| 1024 | gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) { |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1025 | int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ? |
| 1026 | 0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset; |
| 1027 | intptr_t codeAddress = |
| 1028 | (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress; |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 1029 | #if defined(WITH_JIT_TUNING) |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1030 | gDvmJit.addrLookupsFound++; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1031 | #endif |
| buzbee | 99ddb1e | 2011-01-28 10:44:30 -0800 | [diff] [blame] | 1032 | return hideTranslation || !codeAddress ? NULL : |
| 1033 | (void *)(codeAddress + offset); |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1034 | } else { |
| 1035 | int chainEndMarker = gDvmJit.jitTableSize; |
| 1036 | while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) { |
| 1037 | idx = gDvmJit.pJitEntryTable[idx].u.info.chain; |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1038 | if (gDvmJit.pJitEntryTable[idx].dPC == dPC && |
| 1039 | gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == |
| 1040 | methodEntry) { |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1041 | int offset = (gDvmJit.profileMode >= |
| 1042 | kTraceProfilingContinuous) ? 0 : |
| 1043 | gDvmJit.pJitEntryTable[idx].u.info.profileOffset; |
| 1044 | intptr_t codeAddress = |
| 1045 | (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress; |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 1046 | #if defined(WITH_JIT_TUNING) |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1047 | gDvmJit.addrLookupsFound++; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1048 | #endif |
| buzbee | 99ddb1e | 2011-01-28 10:44:30 -0800 | [diff] [blame] | 1049 | return hideTranslation || !codeAddress ? NULL : |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1050 | (void *)(codeAddress + offset); |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1051 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1052 | } |
| 1053 | } |
| 1054 | } |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 1055 | #if defined(WITH_JIT_TUNING) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1056 | gDvmJit.addrLookupsNotFound++; |
| 1057 | #endif |
| 1058 | return NULL; |
| 1059 | } |
| 1060 | |
| 1061 | /* |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1062 | * If a translated code address, in trace format, exists for the Dalvik byte code |
| 1063 | * pointer, return it. |
| 1064 | */ |
| 1065 | void* dvmJitGetTraceAddr(const u2* dPC) |
| 1066 | { |
| 1067 | return getCodeAddrCommon(dPC, false /* method entry */); |
| 1068 | } |
| 1069 | |
| 1070 | /* |
| 1071 | * If a translated code address, in whole-method format, exists for the Dalvik |
| 1072 | * byte code pointer, return it. |
| 1073 | */ |
| 1074 | void* dvmJitGetMethodAddr(const u2* dPC) |
| 1075 | { |
| 1076 | return getCodeAddrCommon(dPC, true /* method entry */); |
| 1077 | } |
| 1078 | |
| 1079 | /* |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1080 | * Similar to dvmJitGetTraceAddr, but returns null if the calling |
| 1081 | * thread is in single-step mode. |
| 1082 | */ |
| 1083 | void* dvmJitGetTraceAddrThread(const u2* dPC, Thread* self) |
| 1084 | { |
| 1085 | return (self->interpBreak.ctl.breakFlags != 0) ? NULL : |
| 1086 | getCodeAddrCommon(dPC, false /* method entry */); |
| 1087 | } |
| 1088 | |
| 1089 | /* |
| 1090 | * Similar to dvmJitGetMethodAddr, but returns null if the calling |
| 1091 | * thread is in single-step mode. |
| 1092 | */ |
| 1093 | void* dvmJitGetMethodAddrThread(const u2* dPC, Thread* self) |
| 1094 | { |
| 1095 | return (self->interpBreak.ctl.breakFlags != 0) ? NULL : |
| 1096 | getCodeAddrCommon(dPC, true /* method entry */); |
| 1097 | } |
| 1098 | |
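/*
 * Minimal usage sketch (hypothetical caller, not from the Dalvik sources):
 * before dispatching a Dalvik pc, ask whether a trace translation may be
 * entered. A NULL answer covers both "no translation yet" and "this thread
 * has breakFlags set (e.g. single-stepping)"; either way the caller stays
 * in the interpreter.
 */
static bool demoHasUsableTrace(const u2* dPC, Thread* self)
{
    return dvmJitGetTraceAddrThread(dPC, self) != NULL;
}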
| 1099 | /* |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1100 | * Register the translated code pointer into the JitTable. |
| Bill Buzbee | 9a8c75a | 2009-11-08 14:31:20 -0800 | [diff] [blame] | 1101 | * NOTE: Once a codeAddress field transitions from initial state to |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1102 | * JIT'd code, it must not be altered without first halting all |
| buzbee | 5867bea | 2011-04-09 14:47:32 -0700 | [diff] [blame] | 1103 | * threads. We defer the setting of the profile prefix size until |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1104 | * after the new code address is set to ensure that the prefix offset |
| 1105 | * is never applied to the initial interpret-only translation. All |
| 1106 | * translations with non-zero profile prefixes will still be correct |
| 1107 | * if entered as if the profile offset is 0, but the interpret-only |
| 1108 | * template cannot handle a non-zero prefix. |
| buzbee | 5867bea | 2011-04-09 14:47:32 -0700 | [diff] [blame] | 1109 | * NOTE: JitTable must not be in danger of reset while this |
| 1110 | * code is executing. See Issue 4271784 for details. |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1111 | */ |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1112 | void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set, |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1113 | bool isMethodEntry, int profilePrefixSize) |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1114 | { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1115 | JitEntryInfoUnion oldValue; |
| 1116 | JitEntryInfoUnion newValue; |
| Ben Cheng | 20d7e6c | 2011-02-18 17:12:42 -0800 | [diff] [blame] | 1117 | /* |
| buzbee | 5867bea | 2011-04-09 14:47:32 -0700 | [diff] [blame] | 1118 | * Get the JitTable slot for this dPC (or create one if JitTable |
| 1119 | * has been reset between the time the trace was requested and |
| 1120 | * now). |
| Ben Cheng | 20d7e6c | 2011-02-18 17:12:42 -0800 | [diff] [blame] | 1121 | */ |
| 1122 | JitEntry *jitEntry = isMethodEntry ? |
| buzbee | 5867bea | 2011-04-09 14:47:32 -0700 | [diff] [blame] | 1123 | lookupAndAdd(dPC, false /* caller holds tableLock */, isMethodEntry) : |
| 1124 | dvmJitFindEntry(dPC, isMethodEntry); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1125 | assert(jitEntry); |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1126 | /* Note: order of update is important */ |
| 1127 | do { |
| 1128 | oldValue = jitEntry->u; |
| 1129 | newValue = oldValue; |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1130 | newValue.info.isMethodEntry = isMethodEntry; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1131 | newValue.info.instructionSet = set; |
| buzbee | 99ddb1e | 2011-01-28 10:44:30 -0800 | [diff] [blame] | 1132 | newValue.info.profileOffset = profilePrefixSize; |
| Andy McFadden | 6e10b9a | 2010-06-14 15:24:39 -0700 | [diff] [blame] | 1133 | } while (android_atomic_release_cas( |
| 1134 | oldValue.infoWord, newValue.infoWord, |
| 1135 | &jitEntry->u.infoWord) != 0); |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1136 | jitEntry->codeAddress = nPC; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1137 | } |
| 1138 | |
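/*
 * Sketch of the retry/compare-and-swap idiom used above, written with C11
 * atomics purely for illustration (the real code uses
 * android_atomic_release_cas). demoInfoWord, demoUpdateInfo and setBits are
 * hypothetical. Release ordering makes the packed info word visible before
 * any later store, such as publishing the code address.
 */
#include <stdatomic.h>

static _Atomic unsigned int demoInfoWord;

static void demoUpdateInfo(unsigned int setBits)
{
    unsigned int oldWord, newWord;
    do {
        oldWord = atomic_load_explicit(&demoInfoWord, memory_order_relaxed);
        newWord = oldWord | setBits;       /* recompute from the latest snapshot */
    } while (!atomic_compare_exchange_weak_explicit(&demoInfoWord, &oldWord,
                                                    newWord,
                                                    memory_order_release,
                                                    memory_order_relaxed));
}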
| 1139 | /* |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1140 | * Determine if a valid trace-building request is active. If so, set |
| 1141 | * the proper flags in interpBreak and return. Trace selection will |
| 1142 | * then begin normally via dvmCheckBefore. |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1143 | */ |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1144 | void dvmJitCheckTraceRequest(Thread* self) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1145 | { |
| Bill Buzbee | 48f1824 | 2009-06-19 16:02:27 -0700 | [diff] [blame] | 1146 | int i; |
| buzbee | 852aacd | 2010-06-08 16:24:46 -0700 | [diff] [blame] | 1147 | /* |
| 1148 | * A note on trace "hotness" filtering: |
| 1149 | * |
| 1150 | * Our first level trigger is intentionally loose - we need it to |
| 1151 | * fire easily not just to identify potential traces to compile, but |
| 1152 | * also to allow re-entry into the code cache. |
| 1153 | * |
| 1154 | * The 2nd level filter (done here) exists to be selective about |
| 1155 | * what we actually compile. It works by requiring the same |
| 1156 | * trace head "key" (defined as filterKey below) to appear twice in |
| 1157 | * a relatively short period of time. The difficulty is defining the |
| 1158 | * shape of the filterKey. Unfortunately, there is no "one size fits |
| 1159 | * all" approach. |
| 1160 | * |
| 1161 | * For spiky execution profiles dominated by a smallish |
| 1162 | * number of very hot loops, we would want the second-level filter |
| 1163 | * to be very selective. A good selective filter is requiring an |
| 1164 | * exact match of the Dalvik PC. In other words, defining filterKey as: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1165 | * intptr_t filterKey = (intptr_t)self->interpSave.pc |
| buzbee | 852aacd | 2010-06-08 16:24:46 -0700 | [diff] [blame] | 1166 | * |
| 1167 | * However, for flat execution profiles we do best when aggressively |
| 1168 | * translating. A heuristically decent proxy for this is to use |
| 1169 | * the value of the method pointer containing the trace as the filterKey. |
| 1170 | * Intuitively, this is saying that once any trace in a method appears hot, |
| 1171 | * immediately translate any other trace from that same method that |
| 1172 | * survives the first-level filter. Here, filterKey would be defined as: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1173 | * intptr_t filterKey = (intptr_t)self->interpSave.method |
| buzbee | 852aacd | 2010-06-08 16:24:46 -0700 | [diff] [blame] | 1174 | * |
| 1175 | * The problem is that we can't easily detect whether we're dealing |
| 1176 | * with a spiky or flat profile. If we go with the "pc" match approach, |
| 1177 | * flat profiles perform poorly. If we go with the loose "method" match, |
| 1178 | * we end up generating a lot of useless translations. Probably the |
| 1179 | * best approach in the future will be to retain profile information |
| 1180 | * across runs of each application in order to determine its profile, |
| 1181 | * and then choose once we have enough history. |
| 1182 | * |
| 1183 | * However, for now we've decided to choose a compromise filter scheme that |
| 1184 | * includes elements of both. The high order bits of the filter key |
| 1185 | * are drawn from the enclosing method, and are combined with a slice |
| 1186 | * of the low-order bits of the Dalvik pc of the trace head. The |
| 1187 | * looseness of the filter can be adjusted by changing the width of |
| 1188 | * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS). The wider |
| 1189 | * the slice, the tighter the filter. |
| 1190 | * |
| 1191 | * Note: the fixed shifts in the function below reflect assumed word |
| 1192 | * alignment for method pointers and half-word alignment of the Dalvik pc. |
| 1194 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1195 | u4 methodKey = (u4)self->interpSave.method << |
| buzbee | c35294d | 2010-06-09 14:22:50 -0700 | [diff] [blame] | 1196 | (JIT_TRACE_THRESH_FILTER_PC_BITS - 2); |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1197 | u4 pcKey = ((u4)self->interpSave.pc >> 1) & |
| buzbee | c35294d | 2010-06-09 14:22:50 -0700 | [diff] [blame] | 1198 | ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1); |
| 1199 | intptr_t filterKey = (intptr_t)(methodKey | pcKey); |
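/*
 * Worked example (illustrative values; assumes
 * JIT_TRACE_THRESH_FILTER_PC_BITS == 4 purely for this example):
 *   method = 0x2ab10020, pc = 0x2ab10036
 *   methodKey = 0x2ab10020 << 2          = 0xaac40080
 *   pcKey     = (0x2ab10036 >> 1) & 0xf  = 0xb
 *   filterKey = 0xaac40080 | 0xb         = 0xaac4008b
 * Because method pointers are word aligned, methodKey never overlaps the
 * low-order pc slice.
 */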
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1200 | |
| 1201 | // Shouldn't be here if already building a trace. |
| 1202 | assert((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild)==0); |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1203 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1204 | /* Check if the JIT request can be handled now */ |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1205 | if ((gDvmJit.pJitEntryTable != NULL) && |
| 1206 | ((self->interpBreak.ctl.breakFlags & kInterpSingleStep) == 0)){ |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1207 | /* Bypass the filter for hot trace requests or during stress mode */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1208 | if (self->jitState == kJitTSelectRequest && |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1209 | gDvmJit.threshold > 6) { |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1210 | /* Two-level filtering scheme */ |
| 1211 | for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1212 | if (filterKey == self->threshFilter[i]) { |
| 1213 | self->threshFilter[i] = 0; // Reset filter entry |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1214 | break; |
| 1215 | } |
| Bill Buzbee | 48f1824 | 2009-06-19 16:02:27 -0700 | [diff] [blame] | 1216 | } |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1217 | if (i == JIT_TRACE_THRESH_FILTER_SIZE) { |
| 1218 | /* |
| 1219 | * Use random replacement policy - otherwise we could miss a |
| 1220 | * large loop that contains more traces than the size of our |
| 1221 | * filter array. |
| 1222 | */ |
| 1223 | i = rand() % JIT_TRACE_THRESH_FILTER_SIZE; |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1224 | self->threshFilter[i] = filterKey; |
| 1225 | self->jitState = kJitDone; |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1226 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1227 | } |
| Bill Buzbee | d726991 | 2009-11-10 14:31:32 -0800 | [diff] [blame] | 1228 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1229 | /* If the compiler is backlogged, cancel any JIT actions */ |
| 1230 | if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1231 | self->jitState = kJitDone; |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1232 | } |
| Bill Buzbee | d726991 | 2009-11-10 14:31:32 -0800 | [diff] [blame] | 1233 | |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1234 | /* |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1235 | * Check for additional reasons that might force the trace select |
| 1236 | * request to be dropped |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1237 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1238 | if (self->jitState == kJitTSelectRequest || |
| 1239 | self->jitState == kJitTSelectRequestHot) { |
| 1240 | if (dvmJitFindEntry(self->interpSave.pc, false)) { |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1241 | /* In progress - nothing to do */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1242 | self->jitState = kJitDone; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1243 | } else { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1244 | JitEntry *slot = lookupAndAdd(self->interpSave.pc, |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1245 | false /* lock */, |
| 1246 | false /* method entry */); |
| 1247 | if (slot == NULL) { |
| 1248 | /* |
| 1249 | * Table is full. This should have been |
| 1250 | * detected by the compiler thread and the table |
| 1251 | * resized before we run into it here. Assume bad things |
| 1252 | * are afoot and disable profiling. |
| 1253 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1254 | self->jitState = kJitDone; |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1255 | LOGD("JIT: JitTable full, disabling profiling"); |
| 1256 | dvmJitStopTranslationRequests(); |
| 1257 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1258 | } |
| 1259 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1260 | |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1261 | switch (self->jitState) { |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1262 | case kJitTSelectRequest: |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1263 | case kJitTSelectRequestHot: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1264 | self->jitState = kJitTSelect; |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1265 | self->traceMethod = self->interpSave.method; |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1266 | self->currTraceHead = self->interpSave.pc; |
| 1267 | self->currTraceRun = 0; |
| 1268 | self->totalTraceLen = 0; |
| 1269 | self->currRunHead = self->interpSave.pc; |
| 1270 | self->currRunLen = 0; |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame] | 1271 | self->trace[0].info.frag.startOffset = |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1272 | self->interpSave.pc - self->interpSave.method->insns; |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame] | 1273 | self->trace[0].info.frag.numInsts = 0; |
| 1274 | self->trace[0].info.frag.runEnd = false; |
| 1275 | self->trace[0].info.frag.hint = kJitHintNone; |
| 1276 | self->trace[0].isCode = true; |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1277 | self->lastPC = 0; |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1278 | /* Turn on trace selection mode */ |
| 1279 | dvmUpdateInterpBreak(self, kInterpJitBreak, |
| 1280 | kSubModeJitTraceBuild, true); |
| 1281 | #if defined(SHOW_TRACE) |
| 1282 | LOGD("Starting trace for %s at 0x%x", |
| 1283 | self->interpSave.method->name, (int)self->interpSave.pc); |
| 1284 | #endif |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1285 | break; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1286 | case kJitDone: |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1287 | break; |
| 1288 | default: |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1289 | LOGE("Unexpected JIT state: %d", self->jitState); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1290 | dvmAbort(); |
| 1291 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1292 | } else { |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1293 | /* Cannot build trace this time */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1294 | self->jitState = kJitDone; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1295 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1296 | } |
| 1297 | |
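/*
 * Standalone sketch (hypothetical names, not from the Dalvik sources) of
 * the second-level filter loop above: a small array of recently seen trace
 * head keys, where a key must be seen twice before it passes, and a miss
 * evicts a random slot so a large loop cannot permanently starve.
 */
#define DEMO_FILTER_SIZE 32

static bool demoPassesFilter(intptr_t key, intptr_t *filter)
{
    int i;
    for (i = 0; i < DEMO_FILTER_SIZE; i++) {
        if (filter[i] == key) {
            filter[i] = 0;                 /* second sighting: consume and accept */
            return true;
        }
    }
    /* First sighting: remember the key in a random slot and reject for now */
    filter[rand() % DEMO_FILTER_SIZE] = key;
    return false;
}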
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1298 | /* |
| 1299 | * Resizes the JitTable. The requested size must be a power of 2; returns true on failure. |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1300 | * Stops all threads, and thus is a heavyweight operation. May only be called |
| 1301 | * by the compiler thread. |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1302 | */ |
| 1303 | bool dvmJitResizeJitTable( unsigned int size ) |
| 1304 | { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1305 | JitEntry *pNewTable; |
| 1306 | JitEntry *pOldTable; |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1307 | JitEntry tempEntry; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1308 | u4 newMask; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1309 | unsigned int oldSize; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1310 | unsigned int i; |
| 1311 | |
| Ben Cheng | 3f02aa4 | 2009-08-14 13:52:09 -0700 | [diff] [blame] | 1312 | assert(gDvmJit.pJitEntryTable != NULL); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1313 | assert(size && !(size & (size - 1))); /* Is power of 2? */ |
| 1314 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1315 | LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1316 | |
| 1317 | newMask = size - 1; |
| 1318 | |
| 1319 | if (size <= gDvmJit.jitTableSize) { |
| 1320 | return true; |
| 1321 | } |
| 1322 | |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1323 | /* Make sure requested size is compatible with chain field width */ |
| 1324 | tempEntry.u.info.chain = size; |
| 1325 | if (tempEntry.u.info.chain != size) { |
| 1326 | LOGD("Jit: JitTable request of %d too big", size); |
| 1327 | return true; |
| 1328 | } |
| 1329 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1330 | pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable)); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1331 | if (pNewTable == NULL) { |
| 1332 | return true; |
| 1333 | } |
| 1334 | for (i=0; i< size; i++) { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1335 | pNewTable[i].u.info.chain = size; /* Initialize chain termination */ |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1336 | } |
| 1337 | |
| 1338 | /* Stop all other interpreting/jit'ng threads */ |
| Ben Cheng | a8e64a7 | 2009-10-20 13:01:36 -0700 | [diff] [blame] | 1339 | dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1340 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1341 | pOldTable = gDvmJit.pJitEntryTable; |
| 1342 | oldSize = gDvmJit.jitTableSize; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1343 | |
| 1344 | dvmLockMutex(&gDvmJit.tableLock); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1345 | gDvmJit.pJitEntryTable = pNewTable; |
| 1346 | gDvmJit.jitTableSize = size; |
| 1347 | gDvmJit.jitTableMask = size - 1; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1348 | gDvmJit.jitTableEntriesUsed = 0; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1349 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1350 | for (i=0; i < oldSize; i++) { |
| 1351 | if (pOldTable[i].dPC) { |
| 1352 | JitEntry *p; |
| 1353 | u2 chain; |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1354 | p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/, |
| 1355 | pOldTable[i].u.info.isMethodEntry); |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1356 | p->codeAddress = pOldTable[i].codeAddress; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1357 | /* We need to preserve the new chain field, but copy the rest */ |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1358 | chain = p->u.info.chain; |
| 1359 | p->u = pOldTable[i].u; |
| 1360 | p->u.info.chain = chain; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1361 | } |
| 1362 | } |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1363 | |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1364 | dvmUnlockMutex(&gDvmJit.tableLock); |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1365 | |
| 1366 | free(pOldTable); |
| 1367 | |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1368 | /* Restart the world */ |
| Ben Cheng | a8e64a7 | 2009-10-20 13:01:36 -0700 | [diff] [blame] | 1369 | dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1370 | |
| 1371 | return false; |
| 1372 | } |
| 1373 | |
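/*
 * Sketch (hypothetical struct, not from the Dalvik sources) of the two
 * cheap sanity checks used above: the power-of-two test and the bitfield
 * round-trip that verifies the new size still fits in the chain field.
 */
typedef struct {
    unsigned int chain : 16;    /* illustrative width only */
} DemoChainInfo;

static bool demoSizeOk(unsigned int size)
{
    DemoChainInfo tmp;
    if (size == 0 || (size & (size - 1)) != 0)
        return false;           /* not a power of two */
    tmp.chain = size;
    return tmp.chain == size;   /* false if the bitfield truncated the value */
}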
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1374 | /* |
| Ben Cheng | 60c24f4 | 2010-01-04 12:29:56 -0800 | [diff] [blame] | 1375 | * Reset the JitTable to the initial clean state. |
| 1376 | */ |
| 1377 | void dvmJitResetTable(void) |
| 1378 | { |
| 1379 | JitEntry *jitEntry = gDvmJit.pJitEntryTable; |
| 1380 | unsigned int size = gDvmJit.jitTableSize; |
| 1381 | unsigned int i; |
| 1382 | |
| 1383 | dvmLockMutex(&gDvmJit.tableLock); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1384 | |
| 1385 | /* Note: if we need to preserve any existing counts, do so here. */ |
| buzbee | 38c4134 | 2011-01-11 15:45:49 -0800 | [diff] [blame] | 1386 | if (gDvmJit.pJitTraceProfCounters) { |
| 1387 | for (i=0; i < JIT_PROF_BLOCK_BUCKETS; i++) { |
| 1388 | if (gDvmJit.pJitTraceProfCounters->buckets[i]) |
| 1389 | memset((void *) gDvmJit.pJitTraceProfCounters->buckets[i], |
| 1390 | 0, sizeof(JitTraceCounter_t) * JIT_PROF_BLOCK_ENTRIES); |
| 1391 | } |
| 1392 | gDvmJit.pJitTraceProfCounters->next = 0; |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1393 | } |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1394 | |
| Ben Cheng | 60c24f4 | 2010-01-04 12:29:56 -0800 | [diff] [blame] | 1395 | memset((void *) jitEntry, 0, sizeof(JitEntry) * size); |
| 1396 | for (i=0; i< size; i++) { |
| 1397 | jitEntry[i].u.info.chain = size; /* Initialize chain termination */ |
| 1398 | } |
| 1399 | gDvmJit.jitTableEntriesUsed = 0; |
| 1400 | dvmUnlockMutex(&gDvmJit.tableLock); |
| 1401 | } |
| 1402 | |
| 1403 | /* |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1404 | * Return the address of the next trace profile counter. This address |
| 1405 | * will be embedded in the generated code for the trace, and thus cannot |
| 1406 | * change while the trace exists. |
| 1407 | */ |
| 1408 | JitTraceCounter_t *dvmJitNextTraceCounter() |
| 1409 | { |
| 1410 | int idx = gDvmJit.pJitTraceProfCounters->next / JIT_PROF_BLOCK_ENTRIES; |
| 1411 | int elem = gDvmJit.pJitTraceProfCounters->next % JIT_PROF_BLOCK_ENTRIES; |
| 1412 | JitTraceCounter_t *res; |
| 1413 | /* Lazily allocate blocks of counters */ |
| 1414 | if (!gDvmJit.pJitTraceProfCounters->buckets[idx]) { |
| 1415 | JitTraceCounter_t *p = |
| 1416 | (JitTraceCounter_t*) calloc(JIT_PROF_BLOCK_ENTRIES, sizeof(*p)); |
| 1417 | if (!p) { |
| 1418 | LOGE("Failed to allocate block of trace profile counters"); |
| 1419 | dvmAbort(); |
| 1420 | } |
| 1421 | gDvmJit.pJitTraceProfCounters->buckets[idx] = p; |
| 1422 | } |
| 1423 | res = &gDvmJit.pJitTraceProfCounters->buckets[idx][elem]; |
| 1424 | gDvmJit.pJitTraceProfCounters->next++; |
| 1425 | return res; |
| 1426 | } |
| 1427 | |
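/*
 * Worked example of the bucket arithmetic above (assuming, purely for
 * illustration, JIT_PROF_BLOCK_ENTRIES == 512):
 *   next == 1037  ->  idx = 1037 / 512 = 2,  elem = 1037 % 512 = 13
 * so the counter handed out lives at buckets[2][13], and buckets[2] is
 * calloc'd the first time any counter in that block is requested.
 */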
| 1428 | /* |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1429 | * Float/double conversion requires clamping to min and max of integer form. If |
| 1430 | * the target doesn't support this natively, use these helpers. |
| 1431 | */ |
| 1432 | s8 dvmJitd2l(double d) |
| 1433 | { |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1434 | static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL; |
| 1435 | static const double kMinLong = (double)(s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1436 | if (d >= kMaxLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1437 | return (s8)0x7fffffffffffffffULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1438 | else if (d <= kMinLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1439 | return (s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1440 | else if (d != d) // NaN case |
| 1441 | return 0; |
| 1442 | else |
| 1443 | return (s8)d; |
| 1444 | } |
| 1445 | |
| 1446 | s8 dvmJitf2l(float f) |
| 1447 | { |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1448 | static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL; |
| 1449 | static const float kMinLong = (float)(s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1450 | if (f >= kMaxLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1451 | return (s8)0x7fffffffffffffffULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1452 | else if (f <= kMinLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1453 | return (s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1454 | else if (f != f) // NaN case |
| 1455 | return 0; |
| 1456 | else |
| 1457 | return (s8)f; |
| 1458 | } |
| 1459 | |
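/*
 * Minimal usage sketch for the clamping helpers above; expected results are
 * noted in the comments (demoD2lExamples itself is illustrative only).
 */
#include <math.h>

static void demoD2lExamples(void)
{
    s8 a = dvmJitd2l(1e30);        /* above max: clamps to 0x7fffffffffffffffLL */
    s8 b = dvmJitd2l(-1e30);       /* below min: clamps to (s8)0x8000000000000000ULL */
    s8 c = dvmJitd2l((double)NAN); /* NaN: defined to return 0 */
    s8 d = dvmJitf2l(123.9f);      /* in range: truncates toward zero -> 123 */
    (void)a; (void)b; (void)c; (void)d;
}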
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1460 | /* Should only be called by the compiler thread */ |
| 1461 | void dvmJitChangeProfileMode(TraceProfilingModes newState) |
| 1462 | { |
| 1463 | if (gDvmJit.profileMode != newState) { |
| 1464 | gDvmJit.profileMode = newState; |
| 1465 | dvmJitUnchainAll(); |
| 1466 | } |
| 1467 | } |
| 1468 | |
| 1469 | void dvmJitTraceProfilingOn() |
| 1470 | { |
| 1471 | if (gDvmJit.profileMode == kTraceProfilingPeriodicOff) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1472 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1473 | (void*) kTraceProfilingPeriodicOn); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1474 | else if (gDvmJit.profileMode == kTraceProfilingDisabled) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1475 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1476 | (void*) kTraceProfilingContinuous); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1477 | } |
| 1478 | |
| 1479 | void dvmJitTraceProfilingOff() |
| 1480 | { |
| 1481 | if (gDvmJit.profileMode == kTraceProfilingPeriodicOn) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1482 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1483 | (void*) kTraceProfilingPeriodicOff); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1484 | else if (gDvmJit.profileMode == kTraceProfilingContinuous) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1485 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1486 | (void*) kTraceProfilingDisabled); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1487 | } |
| 1488 | |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1489 | /* |
| buzbee | 99e3e6e | 2011-03-29 10:26:07 -0700 | [diff] [blame] | 1490 | * Update JIT-specific info in Thread structure for a single thread |
| 1491 | */ |
| 1492 | void dvmJitUpdateThreadStateSingle(Thread* thread) |
| 1493 | { |
| 1494 | thread->pJitProfTable = gDvmJit.pProfTable; |
| 1495 | thread->jitThreshold = gDvmJit.threshold; |
| 1496 | } |
| 1497 | |
| 1498 | /* |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1499 | * Walk through the thread list and refresh all local copies of |
| 1500 | * JIT global state (which was placed there for fast access). |
| 1501 | */ |
| buzbee | 99e3e6e | 2011-03-29 10:26:07 -0700 | [diff] [blame] | 1502 | void dvmJitUpdateThreadStateAll() |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1503 | { |
| 1504 | Thread* self = dvmThreadSelf(); |
| 1505 | Thread* thread; |
| 1506 | |
| 1507 | dvmLockThreadList(self); |
| 1508 | for (thread = gDvm.threadList; thread != NULL; thread = thread->next) { |
| buzbee | 99e3e6e | 2011-03-29 10:26:07 -0700 | [diff] [blame] | 1509 | dvmJitUpdateThreadStateSingle(thread); |
| buzbee | 9a3147c | 2011-03-02 15:43:48 -0800 | [diff] [blame] | 1510 | } |
| 1511 | dvmUnlockThreadList(); |
| 1512 | |
| 1513 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1514 | #endif /* WITH_JIT */ |