| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2008 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | #ifdef WITH_JIT |
| 17 | |
| 18 | /* |
| 19 | * Target independent portion of Android's Jit |
| 20 | */ |
| 21 | |
| 22 | #include "Dalvik.h" |
| 23 | #include "Jit.h" |
| 24 | |
| 25 | |
| Dan Bornstein | df4daaf | 2010-12-01 14:23:44 -0800 | [diff] [blame] | 26 | #include "libdex/DexOpcodes.h" |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 27 | #include <unistd.h> |
| 28 | #include <pthread.h> |
| 29 | #include <sys/time.h> |
| 30 | #include <signal.h> |
| 31 | #include "compiler/Compiler.h" |
| Bill Buzbee | 6e963e1 | 2009-06-17 16:56:19 -0700 | [diff] [blame] | 32 | #include "compiler/CompilerUtility.h" |
| 33 | #include "compiler/CompilerIR.h" |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 34 | #include <errno.h> |
| 35 | |
| Jeff Hao | 97319a8 | 2009-08-12 16:57:15 -0700 | [diff] [blame] | 36 | #if defined(WITH_SELF_VERIFICATION) |
| 37 | /* Allocate space for per-thread ShadowSpace data structures */ |
| 38 | void* dvmSelfVerificationShadowSpaceAlloc(Thread* self) |
| 39 | { |
| 40 | self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace)); |
| 41 | if (self->shadowSpace == NULL) |
| 42 | return NULL; |
| 43 | |
| 44 | self->shadowSpace->registerSpaceSize = REG_SPACE; |
| 45 | self->shadowSpace->registerSpace = |
| 46 | (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int)); |
| 47 | |
| 48 | return self->shadowSpace->registerSpace; |
| 49 | } |
| 50 | |
| 51 | /* Free per-thread ShadowSpace data structures */ |
| 52 | void dvmSelfVerificationShadowSpaceFree(Thread* self) |
| 53 | { |
| 54 | free(self->shadowSpace->registerSpace); |
| 55 | free(self->shadowSpace); |
| 56 | } |
| 57 | |
/*
 * Save out PC, FP, thread state, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 *
 * The set of saved state from the Thread structure is:
 *     pc (Dalvik PC)
 *     fp (Dalvik FP)
 *     retval
 *     method
 *     methodClassDex
 *     interpStackEnd
 *
 * Side effect: redirects the thread's interpreter frame pointer and stack
 * end into the shadow register space, so subsequent interpretation runs
 * against the shadow copy of the frame.
 */
void* dvmSelfVerificationSaveState(const u2* pc, u4* fp,
                                   Thread* self, int targetTrace)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    /* Bytes below fp that get copied: outs of this frame plus its save area */
    unsigned preBytes = self->interpSave.method->outsSize*4 +
        sizeof(StackSaveArea);
    /* Bytes at/above fp that get copied: the frame's virtual registers */
    unsigned postBytes = self->interpSave.method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    /* A save while not idle means the previous trace never completed its
     * save/restore cycle -- dump diagnostics but carry on. */
    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    if (self->entryPoint == kInterpEntryResume) {
        self->entryPoint = kInterpEntryInstr;
#if 0
        /* Tracking the success rate of resume after single-stepping */
        if (self->jitResumeDPC == pc) {
            LOGD("SV single step resumed at %p", pc);
        }
        else {
            LOGD("real %p DPC %p NPC %p", pc, self->jitResumeDPC,
                 self->jitResumeNPC);
        }
#endif
    }

    // Dynamically grow shadow register space if necessary
    if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
        /* NOTE(review): calloc result is unchecked here -- an OOM would
         * crash on the memcpy below rather than fail gracefully. */
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->retval = self->retval;
    shadowSpace->interpStackEnd = self->interpStackEnd;

    /*
     * Also remember the current method/dex so RestoreState can put them
     * back even if the trace ends with a return or invoke that changes
     * the current method.
     */
    shadowSpace->method = self->interpSave.method;
    shadowSpace->methodClassDex = self->interpSave.methodClassDex;

    /* Place the shadow frame so its registers end at the top of the
     * shadow register space. */
    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;

    /* Redirect the interpreter into the shadow frame */
    self->interpSave.fp = (u4*)shadowSpace->shadowFP;
    self->interpStackEnd = (u1*)shadowSpace->registerSpace;

    // Create a copy of the stack
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}
| 142 | |
/*
 * Save ending PC, FP and compiled code exit point to shadow space.
 * Return a pointer to the shadow space for JIT to restore state.
 *
 * Also decides the next self-verification state from the JIT exit reason,
 * and restores the thread's interpreter state (pc/fp/method/retval/stack
 * end) to the values captured by dvmSelfVerificationSaveState so the
 * interpreter can re-execute the trace for comparison.
 */
void* dvmSelfVerificationRestoreState(const u2* pc, u4* fp,
                                      SelfVerificationState exitState,
                                      Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    shadowSpace->endPC = pc;
    shadowSpace->endShadowFP = fp;
    shadowSpace->jitExitState = exitState;

    //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
    //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    //    (int)pc);

    /* A restore is only expected while in the kSVSStart state */
    if (shadowSpace->selfVerificationState != kSVSStart) {
        LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
            (int)shadowSpace->endPC);
        LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
        LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
            (int)shadowSpace->endShadowFP);
    }

    // Special case when punting after a single instruction
    if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
        shadowSpace->selfVerificationState = kSVSIdle;
    } else if (exitState == kSVSBackwardBranch && pc < shadowSpace->startPC) {
        /*
         * Consider a trace with a backward branch:
         *   1: ..
         *   2: ..
         *   3: ..
         *   4: ..
         *   5: Goto {1 or 2 or 3 or 4}
         *
         * If instruction 5 goes to 1 and there is no single-step
         * instruction in the loop, pc is equal to shadowSpace->startPC and
         * we will honor the backward branch condition.
         *
         * If the single-step instruction is outside the loop, then after
         * resuming in the trace the startPC will be less than pc so we will
         * also honor the backward branch condition.
         *
         * If the single-step is inside the loop, we won't hit the same endPC
         * twice when the interpreter is re-executing the trace so we want to
         * cancel the backward branch condition. In this case it can be
         * detected as the endPC (ie pc) will be less than startPC.
         */
        shadowSpace->selfVerificationState = kSVSNormal;
    } else {
        shadowSpace->selfVerificationState = exitState;
    }

    /* Restore state before returning */
    self->interpSave.pc = shadowSpace->startPC;
    self->interpSave.fp = shadowSpace->fp;
    self->interpSave.method = shadowSpace->method;
    self->interpSave.methodClassDex = shadowSpace->methodClassDex;
    self->retval = shadowSpace->retval;
    self->interpStackEnd = shadowSpace->interpStackEnd;

    return shadowSpace;
}
| 211 | |
/*
 * Dump numWords virtual-register words from addr, marking any word that
 * differs from the reference copy in addrRef with a trailing " X".
 */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    for (int reg = 0; reg < numWords; reg++) {
        const char* divergenceMark = (addr[reg] != addrRef[reg]) ? " X" : "";
        LOGD("(v%d) 0x%8x%s", reg, addr[reg], divergenceMark);
    }
}
| 221 | |
/* Print values maintained in shadowSpace */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    /* Size in bytes of the shadow frame: from shadowFP up to the top of
     * the shadow register space. */
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    /* curFrame below the saved fp implies a new frame was pushed (invoke);
     * compute its local-register and frame sizes for the dump. */
    if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
    LOGD("Method: %s", shadowSpace->method->name);
    LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}
| 253 | |
| 254 | /* Print decoded instructions in the current trace */ |
| 255 | static void selfVerificationDumpTrace(const u2* pc, Thread* self) |
| 256 | { |
| 257 | ShadowSpace* shadowSpace = self->shadowSpace; |
| 258 | StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame); |
| Ben Cheng | bcdc1de | 2009-08-21 16:18:46 -0700 | [diff] [blame] | 259 | int i, addr, offset; |
| 260 | DecodedInstruction *decInsn; |
| Jeff Hao | 97319a8 | 2009-08-12 16:57:15 -0700 | [diff] [blame] | 261 | |
| 262 | LOGD("********** SHADOW TRACE DUMP **********"); |
| 263 | for (i = 0; i < shadowSpace->traceLength; i++) { |
| Ben Cheng | bcdc1de | 2009-08-21 16:18:46 -0700 | [diff] [blame] | 264 | addr = shadowSpace->trace[i].addr; |
| 265 | offset = (int)((u2*)addr - stackSave->method->insns); |
| 266 | decInsn = &(shadowSpace->trace[i].decInsn); |
| 267 | /* Not properly decoding instruction, some registers may be garbage */ |
| Andy McFadden | c6b25c7 | 2010-06-22 11:01:20 -0700 | [diff] [blame] | 268 | LOGD("0x%x: (0x%04x) %s", |
| Dan Bornstein | 9a1f816 | 2010-12-01 17:02:26 -0800 | [diff] [blame] | 269 | addr, offset, dexGetOpcodeName(decInsn->opcode)); |
| Jeff Hao | 97319a8 | 2009-08-12 16:57:15 -0700 | [diff] [blame] | 270 | } |
| 271 | } |
| 272 | |
| Ben Cheng | bcdc1de | 2009-08-21 16:18:46 -0700 | [diff] [blame] | 273 | /* Code is forced into this spin loop when a divergence is detected */ |
| Ben Cheng | ccd6c01 | 2009-10-15 14:52:45 -0700 | [diff] [blame] | 274 | static void selfVerificationSpinLoop(ShadowSpace *shadowSpace) |
| Ben Cheng | bcdc1de | 2009-08-21 16:18:46 -0700 | [diff] [blame] | 275 | { |
| Ben Cheng | ccd6c01 | 2009-10-15 14:52:45 -0700 | [diff] [blame] | 276 | const u2 *startPC = shadowSpace->startPC; |
| Ben Cheng | 88a0f97 | 2010-02-24 15:00:40 -0800 | [diff] [blame] | 277 | JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL); |
| Ben Cheng | ccd6c01 | 2009-10-15 14:52:45 -0700 | [diff] [blame] | 278 | if (desc) { |
| 279 | dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc); |
| Ben Cheng | 1357e94 | 2010-02-10 17:21:39 -0800 | [diff] [blame] | 280 | /* |
| 281 | * This function effectively terminates the VM right here, so not |
| 282 | * freeing the desc pointer when the enqueuing fails is acceptable. |
| 283 | */ |
| Ben Cheng | ccd6c01 | 2009-10-15 14:52:45 -0700 | [diff] [blame] | 284 | } |
| Ben Cheng | bcdc1de | 2009-08-21 16:18:46 -0700 | [diff] [blame] | 285 | gDvmJit.selfVerificationSpin = true; |
| 286 | while(gDvmJit.selfVerificationSpin) sleep(10); |
| 287 | } |
| 288 | |
/*
 * Manage self verification while in the debug interpreter.
 *
 * Called once per interpreted instruction while re-executing a trace.
 * Returns true when verification of the current trace has finished
 * (endPC reached and compared, or the trace-length limit was hit);
 * returns false while the trace is still being stepped through, after
 * recording the current instruction in the shadow trace log.
 *
 * On a detected divergence (registers, callee frame, or shadowed heap
 * writes) this dumps diagnostics and never returns -- it enters
 * selfVerificationSpinLoop().
 */
static bool selfVerificationDebugInterp(const u2* pc, Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    SelfVerificationState state = shadowSpace->selfVerificationState;

    DecodedInstruction decInsn;
    dexDecodeInstruction(pc, &decInsn);

    //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, dexGetOpcodeName(decInsn.opcode));

    /* Being here in Idle/Start means Save/Restore didn't run as expected */
    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /*
     * Skip endPC once when trace has a backward branch. If the SV state is
     * single step, keep it that way.
     */
    if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
        (state != kSVSBackwardBranch && state != kSVSSingleStep)) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /* Check that the current pc is the end of the trace */
    if ((state == kSVSDebugInterp || state == kSVSSingleStep) &&
        pc == shadowSpace->endPC) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space: compare the interpreted frame against the
         * shadow frame produced by the compiled code. */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp,
                                           (int*)shadowSpace->shadowFP,
                                           frameBytes/4);
            LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                                           (int*)shadowSpace->fp,
                                           frameBytes/4);
            selfVerificationSpinLoop(shadowSpace);
        }
        /* Check new frame if it exists (invokes only) */
        if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
            StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
            /* Skip the callee's uninitialized locals; compare only ins and
             * the save area between the two frames. */
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->curFrame - localRegs;
            if (memcmp(((char*)self->curFrame)+localRegs,
                ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
                    (int)self->curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->curFrame,
                                               (int*)shadowSpace->endShadowFP,
                                               (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                                               (int*)self->curFrame,
                                               (frameBytes2+localRegs)/4);
                selfVerificationSpinLoop(shadowSpace);
            }
        }

        /* Check memory space: every shadowed heap word must now hold the
         * value the compiled code wrote. */
        bool memDiff = false;
        ShadowHeap* heapSpacePtr;
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int memData = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != memData) {
                LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
                LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
                    heapSpacePtr->addr, memData, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                memDiff = true;
            }
        }
        if (memDiff) selfVerificationSpinLoop(shadowSpace);

        /*
         * Switch to JIT single step mode to stay in the debug interpreter for
         * one more instruction
         */
        if (state == kSVSSingleStep) {
            self->jitState = kJitSingleStepEnd;
        }
        return true;

    /* If end not been reached, make sure max length not exceeded */
    } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
        selfVerificationSpinLoop(shadowSpace);

        return true;
    }
    /* Log the instruction address and decoded instruction for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    shadowSpace->traceLength++;

    return false;
}
| 415 | #endif |
| 416 | |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 417 | /* |
| 418 | * If one of our fixed tables or the translation buffer fills up, |
| 419 | * call this routine to avoid wasting cycles on future translation requests. |
| 420 | */ |
| 421 | void dvmJitStopTranslationRequests() |
| 422 | { |
| 423 | /* |
| 424 | * Note 1: This won't necessarily stop all translation requests, and |
| 425 | * operates on a delayed mechanism. Running threads look to the copy |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 426 | * of this value in their private thread structures and won't see |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 427 | * this change until it is refreshed (which happens on interpreter |
| 428 | * entry). |
| 429 | * Note 2: This is a one-shot memory leak on this table. Because this is a |
| 430 | * permanent off switch for Jit profiling, it is a one-time leak of 1K |
| 431 | * bytes, and no further attempt will be made to re-allocate it. Can't |
| 432 | * free it because some thread may be holding a reference. |
| 433 | */ |
| Bill Buzbee | b1d8044 | 2009-12-17 14:55:21 -0800 | [diff] [blame] | 434 | gDvmJit.pProfTable = NULL; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 435 | } |
| 436 | |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 437 | #if defined(WITH_JIT_TUNING) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 438 | /* Convenience function to increment counter from assembly code */ |
| Ben Cheng | 6c10a97 | 2009-10-29 14:39:18 -0700 | [diff] [blame] | 439 | void dvmBumpNoChain(int from) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 440 | { |
| Ben Cheng | 6c10a97 | 2009-10-29 14:39:18 -0700 | [diff] [blame] | 441 | gDvmJit.noChainExit[from]++; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 442 | } |
| 443 | |
| 444 | /* Convenience function to increment counter from assembly code */ |
| 445 | void dvmBumpNormal() |
| 446 | { |
| Ben Cheng | 6c10a97 | 2009-10-29 14:39:18 -0700 | [diff] [blame] | 447 | gDvmJit.normalExit++; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 448 | } |
| 449 | |
| 450 | /* Convenience function to increment counter from assembly code */ |
| 451 | void dvmBumpPunt(int from) |
| 452 | { |
| Ben Cheng | 6c10a97 | 2009-10-29 14:39:18 -0700 | [diff] [blame] | 453 | gDvmJit.puntExit++; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 454 | } |
| 455 | #endif |
| 456 | |
| 457 | /* Dumps debugging & tuning stats to the log */ |
| 458 | void dvmJitStats() |
| 459 | { |
| 460 | int i; |
| 461 | int hit; |
| 462 | int not_hit; |
| 463 | int chains; |
| Bill Buzbee | 9a8c75a | 2009-11-08 14:31:20 -0800 | [diff] [blame] | 464 | int stubs; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 465 | if (gDvmJit.pJitEntryTable) { |
| Bill Buzbee | 9a8c75a | 2009-11-08 14:31:20 -0800 | [diff] [blame] | 466 | for (i=0, stubs=chains=hit=not_hit=0; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 467 | i < (int) gDvmJit.jitTableSize; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 468 | i++) { |
| Bill Buzbee | 9a8c75a | 2009-11-08 14:31:20 -0800 | [diff] [blame] | 469 | if (gDvmJit.pJitEntryTable[i].dPC != 0) { |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 470 | hit++; |
| Bill Buzbee | 9a8c75a | 2009-11-08 14:31:20 -0800 | [diff] [blame] | 471 | if (gDvmJit.pJitEntryTable[i].codeAddress == |
| Bill Buzbee | bd04724 | 2010-05-13 13:02:53 -0700 | [diff] [blame] | 472 | dvmCompilerGetInterpretTemplate()) |
| Bill Buzbee | 9a8c75a | 2009-11-08 14:31:20 -0800 | [diff] [blame] | 473 | stubs++; |
| 474 | } else |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 475 | not_hit++; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 476 | if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 477 | chains++; |
| 478 | } |
| Ben Cheng | 72621c9 | 2010-03-10 13:12:55 -0800 | [diff] [blame] | 479 | LOGD("JIT: table size is %d, entries used is %d", |
| Ben Cheng | 86717f7 | 2010-03-05 15:27:21 -0800 | [diff] [blame] | 480 | gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed); |
| Ben Cheng | 72621c9 | 2010-03-10 13:12:55 -0800 | [diff] [blame] | 481 | LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s", |
| 482 | hit, not_hit + hit, chains, gDvmJit.threshold, |
| 483 | gDvmJit.blockingMode ? "Blocking" : "Non-blocking"); |
| Ben Cheng | 86717f7 | 2010-03-05 15:27:21 -0800 | [diff] [blame] | 484 | |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 485 | #if defined(WITH_JIT_TUNING) |
| 486 | LOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches); |
| 487 | |
| Ben Cheng | 72621c9 | 2010-03-10 13:12:55 -0800 | [diff] [blame] | 488 | LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt", |
| 489 | gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound, |
| 490 | gDvmJit.normalExit, gDvmJit.puntExit); |
| Ben Cheng | 452efba | 2010-04-30 15:14:00 -0700 | [diff] [blame] | 491 | |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 492 | LOGD("JIT: ICHits: %d", gDvmICHitCount); |
| 493 | |
| Ben Cheng | 72621c9 | 2010-03-10 13:12:55 -0800 | [diff] [blame] | 494 | LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, " |
| 495 | "%d switch overflow", |
| 496 | gDvmJit.noChainExit[kInlineCacheMiss], |
| 497 | gDvmJit.noChainExit[kCallsiteInterpreted], |
| 498 | gDvmJit.noChainExit[kSwitchOverflow]); |
| Ben Cheng | 86717f7 | 2010-03-05 15:27:21 -0800 | [diff] [blame] | 499 | |
| Ben Cheng | b88ec3c | 2010-05-17 12:50:33 -0700 | [diff] [blame] | 500 | LOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, " |
| 501 | "%d dropped", |
| 502 | gDvmJit.icPatchInit, gDvmJit.icPatchRejected, |
| 503 | gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued, |
| Ben Cheng | 452efba | 2010-04-30 15:14:00 -0700 | [diff] [blame] | 504 | gDvmJit.icPatchDropped); |
| 505 | |
| Ben Cheng | 86717f7 | 2010-03-05 15:27:21 -0800 | [diff] [blame] | 506 | LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return", |
| 507 | gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic, |
| 508 | gDvmJit.invokeNative, gDvmJit.returnOp); |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 509 | LOGD("JIT: Inline: %d mgetter, %d msetter, %d pgetter, %d psetter", |
| 510 | gDvmJit.invokeMonoGetterInlined, gDvmJit.invokeMonoSetterInlined, |
| 511 | gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined); |
| Ben Cheng | 86717f7 | 2010-03-05 15:27:21 -0800 | [diff] [blame] | 512 | LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000); |
| 513 | LOGD("JIT: Avg unit compilation time: %llu us", |
| Andy McFadden | b7a797d | 2011-02-24 16:55:40 -0800 | [diff] [blame^] | 514 | gDvmJit.numCompilations == 0 ? 0 : |
| Ben Cheng | 86717f7 | 2010-03-05 15:27:21 -0800 | [diff] [blame] | 515 | gDvmJit.jitTime / gDvmJit.numCompilations); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 516 | #endif |
| Ben Cheng | 86717f7 | 2010-03-05 15:27:21 -0800 | [diff] [blame] | 517 | |
| Bill Buzbee | 9a8c75a | 2009-11-08 14:31:20 -0800 | [diff] [blame] | 518 | LOGD("JIT: %d Translation chains, %d interp stubs", |
| 519 | gDvmJit.translationChains, stubs); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 520 | if (gDvmJit.profileMode == kTraceProfilingContinuous) { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 521 | dvmCompilerSortAndPrintTraceProfiles(); |
| Bill Buzbee | 6e963e1 | 2009-06-17 16:56:19 -0700 | [diff] [blame] | 522 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 523 | } |
| 524 | } |
| 525 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 526 | |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 527 | /* End current trace after last successful instruction */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 528 | void dvmJitEndTraceSelect(Thread* self) |
| Bill Buzbee | d726991 | 2009-11-10 14:31:32 -0800 | [diff] [blame] | 529 | { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 530 | if (self->jitState == kJitTSelect) |
| 531 | self->jitState = kJitTSelectEnd; |
| Bill Buzbee | d726991 | 2009-11-10 14:31:32 -0800 | [diff] [blame] | 532 | } |
| 533 | |
/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 *
 * Entries are keyed by (dPC, isMethodEntry); the same Dalvik PC may have
 * both a trace entry and a method entry. Collisions are resolved by an
 * intra-table chain field (u.info.chain), where chain == jitTableSize acts
 * as the end-of-chain sentinel.
 *
 * The initial chain walk is done lock-free; the lock (unless the caller
 * already holds it, per callerLocked) is only taken on the insertion path.
 */
static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked,
                              bool isMethodEntry)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /*
     * Walk the bucket chain to find an exact match for our PC and trace/method
     * type
     */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           ((gDvmJit.pJitEntryTable[idx].dPC != dPC) ||
            (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry !=
             isMethodEntry))) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC ||
        gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) {
        /*
         * No match. Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        if (!callerLocked)
            dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case). Otherwise we're going
         * to have to find a free slot and chain it.
         */
        ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            /* Resume the chain walk under the lock - another thread may
             * have extended the chain (or inserted our key) meanwhile. */
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                        isMethodEntry) {
                    /* Another thread got there first for this dPC */
                    if (!callerLocked)
                        dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0; /* Wraparound */
                /* Stop on a free slot, or after scanning the whole table */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (android_atomic_release_cas(oldValue.infoWord,
                         newValue.infoWord,
                         &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry;
            /*
             * Initialize codeAddress and allocate the slot. Must
             * happen in this order (once dPC is set, the entry is live).
             */
            android_atomic_release_store((int32_t)dPC,
                 (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC);
            /* NOTE(review): this plain store repeats the atomic release
             * store above and looks redundant - confirm before removing */
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        if (!callerLocked)
            dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 634 | |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 635 | /* |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 636 | * Append the class ptr of "this" and the current method ptr to the current |
| 637 | * trace. That is, the trace runs will contain the following components: |
| 638 | * + trace run that ends with an invoke (existing entry) |
| 639 | * + thisClass (new) |
| 640 | * + calleeMethod (new) |
| 641 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 642 | static void insertClassMethodInfo(Thread* self, |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 643 | const ClassObject* thisClass, |
| 644 | const Method* calleeMethod, |
| 645 | const DecodedInstruction* insn) |
| 646 | { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 647 | int currTraceRun = ++self->currTraceRun; |
| 648 | self->trace[currTraceRun].meta = (void *) thisClass; |
| 649 | currTraceRun = ++self->currTraceRun; |
| 650 | self->trace[currTraceRun].meta = (void *) calleeMethod; |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 651 | } |
| 652 | |
| 653 | /* |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 654 | * Check if the next instruction following the invoke is a move-result and if |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 655 | * so add it to the trace. That is, this will add the trace run that includes |
| 656 | * the move-result to the trace list. |
| 657 | * |
| 658 | * + trace run that ends with an invoke (existing entry) |
| 659 | * + thisClass (existing entry) |
| 660 | * + calleeMethod (existing entry) |
| 661 | * + move result (new) |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 662 | * |
| 663 | * lastPC, len, offset are all from the preceding invoke instruction |
| 664 | */ |
| 665 | static void insertMoveResult(const u2 *lastPC, int len, int offset, |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 666 | Thread *self) |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 667 | { |
| 668 | DecodedInstruction nextDecInsn; |
| 669 | const u2 *moveResultPC = lastPC + len; |
| 670 | |
| Dan Bornstein | 5432239 | 2010-11-17 14:16:56 -0800 | [diff] [blame] | 671 | dexDecodeInstruction(moveResultPC, &nextDecInsn); |
| Dan Bornstein | 9a1f816 | 2010-12-01 17:02:26 -0800 | [diff] [blame] | 672 | if ((nextDecInsn.opcode != OP_MOVE_RESULT) && |
| 673 | (nextDecInsn.opcode != OP_MOVE_RESULT_WIDE) && |
| 674 | (nextDecInsn.opcode != OP_MOVE_RESULT_OBJECT)) |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 675 | return; |
| 676 | |
| 677 | /* We need to start a new trace run */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 678 | int currTraceRun = ++self->currTraceRun; |
| 679 | self->currRunHead = moveResultPC; |
| 680 | self->trace[currTraceRun].frag.startOffset = offset + len; |
| 681 | self->trace[currTraceRun].frag.numInsts = 1; |
| 682 | self->trace[currTraceRun].frag.runEnd = false; |
| 683 | self->trace[currTraceRun].frag.hint = kJitHintNone; |
| 684 | self->trace[currTraceRun].frag.isCode = true; |
| 685 | self->totalTraceLen++; |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 686 | |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 687 | self->currRunLen = dexGetWidthFromInstruction(moveResultPC); |
| Ben Cheng | d44faf5 | 2010-06-02 15:33:51 -0700 | [diff] [blame] | 688 | } |
| 689 | |
/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted. This is the primary trace
 * selection function. NOTE: return instructions are handled a little
 * differently. In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation. If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request. This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected. However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes. This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 *
 * Returns non-zero when the caller may switch out of the debug/portable
 * interpreter (only when jitState has settled to kJitDone/kJitNot and no
 * debugger/profiler or single-step condition forces us to stay).
 */
int dvmCheckJit(const u2* pc, Thread* self, const ClassObject* thisClass,
                const Method* curMethod)
{
    int flags, len;
    int switchInterp = false;
    bool debugOrProfile = dvmDebuggerOrProfilerActive();
    /* Stay in the dbg interpreter for the next instruction */
    bool stayOneMoreInst = false;

    /*
     * Bug 2710533 - dalvik crash when disconnecting debugger
     *
     * Reset the entry point to the default value. If needed it will be set to a
     * specific value in the corresponding case statement (eg kJitSingleStepEnd)
     */
    self->entryPoint = kInterpEntryInstr;

    /* Prepare to handle last PC and stage the current PC */
    const u2 *lastPC = self->lastPC;
    self->lastPC = pc;

    switch (self->jitState) {
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(lastPC, &decInsn);

            /*
             * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
             * to the amount of space it takes to generate the chaining
             * cells.
             */
            if (self->totalTraceLen != 0 &&
                (decInsn.opcode == OP_PACKED_SWITCH ||
                 decInsn.opcode == OP_SPARSE_SWITCH)) {
                self->jitState = kJitTSelectEnd;
                break;
            }


#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s", dexGetOpcodeName(decInsn.opcode));
#endif
            flags = dexGetFlagsFromOpcode(decInsn.opcode);
            len = dexGetWidthFromInstruction(lastPC);
            offset = lastPC - self->interpSave.method->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(self->interpSave.method));
            /* lastPC is not contiguous with the current run - open a new one */
            if (lastPC != self->currRunHead + self->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++self->currTraceRun;
                self->currRunLen = 0;
                self->currRunHead = (u2*)lastPC;
                self->trace[currTraceRun].frag.startOffset = offset;
                self->trace[currTraceRun].frag.numInsts = 0;
                self->trace[currTraceRun].frag.runEnd = false;
                self->trace[currTraceRun].frag.hint = kJitHintNone;
                self->trace[currTraceRun].frag.isCode = true;
            }
            self->trace[self->currTraceRun].frag.numInsts++;
            self->totalTraceLen++;
            self->currRunLen += len;

            /*
             * If the last instruction is an invoke, we will try to sneak in
             * the move-result* (if existent) into a separate trace run.
             */
            int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;

            /* Will probably never hit this with the current trace builder */
            if (self->currTraceRun ==
                (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
                self->jitState = kJitTSelectEnd;
            }

            /* Any control-flow change (other than unconditional goto, which
             * the trace follows through) ends the basic block and the trace */
            if (!dexIsGoto(flags) &&
                  ((flags & (kInstrCanBranch |
                             kInstrCanSwitch |
                             kInstrCanReturn |
                             kInstrInvoke)) != 0)) {
                self->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     dexGetOpcodeName(decInsn.opcode));
#endif

                /*
                 * If the current invoke is a {virtual,interface}, get the
                 * current class/method pair into the trace as well.
                 * If the next instruction is a variant of move-result, insert
                 * it to the trace too.
                 */
                if (flags & kInstrInvoke) {
                    insertClassMethodInfo(self, thisClass, curMethod,
                                          &decInsn);
                    insertMoveResult(lastPC, len, offset, self);
                }
            }
            /* Break on throw or self-loop */
            if ((decInsn.opcode == OP_THROW) || (lastPC == pc)){
                self->jitState = kJitTSelectEnd;
            }
            if (self->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                self->jitState = kJitTSelectEnd;
            }
            /* Abandon the trace request if debugger/profiler is attached */
            if (debugOrProfile) {
                self->jitState = kJitDone;
                break;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            else {
                /*
                 * Last instruction is a return - stay in the dbg interpreter
                 * for one more instruction if it is a non-void return, since
                 * we don't want to start a trace with move-result as the first
                 * instruction (which is already included in the trace
                 * containing the invoke).
                 */
                if (decInsn.opcode != OP_RETURN_VOID) {
                    stayOneMoreInst = true;
                }
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                /* Empty trace - set to bail to interpreter */
                if (self->totalTraceLen == 0) {
                    dvmJitSetCodeAddr(self->currTraceHead,
                                      dvmCompilerGetInterpretTemplate(),
                                      dvmCompilerGetInterpretTemplateSet(),
                                      false /* Not method entry */, 0);
                    self->jitState = kJitDone;
                    switchInterp = true;
                    break;
                }

                int lastTraceDesc = self->currTraceRun;

                /* Extend a new empty desc if the last slot is meta info */
                if (!self->trace[lastTraceDesc].frag.isCode) {
                    lastTraceDesc = ++self->currTraceRun;
                    self->trace[lastTraceDesc].frag.startOffset = 0;
                    self->trace[lastTraceDesc].frag.numInsts = 0;
                    self->trace[lastTraceDesc].frag.hint = kJitHintNone;
                    self->trace[lastTraceDesc].frag.isCode = true;
                }

                /* Mark the end of the trace runs */
                self->trace[lastTraceDesc].frag.runEnd = true;

                /* Descriptor owns a copy of the accumulated trace runs;
                 * ownership passes to the compiler queue on success */
                JitTraceDescription* desc =
                   (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                     sizeof(JitTraceRun) * (self->currTraceRun+1));

                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    self->jitState = kJitDone;
                    switchInterp = true;
                    break;
                }

                desc->method = self->interpSave.method;
                memcpy((char*)&(desc->trace[0]),
                    (char*)&(self->trace[0]),
                    sizeof(JitTraceRun) * (self->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen:  trace done, adding to queue");
#endif
                if (dvmCompilerWorkEnqueue(
                       self->currTraceHead,kWorkOrderTrace,desc)) {
                    /* Work order successfully enqueued */
                    if (gDvmJit.blockingMode) {
                        dvmCompilerDrainQueue();
                    }
                } else {
                    /*
                     * Make sure the descriptor for the abandoned work order is
                     * freed.
                     */
                    free(desc);
                }
                self->jitState = kJitDone;
                switchInterp = true;
            }
            break;
        case kJitSingleStep:
            self->jitState = kJitSingleStepEnd;
            break;
        case kJitSingleStepEnd:
            /*
             * Clear the inJitCodeCache flag and abandon the resume attempt if
             * we cannot switch back to the translation due to corner-case
             * conditions. If the flag is not cleared and the code cache is full
             * we will be stuck in the debug interpreter as the code cache
             * cannot be reset.
             */
            if (dvmJitStayInPortableInterpreter()) {
                self->entryPoint = kInterpEntryInstr;
                self->inJitCodeCache = 0;
            } else {
                self->entryPoint = kInterpEntryResume;
            }
            self->jitState = kJitDone;
            switchInterp = true;
            break;
        case kJitDone:
            switchInterp = true;
            break;
#if defined(WITH_SELF_VERIFICATION)
        case kJitSelfVerification:
            if (selfVerificationDebugInterp(pc, self)) {
                /*
                 * If the next state is not single-step end, we can switch
                 * interpreter now.
                 */
                if (self->jitState != kJitSingleStepEnd) {
                    self->jitState = kJitDone;
                    switchInterp = true;
                }
            }
            break;
#endif
        case kJitNot:
            switchInterp = !debugOrProfile;
            break;
        default:
            LOGE("Unexpected JIT state: %d entry point: %d",
                 self->jitState, self->entryPoint);
            dvmAbort();
            break;
    }
    /*
     * Final check to see if we can really switch the interpreter. Make sure
     * the jitState is kJitDone or kJitNot when switchInterp is set to true.
     */
    assert(switchInterp == false || self->jitState == kJitDone ||
           self->jitState == kJitNot);
    return switchInterp && !debugOrProfile && !stayOneMoreInst &&
           !dvmJitStayInPortableInterpreter();
}
| 954 | |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 955 | JitEntry *dvmJitFindEntry(const u2* pc, bool isMethodEntry) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 956 | { |
| 957 | int idx = dvmJitHash(pc); |
| 958 | |
| 959 | /* Expect a high hit rate on 1st shot */ |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 960 | if ((gDvmJit.pJitEntryTable[idx].dPC == pc) && |
| 961 | (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == isMethodEntry)) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 962 | return &gDvmJit.pJitEntryTable[idx]; |
| 963 | else { |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 964 | int chainEndMarker = gDvmJit.jitTableSize; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 965 | while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) { |
| 966 | idx = gDvmJit.pJitEntryTable[idx].u.info.chain; |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 967 | if ((gDvmJit.pJitEntryTable[idx].dPC == pc) && |
| 968 | (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == |
| 969 | isMethodEntry)) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 970 | return &gDvmJit.pJitEntryTable[idx]; |
| 971 | } |
| 972 | } |
| 973 | return NULL; |
| 974 | } |
| 975 | |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 976 | /* |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 977 | * Walk through the JIT profile table and find the corresponding JIT code, in |
| 978 | * the specified format (ie trace vs method). This routine needs to be fast. |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 979 | */ |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 980 | void* getCodeAddrCommon(const u2* dPC, bool methodEntry) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 981 | { |
| 982 | int idx = dvmJitHash(dPC); |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 983 | const u2* pc = gDvmJit.pJitEntryTable[idx].dPC; |
| 984 | if (pc != NULL) { |
| Ben Cheng | 1a7b9d7 | 2010-09-20 22:20:31 -0700 | [diff] [blame] | 985 | bool hideTranslation = dvmJitHideTranslation(); |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 986 | if (pc == dPC && |
| 987 | gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) { |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 988 | int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ? |
| 989 | 0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset; |
| 990 | intptr_t codeAddress = |
| 991 | (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress; |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 992 | #if defined(WITH_JIT_TUNING) |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 993 | gDvmJit.addrLookupsFound++; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 994 | #endif |
| buzbee | 99ddb1e | 2011-01-28 10:44:30 -0800 | [diff] [blame] | 995 | return hideTranslation || !codeAddress ? NULL : |
| 996 | (void *)(codeAddress + offset); |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 997 | } else { |
| 998 | int chainEndMarker = gDvmJit.jitTableSize; |
| 999 | while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) { |
| 1000 | idx = gDvmJit.pJitEntryTable[idx].u.info.chain; |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1001 | if (gDvmJit.pJitEntryTable[idx].dPC == dPC && |
| 1002 | gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == |
| 1003 | methodEntry) { |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1004 | int offset = (gDvmJit.profileMode >= |
| 1005 | kTraceProfilingContinuous) ? 0 : |
| 1006 | gDvmJit.pJitEntryTable[idx].u.info.profileOffset; |
| 1007 | intptr_t codeAddress = |
| 1008 | (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress; |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 1009 | #if defined(WITH_JIT_TUNING) |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1010 | gDvmJit.addrLookupsFound++; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1011 | #endif |
| buzbee | 99ddb1e | 2011-01-28 10:44:30 -0800 | [diff] [blame] | 1012 | return hideTranslation || !codeAddress ? NULL : |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1013 | (void *)(codeAddress + offset); |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1014 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1015 | } |
| 1016 | } |
| 1017 | } |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 1018 | #if defined(WITH_JIT_TUNING) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1019 | gDvmJit.addrLookupsNotFound++; |
| 1020 | #endif |
| 1021 | return NULL; |
| 1022 | } |
| 1023 | |
| 1024 | /* |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1025 | * If a translated code address, in trace format, exists for the davik byte code |
| 1026 | * pointer return it. |
| 1027 | */ |
| 1028 | void* dvmJitGetTraceAddr(const u2* dPC) |
| 1029 | { |
| 1030 | return getCodeAddrCommon(dPC, false /* method entry */); |
| 1031 | } |
| 1032 | |
| 1033 | /* |
| 1034 | * If a translated code address, in whole-method format, exists for the davik |
| 1035 | * byte code pointer return it. |
| 1036 | */ |
| 1037 | void* dvmJitGetMethodAddr(const u2* dPC) |
| 1038 | { |
| 1039 | return getCodeAddrCommon(dPC, true /* method entry */); |
| 1040 | } |
| 1041 | |
| 1042 | /* |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1043 | * Register the translated code pointer into the JitTable. |
| Bill Buzbee | 9a8c75a | 2009-11-08 14:31:20 -0800 | [diff] [blame] | 1044 | * NOTE: Once a codeAddress field transitions from initial state to |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1045 | * JIT'd code, it must not be altered without first halting all |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1046 | * threads. This routine should only be called by the compiler |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1047 | * thread. We defer the setting of the profile prefix size until |
| 1048 | * after the new code address is set to ensure that the prefix offset |
| 1049 | * is never applied to the initial interpret-only translation. All |
| 1050 | * translations with non-zero profile prefixes will still be correct |
| 1051 | * if entered as if the profile offset is 0, but the interpret-only |
| 1052 | * template cannot handle a non-zero prefix. |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1053 | */ |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1054 | void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set, |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1055 | bool isMethodEntry, int profilePrefixSize) |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1056 | { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1057 | JitEntryInfoUnion oldValue; |
| 1058 | JitEntryInfoUnion newValue; |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1059 | JitEntry *jitEntry = dvmJitFindEntry(dPC, isMethodEntry); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1060 | assert(jitEntry); |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1061 | /* Note: order of update is important */ |
| 1062 | do { |
| 1063 | oldValue = jitEntry->u; |
| 1064 | newValue = oldValue; |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1065 | newValue.info.isMethodEntry = isMethodEntry; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1066 | newValue.info.instructionSet = set; |
| buzbee | 99ddb1e | 2011-01-28 10:44:30 -0800 | [diff] [blame] | 1067 | newValue.info.profileOffset = profilePrefixSize; |
| Andy McFadden | 6e10b9a | 2010-06-14 15:24:39 -0700 | [diff] [blame] | 1068 | } while (android_atomic_release_cas( |
| 1069 | oldValue.infoWord, newValue.infoWord, |
| 1070 | &jitEntry->u.infoWord) != 0); |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1071 | jitEntry->codeAddress = nPC; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1072 | } |
| 1073 | |
| 1074 | /* |
| 1075 | * Determine if valid trace-bulding request is active. Return true |
| 1076 | * if we need to abort and switch back to the fast interpreter, false |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1077 | * otherwise. |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1078 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1079 | bool dvmJitCheckTraceRequest(Thread* self) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1080 | { |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1081 | bool switchInterp = false; /* Assume success */ |
| Bill Buzbee | 48f1824 | 2009-06-19 16:02:27 -0700 | [diff] [blame] | 1082 | int i; |
| buzbee | 852aacd | 2010-06-08 16:24:46 -0700 | [diff] [blame] | 1083 | /* |
| 1084 | * A note on trace "hotness" filtering: |
| 1085 | * |
| 1086 | * Our first level trigger is intentionally loose - we need it to |
| 1087 | * fire easily not just to identify potential traces to compile, but |
| 1088 | * also to allow re-entry into the code cache. |
| 1089 | * |
| 1090 | * The 2nd level filter (done here) exists to be selective about |
| 1091 | * what we actually compile. It works by requiring the same |
| 1092 | * trace head "key" (defined as filterKey below) to appear twice in |
| 1093 | * a relatively short period of time. The difficulty is defining the |
| 1094 | * shape of the filterKey. Unfortunately, there is no "one size fits |
| 1095 | * all" approach. |
| 1096 | * |
| 1097 | * For spiky execution profiles dominated by a smallish |
| 1098 | * number of very hot loops, we would want the second-level filter |
| 1099 | * to be very selective. A good selective filter is requiring an |
| 1100 | * exact match of the Dalvik PC. In other words, defining filterKey as: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1101 | * intptr_t filterKey = (intptr_t)self->interpSave.pc |
| buzbee | 852aacd | 2010-06-08 16:24:46 -0700 | [diff] [blame] | 1102 | * |
| 1103 | * However, for flat execution profiles we do best when aggressively |
| 1104 | * translating. A heuristically decent proxy for this is to use |
| 1105 | * the value of the method pointer containing the trace as the filterKey. |
| 1106 | * Intuitively, this is saying that once any trace in a method appears hot, |
| 1107 | * immediately translate any other trace from that same method that |
| 1108 | * survives the first-level filter. Here, filterKey would be defined as: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1109 | * intptr_t filterKey = (intptr_t)self->interpSave.method |
| buzbee | 852aacd | 2010-06-08 16:24:46 -0700 | [diff] [blame] | 1110 | * |
| 1111 | * The problem is that we can't easily detect whether we're dealing |
| 1112 | * with a spiky or flat profile. If we go with the "pc" match approach, |
| 1113 | * flat profiles perform poorly. If we go with the loose "method" match, |
| 1114 | * we end up generating a lot of useless translations. Probably the |
| 1115 | * best approach in the future will be to retain profile information |
| 1116 | * across runs of each application in order to determine it's profile, |
| 1117 | * and then choose once we have enough history. |
| 1118 | * |
| 1119 | * However, for now we've decided to chose a compromise filter scheme that |
| 1120 | * includes elements of both. The high order bits of the filter key |
| 1121 | * are drawn from the enclosing method, and are combined with a slice |
| 1122 | * of the low-order bits of the Dalvik pc of the trace head. The |
| 1123 | * looseness of the filter can be adjusted by changing with width of |
| 1124 | * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS). The wider |
| 1125 | * the slice, the tighter the filter. |
| 1126 | * |
| 1127 | * Note: the fixed shifts in the function below reflect assumed word |
| 1128 | * alignment for method pointers, and half-word alignment of the Dalvik pc. |
| 1129 | * for method pointers and half-word alignment for dalvik pc. |
| 1130 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1131 | u4 methodKey = (u4)self->interpSave.method << |
| buzbee | c35294d | 2010-06-09 14:22:50 -0700 | [diff] [blame] | 1132 | (JIT_TRACE_THRESH_FILTER_PC_BITS - 2); |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1133 | u4 pcKey = ((u4)self->interpSave.pc >> 1) & |
| buzbee | c35294d | 2010-06-09 14:22:50 -0700 | [diff] [blame] | 1134 | ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1); |
| 1135 | intptr_t filterKey = (intptr_t)(methodKey | pcKey); |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1136 | bool debugOrProfile = dvmDebuggerOrProfilerActive(); |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1137 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1138 | /* Check if the JIT request can be handled now */ |
| 1139 | if (gDvmJit.pJitEntryTable != NULL && debugOrProfile == false) { |
| 1140 | /* Bypass the filter for hot trace requests or during stress mode */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1141 | if (self->jitState == kJitTSelectRequest && |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1142 | gDvmJit.threshold > 6) { |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1143 | /* Two-level filtering scheme */ |
| 1144 | for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1145 | if (filterKey == self->threshFilter[i]) { |
| 1146 | self->threshFilter[i] = 0; // Reset filter entry |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1147 | break; |
| 1148 | } |
| Bill Buzbee | 48f1824 | 2009-06-19 16:02:27 -0700 | [diff] [blame] | 1149 | } |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1150 | if (i == JIT_TRACE_THRESH_FILTER_SIZE) { |
| 1151 | /* |
| 1152 | * Use random replacement policy - otherwise we could miss a |
| 1153 | * large loop that contains more traces than the size of our |
| 1154 | * filter array. |
| 1155 | */ |
| 1156 | i = rand() % JIT_TRACE_THRESH_FILTER_SIZE; |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1157 | self->threshFilter[i] = filterKey; |
| 1158 | self->jitState = kJitDone; |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1159 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1160 | } |
| Bill Buzbee | d726991 | 2009-11-10 14:31:32 -0800 | [diff] [blame] | 1161 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1162 | /* If the compiler is backlogged, cancel any JIT actions */ |
| 1163 | if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1164 | self->jitState = kJitDone; |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1165 | } |
| Bill Buzbee | d726991 | 2009-11-10 14:31:32 -0800 | [diff] [blame] | 1166 | |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1167 | /* |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1168 | * Check for additional reasons that might force the trace select |
| 1169 | * request to be dropped |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1170 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1171 | if (self->jitState == kJitTSelectRequest || |
| 1172 | self->jitState == kJitTSelectRequestHot) { |
| 1173 | if (dvmJitFindEntry(self->interpSave.pc, false)) { |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1174 | /* In progress - nothing do do */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1175 | self->jitState = kJitDone; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1176 | } else { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1177 | JitEntry *slot = lookupAndAdd(self->interpSave.pc, |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1178 | false /* lock */, |
| 1179 | false /* method entry */); |
| 1180 | if (slot == NULL) { |
| 1181 | /* |
| 1182 | * Table is full. This should have been |
| 1183 | * detected by the compiler thread and the table |
| 1184 | * resized before we run into it here. Assume bad things |
| 1185 | * are afoot and disable profiling. |
| 1186 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1187 | self->jitState = kJitDone; |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1188 | LOGD("JIT: JitTable full, disabling profiling"); |
| 1189 | dvmJitStopTranslationRequests(); |
| 1190 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1191 | } |
| 1192 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1193 | |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1194 | switch (self->jitState) { |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1195 | case kJitTSelectRequest: |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1196 | case kJitTSelectRequestHot: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1197 | self->jitState = kJitTSelect; |
| 1198 | self->currTraceHead = self->interpSave.pc; |
| 1199 | self->currTraceRun = 0; |
| 1200 | self->totalTraceLen = 0; |
| 1201 | self->currRunHead = self->interpSave.pc; |
| 1202 | self->currRunLen = 0; |
| 1203 | self->trace[0].frag.startOffset = |
| 1204 | self->interpSave.pc - self->interpSave.method->insns; |
| 1205 | self->trace[0].frag.numInsts = 0; |
| 1206 | self->trace[0].frag.runEnd = false; |
| 1207 | self->trace[0].frag.hint = kJitHintNone; |
| 1208 | self->trace[0].frag.isCode = true; |
| 1209 | self->lastPC = 0; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1210 | break; |
| 1211 | /* |
| 1212 | * For JIT's perspective there is no need to stay in the debug |
| 1213 | * interpreter unless debugger/profiler is attached. |
| 1214 | */ |
| 1215 | case kJitDone: |
| 1216 | switchInterp = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1217 | break; |
| 1218 | default: |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1219 | LOGE("Unexpected JIT state: %d entry point: %d", |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1220 | self->jitState, self->entryPoint); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1221 | dvmAbort(); |
| 1222 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1223 | } else { |
| 1224 | /* |
| 1225 | * Cannot build trace this time - ready to leave the dbg interpreter |
| 1226 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1227 | self->jitState = kJitDone; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1228 | switchInterp = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1229 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1230 | |
| 1231 | /* |
| 1232 | * Final check to see if we can really switch the interpreter. Make sure |
| 1233 | * the jitState is kJitDone when switchInterp is set to true. |
| 1234 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1235 | assert(switchInterp == false || self->jitState == kJitDone); |
| Ben Cheng | 1a7b9d7 | 2010-09-20 22:20:31 -0700 | [diff] [blame] | 1236 | return switchInterp && !debugOrProfile && |
| 1237 | !dvmJitStayInPortableInterpreter(); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1238 | } |
| 1239 | |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1240 | /* |
| 1241 | * Resizes the JitTable. Must be a power of 2, and returns true on failure. |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1242 | * Stops all threads, and thus is a heavyweight operation. May only be called |
| 1243 | * by the compiler thread. |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1244 | */ |
| 1245 | bool dvmJitResizeJitTable( unsigned int size ) |
| 1246 | { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1247 | JitEntry *pNewTable; |
| 1248 | JitEntry *pOldTable; |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1249 | JitEntry tempEntry; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1250 | u4 newMask; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1251 | unsigned int oldSize; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1252 | unsigned int i; |
| 1253 | |
| Ben Cheng | 3f02aa4 | 2009-08-14 13:52:09 -0700 | [diff] [blame] | 1254 | assert(gDvmJit.pJitEntryTable != NULL); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1255 | assert(size && !(size & (size - 1))); /* Is power of 2? */ |
| 1256 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1257 | LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1258 | |
| 1259 | newMask = size - 1; |
| 1260 | |
| 1261 | if (size <= gDvmJit.jitTableSize) { |
| 1262 | return true; |
| 1263 | } |
| 1264 | |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1265 | /* Make sure requested size is compatible with chain field width */ |
| 1266 | tempEntry.u.info.chain = size; |
| 1267 | if (tempEntry.u.info.chain != size) { |
| 1268 | LOGD("Jit: JitTable request of %d too big", size); |
| 1269 | return true; |
| 1270 | } |
| 1271 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1272 | pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable)); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1273 | if (pNewTable == NULL) { |
| 1274 | return true; |
| 1275 | } |
| 1276 | for (i=0; i< size; i++) { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1277 | pNewTable[i].u.info.chain = size; /* Initialize chain termination */ |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1278 | } |
| 1279 | |
| 1280 | /* Stop all other interpreting/jit'ng threads */ |
| Ben Cheng | a8e64a7 | 2009-10-20 13:01:36 -0700 | [diff] [blame] | 1281 | dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1282 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1283 | pOldTable = gDvmJit.pJitEntryTable; |
| 1284 | oldSize = gDvmJit.jitTableSize; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1285 | |
| 1286 | dvmLockMutex(&gDvmJit.tableLock); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1287 | gDvmJit.pJitEntryTable = pNewTable; |
| 1288 | gDvmJit.jitTableSize = size; |
| 1289 | gDvmJit.jitTableMask = size - 1; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1290 | gDvmJit.jitTableEntriesUsed = 0; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1291 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1292 | for (i=0; i < oldSize; i++) { |
| 1293 | if (pOldTable[i].dPC) { |
| 1294 | JitEntry *p; |
| 1295 | u2 chain; |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1296 | p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/, |
| 1297 | pOldTable[i].u.info.isMethodEntry); |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1298 | p->codeAddress = pOldTable[i].codeAddress; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1299 | /* We need to preserve the new chain field, but copy the rest */ |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1300 | chain = p->u.info.chain; |
| 1301 | p->u = pOldTable[i].u; |
| 1302 | p->u.info.chain = chain; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1303 | } |
| 1304 | } |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1305 | |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1306 | dvmUnlockMutex(&gDvmJit.tableLock); |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1307 | |
| 1308 | free(pOldTable); |
| 1309 | |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1310 | /* Restart the world */ |
| Ben Cheng | a8e64a7 | 2009-10-20 13:01:36 -0700 | [diff] [blame] | 1311 | dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1312 | |
| 1313 | return false; |
| 1314 | } |
| 1315 | |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1316 | /* |
| Ben Cheng | 60c24f4 | 2010-01-04 12:29:56 -0800 | [diff] [blame] | 1317 | * Reset the JitTable to the initial clean state. |
| 1318 | */ |
| 1319 | void dvmJitResetTable(void) |
| 1320 | { |
| 1321 | JitEntry *jitEntry = gDvmJit.pJitEntryTable; |
| 1322 | unsigned int size = gDvmJit.jitTableSize; |
| 1323 | unsigned int i; |
| 1324 | |
| 1325 | dvmLockMutex(&gDvmJit.tableLock); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1326 | |
| 1327 | /* Note: If need to preserve any existing counts. Do so here. */ |
| buzbee | 38c4134 | 2011-01-11 15:45:49 -0800 | [diff] [blame] | 1328 | if (gDvmJit.pJitTraceProfCounters) { |
| 1329 | for (i=0; i < JIT_PROF_BLOCK_BUCKETS; i++) { |
| 1330 | if (gDvmJit.pJitTraceProfCounters->buckets[i]) |
| 1331 | memset((void *) gDvmJit.pJitTraceProfCounters->buckets[i], |
| 1332 | 0, sizeof(JitTraceCounter_t) * JIT_PROF_BLOCK_ENTRIES); |
| 1333 | } |
| 1334 | gDvmJit.pJitTraceProfCounters->next = 0; |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1335 | } |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1336 | |
| Ben Cheng | 60c24f4 | 2010-01-04 12:29:56 -0800 | [diff] [blame] | 1337 | memset((void *) jitEntry, 0, sizeof(JitEntry) * size); |
| 1338 | for (i=0; i< size; i++) { |
| 1339 | jitEntry[i].u.info.chain = size; /* Initialize chain termination */ |
| 1340 | } |
| 1341 | gDvmJit.jitTableEntriesUsed = 0; |
| 1342 | dvmUnlockMutex(&gDvmJit.tableLock); |
| 1343 | } |
| 1344 | |
| 1345 | /* |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1346 | * Return the address of the next trace profile counter. This address |
| 1347 | * will be embedded in the generated code for the trace, and thus cannot |
| 1348 | * change while the trace exists. |
| 1349 | */ |
| 1350 | JitTraceCounter_t *dvmJitNextTraceCounter() |
| 1351 | { |
| 1352 | int idx = gDvmJit.pJitTraceProfCounters->next / JIT_PROF_BLOCK_ENTRIES; |
| 1353 | int elem = gDvmJit.pJitTraceProfCounters->next % JIT_PROF_BLOCK_ENTRIES; |
| 1354 | JitTraceCounter_t *res; |
| 1355 | /* Lazily allocate blocks of counters */ |
| 1356 | if (!gDvmJit.pJitTraceProfCounters->buckets[idx]) { |
| 1357 | JitTraceCounter_t *p = |
| 1358 | (JitTraceCounter_t*) calloc(JIT_PROF_BLOCK_ENTRIES, sizeof(*p)); |
| 1359 | if (!p) { |
| 1360 | LOGE("Failed to allocate block of trace profile counters"); |
| 1361 | dvmAbort(); |
| 1362 | } |
| 1363 | gDvmJit.pJitTraceProfCounters->buckets[idx] = p; |
| 1364 | } |
| 1365 | res = &gDvmJit.pJitTraceProfCounters->buckets[idx][elem]; |
| 1366 | gDvmJit.pJitTraceProfCounters->next++; |
| 1367 | return res; |
| 1368 | } |
| 1369 | |
| 1370 | /* |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1371 | * Float/double conversion requires clamping to min and max of integer form. If |
| 1372 | * target doesn't support this normally, use these. |
| 1373 | */ |
| 1374 | s8 dvmJitd2l(double d) |
| 1375 | { |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1376 | static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL; |
| 1377 | static const double kMinLong = (double)(s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1378 | if (d >= kMaxLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1379 | return (s8)0x7fffffffffffffffULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1380 | else if (d <= kMinLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1381 | return (s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1382 | else if (d != d) // NaN case |
| 1383 | return 0; |
| 1384 | else |
| 1385 | return (s8)d; |
| 1386 | } |
| 1387 | |
| 1388 | s8 dvmJitf2l(float f) |
| 1389 | { |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1390 | static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL; |
| 1391 | static const float kMinLong = (float)(s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1392 | if (f >= kMaxLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1393 | return (s8)0x7fffffffffffffffULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1394 | else if (f <= kMinLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1395 | return (s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1396 | else if (f != f) // NaN case |
| 1397 | return 0; |
| 1398 | else |
| 1399 | return (s8)f; |
| 1400 | } |
| 1401 | |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1402 | /* Should only be called by the compiler thread */ |
| 1403 | void dvmJitChangeProfileMode(TraceProfilingModes newState) |
| 1404 | { |
| 1405 | if (gDvmJit.profileMode != newState) { |
| 1406 | gDvmJit.profileMode = newState; |
| 1407 | dvmJitUnchainAll(); |
| 1408 | } |
| 1409 | } |
| 1410 | |
| 1411 | void dvmJitTraceProfilingOn() |
| 1412 | { |
| 1413 | if (gDvmJit.profileMode == kTraceProfilingPeriodicOff) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1414 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1415 | (void*) kTraceProfilingPeriodicOn); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1416 | else if (gDvmJit.profileMode == kTraceProfilingDisabled) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1417 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1418 | (void*) kTraceProfilingContinuous); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1419 | } |
| 1420 | |
| 1421 | void dvmJitTraceProfilingOff() |
| 1422 | { |
| 1423 | if (gDvmJit.profileMode == kTraceProfilingPeriodicOn) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1424 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1425 | (void*) kTraceProfilingPeriodicOff); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1426 | else if (gDvmJit.profileMode == kTraceProfilingContinuous) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1427 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1428 | (void*) kTraceProfilingDisabled); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1429 | } |
| 1430 | |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1431 | #endif /* WITH_JIT */ |