/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifdef WITH_JIT

/*
 * Target independent portion of Android's Jit
 */

#include "Dalvik.h"
#include "Jit.h"


#include "libdex/DexOpcodes.h"
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>
#include <signal.h>
#include "compiler/Compiler.h"
#include "compiler/CompilerUtility.h"
#include "compiler/CompilerIR.h"
#include <errno.h>

#if defined(WITH_SELF_VERIFICATION)
/* Allocate space for per-thread ShadowSpace data structures */
void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
{
    self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
    if (self->shadowSpace == NULL)
        return NULL;

    self->shadowSpace->registerSpaceSize = REG_SPACE;
    self->shadowSpace->registerSpace =
        (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));

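    /* A NULL result here tells the caller that the allocation failed */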
    return self->shadowSpace->registerSpace;
}

/* Free per-thread ShadowSpace data structures */
void dvmSelfVerificationShadowSpaceFree(Thread* self)
{
    free(self->shadowSpace->registerSpace);
    free(self->shadowSpace);
}

/*
 * Save out PC, FP, thread state, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 *
 * The set of saved state from the Thread structure is:
 *     pc  (Dalvik PC)
 *     fp  (Dalvik FP)
 *     retval
 *     method
 *     methodClassDex
 *     interpStackEnd
 */
void* dvmSelfVerificationSaveState(const u2* pc, u4* fp,
                                   Thread* self, int targetTrace)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    unsigned preBytes = self->interpSave.method->outsSize*4 +
        sizeof(StackSaveArea);
    unsigned postBytes = self->interpSave.method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    if (self->entryPoint == kInterpEntryResume) {
        self->entryPoint = kInterpEntryInstr;
#if 0
        /* Tracking the success rate of resume after single-stepping */
        if (self->jitResumeDPC == pc) {
            LOGD("SV single step resumed at %p", pc);
        }
        else {
            LOGD("real %p DPC %p NPC %p", pc, self->jitResumeDPC,
                 self->jitResumeNPC);
        }
#endif
    }

    // Dynamically grow shadow register space if necessary
    if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->retval = self->retval;
    shadowSpace->interpStackEnd = self->interpStackEnd;

    /*
     * Store the original method here in case the trace ends with a
     * return/invoke, which would leave a different method current by the
     * time the state is restored.
     */
    shadowSpace->method = self->interpSave.method;
    shadowSpace->methodClassDex = self->interpSave.methodClassDex;

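    /* Place the shadow frame so the method's registers end exactly at the
     * top of the shadow register space */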
    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;

    self->interpSave.fp = (u4*)shadowSpace->shadowFP;
    self->interpStackEnd = (u1*)shadowSpace->registerSpace;

    // Create a copy of the stack
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}

/*
 * Save ending PC, FP and compiled code exit point to shadow space.
 * Return a pointer to the shadow space for JIT to restore state.
 */
void* dvmSelfVerificationRestoreState(const u2* pc, u4* fp,
                                      SelfVerificationState exitState,
                                      Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    shadowSpace->endPC = pc;
    shadowSpace->endShadowFP = fp;
    shadowSpace->jitExitState = exitState;

    //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
    //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    //    (int)pc);

    if (shadowSpace->selfVerificationState != kSVSStart) {
        LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
            (int)shadowSpace->endPC);
        LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
        LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
            (int)shadowSpace->endShadowFP);
    }

    // Special case when punting after a single instruction
    if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
        shadowSpace->selfVerificationState = kSVSIdle;
    } else if (exitState == kSVSBackwardBranch && pc < shadowSpace->startPC) {
        /*
         * Consider a trace with a backward branch:
         *   1: ..
         *   2: ..
         *   3: ..
         *   4: ..
         *   5: Goto {1 or 2 or 3 or 4}
         *
         * If instruction 5 goes to 1 and there is no single-step
         * instruction in the loop, pc is equal to shadowSpace->startPC and
         * we will honor the backward branch condition.
         *
         * If the single-step instruction is outside the loop, then after
         * resuming in the trace the startPC will be less than pc so we will
         * also honor the backward branch condition.
         *
         * If the single-step is inside the loop, we won't hit the same endPC
         * twice when the interpreter is re-executing the trace so we want to
         * cancel the backward branch condition. In this case it can be
         * detected as the endPC (ie pc) will be less than startPC.
         */
        shadowSpace->selfVerificationState = kSVSNormal;
    } else {
        shadowSpace->selfVerificationState = exitState;
    }

    /* Restore state before returning */
    self->interpSave.pc = shadowSpace->startPC;
    self->interpSave.fp = shadowSpace->fp;
    self->interpSave.method = shadowSpace->method;
    self->interpSave.methodClassDex = shadowSpace->methodClassDex;
    self->retval = shadowSpace->retval;
    self->interpStackEnd = shadowSpace->interpStackEnd;

    return shadowSpace;
}

/* Print contents of virtual registers */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    int i;
    for (i = 0; i < numWords; i++) {
        LOGD("(v%d) 0x%8x%s", i, addr[i], addr[i] != addrRef[i] ? " X" : "");
    }
}

/* Print values maintained in shadowSpace */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
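    /* frameBytes covers the shadowed frame; localRegs/frameBytes2 describe a
     * callee frame pushed by an invoke within the trace, if any */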
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
    LOGD("Method: %s", shadowSpace->method->name);
    LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}

/* Print decoded instructions in the current trace */
static void selfVerificationDumpTrace(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int i, addr, offset;
    DecodedInstruction *decInsn;

    LOGD("********** SHADOW TRACE DUMP **********");
    for (i = 0; i < shadowSpace->traceLength; i++) {
        addr = shadowSpace->trace[i].addr;
        offset = (int)((u2*)addr - stackSave->method->insns);
        decInsn = &(shadowSpace->trace[i].decInsn);
        /* The instruction is not fully re-decoded here, so some register
         * fields may be garbage */
        LOGD("0x%x: (0x%04x) %s",
             addr, offset, dexGetOpcodeName(decInsn->opcode));
    }
}

/* Code is forced into this spin loop when a divergence is detected */
static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
{
    const u2 *startPC = shadowSpace->startPC;
    JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
    if (desc) {
        dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
        /*
         * This function effectively terminates the VM right here, so not
         * freeing the desc pointer when the enqueuing fails is acceptable.
         */
    }
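    /* Spin until gDvmJit.selfVerificationSpin is cleared externally, which
     * effectively halts this thread so the divergence dump can be examined */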
    gDvmJit.selfVerificationSpin = true;
    while(gDvmJit.selfVerificationSpin) sleep(10);
}

/* Manage self verification while in the debug interpreter */
static bool selfVerificationDebugInterp(const u2* pc, Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    SelfVerificationState state = shadowSpace->selfVerificationState;

    DecodedInstruction decInsn;
    dexDecodeInstruction(pc, &decInsn);

    //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, dexGetOpcodeName(decInsn.opcode));

    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /*
     * Skip endPC once when trace has a backward branch. If the SV state is
     * single step, keep it that way.
     */
    if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
        (state != kSVSBackwardBranch && state != kSVSSingleStep)) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /* Check that the current pc is the end of the trace */
    if ((state == kSVSDebugInterp || state == kSVSSingleStep) &&
        pc == shadowSpace->endPC) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp,
                                           (int*)shadowSpace->shadowFP,
                                           frameBytes/4);
            LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                                           (int*)shadowSpace->fp,
                                           frameBytes/4);
            selfVerificationSpinLoop(shadowSpace);
        }
        /* Check new frame if it exists (invokes only) */
        if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
            StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->curFrame - localRegs;
            if (memcmp(((char*)self->curFrame)+localRegs,
                ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
                    (int)self->curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->curFrame,
                                               (int*)shadowSpace->endShadowFP,
                                               (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                                               (int*)self->curFrame,
                                               (frameBytes2+localRegs)/4);
                selfVerificationSpinLoop(shadowSpace);
            }
        }

        /* Check memory space */
        bool memDiff = false;
        ShadowHeap* heapSpacePtr;
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int memData = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != memData) {
                LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
                LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
                    heapSpacePtr->addr, memData, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                memDiff = true;
            }
        }
        if (memDiff) selfVerificationSpinLoop(shadowSpace);

        /*
         * Switch to JIT single step mode to stay in the debug interpreter for
         * one more instruction
         */
        if (state == kSVSSingleStep) {
            self->jitState = kJitSingleStepEnd;
        }
        return true;

    /* The end has not been reached; make sure the max trace length is not
     * exceeded */
    } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
        selfVerificationSpinLoop(shadowSpace);

        return true;
    }
    /* Log the instruction address and decoded instruction for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    shadowSpace->traceLength++;

    return false;
}
#endif

/*
 * If one of our fixed tables or the translation buffer fills up,
 * call this routine to avoid wasting cycles on future translation requests.
 */
void dvmJitStopTranslationRequests()
{
    /*
     * Note 1: This won't necessarily stop all translation requests, and
     * operates on a delayed mechanism.  Running threads look to the copy
     * of this value in their private thread structures and won't see
     * this change until it is refreshed (which happens on interpreter
     * entry).
     * Note 2: This is a one-shot memory leak on this table. Because this is a
     * permanent off switch for Jit profiling, it is a one-time leak of 1K
     * bytes, and no further attempt will be made to re-allocate it.  Can't
     * free it because some thread may be holding a reference.
     */
    gDvmJit.pProfTable = NULL;
}

#if defined(WITH_JIT_TUNING)
/* Convenience function to increment counter from assembly code */
void dvmBumpNoChain(int from)
{
    gDvmJit.noChainExit[from]++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpNormal()
{
    gDvmJit.normalExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
#endif

/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;
    int not_hit;
    int chains;
    int stubs;
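    /* Tally live entries (hit), empty slots (not_hit), chained entries, and
     * entries pointing at the "interpret-only" template (stubs) */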
    if (gDvmJit.pJitEntryTable) {
        for (i=0, stubs=chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0) {
                hit++;
                if (gDvmJit.pJitEntryTable[i].codeAddress ==
                        dvmCompilerGetInterpretTemplate())
                    stubs++;
            } else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD("JIT: table size is %d, entries used is %d",
             gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed);
        LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
             hit, not_hit + hit, chains, gDvmJit.threshold,
             gDvmJit.blockingMode ? "Blocking" : "Non-blocking");

#if defined(WITH_JIT_TUNING)
        LOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches);

        LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
             gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
             gDvmJit.normalExit, gDvmJit.puntExit);

        LOGD("JIT: ICHits: %d", gDvmICHitCount);

        LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
             "%d switch overflow",
             gDvmJit.noChainExit[kInlineCacheMiss],
             gDvmJit.noChainExit[kCallsiteInterpreted],
             gDvmJit.noChainExit[kSwitchOverflow]);

        LOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, "
             "%d dropped",
             gDvmJit.icPatchInit, gDvmJit.icPatchRejected,
             gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued,
             gDvmJit.icPatchDropped);

        LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
             gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
             gDvmJit.invokeNative, gDvmJit.returnOp);
        LOGD("JIT: Inline: %d mgetter, %d msetter, %d pgetter, %d psetter",
             gDvmJit.invokeMonoGetterInlined, gDvmJit.invokeMonoSetterInlined,
             gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined);
        LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
        LOGD("JIT: Avg unit compilation time: %llu us",
             gDvmJit.numCompilations == 0 ? 0 :
             gDvmJit.jitTime / gDvmJit.numCompilations);
        LOGD("JIT: Potential GC blocked by compiler: max %llu us / "
             "avg %llu us (%d)",
             gDvmJit.maxCompilerThreadBlockGCTime,
             gDvmJit.numCompilerThreadBlockGC == 0 ?
                 0 : gDvmJit.compilerThreadBlockGCTime /
                     gDvmJit.numCompilerThreadBlockGC,
             gDvmJit.numCompilerThreadBlockGC);
#endif

        LOGD("JIT: %d Translation chains, %d interp stubs",
             gDvmJit.translationChains, stubs);
        if (gDvmJit.profileMode == kTraceProfilingContinuous) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}


/* End current trace after last successful instruction */
void dvmJitEndTraceSelect(Thread* self)
{
    if (self->jitState == kJitTSelect)
        self->jitState = kJitTSelectEnd;
}

/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 */
static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked,
                              bool isMethodEntry)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /*
     * Walk the bucket chain to find an exact match for our PC and trace/method
     * type
     */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           ((gDvmJit.pJitEntryTable[idx].dPC != dPC) ||
            (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry !=
             isMethodEntry))) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }
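    /* Either idx refers to a matching entry or to the last slot in its chain */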

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC ||
        gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) {
        /*
         * No match.  Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        if (!callerLocked)
            dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case).  Otherwise we're going
         * to have to find a free slot and chain it.
         */
        ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                    isMethodEntry) {
                    /* Another thread got there first for this dPC */
                    if (!callerLocked)
                        dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0;  /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
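            /* idx == prev means the walk wrapped all the way around without
             * finding a free cell, i.e. the table is full */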
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (android_atomic_release_cas(oldValue.infoWord,
                         newValue.infoWord,
                         &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry;
            /*
             * Initialize codeAddress and allocate the slot.  Must
             * happen in this order (once dPC is set, the entry is live).
             */
            android_atomic_release_store((int32_t)dPC,
                 (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC);
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        if (!callerLocked)
            dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}

/*
 * Append the descriptor and class loader of "this" plus the callee method ptr
 * to the current trace. That is, the trace runs will contain the following
 * components:
 *  + trace run that ends with an invoke (existing entry)
 *  + thisClass descriptor (new)
 *  + thisClass classLoader (new)
 *  + calleeMethod (new)
 */
static void insertClassMethodInfo(Thread* self,
                                  const ClassObject* thisClass,
                                  const Method* calleeMethod,
                                  const DecodedInstruction* insn)
{
    int currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = thisClass ?
        (void *) thisClass->descriptor : NULL;
    self->trace[currTraceRun].isCode = false;

    currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = thisClass ?
        (void *) thisClass->classLoader : NULL;
    self->trace[currTraceRun].isCode = false;

    currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = (void *) calleeMethod;
    self->trace[currTraceRun].isCode = false;
}

/*
 * Check if the next instruction following the invoke is a move-result and if
 * so add it to the trace. That is, this will add the trace run that includes
 * the move-result to the trace list.
 *
 *  + trace run that ends with an invoke (existing entry)
 *  + thisClass (existing entry)
 *  + calleeMethod (existing entry)
 *  + move result (new)
 *
 * lastPC, len, offset are all from the preceding invoke instruction
 */
static void insertMoveResult(const u2 *lastPC, int len, int offset,
                             Thread *self)
{
    DecodedInstruction nextDecInsn;
    const u2 *moveResultPC = lastPC + len;

    dexDecodeInstruction(moveResultPC, &nextDecInsn);
    if ((nextDecInsn.opcode != OP_MOVE_RESULT) &&
        (nextDecInsn.opcode != OP_MOVE_RESULT_WIDE) &&
        (nextDecInsn.opcode != OP_MOVE_RESULT_OBJECT))
        return;

    /* We need to start a new trace run */
    int currTraceRun = ++self->currTraceRun;
    self->currRunHead = moveResultPC;
    self->trace[currTraceRun].info.frag.startOffset = offset + len;
    self->trace[currTraceRun].info.frag.numInsts = 1;
    self->trace[currTraceRun].info.frag.runEnd = false;
    self->trace[currTraceRun].info.frag.hint = kJitHintNone;
    self->trace[currTraceRun].isCode = true;
    self->totalTraceLen++;

    self->currRunLen = dexGetWidthFromInstruction(moveResultPC);
}

/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted.  This is the primary trace
 * selection function.  NOTE: return instructions are handled a little
 * differently.  In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation.  If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request.  This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected.  However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes.  This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 */
int dvmCheckJit(const u2* pc, Thread* self, const ClassObject* thisClass,
                const Method* curMethod)
{
    int flags, len;
    int switchInterp = false;
    bool debugOrProfile = dvmDebuggerOrProfilerActive();
    /* Stay in the dbg interpreter for the next instruction */
    bool stayOneMoreInst = false;

    /*
     * Bug 2710533 - dalvik crash when disconnecting debugger
     *
     * Reset the entry point to the default value. If needed it will be set to a
     * specific value in the corresponding case statement (eg kJitSingleStepEnd)
     */
    self->entryPoint = kInterpEntryInstr;

    /* Prepare to handle last PC and stage the current PC */
    const u2 *lastPC = self->lastPC;
    self->lastPC = pc;

    switch (self->jitState) {
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(lastPC, &decInsn);

            /*
             * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
             * to the amount of space it takes to generate the chaining
             * cells.
             */
            if (self->totalTraceLen != 0 &&
                (decInsn.opcode == OP_PACKED_SWITCH ||
                 decInsn.opcode == OP_SPARSE_SWITCH)) {
                self->jitState = kJitTSelectEnd;
                break;
            }


#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s", dexGetOpcodeName(decInsn.opcode));
#endif
            flags = dexGetFlagsFromOpcode(decInsn.opcode);
            len = dexGetWidthFromInstruction(lastPC);
            offset = lastPC - self->interpSave.method->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(self->interpSave.method));
            if (lastPC != self->currRunHead + self->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++self->currTraceRun;
                self->currRunLen = 0;
                self->currRunHead = (u2*)lastPC;
                self->trace[currTraceRun].info.frag.startOffset = offset;
                self->trace[currTraceRun].info.frag.numInsts = 0;
                self->trace[currTraceRun].info.frag.runEnd = false;
                self->trace[currTraceRun].info.frag.hint = kJitHintNone;
                self->trace[currTraceRun].isCode = true;
            }
            self->trace[self->currTraceRun].info.frag.numInsts++;
            self->totalTraceLen++;
            self->currRunLen += len;

            /*
             * If the last instruction is an invoke, we will try to sneak in
             * the move-result* (if existent) into a separate trace run.
             */
            int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;

| Ben Cheng | 79d173c | 2009-09-29 16:12:51 -0700 | [diff] [blame] | 795 | /* Will probably never hit this with the current trace buildier */ |
            if (self->currTraceRun ==
                (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
                self->jitState = kJitTSelectEnd;
            }

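            /* Any branch, switch, return, or invoke other than an
             * unconditional goto ends the basic block and the trace run */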
            if (!dexIsGoto(flags) &&
                  ((flags & (kInstrCanBranch |
                             kInstrCanSwitch |
                             kInstrCanReturn |
                             kInstrInvoke)) != 0)) {
                self->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     dexGetOpcodeName(decInsn.opcode));
#endif

                /*
                 * If the current invoke is a {virtual,interface}, get the
                 * current class/method pair into the trace as well.
                 * If the next instruction is a variant of move-result, insert
                 * it to the trace too.
                 */
                if (flags & kInstrInvoke) {
                    insertClassMethodInfo(self, thisClass, curMethod,
                                          &decInsn);
                    insertMoveResult(lastPC, len, offset, self);
                }
            }
            /* Break on throw or self-loop */
            if ((decInsn.opcode == OP_THROW) || (lastPC == pc)){
                self->jitState = kJitTSelectEnd;
            }
            if (self->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                self->jitState = kJitTSelectEnd;
            }
            /* Abandon the trace request if debugger/profiler is attached */
            if (debugOrProfile) {
                self->jitState = kJitDone;
                break;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            else {
| 840 | /* |
| 841 | * Last instruction is a return - stay in the dbg interpreter |
| 842 | * for one more instruction if it is a non-void return, since |
| 843 | * we don't want to start a trace with move-result as the first |
| 844 | * instruction (which is already included in the trace |
| 845 | * containing the invoke). |
| 846 | */ |
| Dan Bornstein | 9a1f816 | 2010-12-01 17:02:26 -0800 | [diff] [blame] | 847 | if (decInsn.opcode != OP_RETURN_VOID) { |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 848 | stayOneMoreInst = true; |
| 849 | } |
| 850 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 851 | /* NOTE: intentional fallthrough for returns */ |
| 852 | case kJitTSelectEnd: |
| 853 | { |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 854 | /* Empty trace - set to bail to interpreter */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 855 | if (self->totalTraceLen == 0) { |
| 856 | dvmJitSetCodeAddr(self->currTraceHead, |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 857 | dvmCompilerGetInterpretTemplate(), |
| 858 | dvmCompilerGetInterpretTemplateSet(), |
| 859 | false /* Not method entry */, 0); |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 860 | self->jitState = kJitDone; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 861 | switchInterp = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 862 | break; |
| 863 | } |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 864 | |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 865 | int lastTraceDesc = self->currTraceRun; |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 866 | |
| 867 | /* Append a new empty descriptor if the last slot holds meta info */ |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame^] | 868 | if (!self->trace[lastTraceDesc].isCode) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 869 | lastTraceDesc = ++self->currTraceRun; |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame^] | 870 | self->trace[lastTraceDesc].info.frag.startOffset = 0; |
| 871 | self->trace[lastTraceDesc].info.frag.numInsts = 0; |
| 872 | self->trace[lastTraceDesc].info.frag.hint = kJitHintNone; |
| 873 | self->trace[lastTraceDesc].isCode = true; |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 874 | } |
| 875 | |
| 876 | /* Mark the end of the trace runs */ |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame^] | 877 | self->trace[lastTraceDesc].info.frag.runEnd = true; |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 878 | |
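| | /* |
| | * Allocate a trace description large enough for every run recorded so |
| | * far (currTraceRun is a zero-based index, hence the +1) and copy the |
| | * per-thread run array into it before handing it to the compiler queue. |
| | */ |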
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 879 | JitTraceDescription* desc = |
| 880 | (JitTraceDescription*)malloc(sizeof(JitTraceDescription) + |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 881 | sizeof(JitTraceRun) * (self->currTraceRun+1)); |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 882 | |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 883 | if (desc == NULL) { |
| 884 | LOGE("Out of memory in trace selection"); |
| 885 | dvmJitStopTranslationRequests(); |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 886 | self->jitState = kJitDone; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 887 | switchInterp = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 888 | break; |
| 889 | } |
| Ben Cheng | 7a2697d | 2010-06-07 13:44:23 -0700 | [diff] [blame] | 890 | |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 891 | desc->method = self->interpSave.method; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 892 | memcpy((char*)&(desc->trace[0]), |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 893 | (char*)&(self->trace[0]), |
| 894 | sizeof(JitTraceRun) * (self->currTraceRun+1)); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 895 | #if defined(SHOW_TRACE) |
| 896 | LOGD("TraceGen: trace done, adding to queue"); |
| 897 | #endif |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 898 | if (dvmCompilerWorkEnqueue( |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 899 | self->currTraceHead,kWorkOrderTrace,desc)) { |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 900 | /* Work order successfully enqueued */ |
| 901 | if (gDvmJit.blockingMode) { |
| 902 | dvmCompilerDrainQueue(); |
| 903 | } |
| Ben Cheng | 1357e94 | 2010-02-10 17:21:39 -0800 | [diff] [blame] | 904 | } else { |
| 905 | /* |
| 906 | * Make sure the descriptor for the abandoned work order is |
| 907 | * freed. |
| 908 | */ |
| 909 | free(desc); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 910 | } |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 911 | self->jitState = kJitDone; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 912 | switchInterp = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 913 | } |
| 914 | break; |
| 915 | case kJitSingleStep: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 916 | self->jitState = kJitSingleStepEnd; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 917 | break; |
| 918 | case kJitSingleStepEnd: |
| Ben Cheng | 1a7b9d7 | 2010-09-20 22:20:31 -0700 | [diff] [blame] | 919 | /* |
| 920 | * Clear the inJitCodeCache flag and abandon the resume attempt if |
| 921 | * we cannot switch back to the translation due to corner-case |
| 922 | * conditions. If the flag is not cleared and the code cache is full, |
| 923 | * we will be stuck in the debug interpreter as the code cache |
| 924 | * cannot be reset. |
| 925 | */ |
| 926 | if (dvmJitStayInPortableInterpreter()) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 927 | self->entryPoint = kInterpEntryInstr; |
| Ben Cheng | 1a7b9d7 | 2010-09-20 22:20:31 -0700 | [diff] [blame] | 928 | self->inJitCodeCache = 0; |
| 929 | } else { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 930 | self->entryPoint = kInterpEntryResume; |
| Ben Cheng | 1a7b9d7 | 2010-09-20 22:20:31 -0700 | [diff] [blame] | 931 | } |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 932 | self->jitState = kJitDone; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 933 | switchInterp = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 934 | break; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 935 | case kJitDone: |
| 936 | switchInterp = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 937 | break; |
| Jeff Hao | 97319a8 | 2009-08-12 16:57:15 -0700 | [diff] [blame] | 938 | #if defined(WITH_SELF_VERIFICATION) |
| 939 | case kJitSelfVerification: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 940 | if (selfVerificationDebugInterp(pc, self)) { |
| Ben Cheng | d5adae1 | 2010-03-26 17:45:28 -0700 | [diff] [blame] | 941 | /* |
| 942 | * If the next state is not single-step end, we can switch |
| 943 | * interpreter now. |
| 944 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 945 | if (self->jitState != kJitSingleStepEnd) { |
| 946 | self->jitState = kJitDone; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 947 | switchInterp = true; |
| Ben Cheng | d5adae1 | 2010-03-26 17:45:28 -0700 | [diff] [blame] | 948 | } |
| Jeff Hao | 97319a8 | 2009-08-12 16:57:15 -0700 | [diff] [blame] | 949 | } |
| 950 | break; |
| 951 | #endif |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 952 | case kJitNot: |
| Ben Cheng | 1c52e6d | 2010-07-02 13:00:39 -0700 | [diff] [blame] | 953 | switchInterp = !debugOrProfile; |
| Ben Cheng | ed79ff0 | 2009-10-13 13:26:40 -0700 | [diff] [blame] | 954 | break; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 955 | default: |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 956 | LOGE("Unexpected JIT state: %d entry point: %d", |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 957 | self->jitState, self->entryPoint); |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 958 | dvmAbort(); |
| Ben Cheng | 9c147b8 | 2009-10-07 16:41:46 -0700 | [diff] [blame] | 959 | break; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 960 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 961 | /* |
| 962 | * Final check to see if we can really switch the interpreter. Make sure |
| 963 | * the jitState is kJitDone or kJitNot when switchInterp is set to true. |
| 964 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 965 | assert(switchInterp == false || self->jitState == kJitDone || |
| 966 | self->jitState == kJitNot); |
| Ben Cheng | 1a7b9d7 | 2010-09-20 22:20:31 -0700 | [diff] [blame] | 967 | return switchInterp && !debugOrProfile && !stayOneMoreInst && |
| 968 | !dvmJitStayInPortableInterpreter(); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 969 | } |
| 970 | |
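| | /* |
| | * Look up the JitEntry for the given Dalvik PC and entry kind (trace head |
| | * vs. whole-method entry). The PC is hashed to a primary slot; on a miss |
| | * the collision chain is walked until the chain-end marker (jitTableSize) |
| | * is reached. Returns NULL if no matching entry exists. |
| | */ |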
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 971 | JitEntry *dvmJitFindEntry(const u2* pc, bool isMethodEntry) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 972 | { |
| 973 | int idx = dvmJitHash(pc); |
| 974 | |
| 975 | /* Expect a high hit rate on 1st shot */ |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 976 | if ((gDvmJit.pJitEntryTable[idx].dPC == pc) && |
| 977 | (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == isMethodEntry)) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 978 | return &gDvmJit.pJitEntryTable[idx]; |
| 979 | else { |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 980 | int chainEndMarker = gDvmJit.jitTableSize; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 981 | while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) { |
| 982 | idx = gDvmJit.pJitEntryTable[idx].u.info.chain; |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 983 | if ((gDvmJit.pJitEntryTable[idx].dPC == pc) && |
| 984 | (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == |
| 985 | isMethodEntry)) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 986 | return &gDvmJit.pJitEntryTable[idx]; |
| 987 | } |
| 988 | } |
| 989 | return NULL; |
| 990 | } |
| 991 | |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 992 | /* |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 993 | * Walk through the JIT profile table and find the corresponding JIT code, in |
| 994 | * the specified format (i.e., trace vs. method). This routine needs to be fast. |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 995 | */ |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 996 | void* getCodeAddrCommon(const u2* dPC, bool methodEntry) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 997 | { |
| 998 | int idx = dvmJitHash(dPC); |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 999 | const u2* pc = gDvmJit.pJitEntryTable[idx].dPC; |
| 1000 | if (pc != NULL) { |
| Ben Cheng | 1a7b9d7 | 2010-09-20 22:20:31 -0700 | [diff] [blame] | 1001 | bool hideTranslation = dvmJitHideTranslation(); |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1002 | if (pc == dPC && |
| 1003 | gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) { |
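| | /* |
| | * Under continuous profiling the translation is entered at offset 0 so |
| | * that the profiling prefix executes; otherwise the stored profileOffset |
| | * skips the prefix. |
| | */ |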
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1004 | int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ? |
| 1005 | 0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset; |
| 1006 | intptr_t codeAddress = |
| 1007 | (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress; |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 1008 | #if defined(WITH_JIT_TUNING) |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1009 | gDvmJit.addrLookupsFound++; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1010 | #endif |
| buzbee | 99ddb1e | 2011-01-28 10:44:30 -0800 | [diff] [blame] | 1011 | return hideTranslation || !codeAddress ? NULL : |
| 1012 | (void *)(codeAddress + offset); |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1013 | } else { |
| 1014 | int chainEndMarker = gDvmJit.jitTableSize; |
| 1015 | while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) { |
| 1016 | idx = gDvmJit.pJitEntryTable[idx].u.info.chain; |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1017 | if (gDvmJit.pJitEntryTable[idx].dPC == dPC && |
| 1018 | gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == |
| 1019 | methodEntry) { |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1020 | int offset = (gDvmJit.profileMode >= |
| 1021 | kTraceProfilingContinuous) ? 0 : |
| 1022 | gDvmJit.pJitEntryTable[idx].u.info.profileOffset; |
| 1023 | intptr_t codeAddress = |
| 1024 | (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress; |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 1025 | #if defined(WITH_JIT_TUNING) |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1026 | gDvmJit.addrLookupsFound++; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1027 | #endif |
| buzbee | 99ddb1e | 2011-01-28 10:44:30 -0800 | [diff] [blame] | 1028 | return hideTranslation || !codeAddress ? NULL : |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1029 | (void *)(codeAddress + offset); |
| Bill Buzbee | 9797a23 | 2010-01-12 12:20:13 -0800 | [diff] [blame] | 1030 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1031 | } |
| 1032 | } |
| 1033 | } |
| Ben Cheng | 978738d | 2010-05-13 13:45:57 -0700 | [diff] [blame] | 1034 | #if defined(WITH_JIT_TUNING) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1035 | gDvmJit.addrLookupsNotFound++; |
| 1036 | #endif |
| 1037 | return NULL; |
| 1038 | } |
| 1039 | |
| 1040 | /* |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1041 | * If a translated code address, in trace format, exists for the Dalvik bytecode |
| 1042 | * pointer, return it. |
| 1043 | */ |
| 1044 | void* dvmJitGetTraceAddr(const u2* dPC) |
| 1045 | { |
| 1046 | return getCodeAddrCommon(dPC, false /* method entry */); |
| 1047 | } |
| 1048 | |
| 1049 | /* |
| 1050 | * If a translated code address, in whole-method format, exists for the Dalvik |
| 1051 | * bytecode pointer, return it. |
| 1052 | */ |
| 1053 | void* dvmJitGetMethodAddr(const u2* dPC) |
| 1054 | { |
| 1055 | return getCodeAddrCommon(dPC, true /* method entry */); |
| 1056 | } |
| 1057 | |
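| | /* |
| | * Illustrative sketch (not part of the original source): a caller that |
| | * prefers a whole-method translation and falls back to a trace |
| | * translation could combine the two lookups above roughly as follows: |
| | * |
| | *     void *entry = dvmJitGetMethodAddr(pc); |
| | *     if (entry == NULL) |
| | *         entry = dvmJitGetTraceAddr(pc); |
| | *     if (entry != NULL) { |
| | *         // transfer control to the translated code |
| | *     } |
| | */ |
| | |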
| 1058 | /* |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1059 | * Register the translated code pointer into the JitTable. |
| Bill Buzbee | 9a8c75a | 2009-11-08 14:31:20 -0800 | [diff] [blame] | 1060 | * NOTE: Once a codeAddress field transitions from initial state to |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1061 | * JIT'd code, it must not be altered without first halting all |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1062 | * threads. This routine should only be called by the compiler |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1063 | * thread. We defer the setting of the profile prefix size until |
| 1064 | * after the new code address is set to ensure that the prefix offset |
| 1065 | * is never applied to the initial interpret-only translation. All |
| 1066 | * translations with non-zero profile prefixes will still be correct |
| 1067 | * if entered as if the profile offset is 0, but the interpret-only |
| 1068 | * template cannot handle a non-zero prefix. |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1069 | */ |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1070 | void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set, |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1071 | bool isMethodEntry, int profilePrefixSize) |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1072 | { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1073 | JitEntryInfoUnion oldValue; |
| 1074 | JitEntryInfoUnion newValue; |
| Ben Cheng | 20d7e6c | 2011-02-18 17:12:42 -0800 | [diff] [blame] | 1075 | /* |
| 1076 | * Method-based JIT doesn't go through the normal profiling phase, so use |
| 1077 | * lookupAndAdd here to request a new entry in the table. |
| 1078 | */ |
| 1079 | JitEntry *jitEntry = isMethodEntry ? |
| 1080 | lookupAndAdd(dPC, false /* caller locked */, true) : |
| 1081 | dvmJitFindEntry(dPC, isMethodEntry); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1082 | assert(jitEntry); |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1083 | /* Note: order of update is important */ |
| 1084 | do { |
| 1085 | oldValue = jitEntry->u; |
| 1086 | newValue = oldValue; |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1087 | newValue.info.isMethodEntry = isMethodEntry; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1088 | newValue.info.instructionSet = set; |
| buzbee | 99ddb1e | 2011-01-28 10:44:30 -0800 | [diff] [blame] | 1089 | newValue.info.profileOffset = profilePrefixSize; |
| Andy McFadden | 6e10b9a | 2010-06-14 15:24:39 -0700 | [diff] [blame] | 1090 | } while (android_atomic_release_cas( |
| 1091 | oldValue.infoWord, newValue.infoWord, |
| 1092 | &jitEntry->u.infoWord) != 0); |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1093 | jitEntry->codeAddress = nPC; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1094 | } |
| 1095 | |
| 1096 | /* |
| 1097 | * Determine if a valid trace-building request is active. Return true |
| 1098 | * if we need to abort and switch back to the fast interpreter, false |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1099 | * otherwise. |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1100 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1101 | bool dvmJitCheckTraceRequest(Thread* self) |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1102 | { |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1103 | bool switchInterp = false; /* Assume success */ |
| Bill Buzbee | 48f1824 | 2009-06-19 16:02:27 -0700 | [diff] [blame] | 1104 | int i; |
| buzbee | 852aacd | 2010-06-08 16:24:46 -0700 | [diff] [blame] | 1105 | /* |
| 1106 | * A note on trace "hotness" filtering: |
| 1107 | * |
| 1108 | * Our first level trigger is intentionally loose - we need it to |
| 1109 | * fire easily not just to identify potential traces to compile, but |
| 1110 | * also to allow re-entry into the code cache. |
| 1111 | * |
| 1112 | * The 2nd level filter (done here) exists to be selective about |
| 1113 | * what we actually compile. It works by requiring the same |
| 1114 | * trace head "key" (defined as filterKey below) to appear twice in |
| 1115 | * a relatively short period of time. The difficulty is defining the |
| 1116 | * shape of the filterKey. Unfortunately, there is no "one size fits |
| 1117 | * all" approach. |
| 1118 | * |
| 1119 | * For spiky execution profiles dominated by a smallish |
| 1120 | * number of very hot loops, we would want the second-level filter |
| 1121 | * to be very selective. A good selective filter is requiring an |
| 1122 | * exact match of the Dalvik PC. In other words, defining filterKey as: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1123 | * intptr_t filterKey = (intptr_t)self->interpSave.pc |
| buzbee | 852aacd | 2010-06-08 16:24:46 -0700 | [diff] [blame] | 1124 | * |
| 1125 | * However, for flat execution profiles we do best when aggressively |
| 1126 | * translating. A heuristically decent proxy for this is to use |
| 1127 | * the value of the method pointer containing the trace as the filterKey. |
| 1128 | * Intuitively, this is saying that once any trace in a method appears hot, |
| 1129 | * immediately translate any other trace from that same method that |
| 1130 | * survives the first-level filter. Here, filterKey would be defined as: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1131 | * intptr_t filterKey = (intptr_t)self->interpSave.method |
| buzbee | 852aacd | 2010-06-08 16:24:46 -0700 | [diff] [blame] | 1132 | * |
| 1133 | * The problem is that we can't easily detect whether we're dealing |
| 1134 | * with a spiky or flat profile. If we go with the "pc" match approach, |
| 1135 | * flat profiles perform poorly. If we go with the loose "method" match, |
| 1136 | * we end up generating a lot of useless translations. Probably the |
| 1137 | * best approach in the future will be to retain profile information |
| 1138 | * across runs of each application in order to determine its profile, |
| 1139 | * and then choose once we have enough history. |
| 1140 | * |
| 1141 | * However, for now we've decided to choose a compromise filter scheme that |
| 1142 | * includes elements of both. The high order bits of the filter key |
| 1143 | * are drawn from the enclosing method, and are combined with a slice |
| 1144 | * of the low-order bits of the Dalvik pc of the trace head. The |
| 1145 | * looseness of the filter can be adjusted by changing the width of |
| 1146 | * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS). The wider |
| 1147 | * the slice, the tighter the filter. |
| 1148 | * |
| 1149 | * Note: the fixed shifts in the function below reflect assumed word |
| 1150 | * alignment for method pointers and half-word alignment of the Dalvik pc. |
| 1152 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1153 | u4 methodKey = (u4)self->interpSave.method << |
| buzbee | c35294d | 2010-06-09 14:22:50 -0700 | [diff] [blame] | 1154 | (JIT_TRACE_THRESH_FILTER_PC_BITS - 2); |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1155 | u4 pcKey = ((u4)self->interpSave.pc >> 1) & |
| buzbee | c35294d | 2010-06-09 14:22:50 -0700 | [diff] [blame] | 1156 | ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1); |
| 1157 | intptr_t filterKey = (intptr_t)(methodKey | pcKey); |
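| | /* |
| | * Worked example (illustrative; assumes JIT_TRACE_THRESH_FILTER_PC_BITS |
| | * is 4): a word-aligned method pointer has its low two bits clear, so |
| | * methodKey = method << 2 leaves the low four bits of the key zero and |
| | * pcKey fills them with bits [4:1] of the Dalvik pc. Two requests from |
| | * the same method whose trace heads share those pc bits thus produce |
| | * the same filterKey and can satisfy the second-level filter. |
| | */ |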
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1158 | bool debugOrProfile = dvmDebuggerOrProfilerActive(); |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1159 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1160 | /* Check if the JIT request can be handled now */ |
| 1161 | if (gDvmJit.pJitEntryTable != NULL && debugOrProfile == false) { |
| 1162 | /* Bypass the filter for hot trace requests or during stress mode */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1163 | if (self->jitState == kJitTSelectRequest && |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1164 | gDvmJit.threshold > 6) { |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1165 | /* Two-level filtering scheme */ |
| 1166 | for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1167 | if (filterKey == self->threshFilter[i]) { |
| 1168 | self->threshFilter[i] = 0; // Reset filter entry |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1169 | break; |
| 1170 | } |
| Bill Buzbee | 48f1824 | 2009-06-19 16:02:27 -0700 | [diff] [blame] | 1171 | } |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1172 | if (i == JIT_TRACE_THRESH_FILTER_SIZE) { |
| 1173 | /* |
| 1174 | * Use random replacement policy - otherwise we could miss a |
| 1175 | * large loop that contains more traces than the size of our |
| 1176 | * filter array. |
| 1177 | */ |
| 1178 | i = rand() % JIT_TRACE_THRESH_FILTER_SIZE; |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1179 | self->threshFilter[i] = filterKey; |
| 1180 | self->jitState = kJitDone; |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1181 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1182 | } |
| Bill Buzbee | d726991 | 2009-11-10 14:31:32 -0800 | [diff] [blame] | 1183 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1184 | /* If the compiler is backlogged, cancel any JIT actions */ |
| 1185 | if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1186 | self->jitState = kJitDone; |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1187 | } |
| Bill Buzbee | d726991 | 2009-11-10 14:31:32 -0800 | [diff] [blame] | 1188 | |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1189 | /* |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1190 | * Check for additional reasons that might force the trace select |
| 1191 | * request to be dropped |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1192 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1193 | if (self->jitState == kJitTSelectRequest || |
| 1194 | self->jitState == kJitTSelectRequestHot) { |
| 1195 | if (dvmJitFindEntry(self->interpSave.pc, false)) { |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1196 | /* In progress - nothing to do */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1197 | self->jitState = kJitDone; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1198 | } else { |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1199 | JitEntry *slot = lookupAndAdd(self->interpSave.pc, |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1200 | false /* lock */, |
| 1201 | false /* method entry */); |
| 1202 | if (slot == NULL) { |
| 1203 | /* |
| 1204 | * Table is full. This should have been |
| 1205 | * detected by the compiler thread and the table |
| 1206 | * resized before we run into it here. Assume bad things |
| 1207 | * are afoot and disable profiling. |
| 1208 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1209 | self->jitState = kJitDone; |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1210 | LOGD("JIT: JitTable full, disabling profiling"); |
| 1211 | dvmJitStopTranslationRequests(); |
| 1212 | } |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1213 | } |
| 1214 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1215 | |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1216 | switch (self->jitState) { |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1217 | case kJitTSelectRequest: |
| Ben Cheng | 40094c1 | 2010-02-24 20:58:44 -0800 | [diff] [blame] | 1218 | case kJitTSelectRequestHot: |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1219 | self->jitState = kJitTSelect; |
| 1220 | self->currTraceHead = self->interpSave.pc; |
| 1221 | self->currTraceRun = 0; |
| 1222 | self->totalTraceLen = 0; |
| 1223 | self->currRunHead = self->interpSave.pc; |
| 1224 | self->currRunLen = 0; |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame^] | 1225 | self->trace[0].info.frag.startOffset = |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1226 | self->interpSave.pc - self->interpSave.method->insns; |
| Ben Cheng | 385828e | 2011-03-04 16:48:33 -0800 | [diff] [blame^] | 1227 | self->trace[0].info.frag.numInsts = 0; |
| 1228 | self->trace[0].info.frag.runEnd = false; |
| 1229 | self->trace[0].info.frag.hint = kJitHintNone; |
| 1230 | self->trace[0].isCode = true; |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1231 | self->lastPC = 0; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1232 | break; |
| 1233 | /* |
| 1234 | * From the JIT's perspective, there is no need to stay in the debug |
| 1235 | * interpreter unless a debugger/profiler is attached. |
| 1236 | */ |
| 1237 | case kJitDone: |
| 1238 | switchInterp = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1239 | break; |
| 1240 | default: |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1241 | LOGE("Unexpected JIT state: %d entry point: %d", |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1242 | self->jitState, self->entryPoint); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1243 | dvmAbort(); |
| 1244 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1245 | } else { |
| 1246 | /* |
| 1247 | * Cannot build trace this time - ready to leave the dbg interpreter |
| 1248 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1249 | self->jitState = kJitDone; |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1250 | switchInterp = true; |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1251 | } |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1252 | |
| 1253 | /* |
| 1254 | * Final check to see if we can really switch the interpreter. Make sure |
| 1255 | * the jitState is kJitDone when switchInterp is set to true. |
| 1256 | */ |
| buzbee | 9f601a9 | 2011-02-11 17:48:20 -0800 | [diff] [blame] | 1257 | assert(switchInterp == false || self->jitState == kJitDone); |
| Ben Cheng | 1a7b9d7 | 2010-09-20 22:20:31 -0700 | [diff] [blame] | 1258 | return switchInterp && !debugOrProfile && |
| 1259 | !dvmJitStayInPortableInterpreter(); |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1260 | } |
| 1261 | |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1262 | /* |
| 1263 | * Resizes the JitTable. The requested size must be a power of 2; returns true on failure. |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1264 | * Stops all threads, and thus is a heavyweight operation. May only be called |
| 1265 | * by the compiler thread. |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1266 | */ |
| 1267 | bool dvmJitResizeJitTable( unsigned int size ) |
| 1268 | { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1269 | JitEntry *pNewTable; |
| 1270 | JitEntry *pOldTable; |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1271 | JitEntry tempEntry; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1272 | u4 newMask; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1273 | unsigned int oldSize; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1274 | unsigned int i; |
| 1275 | |
| Ben Cheng | 3f02aa4 | 2009-08-14 13:52:09 -0700 | [diff] [blame] | 1276 | assert(gDvmJit.pJitEntryTable != NULL); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1277 | assert(size && !(size & (size - 1))); /* Is power of 2? */ |
| 1278 | |
| Ben Cheng | a497359 | 2010-03-31 11:59:18 -0700 | [diff] [blame] | 1279 | LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1280 | |
| 1281 | newMask = size - 1; |
| 1282 | |
| 1283 | if (size <= gDvmJit.jitTableSize) { |
| 1284 | return true; |
| 1285 | } |
| 1286 | |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1287 | /* Make sure requested size is compatible with chain field width */ |
| 1288 | tempEntry.u.info.chain = size; |
| 1289 | if (tempEntry.u.info.chain != size) { |
| 1290 | LOGD("Jit: JitTable request of %d too big", size); |
| 1291 | return true; |
| 1292 | } |
| 1293 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1294 | pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable)); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1295 | if (pNewTable == NULL) { |
| 1296 | return true; |
| 1297 | } |
| 1298 | for (i=0; i< size; i++) { |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1299 | pNewTable[i].u.info.chain = size; /* Initialize chain termination */ |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1300 | } |
| 1301 | |
| 1302 | /* Stop all other interpreting/jit'ng threads */ |
| Ben Cheng | a8e64a7 | 2009-10-20 13:01:36 -0700 | [diff] [blame] | 1303 | dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1304 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1305 | pOldTable = gDvmJit.pJitEntryTable; |
| 1306 | oldSize = gDvmJit.jitTableSize; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1307 | |
| 1308 | dvmLockMutex(&gDvmJit.tableLock); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1309 | gDvmJit.pJitEntryTable = pNewTable; |
| 1310 | gDvmJit.jitTableSize = size; |
| 1311 | gDvmJit.jitTableMask = size - 1; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1312 | gDvmJit.jitTableEntriesUsed = 0; |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1313 | |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1314 | for (i=0; i < oldSize; i++) { |
| 1315 | if (pOldTable[i].dPC) { |
| 1316 | JitEntry *p; |
| 1317 | u2 chain; |
| Ben Cheng | cfdeca3 | 2011-01-14 11:36:46 -0800 | [diff] [blame] | 1318 | p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/, |
| 1319 | pOldTable[i].u.info.isMethodEntry); |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1320 | p->codeAddress = pOldTable[i].codeAddress; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1321 | /* We need to preserve the new chain field, but copy the rest */ |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1322 | chain = p->u.info.chain; |
| 1323 | p->u = pOldTable[i].u; |
| 1324 | p->u.info.chain = chain; |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1325 | } |
| 1326 | } |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1327 | |
| Bill Buzbee | 964a7b0 | 2010-01-28 12:54:19 -0800 | [diff] [blame] | 1328 | dvmUnlockMutex(&gDvmJit.tableLock); |
| Bill Buzbee | 716f120 | 2009-07-23 13:22:09 -0700 | [diff] [blame] | 1329 | |
| 1330 | free(pOldTable); |
| 1331 | |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1332 | /* Restart the world */ |
| Ben Cheng | a8e64a7 | 2009-10-20 13:01:36 -0700 | [diff] [blame] | 1333 | dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE); |
| Bill Buzbee | 2717622 | 2009-06-09 09:20:16 -0700 | [diff] [blame] | 1334 | |
| 1335 | return false; |
| 1336 | } |
| 1337 | |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1338 | /* |
| Ben Cheng | 60c24f4 | 2010-01-04 12:29:56 -0800 | [diff] [blame] | 1339 | * Reset the JitTable to the initial clean state. |
| 1340 | */ |
| 1341 | void dvmJitResetTable(void) |
| 1342 | { |
| 1343 | JitEntry *jitEntry = gDvmJit.pJitEntryTable; |
| 1344 | unsigned int size = gDvmJit.jitTableSize; |
| 1345 | unsigned int i; |
| 1346 | |
| 1347 | dvmLockMutex(&gDvmJit.tableLock); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1348 | |
| 1349 | /* Note: if there is a need to preserve any existing counts, do so here. */ |
| buzbee | 38c4134 | 2011-01-11 15:45:49 -0800 | [diff] [blame] | 1350 | if (gDvmJit.pJitTraceProfCounters) { |
| 1351 | for (i=0; i < JIT_PROF_BLOCK_BUCKETS; i++) { |
| 1352 | if (gDvmJit.pJitTraceProfCounters->buckets[i]) |
| 1353 | memset((void *) gDvmJit.pJitTraceProfCounters->buckets[i], |
| 1354 | 0, sizeof(JitTraceCounter_t) * JIT_PROF_BLOCK_ENTRIES); |
| 1355 | } |
| 1356 | gDvmJit.pJitTraceProfCounters->next = 0; |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1357 | } |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1358 | |
| Ben Cheng | 60c24f4 | 2010-01-04 12:29:56 -0800 | [diff] [blame] | 1359 | memset((void *) jitEntry, 0, sizeof(JitEntry) * size); |
| 1360 | for (i=0; i< size; i++) { |
| 1361 | jitEntry[i].u.info.chain = size; /* Initialize chain termination */ |
| 1362 | } |
| 1363 | gDvmJit.jitTableEntriesUsed = 0; |
| 1364 | dvmUnlockMutex(&gDvmJit.tableLock); |
| 1365 | } |
| 1366 | |
| 1367 | /* |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1368 | * Return the address of the next trace profile counter. This address |
| 1369 | * will be embedded in the generated code for the trace, and thus cannot |
| 1370 | * change while the trace exists. |
| 1371 | */ |
| 1372 | JitTraceCounter_t *dvmJitNextTraceCounter() |
| 1373 | { |
| 1374 | int idx = gDvmJit.pJitTraceProfCounters->next / JIT_PROF_BLOCK_ENTRIES; |
| 1375 | int elem = gDvmJit.pJitTraceProfCounters->next % JIT_PROF_BLOCK_ENTRIES; |
| 1376 | JitTraceCounter_t *res; |
| 1377 | /* Lazily allocate blocks of counters */ |
| 1378 | if (!gDvmJit.pJitTraceProfCounters->buckets[idx]) { |
| 1379 | JitTraceCounter_t *p = |
| 1380 | (JitTraceCounter_t*) calloc(JIT_PROF_BLOCK_ENTRIES, sizeof(*p)); |
| 1381 | if (!p) { |
| 1382 | LOGE("Failed to allocate block of trace profile counters"); |
| 1383 | dvmAbort(); |
| 1384 | } |
| 1385 | gDvmJit.pJitTraceProfCounters->buckets[idx] = p; |
| 1386 | } |
| 1387 | res = &gDvmJit.pJitTraceProfCounters->buckets[idx][elem]; |
| 1388 | gDvmJit.pJitTraceProfCounters->next++; |
| 1389 | return res; |
| 1390 | } |
| 1391 | |
| 1392 | /* |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1393 | * Float/double conversion requires clamping to the min and max of the integer |
| 1394 | * form. If the target doesn't handle this natively, use these helpers. |
| 1395 | */ |
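| | /* |
| | * Both helpers follow the Java/Dalvik narrowing rules: values at or above |
| | * the clamped maximum saturate to the most positive s8, values at or below |
| | * the clamped minimum saturate to the most negative s8, and NaN converts |
| | * to 0. |
| | */ |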
| 1396 | s8 dvmJitd2l(double d) |
| 1397 | { |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1398 | static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL; |
| 1399 | static const double kMinLong = (double)(s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1400 | if (d >= kMaxLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1401 | return (s8)0x7fffffffffffffffULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1402 | else if (d <= kMinLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1403 | return (s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1404 | else if (d != d) // NaN case |
| 1405 | return 0; |
| 1406 | else |
| 1407 | return (s8)d; |
| 1408 | } |
| 1409 | |
| 1410 | s8 dvmJitf2l(float f) |
| 1411 | { |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1412 | static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL; |
| 1413 | static const float kMinLong = (float)(s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1414 | if (f >= kMaxLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1415 | return (s8)0x7fffffffffffffffULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1416 | else if (f <= kMinLong) |
| Bill Buzbee | 9727c3d | 2009-08-01 11:32:36 -0700 | [diff] [blame] | 1417 | return (s8)0x8000000000000000ULL; |
| Bill Buzbee | 50a6bf2 | 2009-07-08 13:08:04 -0700 | [diff] [blame] | 1418 | else if (f != f) // NaN case |
| 1419 | return 0; |
| 1420 | else |
| 1421 | return (s8)f; |
| 1422 | } |
| 1423 | |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1424 | /* Should only be called by the compiler thread */ |
| 1425 | void dvmJitChangeProfileMode(TraceProfilingModes newState) |
| 1426 | { |
| 1427 | if (gDvmJit.profileMode != newState) { |
| 1428 | gDvmJit.profileMode = newState; |
| 1429 | dvmJitUnchainAll(); |
| 1430 | } |
| 1431 | } |
| 1432 | |
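| | /* |
| | * Ask the compiler thread to turn trace profiling on: a periodically-off |
| | * profile becomes periodically-on, and a disabled profile becomes |
| | * continuous. The change is enqueued as a kWorkOrderProfileMode work |
| | * order so that only the compiler thread ever flips the mode. |
| | */ |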
| 1433 | void dvmJitTraceProfilingOn() |
| 1434 | { |
| 1435 | if (gDvmJit.profileMode == kTraceProfilingPeriodicOff) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1436 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1437 | (void*) kTraceProfilingPeriodicOn); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1438 | else if (gDvmJit.profileMode == kTraceProfilingDisabled) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1439 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1440 | (void*) kTraceProfilingContinuous); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1441 | } |
| 1442 | |
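| | /* |
| | * Mirror image of dvmJitTraceProfilingOn(): ask the compiler thread to |
| | * move a periodically-on profile back to periodically-off, or a |
| | * continuous profile back to disabled. |
| | */ |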
| 1443 | void dvmJitTraceProfilingOff() |
| 1444 | { |
| 1445 | if (gDvmJit.profileMode == kTraceProfilingPeriodicOn) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1446 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1447 | (void*) kTraceProfilingPeriodicOff); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1448 | else if (gDvmJit.profileMode == kTraceProfilingContinuous) |
| Bill Buzbee | 1b3da59 | 2011-02-03 07:38:22 -0800 | [diff] [blame] | 1449 | dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode, |
| 1450 | (void*) kTraceProfilingDisabled); |
| buzbee | 2e152ba | 2010-12-15 16:32:35 -0800 | [diff] [blame] | 1451 | } |
| 1452 | |
| Ben Cheng | ba4fc8b | 2009-06-01 13:00:29 -0700 | [diff] [blame] | 1453 | #endif /* WITH_JIT */ |