blob: 7abe72884ccff748f6672431e338579118c3b461 [file] [log] [blame]
Ben Chengba4fc8b2009-06-01 13:00:29 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#ifdef WITH_JIT
17
18/*
19 * Target independent portion of Android's Jit
20 */
21
22#include "Dalvik.h"
23#include "Jit.h"
24
25
Dan Bornsteindf4daaf2010-12-01 14:23:44 -080026#include "libdex/DexOpcodes.h"
Ben Chengba4fc8b2009-06-01 13:00:29 -070027#include <unistd.h>
28#include <pthread.h>
29#include <sys/time.h>
30#include <signal.h>
31#include "compiler/Compiler.h"
Bill Buzbee6e963e12009-06-17 16:56:19 -070032#include "compiler/CompilerUtility.h"
33#include "compiler/CompilerIR.h"
Ben Chengba4fc8b2009-06-01 13:00:29 -070034#include <errno.h>
35
Jeff Hao97319a82009-08-12 16:57:15 -070036#if defined(WITH_SELF_VERIFICATION)
37/* Allocate space for per-thread ShadowSpace data structures */
38void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
39{
40 self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
41 if (self->shadowSpace == NULL)
42 return NULL;
43
44 self->shadowSpace->registerSpaceSize = REG_SPACE;
45 self->shadowSpace->registerSpace =
46 (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));
47
48 return self->shadowSpace->registerSpace;
49}
50
51/* Free per-thread ShadowSpace data structures */
52void dvmSelfVerificationShadowSpaceFree(Thread* self)
53{
54 free(self->shadowSpace->registerSpace);
55 free(self->shadowSpace);
56}
57
/*
 * Save out PC, FP, InterpState, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 *
 * The copies captured here are later compared against the debug
 * interpreter's results by selfVerificationDebugInterp() after the trace
 * has been re-executed.
 *
 * NOTE(review): the targetTrace parameter is not referenced in this
 * function body.
 */
void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
                                   InterpState* interpState, int targetTrace)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
    /* Bytes below fp (outs + save area) and bytes at/above fp (registers) */
    unsigned preBytes = interpState->method->outsSize*4 + sizeof(StackSaveArea);
    unsigned postBytes = interpState->method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    /* A save is only legal from the idle state; log (but continue) if not */
    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    if (interpState->entryPoint == kInterpEntryResume) {
        interpState->entryPoint = kInterpEntryInstr;
#if 0
        /* Tracking the success rate of resume after single-stepping */
        if (interpState->jitResumeDPC == pc) {
            LOGD("SV single step resumed at %p", pc);
        }
        else {
            LOGD("real %p DPC %p NPC %p", pc, interpState->jitResumeDPC,
                 interpState->jitResumeNPC);
        }
#endif
    }

    // Dynamically grow shadow register space if necessary
    // NOTE(review): the calloc result is not checked here; a failed
    // allocation would leave registerSpace NULL for the memcpy below -
    // confirm callers cannot reach this on an OOM path.
    if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->glue = interpState;
    /*
     * Store the original method here in case the trace ends with a
     * return/invoke, the last method.
     */
    shadowSpace->method = interpState->method;
    /* Place the shadow frame at the top end of the register space */
    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;

    // Create a copy of the InterpState
    memcpy(&(shadowSpace->interpState), interpState, sizeof(InterpState));
    /* Redirect the copy's frame pointer and stack bound into shadow space */
    shadowSpace->interpState.fp = (u4*)shadowSpace->shadowFP;
    shadowSpace->interpState.interpStackEnd = (u1*)shadowSpace->registerSpace;

    // Create a copy of the stack
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}
132
/*
 * Save ending PC, FP and compiled code exit point to shadow space.
 * Return a pointer to the shadow space for JIT to restore state.
 *
 * Also advances the self-verification state machine: normally to the given
 * exitState, with special handling for single-instruction punts and for
 * backward branches (see the comment below).
 */
void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
                                      SelfVerificationState exitState)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
    // Official InterpState structure (the one saved in SaveState)
    InterpState *realGlue = (InterpState*)shadowSpace->glue;
    shadowSpace->endPC = pc;
    shadowSpace->endShadowFP = fp;
    shadowSpace->jitExitState = exitState;

    //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
    //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    //    (int)pc);

    /* A restore is only legal after a save; log (but continue) if not */
    if (shadowSpace->selfVerificationState != kSVSStart) {
        LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
            (int)shadowSpace->endPC);
        LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
        LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
            (int)shadowSpace->endShadowFP);
    }

    // Move the resume [ND]PC from the shadow space to the real space so that
    // the debug interpreter can return to the translation
    if (exitState == kSVSSingleStep) {
        realGlue->jitResumeNPC = shadowSpace->interpState.jitResumeNPC;
        realGlue->jitResumeDPC = shadowSpace->interpState.jitResumeDPC;
    } else {
        realGlue->jitResumeNPC = NULL;
        realGlue->jitResumeDPC = NULL;
    }

    // Special case when punting after a single instruction
    if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
        shadowSpace->selfVerificationState = kSVSIdle;
    } else if (exitState == kSVSBackwardBranch && pc < shadowSpace->startPC) {
        /*
         * Consider a trace with a backward branch:
         *   1: ..
         *   2: ..
         *   3: ..
         *   4: ..
         *   5: Goto {1 or 2 or 3 or 4}
         *
         * If instruction 5 goes to 1 and there is no single-step
         * instruction in the loop, pc is equal to shadowSpace->startPC and
         * we will honor the backward branch condition.
         *
         * If the single-step instruction is outside the loop, then after
         * resuming in the trace the startPC will be less than pc so we will
         * also honor the backward branch condition.
         *
         * If the single-step is inside the loop, we won't hit the same endPC
         * twice when the interpreter is re-executing the trace so we want to
         * cancel the backward branch condition. In this case it can be
         * detected as the endPC (ie pc) will be less than startPC.
         */
        shadowSpace->selfVerificationState = kSVSNormal;
    } else {
        shadowSpace->selfVerificationState = exitState;
    }

    return shadowSpace;
}
205
/*
 * Dump the contents of a block of virtual registers, one log line per
 * register.  Registers whose value differs from the reference copy in
 * addrRef are tagged with " X" to highlight the divergence.
 */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    int reg;
    for (reg = 0; reg < numWords; reg++) {
        const char* marker = (addr[reg] != addrRef[reg]) ? " X" : "";
        LOGD("(v%d) 0x%8x%s", reg, addr[reg], marker);
    }
}
215
/*
 * Print values maintained in shadowSpace.
 *
 * Logs the current/start/end PCs, the method identity, the interpreter and
 * shadow frame pointers, and the byte sizes of the frame regions the
 * verifier compares.
 */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    /* Bytes of register space in use by the shadow frame */
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    /* A second (callee) frame exists when the current frame is below the
     * frame captured at trace start */
    if (self->curFrame < shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
    LOGD("Method: %s", shadowSpace->method->name);
    LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}
247
248/* Print decoded instructions in the current trace */
249static void selfVerificationDumpTrace(const u2* pc, Thread* self)
250{
251 ShadowSpace* shadowSpace = self->shadowSpace;
252 StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
Ben Chengbcdc1de2009-08-21 16:18:46 -0700253 int i, addr, offset;
254 DecodedInstruction *decInsn;
Jeff Hao97319a82009-08-12 16:57:15 -0700255
256 LOGD("********** SHADOW TRACE DUMP **********");
257 for (i = 0; i < shadowSpace->traceLength; i++) {
Ben Chengbcdc1de2009-08-21 16:18:46 -0700258 addr = shadowSpace->trace[i].addr;
259 offset = (int)((u2*)addr - stackSave->method->insns);
260 decInsn = &(shadowSpace->trace[i].decInsn);
261 /* Not properly decoding instruction, some registers may be garbage */
Andy McFaddenc6b25c72010-06-22 11:01:20 -0700262 LOGD("0x%x: (0x%04x) %s",
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800263 addr, offset, dexGetOpcodeName(decInsn->opcode));
Jeff Hao97319a82009-08-12 16:57:15 -0700264 }
265}
266
Ben Chengbcdc1de2009-08-21 16:18:46 -0700267/* Code is forced into this spin loop when a divergence is detected */
Ben Chengccd6c012009-10-15 14:52:45 -0700268static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
Ben Chengbcdc1de2009-08-21 16:18:46 -0700269{
Ben Chengccd6c012009-10-15 14:52:45 -0700270 const u2 *startPC = shadowSpace->startPC;
Ben Cheng88a0f972010-02-24 15:00:40 -0800271 JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
Ben Chengccd6c012009-10-15 14:52:45 -0700272 if (desc) {
273 dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
Ben Cheng1357e942010-02-10 17:21:39 -0800274 /*
275 * This function effectively terminates the VM right here, so not
276 * freeing the desc pointer when the enqueuing fails is acceptable.
277 */
Ben Chengccd6c012009-10-15 14:52:45 -0700278 }
Ben Chengbcdc1de2009-08-21 16:18:46 -0700279 gDvmJit.selfVerificationSpin = true;
280 while(gDvmJit.selfVerificationSpin) sleep(10);
281}
282
Jeff Hao97319a82009-08-12 16:57:15 -0700283/* Manage self verification while in the debug interpreter */
Ben Chengd5adae12010-03-26 17:45:28 -0700284static bool selfVerificationDebugInterp(const u2* pc, Thread* self,
285 InterpState *interpState)
Jeff Hao97319a82009-08-12 16:57:15 -0700286{
287 ShadowSpace *shadowSpace = self->shadowSpace;
Jeff Hao97319a82009-08-12 16:57:15 -0700288 SelfVerificationState state = shadowSpace->selfVerificationState;
Ben Chengbcdc1de2009-08-21 16:18:46 -0700289
290 DecodedInstruction decInsn;
Dan Bornstein54322392010-11-17 14:16:56 -0800291 dexDecodeInstruction(pc, &decInsn);
Ben Chengbcdc1de2009-08-21 16:18:46 -0700292
Jeff Hao97319a82009-08-12 16:57:15 -0700293 //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
294 // self->threadId, (int)pc, (int)shadowSpace->endPC, state,
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800295 // shadowSpace->traceLength, dexGetOpcodeName(decInsn.opcode));
Jeff Hao97319a82009-08-12 16:57:15 -0700296
297 if (state == kSVSIdle || state == kSVSStart) {
298 LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
299 self->threadId, state);
300 selfVerificationDumpState(pc, self);
301 selfVerificationDumpTrace(pc, self);
302 }
303
Ben Chengd5adae12010-03-26 17:45:28 -0700304 /*
305 * Skip endPC once when trace has a backward branch. If the SV state is
306 * single step, keep it that way.
307 */
Jeff Hao97319a82009-08-12 16:57:15 -0700308 if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
Ben Chengd5adae12010-03-26 17:45:28 -0700309 (state != kSVSBackwardBranch && state != kSVSSingleStep)) {
Jeff Hao97319a82009-08-12 16:57:15 -0700310 shadowSpace->selfVerificationState = kSVSDebugInterp;
311 }
312
313 /* Check that the current pc is the end of the trace */
Ben Chengd5adae12010-03-26 17:45:28 -0700314 if ((state == kSVSDebugInterp || state == kSVSSingleStep) &&
315 pc == shadowSpace->endPC) {
Jeff Hao97319a82009-08-12 16:57:15 -0700316
317 shadowSpace->selfVerificationState = kSVSIdle;
318
319 /* Check register space */
320 int frameBytes = (int) shadowSpace->registerSpace +
321 shadowSpace->registerSpaceSize*4 -
322 (int) shadowSpace->shadowFP;
323 if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
Ben Chengccd6c012009-10-15 14:52:45 -0700324 LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
Jeff Hao97319a82009-08-12 16:57:15 -0700325 selfVerificationDumpState(pc, self);
326 selfVerificationDumpTrace(pc, self);
327 LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
328 (int)shadowSpace->fp, frameBytes);
Ben Chengccd6c012009-10-15 14:52:45 -0700329 selfVerificationPrintRegisters((int*)shadowSpace->fp,
330 (int*)shadowSpace->shadowFP,
331 frameBytes/4);
Jeff Hao97319a82009-08-12 16:57:15 -0700332 LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
333 (int)shadowSpace->shadowFP, frameBytes);
334 selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
Ben Chengccd6c012009-10-15 14:52:45 -0700335 (int*)shadowSpace->fp,
336 frameBytes/4);
337 selfVerificationSpinLoop(shadowSpace);
Jeff Hao97319a82009-08-12 16:57:15 -0700338 }
339 /* Check new frame if it exists (invokes only) */
340 if (self->curFrame < shadowSpace->fp) {
341 StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
342 int localRegs = (stackSave->method->registersSize -
343 stackSave->method->insSize)*4;
344 int frameBytes2 = (int) shadowSpace->fp -
345 (int) self->curFrame - localRegs;
346 if (memcmp(((char*)self->curFrame)+localRegs,
347 ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
Ben Chengccd6c012009-10-15 14:52:45 -0700348 LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
Jeff Hao97319a82009-08-12 16:57:15 -0700349 self->threadId);
350 selfVerificationDumpState(pc, self);
351 selfVerificationDumpTrace(pc, self);
352 LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
353 (int)self->curFrame, localRegs, frameBytes2);
354 selfVerificationPrintRegisters((int*)self->curFrame,
Ben Chengccd6c012009-10-15 14:52:45 -0700355 (int*)shadowSpace->endShadowFP,
356 (frameBytes2+localRegs)/4);
Jeff Hao97319a82009-08-12 16:57:15 -0700357 LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
358 (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
359 selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
Ben Chengccd6c012009-10-15 14:52:45 -0700360 (int*)self->curFrame,
361 (frameBytes2+localRegs)/4);
362 selfVerificationSpinLoop(shadowSpace);
Jeff Hao97319a82009-08-12 16:57:15 -0700363 }
364 }
365
366 /* Check memory space */
Ben Chengbcdc1de2009-08-21 16:18:46 -0700367 bool memDiff = false;
Jeff Hao97319a82009-08-12 16:57:15 -0700368 ShadowHeap* heapSpacePtr;
369 for (heapSpacePtr = shadowSpace->heapSpace;
370 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
Ben Chengbcdc1de2009-08-21 16:18:46 -0700371 int memData = *((unsigned int*) heapSpacePtr->addr);
372 if (heapSpacePtr->data != memData) {
Ben Chengccd6c012009-10-15 14:52:45 -0700373 LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
374 LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
Ben Chengbcdc1de2009-08-21 16:18:46 -0700375 heapSpacePtr->addr, memData, heapSpacePtr->data);
Jeff Hao97319a82009-08-12 16:57:15 -0700376 selfVerificationDumpState(pc, self);
377 selfVerificationDumpTrace(pc, self);
Ben Chengbcdc1de2009-08-21 16:18:46 -0700378 memDiff = true;
Jeff Hao97319a82009-08-12 16:57:15 -0700379 }
380 }
Ben Chengccd6c012009-10-15 14:52:45 -0700381 if (memDiff) selfVerificationSpinLoop(shadowSpace);
Ben Chengd5adae12010-03-26 17:45:28 -0700382
383 /*
384 * Switch to JIT single step mode to stay in the debug interpreter for
385 * one more instruction
386 */
387 if (state == kSVSSingleStep) {
388 interpState->jitState = kJitSingleStepEnd;
389 }
Jeff Hao97319a82009-08-12 16:57:15 -0700390 return true;
391
392 /* If end not been reached, make sure max length not exceeded */
393 } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
394 LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
Ben Chengccd6c012009-10-15 14:52:45 -0700395 LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
Jeff Hao97319a82009-08-12 16:57:15 -0700396 (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
397 selfVerificationDumpState(pc, self);
398 selfVerificationDumpTrace(pc, self);
Ben Chengccd6c012009-10-15 14:52:45 -0700399 selfVerificationSpinLoop(shadowSpace);
Jeff Hao97319a82009-08-12 16:57:15 -0700400
401 return true;
402 }
Ben Chengbcdc1de2009-08-21 16:18:46 -0700403 /* Log the instruction address and decoded instruction for debug */
Jeff Hao97319a82009-08-12 16:57:15 -0700404 shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
Ben Chengbcdc1de2009-08-21 16:18:46 -0700405 shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
Jeff Hao97319a82009-08-12 16:57:15 -0700406 shadowSpace->traceLength++;
407
408 return false;
409}
410#endif
411
Ben Chengba4fc8b2009-06-01 13:00:29 -0700412/*
413 * If one of our fixed tables or the translation buffer fills up,
414 * call this routine to avoid wasting cycles on future translation requests.
415 */
416void dvmJitStopTranslationRequests()
417{
418 /*
419 * Note 1: This won't necessarily stop all translation requests, and
420 * operates on a delayed mechanism. Running threads look to the copy
421 * of this value in their private InterpState structures and won't see
422 * this change until it is refreshed (which happens on interpreter
423 * entry).
424 * Note 2: This is a one-shot memory leak on this table. Because this is a
425 * permanent off switch for Jit profiling, it is a one-time leak of 1K
426 * bytes, and no further attempt will be made to re-allocate it. Can't
427 * free it because some thread may be holding a reference.
428 */
Bill Buzbeeb1d80442009-12-17 14:55:21 -0800429 gDvmJit.pProfTable = NULL;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700430}
431
Ben Cheng978738d2010-05-13 13:45:57 -0700432#if defined(WITH_JIT_TUNING)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700433/* Convenience function to increment counter from assembly code */
Ben Cheng6c10a972009-10-29 14:39:18 -0700434void dvmBumpNoChain(int from)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700435{
Ben Cheng6c10a972009-10-29 14:39:18 -0700436 gDvmJit.noChainExit[from]++;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700437}
438
439/* Convenience function to increment counter from assembly code */
440void dvmBumpNormal()
441{
Ben Cheng6c10a972009-10-29 14:39:18 -0700442 gDvmJit.normalExit++;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700443}
444
/*
 * Convenience function to increment counter from assembly code.
 * Bumps gDvmJit.puntExit.  The 'from' argument is unused here —
 * presumably kept so the assembly call sites share one calling shape;
 * confirm before changing the signature.
 */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
450#endif
451
/*
 * Dumps debugging & tuning stats to the log.
 *
 * Scans the whole JitEntry table, counting live entries ("traces"),
 * entries whose code address is the interpret-only template ("stubs"),
 * and chained slots, then logs summary lines.  Extra counters are logged
 * when built with WITH_JIT_TUNING.  No-op if the table was never allocated.
 */
void dvmJitStats()
{
    int i;
    int hit;         /* entries with a valid dPC */
    int not_hit;     /* empty slots */
    int chains;      /* slots linked into a collision chain */
    int stubs;       /* live entries still pointing at the interpret template */
    if (gDvmJit.pJitEntryTable) {
        for (i=0, stubs=chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0) {
                hit++;
                if (gDvmJit.pJitEntryTable[i].codeAddress ==
                      dvmCompilerGetInterpretTemplate())
                    stubs++;
            } else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD("JIT: table size is %d, entries used is %d",
             gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed);
        LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
             hit, not_hit + hit, chains, gDvmJit.threshold,
             gDvmJit.blockingMode ? "Blocking" : "Non-blocking");

#if defined(WITH_JIT_TUNING)
        LOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches);

        LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
             gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
             gDvmJit.normalExit, gDvmJit.puntExit);

        LOGD("JIT: ICHits: %d", gDvmICHitCount);

        LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
             "%d switch overflow",
             gDvmJit.noChainExit[kInlineCacheMiss],
             gDvmJit.noChainExit[kCallsiteInterpreted],
             gDvmJit.noChainExit[kSwitchOverflow]);

        LOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, "
             "%d dropped",
             gDvmJit.icPatchInit, gDvmJit.icPatchRejected,
             gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued,
             gDvmJit.icPatchDropped);

        LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
             gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
             gDvmJit.invokeNative, gDvmJit.returnOp);
        LOGD("JIT: Inline: %d mgetter, %d msetter, %d pgetter, %d psetter",
             gDvmJit.invokeMonoGetterInlined, gDvmJit.invokeMonoSetterInlined,
             gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined);
        LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
        /* NOTE(review): divides by numCompilations with no zero check -
         * confirm this can't be reached before the first compilation. */
        LOGD("JIT: Avg unit compilation time: %llu us",
             gDvmJit.jitTime / gDvmJit.numCompilations);
#endif

        LOGD("JIT: %d Translation chains, %d interp stubs",
             gDvmJit.translationChains, stubs);
        if (gDvmJit.profileMode == kTraceProfilingContinuous) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}
519
Bill Buzbee716f1202009-07-23 13:22:09 -0700520
Andy McFadden953a0ed2010-09-17 15:48:38 -0700521static void setTraceConstruction(JitEntry *slot, bool value)
Bill Buzbeed7269912009-11-10 14:31:32 -0800522{
523
524 JitEntryInfoUnion oldValue;
525 JitEntryInfoUnion newValue;
526 do {
527 oldValue = slot->u;
528 newValue = oldValue;
529 newValue.info.traceConstruction = value;
Andy McFadden6e10b9a2010-06-14 15:24:39 -0700530 } while (android_atomic_release_cas(oldValue.infoWord, newValue.infoWord,
531 &slot->u.infoWord) != 0);
Bill Buzbeed7269912009-11-10 14:31:32 -0800532}
533
/*
 * Reset a trace head entry: point its code address back at the
 * interpret-only template and clear the trace-construction flag.
 * The interpState parameter is unused here — presumably retained for
 * call-site symmetry; confirm before removing.
 */
static void resetTracehead(InterpState* interpState, JitEntry *slot)
{
    slot->codeAddress = dvmCompilerGetInterpretTemplate();
    setTraceConstruction(slot, false);
}
539
540/* Clean up any pending trace builds */
541void dvmJitAbortTraceSelect(InterpState* interpState)
542{
543 if (interpState->jitState == kJitTSelect)
Ben Chenga4973592010-03-31 11:59:18 -0700544 interpState->jitState = kJitDone;
Bill Buzbeed7269912009-11-10 14:31:32 -0800545}
546
/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 *
 * An entry is keyed by (dPC, isMethodEntry).  The fast path walks the
 * collision chain without the lock; only on a miss is gDvmJit.tableLock
 * taken (unless callerLocked says the caller already holds it).
 */
static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked,
                              bool isMethodEntry)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /*
     * Walk the bucket chain to find an exact match for our PC and trace/method
     * type
     */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           ((gDvmJit.pJitEntryTable[idx].dPC != dPC) ||
            (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry !=
             isMethodEntry))) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC ||
        gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) {
        /*
         * No match.  Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        if (!callerLocked)
            dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case).  Otherwise we're going
         * to have to find a free slot and chain it.
         */
        ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            /* Re-walk the rest of the chain under the lock */
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                        isMethodEntry) {
                    /* Another thread got there first for this dPC */
                    if (!callerLocked)
                        dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0;  /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (android_atomic_release_cas(oldValue.infoWord,
                         newValue.infoWord,
                         &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry;
            /*
             * Initialize codeAddress and allocate the slot.  Must
             * happen in this order (once dPC is set, the entry is live).
             */
            android_atomic_release_store((int32_t)dPC,
                 (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC);
            /* NOTE(review): this plain store repeats the atomic store
             * above to the same field; it looks redundant — confirm
             * before removing. */
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        if (!callerLocked)
            dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}
Ben Chenga4973592010-03-31 11:59:18 -0700647
Bill Buzbee964a7b02010-01-28 12:54:19 -0800648/*
Ben Cheng7a2697d2010-06-07 13:44:23 -0700649 * Append the class ptr of "this" and the current method ptr to the current
650 * trace. That is, the trace runs will contain the following components:
651 * + trace run that ends with an invoke (existing entry)
652 * + thisClass (new)
653 * + calleeMethod (new)
654 */
655static void insertClassMethodInfo(InterpState* interpState,
656 const ClassObject* thisClass,
657 const Method* calleeMethod,
658 const DecodedInstruction* insn)
659{
660 int currTraceRun = ++interpState->currTraceRun;
661 interpState->trace[currTraceRun].meta = (void *) thisClass;
662 currTraceRun = ++interpState->currTraceRun;
663 interpState->trace[currTraceRun].meta = (void *) calleeMethod;
664}
665
666/*
Ben Chengd44faf52010-06-02 15:33:51 -0700667 * Check if the next instruction following the invoke is a move-result and if
Ben Cheng7a2697d2010-06-07 13:44:23 -0700668 * so add it to the trace. That is, this will add the trace run that includes
669 * the move-result to the trace list.
670 *
671 * + trace run that ends with an invoke (existing entry)
672 * + thisClass (existing entry)
673 * + calleeMethod (existing entry)
674 * + move result (new)
Ben Chengd44faf52010-06-02 15:33:51 -0700675 *
676 * lastPC, len, offset are all from the preceding invoke instruction
677 */
678static void insertMoveResult(const u2 *lastPC, int len, int offset,
679 InterpState *interpState)
680{
681 DecodedInstruction nextDecInsn;
682 const u2 *moveResultPC = lastPC + len;
683
Dan Bornstein54322392010-11-17 14:16:56 -0800684 dexDecodeInstruction(moveResultPC, &nextDecInsn);
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800685 if ((nextDecInsn.opcode != OP_MOVE_RESULT) &&
686 (nextDecInsn.opcode != OP_MOVE_RESULT_WIDE) &&
687 (nextDecInsn.opcode != OP_MOVE_RESULT_OBJECT))
Ben Chengd44faf52010-06-02 15:33:51 -0700688 return;
689
690 /* We need to start a new trace run */
691 int currTraceRun = ++interpState->currTraceRun;
692 interpState->currRunHead = moveResultPC;
693 interpState->trace[currTraceRun].frag.startOffset = offset + len;
694 interpState->trace[currTraceRun].frag.numInsts = 1;
695 interpState->trace[currTraceRun].frag.runEnd = false;
696 interpState->trace[currTraceRun].frag.hint = kJitHintNone;
Ben Cheng7a2697d2010-06-07 13:44:23 -0700697 interpState->trace[currTraceRun].frag.isCode = true;
Ben Chengd44faf52010-06-02 15:33:51 -0700698 interpState->totalTraceLen++;
699
Dan Bornsteine4852762010-12-02 12:45:00 -0800700 interpState->currRunLen = dexGetWidthFromInstruction(moveResultPC);
Ben Chengd44faf52010-06-02 15:33:51 -0700701}
702
703/*
Ben Chengba4fc8b2009-06-01 13:00:29 -0700704 * Adds to the current trace request one instruction at a time, just
705 * before that instruction is interpreted. This is the primary trace
706 * selection function. NOTE: return instruction are handled a little
707 * differently. In general, instructions are "proposed" to be added
708 * to the current trace prior to interpretation. If the interpreter
709 * then successfully completes the instruction, is will be considered
710 * part of the request. This allows us to examine machine state prior
711 * to interpretation, and also abort the trace request if the instruction
712 * throws or does something unexpected. However, return instructions
713 * will cause an immediate end to the translation request - which will
714 * be passed to the compiler before the return completes. This is done
715 * in response to special handling of returns by the interpreter (and
716 * because returns cannot throw in a way that causes problems for the
717 * translated code.
718 */
Ben Cheng7a2697d2010-06-07 13:44:23 -0700719int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState,
720 const ClassObject* thisClass, const Method* curMethod)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700721{
Carl Shapiroe3c01da2010-05-20 22:54:18 -0700722 int flags, len;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700723 int switchInterp = false;
Ben Chenga4973592010-03-31 11:59:18 -0700724 bool debugOrProfile = dvmDebuggerOrProfilerActive();
Ben Cheng7a2697d2010-06-07 13:44:23 -0700725 /* Stay in the dbg interpreter for the next instruction */
726 bool stayOneMoreInst = false;
Bill Buzbeed7269912009-11-10 14:31:32 -0800727
Ben Cheng1c52e6d2010-07-02 13:00:39 -0700728 /*
729 * Bug 2710533 - dalvik crash when disconnecting debugger
730 *
731 * Reset the entry point to the default value. If needed it will be set to a
732 * specific value in the corresponding case statement (eg kJitSingleStepEnd)
733 */
734 interpState->entryPoint = kInterpEntryInstr;
735
Ben Cheng79d173c2009-09-29 16:12:51 -0700736 /* Prepare to handle last PC and stage the current PC */
737 const u2 *lastPC = interpState->lastPC;
738 interpState->lastPC = pc;
739
Ben Chengba4fc8b2009-06-01 13:00:29 -0700740 switch (interpState->jitState) {
Ben Chengba4fc8b2009-06-01 13:00:29 -0700741 int offset;
742 DecodedInstruction decInsn;
743 case kJitTSelect:
Ben Chengdc84bb22009-10-02 12:58:52 -0700744 /* First instruction - just remember the PC and exit */
745 if (lastPC == NULL) break;
Ben Cheng79d173c2009-09-29 16:12:51 -0700746 /* Grow the trace around the last PC if jitState is kJitTSelect */
Dan Bornstein54322392010-11-17 14:16:56 -0800747 dexDecodeInstruction(lastPC, &decInsn);
Ben Cheng6c10a972009-10-29 14:39:18 -0700748
749 /*
750 * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
751 * to the amount of space it takes to generate the chaining
752 * cells.
753 */
754 if (interpState->totalTraceLen != 0 &&
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800755 (decInsn.opcode == OP_PACKED_SWITCH ||
756 decInsn.opcode == OP_SPARSE_SWITCH)) {
Ben Cheng6c10a972009-10-29 14:39:18 -0700757 interpState->jitState = kJitTSelectEnd;
758 break;
759 }
760
Bill Buzbeef9f33282009-11-22 12:45:30 -0800761
Ben Chengba4fc8b2009-06-01 13:00:29 -0700762#if defined(SHOW_TRACE)
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800763 LOGD("TraceGen: adding %s", dexGetOpcodeName(decInsn.opcode));
Ben Chengba4fc8b2009-06-01 13:00:29 -0700764#endif
Dan Bornsteine4852762010-12-02 12:45:00 -0800765 flags = dexGetFlagsFromOpcode(decInsn.opcode);
766 len = dexGetWidthFromInstruction(lastPC);
Ben Cheng79d173c2009-09-29 16:12:51 -0700767 offset = lastPC - interpState->method->insns;
768 assert((unsigned) offset <
769 dvmGetMethodInsnsSize(interpState->method));
770 if (lastPC != interpState->currRunHead + interpState->currRunLen) {
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700771 int currTraceRun;
772 /* We need to start a new trace run */
773 currTraceRun = ++interpState->currTraceRun;
774 interpState->currRunLen = 0;
Ben Cheng79d173c2009-09-29 16:12:51 -0700775 interpState->currRunHead = (u2*)lastPC;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700776 interpState->trace[currTraceRun].frag.startOffset = offset;
777 interpState->trace[currTraceRun].frag.numInsts = 0;
778 interpState->trace[currTraceRun].frag.runEnd = false;
779 interpState->trace[currTraceRun].frag.hint = kJitHintNone;
Ben Cheng7a2697d2010-06-07 13:44:23 -0700780 interpState->trace[currTraceRun].frag.isCode = true;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700781 }
782 interpState->trace[interpState->currTraceRun].frag.numInsts++;
783 interpState->totalTraceLen++;
784 interpState->currRunLen += len;
Ben Cheng79d173c2009-09-29 16:12:51 -0700785
Ben Chengd44faf52010-06-02 15:33:51 -0700786 /*
787 * If the last instruction is an invoke, we will try to sneak in
788 * the move-result* (if existent) into a separate trace run.
789 */
790 int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;
791
Ben Cheng79d173c2009-09-29 16:12:51 -0700792 /* Will probably never hit this with the current trace buildier */
Ben Chengd44faf52010-06-02 15:33:51 -0700793 if (interpState->currTraceRun ==
794 (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
Ben Cheng79d173c2009-09-29 16:12:51 -0700795 interpState->jitState = kJitTSelectEnd;
796 }
797
Dan Bornsteinc2b486f2010-11-12 16:07:16 -0800798 if (!dexIsGoto(flags) &&
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700799 ((flags & (kInstrCanBranch |
800 kInstrCanSwitch |
801 kInstrCanReturn |
802 kInstrInvoke)) != 0)) {
803 interpState->jitState = kJitTSelectEnd;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700804#if defined(SHOW_TRACE)
Ben Chengd44faf52010-06-02 15:33:51 -0700805 LOGD("TraceGen: ending on %s, basic block end",
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800806 dexGetOpcodeName(decInsn.opcode));
Ben Chengba4fc8b2009-06-01 13:00:29 -0700807#endif
Ben Chengd44faf52010-06-02 15:33:51 -0700808
809 /*
Ben Cheng7a2697d2010-06-07 13:44:23 -0700810 * If the current invoke is a {virtual,interface}, get the
811 * current class/method pair into the trace as well.
Ben Chengd44faf52010-06-02 15:33:51 -0700812 * If the next instruction is a variant of move-result, insert
Ben Cheng7a2697d2010-06-07 13:44:23 -0700813 * it to the trace too.
Ben Chengd44faf52010-06-02 15:33:51 -0700814 */
815 if (flags & kInstrInvoke) {
Ben Cheng7a2697d2010-06-07 13:44:23 -0700816 insertClassMethodInfo(interpState, thisClass, curMethod,
817 &decInsn);
Ben Chengd44faf52010-06-02 15:33:51 -0700818 insertMoveResult(lastPC, len, offset, interpState);
819 }
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700820 }
Bill Buzbee2ce8a6c2009-12-03 15:09:32 -0800821 /* Break on throw or self-loop */
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800822 if ((decInsn.opcode == OP_THROW) || (lastPC == pc)){
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700823 interpState->jitState = kJitTSelectEnd;
824 }
825 if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
826 interpState->jitState = kJitTSelectEnd;
827 }
Ben Chenga4973592010-03-31 11:59:18 -0700828 /* Abandon the trace request if debugger/profiler is attached */
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700829 if (debugOrProfile) {
Ben Chenga4973592010-03-31 11:59:18 -0700830 interpState->jitState = kJitDone;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700831 break;
832 }
833 if ((flags & kInstrCanReturn) != kInstrCanReturn) {
834 break;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700835 }
Ben Cheng7a2697d2010-06-07 13:44:23 -0700836 else {
837 /*
838 * Last instruction is a return - stay in the dbg interpreter
839 * for one more instruction if it is a non-void return, since
840 * we don't want to start a trace with move-result as the first
841 * instruction (which is already included in the trace
842 * containing the invoke.
843 */
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800844 if (decInsn.opcode != OP_RETURN_VOID) {
Ben Cheng7a2697d2010-06-07 13:44:23 -0700845 stayOneMoreInst = true;
846 }
847 }
Ben Chengba4fc8b2009-06-01 13:00:29 -0700848 /* NOTE: intentional fallthrough for returns */
849 case kJitTSelectEnd:
850 {
Ben Chenga4973592010-03-31 11:59:18 -0700851 /* Bad trace */
Ben Chengba4fc8b2009-06-01 13:00:29 -0700852 if (interpState->totalTraceLen == 0) {
Bill Buzbeed7269912009-11-10 14:31:32 -0800853 /* Bad trace - mark as untranslatable */
Ben Chenga4973592010-03-31 11:59:18 -0700854 interpState->jitState = kJitDone;
855 switchInterp = true;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700856 break;
857 }
Ben Cheng7a2697d2010-06-07 13:44:23 -0700858
859 int lastTraceDesc = interpState->currTraceRun;
860
861 /* Extend a new empty desc if the last slot is meta info */
862 if (!interpState->trace[lastTraceDesc].frag.isCode) {
863 lastTraceDesc = ++interpState->currTraceRun;
864 interpState->trace[lastTraceDesc].frag.startOffset = 0;
865 interpState->trace[lastTraceDesc].frag.numInsts = 0;
866 interpState->trace[lastTraceDesc].frag.hint = kJitHintNone;
867 interpState->trace[lastTraceDesc].frag.isCode = true;
868 }
869
870 /* Mark the end of the trace runs */
871 interpState->trace[lastTraceDesc].frag.runEnd = true;
872
Ben Chengba4fc8b2009-06-01 13:00:29 -0700873 JitTraceDescription* desc =
874 (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
875 sizeof(JitTraceRun) * (interpState->currTraceRun+1));
Ben Cheng7a2697d2010-06-07 13:44:23 -0700876
Ben Chengba4fc8b2009-06-01 13:00:29 -0700877 if (desc == NULL) {
878 LOGE("Out of memory in trace selection");
879 dvmJitStopTranslationRequests();
Ben Chenga4973592010-03-31 11:59:18 -0700880 interpState->jitState = kJitDone;
881 switchInterp = true;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700882 break;
883 }
Ben Cheng7a2697d2010-06-07 13:44:23 -0700884
Ben Chengba4fc8b2009-06-01 13:00:29 -0700885 desc->method = interpState->method;
886 memcpy((char*)&(desc->trace[0]),
887 (char*)&(interpState->trace[0]),
888 sizeof(JitTraceRun) * (interpState->currTraceRun+1));
889#if defined(SHOW_TRACE)
890 LOGD("TraceGen: trace done, adding to queue");
891#endif
Bill Buzbee964a7b02010-01-28 12:54:19 -0800892 if (dvmCompilerWorkEnqueue(
893 interpState->currTraceHead,kWorkOrderTrace,desc)) {
894 /* Work order successfully enqueued */
895 if (gDvmJit.blockingMode) {
896 dvmCompilerDrainQueue();
897 }
Ben Cheng1357e942010-02-10 17:21:39 -0800898 } else {
899 /*
900 * Make sure the descriptor for the abandoned work order is
901 * freed.
902 */
903 free(desc);
Ben Chengba4fc8b2009-06-01 13:00:29 -0700904 }
Bill Buzbee964a7b02010-01-28 12:54:19 -0800905 /*
906 * Reset "trace in progress" flag whether or not we
907 * successfully entered a work order.
908 */
Ben Cheng6999d842010-01-26 16:46:15 -0800909 JitEntry *jitEntry =
Ben Chengcfdeca32011-01-14 11:36:46 -0800910 lookupAndAdd(interpState->currTraceHead,
911 false /* lock */,
912 false /* method entry */);
Ben Cheng6999d842010-01-26 16:46:15 -0800913 if (jitEntry) {
914 setTraceConstruction(jitEntry, false);
915 }
Ben Chenga4973592010-03-31 11:59:18 -0700916 interpState->jitState = kJitDone;
917 switchInterp = true;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700918 }
919 break;
920 case kJitSingleStep:
921 interpState->jitState = kJitSingleStepEnd;
922 break;
923 case kJitSingleStepEnd:
Ben Cheng1a7b9d72010-09-20 22:20:31 -0700924 /*
925 * Clear the inJitCodeCache flag and abandon the resume attempt if
926 * we cannot switch back to the translation due to corner-case
927 * conditions. If the flag is not cleared and the code cache is full
928 * we will be stuck in the debug interpreter as the code cache
929 * cannot be reset.
930 */
931 if (dvmJitStayInPortableInterpreter()) {
932 interpState->entryPoint = kInterpEntryInstr;
933 self->inJitCodeCache = 0;
934 } else {
935 interpState->entryPoint = kInterpEntryResume;
936 }
Ben Chenga4973592010-03-31 11:59:18 -0700937 interpState->jitState = kJitDone;
938 switchInterp = true;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700939 break;
Ben Chenga4973592010-03-31 11:59:18 -0700940 case kJitDone:
941 switchInterp = true;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700942 break;
Jeff Hao97319a82009-08-12 16:57:15 -0700943#if defined(WITH_SELF_VERIFICATION)
944 case kJitSelfVerification:
Ben Chengd5adae12010-03-26 17:45:28 -0700945 if (selfVerificationDebugInterp(pc, self, interpState)) {
946 /*
947 * If the next state is not single-step end, we can switch
948 * interpreter now.
949 */
950 if (interpState->jitState != kJitSingleStepEnd) {
Ben Chenga4973592010-03-31 11:59:18 -0700951 interpState->jitState = kJitDone;
952 switchInterp = true;
Ben Chengd5adae12010-03-26 17:45:28 -0700953 }
Jeff Hao97319a82009-08-12 16:57:15 -0700954 }
955 break;
956#endif
Ben Chenga4973592010-03-31 11:59:18 -0700957 case kJitNot:
Ben Cheng1c52e6d2010-07-02 13:00:39 -0700958 switchInterp = !debugOrProfile;
Ben Chenged79ff02009-10-13 13:26:40 -0700959 break;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700960 default:
Ben Chenga4973592010-03-31 11:59:18 -0700961 LOGE("Unexpected JIT state: %d entry point: %d",
962 interpState->jitState, interpState->entryPoint);
963 dvmAbort();
Ben Cheng9c147b82009-10-07 16:41:46 -0700964 break;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700965 }
Ben Chenga4973592010-03-31 11:59:18 -0700966 /*
967 * Final check to see if we can really switch the interpreter. Make sure
968 * the jitState is kJitDone or kJitNot when switchInterp is set to true.
969 */
970 assert(switchInterp == false || interpState->jitState == kJitDone ||
971 interpState->jitState == kJitNot);
Ben Cheng1a7b9d72010-09-20 22:20:31 -0700972 return switchInterp && !debugOrProfile && !stayOneMoreInst &&
973 !dvmJitStayInPortableInterpreter();
Ben Chengba4fc8b2009-06-01 13:00:29 -0700974}
975
Ben Chengccd6c012009-10-15 14:52:45 -0700976JitEntry *dvmFindJitEntry(const u2* pc)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700977{
978 int idx = dvmJitHash(pc);
979
980 /* Expect a high hit rate on 1st shot */
981 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
982 return &gDvmJit.pJitEntryTable[idx];
983 else {
Bill Buzbee27176222009-06-09 09:20:16 -0700984 int chainEndMarker = gDvmJit.jitTableSize;
Bill Buzbee716f1202009-07-23 13:22:09 -0700985 while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
986 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700987 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
988 return &gDvmJit.pJitEntryTable[idx];
989 }
990 }
991 return NULL;
992}
993
Bill Buzbee27176222009-06-09 09:20:16 -0700994/*
Ben Chengcfdeca32011-01-14 11:36:46 -0800995 * Walk through the JIT profile table and find the corresponding JIT code, in
996 * the specified format (ie trace vs method). This routine needs to be fast.
Ben Chengba4fc8b2009-06-01 13:00:29 -0700997 */
Ben Chengcfdeca32011-01-14 11:36:46 -0800998void* getCodeAddrCommon(const u2* dPC, bool methodEntry)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700999{
1000 int idx = dvmJitHash(dPC);
Ben Chengcfdeca32011-01-14 11:36:46 -08001001 const u2* pc = gDvmJit.pJitEntryTable[idx].dPC;
1002 if (pc != NULL) {
Ben Cheng1a7b9d72010-09-20 22:20:31 -07001003 bool hideTranslation = dvmJitHideTranslation();
Ben Chengcfdeca32011-01-14 11:36:46 -08001004 if (pc == dPC &&
1005 gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) {
buzbee2e152ba2010-12-15 16:32:35 -08001006 int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ?
1007 0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
1008 intptr_t codeAddress =
1009 (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
Ben Cheng978738d2010-05-13 13:45:57 -07001010#if defined(WITH_JIT_TUNING)
Bill Buzbee9797a232010-01-12 12:20:13 -08001011 gDvmJit.addrLookupsFound++;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001012#endif
buzbee99ddb1e2011-01-28 10:44:30 -08001013 return hideTranslation || !codeAddress ? NULL :
1014 (void *)(codeAddress + offset);
Bill Buzbee9797a232010-01-12 12:20:13 -08001015 } else {
1016 int chainEndMarker = gDvmJit.jitTableSize;
1017 while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
1018 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
Ben Chengcfdeca32011-01-14 11:36:46 -08001019 if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
1020 gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
1021 methodEntry) {
buzbee2e152ba2010-12-15 16:32:35 -08001022 int offset = (gDvmJit.profileMode >=
1023 kTraceProfilingContinuous) ? 0 :
1024 gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
1025 intptr_t codeAddress =
1026 (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
Ben Cheng978738d2010-05-13 13:45:57 -07001027#if defined(WITH_JIT_TUNING)
Bill Buzbee9797a232010-01-12 12:20:13 -08001028 gDvmJit.addrLookupsFound++;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001029#endif
buzbee99ddb1e2011-01-28 10:44:30 -08001030 return hideTranslation || !codeAddress ? NULL :
buzbee2e152ba2010-12-15 16:32:35 -08001031 (void *)(codeAddress + offset);
Bill Buzbee9797a232010-01-12 12:20:13 -08001032 }
Ben Chengba4fc8b2009-06-01 13:00:29 -07001033 }
1034 }
1035 }
Ben Cheng978738d2010-05-13 13:45:57 -07001036#if defined(WITH_JIT_TUNING)
Ben Chengba4fc8b2009-06-01 13:00:29 -07001037 gDvmJit.addrLookupsNotFound++;
1038#endif
1039 return NULL;
1040}
1041
1042/*
Ben Chengcfdeca32011-01-14 11:36:46 -08001043 * If a translated code address, in trace format, exists for the davik byte code
1044 * pointer return it.
1045 */
void* dvmJitGetTraceAddr(const u2* dPC)
{
    /* Trace-format entries are keyed with isMethodEntry == false */
    return getCodeAddrCommon(dPC, false /* method entry */);
}
1050
1051/*
1052 * If a translated code address, in whole-method format, exists for the davik
1053 * byte code pointer return it.
1054 */
void* dvmJitGetMethodAddr(const u2* dPC)
{
    /* Whole-method entries are keyed with isMethodEntry == true */
    return getCodeAddrCommon(dPC, true /* method entry */);
}
1059
1060/*
Ben Chengba4fc8b2009-06-01 13:00:29 -07001061 * Register the translated code pointer into the JitTable.
Bill Buzbee9a8c75a2009-11-08 14:31:20 -08001062 * NOTE: Once a codeAddress field transitions from initial state to
Ben Chengba4fc8b2009-06-01 13:00:29 -07001063 * JIT'd code, it must not be altered without first halting all
Bill Buzbee716f1202009-07-23 13:22:09 -07001064 * threads. This routine should only be called by the compiler
buzbee2e152ba2010-12-15 16:32:35 -08001065 * thread. We defer the setting of the profile prefix size until
1066 * after the new code address is set to ensure that the prefix offset
1067 * is never applied to the initial interpret-only translation. All
1068 * translations with non-zero profile prefixes will still be correct
1069 * if entered as if the profile offset is 0, but the interpret-only
1070 * template cannot handle a non-zero prefix.
Ben Chengba4fc8b2009-06-01 13:00:29 -07001071 */
buzbee2e152ba2010-12-15 16:32:35 -08001072void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set,
Ben Chengcfdeca32011-01-14 11:36:46 -08001073 bool isMethodEntry, int profilePrefixSize)
buzbee2e152ba2010-12-15 16:32:35 -08001074{
Bill Buzbee716f1202009-07-23 13:22:09 -07001075 JitEntryInfoUnion oldValue;
1076 JitEntryInfoUnion newValue;
Ben Chengcfdeca32011-01-14 11:36:46 -08001077 JitEntry *jitEntry = lookupAndAdd(dPC, false, isMethodEntry);
Ben Chengba4fc8b2009-06-01 13:00:29 -07001078 assert(jitEntry);
Bill Buzbee716f1202009-07-23 13:22:09 -07001079 /* Note: order of update is important */
1080 do {
1081 oldValue = jitEntry->u;
1082 newValue = oldValue;
Ben Chengcfdeca32011-01-14 11:36:46 -08001083 newValue.info.isMethodEntry = isMethodEntry;
Bill Buzbee716f1202009-07-23 13:22:09 -07001084 newValue.info.instructionSet = set;
buzbee99ddb1e2011-01-28 10:44:30 -08001085 newValue.info.profileOffset = profilePrefixSize;
Andy McFadden6e10b9a2010-06-14 15:24:39 -07001086 } while (android_atomic_release_cas(
1087 oldValue.infoWord, newValue.infoWord,
1088 &jitEntry->u.infoWord) != 0);
Bill Buzbee716f1202009-07-23 13:22:09 -07001089 jitEntry->codeAddress = nPC;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001090}
1091
1092/*
1093 * Determine if valid trace-bulding request is active. Return true
1094 * if we need to abort and switch back to the fast interpreter, false
Ben Chenga4973592010-03-31 11:59:18 -07001095 * otherwise.
Ben Chengba4fc8b2009-06-01 13:00:29 -07001096 */
Ben Chengba4fc8b2009-06-01 13:00:29 -07001097bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
1098{
Ben Chenga4973592010-03-31 11:59:18 -07001099 bool switchInterp = false; /* Assume success */
Bill Buzbee48f18242009-06-19 16:02:27 -07001100 int i;
buzbee852aacd2010-06-08 16:24:46 -07001101 /*
1102 * A note on trace "hotness" filtering:
1103 *
1104 * Our first level trigger is intentionally loose - we need it to
1105 * fire easily not just to identify potential traces to compile, but
1106 * also to allow re-entry into the code cache.
1107 *
1108 * The 2nd level filter (done here) exists to be selective about
1109 * what we actually compile. It works by requiring the same
1110 * trace head "key" (defined as filterKey below) to appear twice in
1111 * a relatively short period of time. The difficulty is defining the
1112 * shape of the filterKey. Unfortunately, there is no "one size fits
1113 * all" approach.
1114 *
1115 * For spiky execution profiles dominated by a smallish
1116 * number of very hot loops, we would want the second-level filter
1117 * to be very selective. A good selective filter is requiring an
1118 * exact match of the Dalvik PC. In other words, defining filterKey as:
1119 * intptr_t filterKey = (intptr_t)interpState->pc
1120 *
1121 * However, for flat execution profiles we do best when aggressively
1122 * translating. A heuristically decent proxy for this is to use
1123 * the value of the method pointer containing the trace as the filterKey.
1124 * Intuitively, this is saying that once any trace in a method appears hot,
1125 * immediately translate any other trace from that same method that
1126 * survives the first-level filter. Here, filterKey would be defined as:
1127 * intptr_t filterKey = (intptr_t)interpState->method
1128 *
1129 * The problem is that we can't easily detect whether we're dealing
1130 * with a spiky or flat profile. If we go with the "pc" match approach,
1131 * flat profiles perform poorly. If we go with the loose "method" match,
1132 * we end up generating a lot of useless translations. Probably the
1133 * best approach in the future will be to retain profile information
1134 * across runs of each application in order to determine it's profile,
1135 * and then choose once we have enough history.
1136 *
1137 * However, for now we've decided to chose a compromise filter scheme that
1138 * includes elements of both. The high order bits of the filter key
1139 * are drawn from the enclosing method, and are combined with a slice
1140 * of the low-order bits of the Dalvik pc of the trace head. The
1141 * looseness of the filter can be adjusted by changing with width of
1142 * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS). The wider
1143 * the slice, the tighter the filter.
1144 *
1145 * Note: the fixed shifts in the function below reflect assumed word
1146 * alignment for method pointers, and half-word alignment of the Dalvik pc.
1147 * for method pointers and half-word alignment for dalvik pc.
1148 */
buzbeec35294d2010-06-09 14:22:50 -07001149 u4 methodKey = (u4)interpState->method <<
1150 (JIT_TRACE_THRESH_FILTER_PC_BITS - 2);
1151 u4 pcKey = ((u4)interpState->pc >> 1) &
1152 ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1);
1153 intptr_t filterKey = (intptr_t)(methodKey | pcKey);
Ben Chenga4973592010-03-31 11:59:18 -07001154 bool debugOrProfile = dvmDebuggerOrProfilerActive();
Ben Cheng40094c12010-02-24 20:58:44 -08001155
Ben Chenga4973592010-03-31 11:59:18 -07001156 /* Check if the JIT request can be handled now */
1157 if (gDvmJit.pJitEntryTable != NULL && debugOrProfile == false) {
1158 /* Bypass the filter for hot trace requests or during stress mode */
1159 if (interpState->jitState == kJitTSelectRequest &&
1160 gDvmJit.threshold > 6) {
Ben Cheng40094c12010-02-24 20:58:44 -08001161 /* Two-level filtering scheme */
1162 for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
1163 if (filterKey == interpState->threshFilter[i]) {
buzbee852aacd2010-06-08 16:24:46 -07001164 interpState->threshFilter[i] = 0; // Reset filter entry
Ben Cheng40094c12010-02-24 20:58:44 -08001165 break;
1166 }
Bill Buzbee48f18242009-06-19 16:02:27 -07001167 }
Ben Cheng40094c12010-02-24 20:58:44 -08001168 if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
1169 /*
1170 * Use random replacement policy - otherwise we could miss a
1171 * large loop that contains more traces than the size of our
1172 * filter array.
1173 */
1174 i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
1175 interpState->threshFilter[i] = filterKey;
Ben Chenga4973592010-03-31 11:59:18 -07001176 interpState->jitState = kJitDone;
Ben Cheng40094c12010-02-24 20:58:44 -08001177 }
Ben Chenga4973592010-03-31 11:59:18 -07001178 }
Bill Buzbeed7269912009-11-10 14:31:32 -08001179
Ben Chenga4973592010-03-31 11:59:18 -07001180 /* If the compiler is backlogged, cancel any JIT actions */
1181 if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
1182 interpState->jitState = kJitDone;
Ben Cheng40094c12010-02-24 20:58:44 -08001183 }
Bill Buzbeed7269912009-11-10 14:31:32 -08001184
Ben Chengba4fc8b2009-06-01 13:00:29 -07001185 /*
Ben Chenga4973592010-03-31 11:59:18 -07001186 * Check for additional reasons that might force the trace select
1187 * request to be dropped
Ben Chengba4fc8b2009-06-01 13:00:29 -07001188 */
Ben Chenga4973592010-03-31 11:59:18 -07001189 if (interpState->jitState == kJitTSelectRequest ||
1190 interpState->jitState == kJitTSelectRequestHot) {
Ben Chengcfdeca32011-01-14 11:36:46 -08001191 JitEntry *slot = lookupAndAdd(interpState->pc,
1192 false /* lock */,
1193 false /* method entry */);
Bill Buzbee716f1202009-07-23 13:22:09 -07001194 if (slot == NULL) {
Ben Chengba4fc8b2009-06-01 13:00:29 -07001195 /*
Bill Buzbee716f1202009-07-23 13:22:09 -07001196 * Table is full. This should have been
1197 * detected by the compiler thread and the table
1198 * resized before we run into it here. Assume bad things
1199 * are afoot and disable profiling.
Ben Chengba4fc8b2009-06-01 13:00:29 -07001200 */
Ben Chenga4973592010-03-31 11:59:18 -07001201 interpState->jitState = kJitDone;
Bill Buzbee716f1202009-07-23 13:22:09 -07001202 LOGD("JIT: JitTable full, disabling profiling");
1203 dvmJitStopTranslationRequests();
Bill Buzbeed7269912009-11-10 14:31:32 -08001204 } else if (slot->u.info.traceConstruction) {
1205 /*
Ben Cheng60c24f42010-01-04 12:29:56 -08001206 * Trace request already in progress, but most likely it
Bill Buzbeed7269912009-11-10 14:31:32 -08001207 * aborted without cleaning up. Assume the worst and
1208 * mark trace head as untranslatable. If we're wrong,
1209 * the compiler thread will correct the entry when the
1210 * translation is completed. The downside here is that
1211 * some existing translation may chain to the interpret-only
1212 * template instead of the real translation during this
1213 * window. Performance, but not correctness, issue.
1214 */
Ben Chenga4973592010-03-31 11:59:18 -07001215 interpState->jitState = kJitDone;
Bill Buzbeed7269912009-11-10 14:31:32 -08001216 resetTracehead(interpState, slot);
1217 } else if (slot->codeAddress) {
1218 /* Nothing to do here - just return */
Ben Chenga4973592010-03-31 11:59:18 -07001219 interpState->jitState = kJitDone;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001220 } else {
Bill Buzbeed7269912009-11-10 14:31:32 -08001221 /*
1222 * Mark request. Note, we are not guaranteed exclusivity
1223 * here. A window exists for another thread to be
1224 * attempting to build this same trace. Rather than
1225 * bear the cost of locking, we'll just allow that to
1226 * happen. The compiler thread, if it chooses, can
1227 * discard redundant requests.
1228 */
1229 setTraceConstruction(slot, true);
Ben Chengba4fc8b2009-06-01 13:00:29 -07001230 }
1231 }
Ben Chenga4973592010-03-31 11:59:18 -07001232
Ben Chengba4fc8b2009-06-01 13:00:29 -07001233 switch (interpState->jitState) {
1234 case kJitTSelectRequest:
Ben Cheng40094c12010-02-24 20:58:44 -08001235 case kJitTSelectRequestHot:
Ben Chenga4973592010-03-31 11:59:18 -07001236 interpState->jitState = kJitTSelect;
1237 interpState->currTraceHead = interpState->pc;
1238 interpState->currTraceRun = 0;
1239 interpState->totalTraceLen = 0;
1240 interpState->currRunHead = interpState->pc;
1241 interpState->currRunLen = 0;
1242 interpState->trace[0].frag.startOffset =
1243 interpState->pc - interpState->method->insns;
1244 interpState->trace[0].frag.numInsts = 0;
1245 interpState->trace[0].frag.runEnd = false;
1246 interpState->trace[0].frag.hint = kJitHintNone;
Ben Cheng7a2697d2010-06-07 13:44:23 -07001247 interpState->trace[0].frag.isCode = true;
Ben Chenga4973592010-03-31 11:59:18 -07001248 interpState->lastPC = 0;
1249 break;
1250 /*
1251 * For JIT's perspective there is no need to stay in the debug
1252 * interpreter unless debugger/profiler is attached.
1253 */
1254 case kJitDone:
1255 switchInterp = true;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001256 break;
1257 default:
Ben Chenga4973592010-03-31 11:59:18 -07001258 LOGE("Unexpected JIT state: %d entry point: %d",
1259 interpState->jitState, interpState->entryPoint);
Ben Chengba4fc8b2009-06-01 13:00:29 -07001260 dvmAbort();
1261 }
Ben Chenga4973592010-03-31 11:59:18 -07001262 } else {
1263 /*
1264 * Cannot build trace this time - ready to leave the dbg interpreter
1265 */
1266 interpState->jitState = kJitDone;
1267 switchInterp = true;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001268 }
Ben Chenga4973592010-03-31 11:59:18 -07001269
1270 /*
1271 * Final check to see if we can really switch the interpreter. Make sure
1272 * the jitState is kJitDone when switchInterp is set to true.
1273 */
1274 assert(switchInterp == false || interpState->jitState == kJitDone);
Ben Cheng1a7b9d72010-09-20 22:20:31 -07001275 return switchInterp && !debugOrProfile &&
1276 !dvmJitStayInPortableInterpreter();
Ben Chengba4fc8b2009-06-01 13:00:29 -07001277}
1278
Bill Buzbee27176222009-06-09 09:20:16 -07001279/*
1280 * Resizes the JitTable. Must be a power of 2, and returns true on failure.
Bill Buzbee964a7b02010-01-28 12:54:19 -08001281 * Stops all threads, and thus is a heavyweight operation. May only be called
1282 * by the compiler thread.
Bill Buzbee27176222009-06-09 09:20:16 -07001283 */
1284bool dvmJitResizeJitTable( unsigned int size )
1285{
Bill Buzbee716f1202009-07-23 13:22:09 -07001286 JitEntry *pNewTable;
1287 JitEntry *pOldTable;
Bill Buzbee964a7b02010-01-28 12:54:19 -08001288 JitEntry tempEntry;
Bill Buzbee27176222009-06-09 09:20:16 -07001289 u4 newMask;
Bill Buzbee716f1202009-07-23 13:22:09 -07001290 unsigned int oldSize;
Bill Buzbee27176222009-06-09 09:20:16 -07001291 unsigned int i;
1292
Ben Cheng3f02aa42009-08-14 13:52:09 -07001293 assert(gDvmJit.pJitEntryTable != NULL);
Bill Buzbee27176222009-06-09 09:20:16 -07001294 assert(size && !(size & (size - 1))); /* Is power of 2? */
1295
Ben Chenga4973592010-03-31 11:59:18 -07001296 LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);
Bill Buzbee27176222009-06-09 09:20:16 -07001297
1298 newMask = size - 1;
1299
1300 if (size <= gDvmJit.jitTableSize) {
1301 return true;
1302 }
1303
Bill Buzbee964a7b02010-01-28 12:54:19 -08001304 /* Make sure requested size is compatible with chain field width */
1305 tempEntry.u.info.chain = size;
1306 if (tempEntry.u.info.chain != size) {
1307 LOGD("Jit: JitTable request of %d too big", size);
1308 return true;
1309 }
1310
Bill Buzbee716f1202009-07-23 13:22:09 -07001311 pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
Bill Buzbee27176222009-06-09 09:20:16 -07001312 if (pNewTable == NULL) {
1313 return true;
1314 }
1315 for (i=0; i< size; i++) {
Bill Buzbee716f1202009-07-23 13:22:09 -07001316 pNewTable[i].u.info.chain = size; /* Initialize chain termination */
Bill Buzbee27176222009-06-09 09:20:16 -07001317 }
1318
1319 /* Stop all other interpreting/jit'ng threads */
Ben Chenga8e64a72009-10-20 13:01:36 -07001320 dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE);
Bill Buzbee27176222009-06-09 09:20:16 -07001321
Bill Buzbee716f1202009-07-23 13:22:09 -07001322 pOldTable = gDvmJit.pJitEntryTable;
1323 oldSize = gDvmJit.jitTableSize;
Bill Buzbee27176222009-06-09 09:20:16 -07001324
1325 dvmLockMutex(&gDvmJit.tableLock);
Bill Buzbee27176222009-06-09 09:20:16 -07001326 gDvmJit.pJitEntryTable = pNewTable;
1327 gDvmJit.jitTableSize = size;
1328 gDvmJit.jitTableMask = size - 1;
Bill Buzbee716f1202009-07-23 13:22:09 -07001329 gDvmJit.jitTableEntriesUsed = 0;
Bill Buzbee27176222009-06-09 09:20:16 -07001330
Bill Buzbee716f1202009-07-23 13:22:09 -07001331 for (i=0; i < oldSize; i++) {
1332 if (pOldTable[i].dPC) {
1333 JitEntry *p;
1334 u2 chain;
Ben Chengcfdeca32011-01-14 11:36:46 -08001335 p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/,
1336 pOldTable[i].u.info.isMethodEntry);
Bill Buzbee964a7b02010-01-28 12:54:19 -08001337 p->codeAddress = pOldTable[i].codeAddress;
Bill Buzbee716f1202009-07-23 13:22:09 -07001338 /* We need to preserve the new chain field, but copy the rest */
Bill Buzbee716f1202009-07-23 13:22:09 -07001339 chain = p->u.info.chain;
1340 p->u = pOldTable[i].u;
1341 p->u.info.chain = chain;
Bill Buzbee716f1202009-07-23 13:22:09 -07001342 }
1343 }
buzbee2e152ba2010-12-15 16:32:35 -08001344
Bill Buzbee964a7b02010-01-28 12:54:19 -08001345 dvmUnlockMutex(&gDvmJit.tableLock);
Bill Buzbee716f1202009-07-23 13:22:09 -07001346
1347 free(pOldTable);
1348
Bill Buzbee27176222009-06-09 09:20:16 -07001349 /* Restart the world */
Ben Chenga8e64a72009-10-20 13:01:36 -07001350 dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE);
Bill Buzbee27176222009-06-09 09:20:16 -07001351
1352 return false;
1353}
1354
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001355/*
Ben Cheng60c24f42010-01-04 12:29:56 -08001356 * Reset the JitTable to the initial clean state.
1357 */
1358void dvmJitResetTable(void)
1359{
1360 JitEntry *jitEntry = gDvmJit.pJitEntryTable;
1361 unsigned int size = gDvmJit.jitTableSize;
1362 unsigned int i;
1363
1364 dvmLockMutex(&gDvmJit.tableLock);
buzbee2e152ba2010-12-15 16:32:35 -08001365
1366 /* Note: If need to preserve any existing counts. Do so here. */
buzbee38c41342011-01-11 15:45:49 -08001367 if (gDvmJit.pJitTraceProfCounters) {
1368 for (i=0; i < JIT_PROF_BLOCK_BUCKETS; i++) {
1369 if (gDvmJit.pJitTraceProfCounters->buckets[i])
1370 memset((void *) gDvmJit.pJitTraceProfCounters->buckets[i],
1371 0, sizeof(JitTraceCounter_t) * JIT_PROF_BLOCK_ENTRIES);
1372 }
1373 gDvmJit.pJitTraceProfCounters->next = 0;
buzbee2e152ba2010-12-15 16:32:35 -08001374 }
buzbee2e152ba2010-12-15 16:32:35 -08001375
Ben Cheng60c24f42010-01-04 12:29:56 -08001376 memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
1377 for (i=0; i< size; i++) {
1378 jitEntry[i].u.info.chain = size; /* Initialize chain termination */
1379 }
1380 gDvmJit.jitTableEntriesUsed = 0;
1381 dvmUnlockMutex(&gDvmJit.tableLock);
1382}
1383
1384/*
buzbee2e152ba2010-12-15 16:32:35 -08001385 * Return the address of the next trace profile counter. This address
1386 * will be embedded in the generated code for the trace, and thus cannot
1387 * change while the trace exists.
1388 */
1389JitTraceCounter_t *dvmJitNextTraceCounter()
1390{
1391 int idx = gDvmJit.pJitTraceProfCounters->next / JIT_PROF_BLOCK_ENTRIES;
1392 int elem = gDvmJit.pJitTraceProfCounters->next % JIT_PROF_BLOCK_ENTRIES;
1393 JitTraceCounter_t *res;
1394 /* Lazily allocate blocks of counters */
1395 if (!gDvmJit.pJitTraceProfCounters->buckets[idx]) {
1396 JitTraceCounter_t *p =
1397 (JitTraceCounter_t*) calloc(JIT_PROF_BLOCK_ENTRIES, sizeof(*p));
1398 if (!p) {
1399 LOGE("Failed to allocate block of trace profile counters");
1400 dvmAbort();
1401 }
1402 gDvmJit.pJitTraceProfCounters->buckets[idx] = p;
1403 }
1404 res = &gDvmJit.pJitTraceProfCounters->buckets[idx][elem];
1405 gDvmJit.pJitTraceProfCounters->next++;
1406 return res;
1407}
1408
1409/*
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001410 * Float/double conversion requires clamping to min and max of integer form. If
1411 * target doesn't support this normally, use these.
1412 */
1413s8 dvmJitd2l(double d)
1414{
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001415 static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
1416 static const double kMinLong = (double)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001417 if (d >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001418 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001419 else if (d <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001420 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001421 else if (d != d) // NaN case
1422 return 0;
1423 else
1424 return (s8)d;
1425}
1426
1427s8 dvmJitf2l(float f)
1428{
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001429 static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
1430 static const float kMinLong = (float)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001431 if (f >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001432 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001433 else if (f <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001434 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001435 else if (f != f) // NaN case
1436 return 0;
1437 else
1438 return (s8)f;
1439}
1440
buzbee2e152ba2010-12-15 16:32:35 -08001441/* Should only be called by the compiler thread */
1442void dvmJitChangeProfileMode(TraceProfilingModes newState)
1443{
1444 if (gDvmJit.profileMode != newState) {
1445 gDvmJit.profileMode = newState;
1446 dvmJitUnchainAll();
1447 }
1448}
1449
1450void dvmJitTraceProfilingOn()
1451{
1452 if (gDvmJit.profileMode == kTraceProfilingPeriodicOff)
1453 dvmCompilerWorkEnqueue(NULL, kWorkOrderProfileMode,
1454 (void*) kTraceProfilingPeriodicOn);
1455 else if (gDvmJit.profileMode == kTraceProfilingDisabled)
1456 dvmCompilerWorkEnqueue(NULL, kWorkOrderProfileMode,
1457 (void*) kTraceProfilingContinuous);
1458}
1459
1460void dvmJitTraceProfilingOff()
1461{
1462 if (gDvmJit.profileMode == kTraceProfilingPeriodicOn)
1463 dvmCompilerWorkEnqueue(NULL, kWorkOrderProfileMode,
1464 (void*) kTraceProfilingPeriodicOff);
1465 else if (gDvmJit.profileMode == kTraceProfilingContinuous)
1466 dvmCompilerWorkEnqueue(NULL, kWorkOrderProfileMode,
1467 (void*) kTraceProfilingDisabled);
1468}
1469
Ben Chengba4fc8b2009-06-01 13:00:29 -07001470#endif /* WITH_JIT */