blob: 6a4615d3ec201f95129e16ef7dc6b440a2d88ec6 [file] [log] [blame]
Ben Chengba4fc8b2009-06-01 13:00:29 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#ifdef WITH_JIT
17
18/*
19 * Target independent portion of Android's Jit
20 */
21
22#include "Dalvik.h"
23#include "Jit.h"
24
25
Dan Bornsteindf4daaf2010-12-01 14:23:44 -080026#include "libdex/DexOpcodes.h"
Ben Chengba4fc8b2009-06-01 13:00:29 -070027#include <unistd.h>
28#include <pthread.h>
29#include <sys/time.h>
30#include <signal.h>
31#include "compiler/Compiler.h"
Bill Buzbee6e963e12009-06-17 16:56:19 -070032#include "compiler/CompilerUtility.h"
33#include "compiler/CompilerIR.h"
Ben Chengba4fc8b2009-06-01 13:00:29 -070034#include <errno.h>
35
Jeff Hao97319a82009-08-12 16:57:15 -070036#if defined(WITH_SELF_VERIFICATION)
37/* Allocate space for per-thread ShadowSpace data structures */
38void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
39{
40 self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
41 if (self->shadowSpace == NULL)
42 return NULL;
43
44 self->shadowSpace->registerSpaceSize = REG_SPACE;
45 self->shadowSpace->registerSpace =
46 (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));
47
48 return self->shadowSpace->registerSpace;
49}
50
51/* Free per-thread ShadowSpace data structures */
52void dvmSelfVerificationShadowSpaceFree(Thread* self)
53{
54 free(self->shadowSpace->registerSpace);
55 free(self->shadowSpace);
56}
57
58/*
buzbee9f601a92011-02-11 17:48:20 -080059 * Save out PC, FP, thread state, and registers to shadow space.
Jeff Hao97319a82009-08-12 16:57:15 -070060 * Return a pointer to the shadow space for JIT to use.
buzbee9f601a92011-02-11 17:48:20 -080061 *
62 * The set of saved state from the Thread structure is:
63 * pc (Dalvik PC)
64 * fp (Dalvik FP)
65 * retval
66 * method
67 * methodClassDex
68 * interpStackEnd
Jeff Hao97319a82009-08-12 16:57:15 -070069 */
buzbee9f601a92011-02-11 17:48:20 -080070void* dvmSelfVerificationSaveState(const u2* pc, u4* fp,
71 Thread* self, int targetTrace)
Jeff Hao97319a82009-08-12 16:57:15 -070072{
Jeff Hao97319a82009-08-12 16:57:15 -070073 ShadowSpace *shadowSpace = self->shadowSpace;
buzbee9f601a92011-02-11 17:48:20 -080074 unsigned preBytes = self->interpSave.method->outsSize*4 +
75 sizeof(StackSaveArea);
76 unsigned postBytes = self->interpSave.method->registersSize*4;
Jeff Hao97319a82009-08-12 16:57:15 -070077
78 //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
79 // self->threadId, (int)pc, (int)fp);
80
81 if (shadowSpace->selfVerificationState != kSVSIdle) {
82 LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
83 self->threadId, shadowSpace->selfVerificationState);
84 LOGD("********** SHADOW STATE DUMP **********");
Ben Chengccd6c012009-10-15 14:52:45 -070085 LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
Jeff Hao97319a82009-08-12 16:57:15 -070086 }
87 shadowSpace->selfVerificationState = kSVSStart;
88
buzbee9f601a92011-02-11 17:48:20 -080089 if (self->entryPoint == kInterpEntryResume) {
90 self->entryPoint = kInterpEntryInstr;
Ben Chengd5adae12010-03-26 17:45:28 -070091#if 0
92 /* Tracking the success rate of resume after single-stepping */
buzbee9f601a92011-02-11 17:48:20 -080093 if (self->jitResumeDPC == pc) {
Ben Chengd5adae12010-03-26 17:45:28 -070094 LOGD("SV single step resumed at %p", pc);
95 }
96 else {
buzbee9f601a92011-02-11 17:48:20 -080097 LOGD("real %p DPC %p NPC %p", pc, self->jitResumeDPC,
98 self->jitResumeNPC);
Ben Chengd5adae12010-03-26 17:45:28 -070099 }
100#endif
101 }
102
Jeff Hao97319a82009-08-12 16:57:15 -0700103 // Dynamically grow shadow register space if necessary
Ben Cheng11d8f142010-03-24 15:24:19 -0700104 if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
Jeff Hao97319a82009-08-12 16:57:15 -0700105 free(shadowSpace->registerSpace);
Ben Cheng11d8f142010-03-24 15:24:19 -0700106 shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
Jeff Hao97319a82009-08-12 16:57:15 -0700107 shadowSpace->registerSpace =
Ben Cheng11d8f142010-03-24 15:24:19 -0700108 (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
Jeff Hao97319a82009-08-12 16:57:15 -0700109 }
110
111 // Remember original state
112 shadowSpace->startPC = pc;
113 shadowSpace->fp = fp;
buzbee9f601a92011-02-11 17:48:20 -0800114 shadowSpace->retval = self->retval;
115 shadowSpace->interpStackEnd = self->interpStackEnd;
116
Ben Chengccd6c012009-10-15 14:52:45 -0700117 /*
118 * Store the original method here in case the trace ends with a
119 * return/invoke, the last method.
120 */
buzbee9f601a92011-02-11 17:48:20 -0800121 shadowSpace->method = self->interpSave.method;
122 shadowSpace->methodClassDex = self->interpSave.methodClassDex;
123
Jeff Hao97319a82009-08-12 16:57:15 -0700124 shadowSpace->shadowFP = shadowSpace->registerSpace +
125 shadowSpace->registerSpaceSize - postBytes/4;
126
buzbee9f601a92011-02-11 17:48:20 -0800127 self->interpSave.fp = (u4*)shadowSpace->shadowFP;
128 self->interpStackEnd = (u1*)shadowSpace->registerSpace;
Jeff Hao97319a82009-08-12 16:57:15 -0700129
130 // Create a copy of the stack
131 memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
132 preBytes+postBytes);
133
134 // Setup the shadowed heap space
135 shadowSpace->heapSpaceTail = shadowSpace->heapSpace;
136
137 // Reset trace length
138 shadowSpace->traceLength = 0;
139
140 return shadowSpace;
141}
142
143/*
144 * Save ending PC, FP and compiled code exit point to shadow space.
145 * Return a pointer to the shadow space for JIT to restore state.
146 */
buzbee9f601a92011-02-11 17:48:20 -0800147void* dvmSelfVerificationRestoreState(const u2* pc, u4* fp,
148 SelfVerificationState exitState,
149 Thread* self)
Jeff Hao97319a82009-08-12 16:57:15 -0700150{
Jeff Hao97319a82009-08-12 16:57:15 -0700151 ShadowSpace *shadowSpace = self->shadowSpace;
152 shadowSpace->endPC = pc;
153 shadowSpace->endShadowFP = fp;
Ben Cheng7a2697d2010-06-07 13:44:23 -0700154 shadowSpace->jitExitState = exitState;
Jeff Hao97319a82009-08-12 16:57:15 -0700155
156 //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
157 // self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
158 // (int)pc);
159
160 if (shadowSpace->selfVerificationState != kSVSStart) {
161 LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
162 self->threadId, shadowSpace->selfVerificationState);
163 LOGD("********** SHADOW STATE DUMP **********");
Ben Chengccd6c012009-10-15 14:52:45 -0700164 LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
Jeff Hao97319a82009-08-12 16:57:15 -0700165 (int)shadowSpace->endPC);
Ben Chengccd6c012009-10-15 14:52:45 -0700166 LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
167 LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
Jeff Hao97319a82009-08-12 16:57:15 -0700168 (int)shadowSpace->endShadowFP);
169 }
170
171 // Special case when punting after a single instruction
Ben Cheng7a2697d2010-06-07 13:44:23 -0700172 if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
Jeff Hao97319a82009-08-12 16:57:15 -0700173 shadowSpace->selfVerificationState = kSVSIdle;
Ben Cheng1a7b9d72010-09-20 22:20:31 -0700174 } else if (exitState == kSVSBackwardBranch && pc < shadowSpace->startPC) {
Ben Cheng60c6dbf2010-08-26 12:28:56 -0700175 /*
Ben Cheng1a7b9d72010-09-20 22:20:31 -0700176 * Consider a trace with a backward branch:
Ben Cheng60c6dbf2010-08-26 12:28:56 -0700177 * 1: ..
178 * 2: ..
Ben Cheng1a7b9d72010-09-20 22:20:31 -0700179 * 3: ..
Ben Cheng60c6dbf2010-08-26 12:28:56 -0700180 * 4: ..
Ben Cheng1a7b9d72010-09-20 22:20:31 -0700181 * 5: Goto {1 or 2 or 3 or 4}
Ben Cheng60c6dbf2010-08-26 12:28:56 -0700182 *
Ben Cheng1a7b9d72010-09-20 22:20:31 -0700183 * If there instruction 5 goes to 1 and there is no single-step
184 * instruction in the loop, pc is equal to shadowSpace->startPC and
185 * we will honor the backward branch condition.
Ben Cheng60c6dbf2010-08-26 12:28:56 -0700186 *
Ben Cheng1a7b9d72010-09-20 22:20:31 -0700187 * If the single-step instruction is outside the loop, then after
188 * resuming in the trace the startPC will be less than pc so we will
189 * also honor the backward branch condition.
190 *
191 * If the single-step is inside the loop, we won't hit the same endPC
192 * twice when the interpreter is re-executing the trace so we want to
193 * cancel the backward branch condition. In this case it can be
194 * detected as the endPC (ie pc) will be less than startPC.
Ben Cheng60c6dbf2010-08-26 12:28:56 -0700195 */
196 shadowSpace->selfVerificationState = kSVSNormal;
Jeff Hao97319a82009-08-12 16:57:15 -0700197 } else {
Ben Cheng7a2697d2010-06-07 13:44:23 -0700198 shadowSpace->selfVerificationState = exitState;
Jeff Hao97319a82009-08-12 16:57:15 -0700199 }
200
buzbee9f601a92011-02-11 17:48:20 -0800201 /* Restore state before returning */
202 self->interpSave.pc = shadowSpace->startPC;
203 self->interpSave.fp = shadowSpace->fp;
204 self->interpSave.method = shadowSpace->method;
205 self->interpSave.methodClassDex = shadowSpace->methodClassDex;
206 self->retval = shadowSpace->retval;
207 self->interpStackEnd = shadowSpace->interpStackEnd;
208
Jeff Hao97319a82009-08-12 16:57:15 -0700209 return shadowSpace;
210}
211
212/* Print contents of virtual registers */
/*
 * Dump numWords virtual registers from addr, flagging with " X" any word
 * that differs from the corresponding word in the reference set addrRef.
 *
 * Fix: the format was "0x%8x", which space-pads AFTER the "0x" prefix
 * (e.g. "0x      42"); zero-padding ("0x%08x") is the intended form for
 * a fixed-width hex register dump.
 */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    int i;
    for (i = 0; i < numWords; i++) {
        LOGD("(v%d) 0x%08x%s", i, addr[i], addr[i] != addrRef[i] ? " X" : "");
    }
}
221
222/* Print values maintained in shadowSpace */
223static void selfVerificationDumpState(const u2* pc, Thread* self)
224{
225 ShadowSpace* shadowSpace = self->shadowSpace;
226 StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
227 int frameBytes = (int) shadowSpace->registerSpace +
228 shadowSpace->registerSpaceSize*4 -
229 (int) shadowSpace->shadowFP;
230 int localRegs = 0;
231 int frameBytes2 = 0;
buzbee9f601a92011-02-11 17:48:20 -0800232 if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
Jeff Hao97319a82009-08-12 16:57:15 -0700233 localRegs = (stackSave->method->registersSize -
234 stackSave->method->insSize)*4;
235 frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
236 }
237 LOGD("********** SHADOW STATE DUMP **********");
Ben Chengccd6c012009-10-15 14:52:45 -0700238 LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
Jeff Hao97319a82009-08-12 16:57:15 -0700239 (int)(pc - stackSave->method->insns));
Ben Chengccd6c012009-10-15 14:52:45 -0700240 LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
241 LOGD("Method: %s", shadowSpace->method->name);
242 LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
Jeff Hao97319a82009-08-12 16:57:15 -0700243 (int)shadowSpace->endPC);
Ben Chengccd6c012009-10-15 14:52:45 -0700244 LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
Jeff Hao97319a82009-08-12 16:57:15 -0700245 (int)self->curFrame);
Ben Chengccd6c012009-10-15 14:52:45 -0700246 LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
Jeff Hao97319a82009-08-12 16:57:15 -0700247 (int)shadowSpace->endShadowFP);
Ben Chengccd6c012009-10-15 14:52:45 -0700248 LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
Jeff Hao97319a82009-08-12 16:57:15 -0700249 localRegs, frameBytes2);
Ben Chengccd6c012009-10-15 14:52:45 -0700250 LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
Jeff Hao97319a82009-08-12 16:57:15 -0700251 shadowSpace->selfVerificationState);
252}
253
254/* Print decoded instructions in the current trace */
255static void selfVerificationDumpTrace(const u2* pc, Thread* self)
256{
257 ShadowSpace* shadowSpace = self->shadowSpace;
258 StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
Ben Chengbcdc1de2009-08-21 16:18:46 -0700259 int i, addr, offset;
260 DecodedInstruction *decInsn;
Jeff Hao97319a82009-08-12 16:57:15 -0700261
262 LOGD("********** SHADOW TRACE DUMP **********");
263 for (i = 0; i < shadowSpace->traceLength; i++) {
Ben Chengbcdc1de2009-08-21 16:18:46 -0700264 addr = shadowSpace->trace[i].addr;
265 offset = (int)((u2*)addr - stackSave->method->insns);
266 decInsn = &(shadowSpace->trace[i].decInsn);
267 /* Not properly decoding instruction, some registers may be garbage */
Andy McFaddenc6b25c72010-06-22 11:01:20 -0700268 LOGD("0x%x: (0x%04x) %s",
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800269 addr, offset, dexGetOpcodeName(decInsn->opcode));
Jeff Hao97319a82009-08-12 16:57:15 -0700270 }
271}
272
Ben Chengbcdc1de2009-08-21 16:18:46 -0700273/* Code is forced into this spin loop when a divergence is detected */
Ben Chengccd6c012009-10-15 14:52:45 -0700274static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
Ben Chengbcdc1de2009-08-21 16:18:46 -0700275{
Ben Chengccd6c012009-10-15 14:52:45 -0700276 const u2 *startPC = shadowSpace->startPC;
Ben Cheng88a0f972010-02-24 15:00:40 -0800277 JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
Ben Chengccd6c012009-10-15 14:52:45 -0700278 if (desc) {
279 dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
Ben Cheng1357e942010-02-10 17:21:39 -0800280 /*
281 * This function effectively terminates the VM right here, so not
282 * freeing the desc pointer when the enqueuing fails is acceptable.
283 */
Ben Chengccd6c012009-10-15 14:52:45 -0700284 }
Ben Chengbcdc1de2009-08-21 16:18:46 -0700285 gDvmJit.selfVerificationSpin = true;
286 while(gDvmJit.selfVerificationSpin) sleep(10);
287}
288
Jeff Hao97319a82009-08-12 16:57:15 -0700289/* Manage self verification while in the debug interpreter */
buzbee9f601a92011-02-11 17:48:20 -0800290static bool selfVerificationDebugInterp(const u2* pc, Thread* self)
Jeff Hao97319a82009-08-12 16:57:15 -0700291{
292 ShadowSpace *shadowSpace = self->shadowSpace;
Jeff Hao97319a82009-08-12 16:57:15 -0700293 SelfVerificationState state = shadowSpace->selfVerificationState;
Ben Chengbcdc1de2009-08-21 16:18:46 -0700294
295 DecodedInstruction decInsn;
Dan Bornstein54322392010-11-17 14:16:56 -0800296 dexDecodeInstruction(pc, &decInsn);
Ben Chengbcdc1de2009-08-21 16:18:46 -0700297
Jeff Hao97319a82009-08-12 16:57:15 -0700298 //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
299 // self->threadId, (int)pc, (int)shadowSpace->endPC, state,
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800300 // shadowSpace->traceLength, dexGetOpcodeName(decInsn.opcode));
Jeff Hao97319a82009-08-12 16:57:15 -0700301
302 if (state == kSVSIdle || state == kSVSStart) {
303 LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
304 self->threadId, state);
305 selfVerificationDumpState(pc, self);
306 selfVerificationDumpTrace(pc, self);
307 }
308
Ben Chengd5adae12010-03-26 17:45:28 -0700309 /*
310 * Skip endPC once when trace has a backward branch. If the SV state is
311 * single step, keep it that way.
312 */
Jeff Hao97319a82009-08-12 16:57:15 -0700313 if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
Ben Chengd5adae12010-03-26 17:45:28 -0700314 (state != kSVSBackwardBranch && state != kSVSSingleStep)) {
Jeff Hao97319a82009-08-12 16:57:15 -0700315 shadowSpace->selfVerificationState = kSVSDebugInterp;
316 }
317
318 /* Check that the current pc is the end of the trace */
Ben Chengd5adae12010-03-26 17:45:28 -0700319 if ((state == kSVSDebugInterp || state == kSVSSingleStep) &&
320 pc == shadowSpace->endPC) {
Jeff Hao97319a82009-08-12 16:57:15 -0700321
322 shadowSpace->selfVerificationState = kSVSIdle;
323
324 /* Check register space */
325 int frameBytes = (int) shadowSpace->registerSpace +
326 shadowSpace->registerSpaceSize*4 -
327 (int) shadowSpace->shadowFP;
328 if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
Ben Chengccd6c012009-10-15 14:52:45 -0700329 LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
Jeff Hao97319a82009-08-12 16:57:15 -0700330 selfVerificationDumpState(pc, self);
331 selfVerificationDumpTrace(pc, self);
332 LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
333 (int)shadowSpace->fp, frameBytes);
Ben Chengccd6c012009-10-15 14:52:45 -0700334 selfVerificationPrintRegisters((int*)shadowSpace->fp,
335 (int*)shadowSpace->shadowFP,
336 frameBytes/4);
Jeff Hao97319a82009-08-12 16:57:15 -0700337 LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
338 (int)shadowSpace->shadowFP, frameBytes);
339 selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
Ben Chengccd6c012009-10-15 14:52:45 -0700340 (int*)shadowSpace->fp,
341 frameBytes/4);
342 selfVerificationSpinLoop(shadowSpace);
Jeff Hao97319a82009-08-12 16:57:15 -0700343 }
344 /* Check new frame if it exists (invokes only) */
buzbee9f601a92011-02-11 17:48:20 -0800345 if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
Jeff Hao97319a82009-08-12 16:57:15 -0700346 StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
347 int localRegs = (stackSave->method->registersSize -
348 stackSave->method->insSize)*4;
349 int frameBytes2 = (int) shadowSpace->fp -
350 (int) self->curFrame - localRegs;
351 if (memcmp(((char*)self->curFrame)+localRegs,
352 ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
Ben Chengccd6c012009-10-15 14:52:45 -0700353 LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
Jeff Hao97319a82009-08-12 16:57:15 -0700354 self->threadId);
355 selfVerificationDumpState(pc, self);
356 selfVerificationDumpTrace(pc, self);
357 LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
358 (int)self->curFrame, localRegs, frameBytes2);
359 selfVerificationPrintRegisters((int*)self->curFrame,
Ben Chengccd6c012009-10-15 14:52:45 -0700360 (int*)shadowSpace->endShadowFP,
361 (frameBytes2+localRegs)/4);
Jeff Hao97319a82009-08-12 16:57:15 -0700362 LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
363 (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
364 selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
Ben Chengccd6c012009-10-15 14:52:45 -0700365 (int*)self->curFrame,
366 (frameBytes2+localRegs)/4);
367 selfVerificationSpinLoop(shadowSpace);
Jeff Hao97319a82009-08-12 16:57:15 -0700368 }
369 }
370
371 /* Check memory space */
Ben Chengbcdc1de2009-08-21 16:18:46 -0700372 bool memDiff = false;
Jeff Hao97319a82009-08-12 16:57:15 -0700373 ShadowHeap* heapSpacePtr;
374 for (heapSpacePtr = shadowSpace->heapSpace;
375 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
Ben Chengbcdc1de2009-08-21 16:18:46 -0700376 int memData = *((unsigned int*) heapSpacePtr->addr);
377 if (heapSpacePtr->data != memData) {
Ben Chengccd6c012009-10-15 14:52:45 -0700378 LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
379 LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
Ben Chengbcdc1de2009-08-21 16:18:46 -0700380 heapSpacePtr->addr, memData, heapSpacePtr->data);
Jeff Hao97319a82009-08-12 16:57:15 -0700381 selfVerificationDumpState(pc, self);
382 selfVerificationDumpTrace(pc, self);
Ben Chengbcdc1de2009-08-21 16:18:46 -0700383 memDiff = true;
Jeff Hao97319a82009-08-12 16:57:15 -0700384 }
385 }
Ben Chengccd6c012009-10-15 14:52:45 -0700386 if (memDiff) selfVerificationSpinLoop(shadowSpace);
Ben Chengd5adae12010-03-26 17:45:28 -0700387
388 /*
389 * Switch to JIT single step mode to stay in the debug interpreter for
390 * one more instruction
391 */
392 if (state == kSVSSingleStep) {
buzbee9f601a92011-02-11 17:48:20 -0800393 self->jitState = kJitSingleStepEnd;
Ben Chengd5adae12010-03-26 17:45:28 -0700394 }
Jeff Hao97319a82009-08-12 16:57:15 -0700395 return true;
396
397 /* If end not been reached, make sure max length not exceeded */
398 } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
399 LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
Ben Chengccd6c012009-10-15 14:52:45 -0700400 LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
Jeff Hao97319a82009-08-12 16:57:15 -0700401 (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
402 selfVerificationDumpState(pc, self);
403 selfVerificationDumpTrace(pc, self);
Ben Chengccd6c012009-10-15 14:52:45 -0700404 selfVerificationSpinLoop(shadowSpace);
Jeff Hao97319a82009-08-12 16:57:15 -0700405
406 return true;
407 }
Ben Chengbcdc1de2009-08-21 16:18:46 -0700408 /* Log the instruction address and decoded instruction for debug */
Jeff Hao97319a82009-08-12 16:57:15 -0700409 shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
Ben Chengbcdc1de2009-08-21 16:18:46 -0700410 shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
Jeff Hao97319a82009-08-12 16:57:15 -0700411 shadowSpace->traceLength++;
412
413 return false;
414}
415#endif
416
Ben Chengba4fc8b2009-06-01 13:00:29 -0700417/*
418 * If one of our fixed tables or the translation buffer fills up,
419 * call this routine to avoid wasting cycles on future translation requests.
420 */
421void dvmJitStopTranslationRequests()
422{
423 /*
424 * Note 1: This won't necessarily stop all translation requests, and
425 * operates on a delayed mechanism. Running threads look to the copy
buzbee9f601a92011-02-11 17:48:20 -0800426 * of this value in their private thread structures and won't see
Ben Chengba4fc8b2009-06-01 13:00:29 -0700427 * this change until it is refreshed (which happens on interpreter
428 * entry).
429 * Note 2: This is a one-shot memory leak on this table. Because this is a
430 * permanent off switch for Jit profiling, it is a one-time leak of 1K
431 * bytes, and no further attempt will be made to re-allocate it. Can't
432 * free it because some thread may be holding a reference.
433 */
Bill Buzbeeb1d80442009-12-17 14:55:21 -0800434 gDvmJit.pProfTable = NULL;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700435}
436
Ben Cheng978738d2010-05-13 13:45:57 -0700437#if defined(WITH_JIT_TUNING)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700438/* Convenience function to increment counter from assembly code */
Ben Cheng6c10a972009-10-29 14:39:18 -0700439void dvmBumpNoChain(int from)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700440{
Ben Cheng6c10a972009-10-29 14:39:18 -0700441 gDvmJit.noChainExit[from]++;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700442}
443
444/* Convenience function to increment counter from assembly code */
445void dvmBumpNormal()
446{
Ben Cheng6c10a972009-10-29 14:39:18 -0700447 gDvmJit.normalExit++;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700448}
449
450/* Convenience function to increment counter from assembly code */
451void dvmBumpPunt(int from)
452{
Ben Cheng6c10a972009-10-29 14:39:18 -0700453 gDvmJit.puntExit++;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700454}
455#endif
456
457/* Dumps debugging & tuning stats to the log */
458void dvmJitStats()
459{
460 int i;
461 int hit;
462 int not_hit;
463 int chains;
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800464 int stubs;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700465 if (gDvmJit.pJitEntryTable) {
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800466 for (i=0, stubs=chains=hit=not_hit=0;
Bill Buzbee27176222009-06-09 09:20:16 -0700467 i < (int) gDvmJit.jitTableSize;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700468 i++) {
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800469 if (gDvmJit.pJitEntryTable[i].dPC != 0) {
Ben Chengba4fc8b2009-06-01 13:00:29 -0700470 hit++;
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800471 if (gDvmJit.pJitEntryTable[i].codeAddress ==
Bill Buzbeebd047242010-05-13 13:02:53 -0700472 dvmCompilerGetInterpretTemplate())
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800473 stubs++;
474 } else
Ben Chengba4fc8b2009-06-01 13:00:29 -0700475 not_hit++;
Bill Buzbee716f1202009-07-23 13:22:09 -0700476 if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700477 chains++;
478 }
Ben Cheng72621c92010-03-10 13:12:55 -0800479 LOGD("JIT: table size is %d, entries used is %d",
Ben Cheng86717f72010-03-05 15:27:21 -0800480 gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed);
Ben Cheng72621c92010-03-10 13:12:55 -0800481 LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
482 hit, not_hit + hit, chains, gDvmJit.threshold,
483 gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
Ben Cheng86717f72010-03-05 15:27:21 -0800484
Ben Cheng978738d2010-05-13 13:45:57 -0700485#if defined(WITH_JIT_TUNING)
486 LOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches);
487
Ben Cheng72621c92010-03-10 13:12:55 -0800488 LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
489 gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
490 gDvmJit.normalExit, gDvmJit.puntExit);
Ben Cheng452efba2010-04-30 15:14:00 -0700491
Ben Cheng978738d2010-05-13 13:45:57 -0700492 LOGD("JIT: ICHits: %d", gDvmICHitCount);
493
Ben Cheng72621c92010-03-10 13:12:55 -0800494 LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
495 "%d switch overflow",
496 gDvmJit.noChainExit[kInlineCacheMiss],
497 gDvmJit.noChainExit[kCallsiteInterpreted],
498 gDvmJit.noChainExit[kSwitchOverflow]);
Ben Cheng86717f72010-03-05 15:27:21 -0800499
Ben Chengb88ec3c2010-05-17 12:50:33 -0700500 LOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, "
501 "%d dropped",
502 gDvmJit.icPatchInit, gDvmJit.icPatchRejected,
503 gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued,
Ben Cheng452efba2010-04-30 15:14:00 -0700504 gDvmJit.icPatchDropped);
505
Ben Cheng86717f72010-03-05 15:27:21 -0800506 LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
507 gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
508 gDvmJit.invokeNative, gDvmJit.returnOp);
Ben Cheng7a2697d2010-06-07 13:44:23 -0700509 LOGD("JIT: Inline: %d mgetter, %d msetter, %d pgetter, %d psetter",
510 gDvmJit.invokeMonoGetterInlined, gDvmJit.invokeMonoSetterInlined,
511 gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined);
Ben Cheng86717f72010-03-05 15:27:21 -0800512 LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
513 LOGD("JIT: Avg unit compilation time: %llu us",
Andy McFaddenb7a797d2011-02-24 16:55:40 -0800514 gDvmJit.numCompilations == 0 ? 0 :
Ben Cheng86717f72010-03-05 15:27:21 -0800515 gDvmJit.jitTime / gDvmJit.numCompilations);
Ben Cheng385828e2011-03-04 16:48:33 -0800516 LOGD("JIT: Potential GC blocked by compiler: max %llu us / "
517 "avg %llu us (%d)",
518 gDvmJit.maxCompilerThreadBlockGCTime,
519 gDvmJit.numCompilerThreadBlockGC == 0 ?
520 0 : gDvmJit.compilerThreadBlockGCTime /
521 gDvmJit.numCompilerThreadBlockGC,
522 gDvmJit.numCompilerThreadBlockGC);
Ben Chengba4fc8b2009-06-01 13:00:29 -0700523#endif
Ben Cheng86717f72010-03-05 15:27:21 -0800524
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800525 LOGD("JIT: %d Translation chains, %d interp stubs",
526 gDvmJit.translationChains, stubs);
buzbee2e152ba2010-12-15 16:32:35 -0800527 if (gDvmJit.profileMode == kTraceProfilingContinuous) {
Bill Buzbee716f1202009-07-23 13:22:09 -0700528 dvmCompilerSortAndPrintTraceProfiles();
Bill Buzbee6e963e12009-06-17 16:56:19 -0700529 }
Ben Chengba4fc8b2009-06-01 13:00:29 -0700530 }
531}
532
Bill Buzbee716f1202009-07-23 13:22:09 -0700533
Bill Buzbee1b3da592011-02-03 07:38:22 -0800534/* End current trace after last successful instruction */
buzbee9f601a92011-02-11 17:48:20 -0800535void dvmJitEndTraceSelect(Thread* self)
Bill Buzbeed7269912009-11-10 14:31:32 -0800536{
buzbee9f601a92011-02-11 17:48:20 -0800537 if (self->jitState == kJitTSelect)
538 self->jitState = kJitTSelectEnd;
Bill Buzbeed7269912009-11-10 14:31:32 -0800539}
540
Ben Chengba4fc8b2009-06-01 13:00:29 -0700541/*
Bill Buzbee964a7b02010-01-28 12:54:19 -0800542 * Find an entry in the JitTable, creating if necessary.
543 * Returns null if table is full.
544 */
Ben Chengcfdeca32011-01-14 11:36:46 -0800545static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked,
546 bool isMethodEntry)
Bill Buzbee964a7b02010-01-28 12:54:19 -0800547{
548 u4 chainEndMarker = gDvmJit.jitTableSize;
549 u4 idx = dvmJitHash(dPC);
550
Ben Chengcfdeca32011-01-14 11:36:46 -0800551 /*
552 * Walk the bucket chain to find an exact match for our PC and trace/method
553 * type
554 */
Bill Buzbee964a7b02010-01-28 12:54:19 -0800555 while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
Ben Chengcfdeca32011-01-14 11:36:46 -0800556 ((gDvmJit.pJitEntryTable[idx].dPC != dPC) ||
557 (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry !=
558 isMethodEntry))) {
Bill Buzbee964a7b02010-01-28 12:54:19 -0800559 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
560 }
561
Ben Chengcfdeca32011-01-14 11:36:46 -0800562 if (gDvmJit.pJitEntryTable[idx].dPC != dPC ||
563 gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) {
Bill Buzbee964a7b02010-01-28 12:54:19 -0800564 /*
565 * No match. Aquire jitTableLock and find the last
566 * slot in the chain. Possibly continue the chain walk in case
567 * some other thread allocated the slot we were looking
568 * at previuosly (perhaps even the dPC we're trying to enter).
569 */
570 if (!callerLocked)
571 dvmLockMutex(&gDvmJit.tableLock);
572 /*
573 * At this point, if .dPC is NULL, then the slot we're
574 * looking at is the target slot from the primary hash
575 * (the simple, and common case). Otherwise we're going
576 * to have to find a free slot and chain it.
577 */
Andy McFadden6e10b9a2010-06-14 15:24:39 -0700578 ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
Bill Buzbee964a7b02010-01-28 12:54:19 -0800579 if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
580 u4 prev;
581 while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
Ben Chengcfdeca32011-01-14 11:36:46 -0800582 if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
583 gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
584 isMethodEntry) {
Bill Buzbee964a7b02010-01-28 12:54:19 -0800585 /* Another thread got there first for this dPC */
586 if (!callerLocked)
587 dvmUnlockMutex(&gDvmJit.tableLock);
588 return &gDvmJit.pJitEntryTable[idx];
589 }
590 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
591 }
592 /* Here, idx should be pointing to the last cell of an
593 * active chain whose last member contains a valid dPC */
594 assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
595 /* Linear walk to find a free cell and add it to the end */
596 prev = idx;
597 while (true) {
598 idx++;
599 if (idx == chainEndMarker)
600 idx = 0; /* Wraparound */
601 if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
602 (idx == prev))
603 break;
604 }
605 if (idx != prev) {
606 JitEntryInfoUnion oldValue;
607 JitEntryInfoUnion newValue;
608 /*
609 * Although we hold the lock so that noone else will
610 * be trying to update a chain field, the other fields
611 * packed into the word may be in use by other threads.
612 */
613 do {
614 oldValue = gDvmJit.pJitEntryTable[prev].u;
615 newValue = oldValue;
616 newValue.info.chain = idx;
Andy McFadden6e10b9a2010-06-14 15:24:39 -0700617 } while (android_atomic_release_cas(oldValue.infoWord,
618 newValue.infoWord,
619 &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
Bill Buzbee964a7b02010-01-28 12:54:19 -0800620 }
621 }
622 if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
Ben Chengcfdeca32011-01-14 11:36:46 -0800623 gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry;
Bill Buzbee964a7b02010-01-28 12:54:19 -0800624 /*
625 * Initialize codeAddress and allocate the slot. Must
626 * happen in this order (since dPC is set, the entry is live.
627 */
Ben Chengcfdeca32011-01-14 11:36:46 -0800628 android_atomic_release_store((int32_t)dPC,
629 (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC);
Bill Buzbee964a7b02010-01-28 12:54:19 -0800630 gDvmJit.pJitEntryTable[idx].dPC = dPC;
631 gDvmJit.jitTableEntriesUsed++;
632 } else {
633 /* Table is full */
634 idx = chainEndMarker;
635 }
636 if (!callerLocked)
637 dvmUnlockMutex(&gDvmJit.tableLock);
638 }
639 return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
640}
Ben Chenga4973592010-03-31 11:59:18 -0700641
buzbeed82cebc2011-03-14 12:25:24 -0700642/* Dump a trace description */
643void dvmJitDumpTraceDesc(JitTraceDescription *trace)
644{
645 int i;
646 bool done = false;
647 const u2* dpc;
648 const u2* dpcBase;
649 int curFrag = 0;
650 LOGD("===========================================");
651 LOGD("Trace dump 0x%x, Method %s starting offset 0x%x",(int)trace,
652 trace->method->name,trace->trace[curFrag].info.frag.startOffset);
653 dpcBase = trace->method->insns;
654 while (!done) {
655 DecodedInstruction decInsn;
656 if (trace->trace[curFrag].isCode) {
657 LOGD("Frag[%d]- Insts: %d, start: 0x%x, hint: 0x%x, end: %d",
658 curFrag, trace->trace[curFrag].info.frag.numInsts,
659 trace->trace[curFrag].info.frag.startOffset,
660 trace->trace[curFrag].info.frag.hint,
661 trace->trace[curFrag].info.frag.runEnd);
662 dpc = dpcBase + trace->trace[curFrag].info.frag.startOffset;
663 for (i=0; i<trace->trace[curFrag].info.frag.numInsts; i++) {
664 dexDecodeInstruction(dpc, &decInsn);
665 LOGD(" 0x%04x - %s",(dpc-dpcBase),
666 dexGetOpcodeName(decInsn.opcode));
667 dpc += dexGetWidthFromOpcode(decInsn.opcode);
668 }
669 if (trace->trace[curFrag].info.frag.runEnd) {
670 done = true;
671 }
672 } else {
673 LOGD("Frag[%d]- META info: 0x%08x", curFrag,
674 (int)trace->trace[curFrag].info.meta);
675 }
676 curFrag++;
677 }
678 LOGD("-------------------------------------------");
679}
680
Bill Buzbee964a7b02010-01-28 12:54:19 -0800681/*
Ben Cheng7a2697d2010-06-07 13:44:23 -0700682 * Append the class ptr of "this" and the current method ptr to the current
683 * trace. That is, the trace runs will contain the following components:
684 * + trace run that ends with an invoke (existing entry)
685 * + thisClass (new)
686 * + calleeMethod (new)
687 */
buzbee9f601a92011-02-11 17:48:20 -0800688static void insertClassMethodInfo(Thread* self,
Ben Cheng7a2697d2010-06-07 13:44:23 -0700689 const ClassObject* thisClass,
690 const Method* calleeMethod,
691 const DecodedInstruction* insn)
692{
buzbee9f601a92011-02-11 17:48:20 -0800693 int currTraceRun = ++self->currTraceRun;
Ben Cheng385828e2011-03-04 16:48:33 -0800694 self->trace[currTraceRun].info.meta = thisClass ?
695 (void *) thisClass->descriptor : NULL;
696 self->trace[currTraceRun].isCode = false;
697
buzbee9f601a92011-02-11 17:48:20 -0800698 currTraceRun = ++self->currTraceRun;
Ben Cheng385828e2011-03-04 16:48:33 -0800699 self->trace[currTraceRun].info.meta = thisClass ?
700 (void *) thisClass->classLoader : NULL;
701 self->trace[currTraceRun].isCode = false;
702
703 currTraceRun = ++self->currTraceRun;
704 self->trace[currTraceRun].info.meta = (void *) calleeMethod;
705 self->trace[currTraceRun].isCode = false;
Ben Cheng7a2697d2010-06-07 13:44:23 -0700706}
707
708/*
Ben Chengd44faf52010-06-02 15:33:51 -0700709 * Check if the next instruction following the invoke is a move-result and if
Ben Cheng7a2697d2010-06-07 13:44:23 -0700710 * so add it to the trace. That is, this will add the trace run that includes
711 * the move-result to the trace list.
712 *
713 * + trace run that ends with an invoke (existing entry)
714 * + thisClass (existing entry)
715 * + calleeMethod (existing entry)
716 * + move result (new)
Ben Chengd44faf52010-06-02 15:33:51 -0700717 *
718 * lastPC, len, offset are all from the preceding invoke instruction
719 */
720static void insertMoveResult(const u2 *lastPC, int len, int offset,
buzbee9f601a92011-02-11 17:48:20 -0800721 Thread *self)
Ben Chengd44faf52010-06-02 15:33:51 -0700722{
723 DecodedInstruction nextDecInsn;
724 const u2 *moveResultPC = lastPC + len;
725
Dan Bornstein54322392010-11-17 14:16:56 -0800726 dexDecodeInstruction(moveResultPC, &nextDecInsn);
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800727 if ((nextDecInsn.opcode != OP_MOVE_RESULT) &&
728 (nextDecInsn.opcode != OP_MOVE_RESULT_WIDE) &&
729 (nextDecInsn.opcode != OP_MOVE_RESULT_OBJECT))
Ben Chengd44faf52010-06-02 15:33:51 -0700730 return;
731
732 /* We need to start a new trace run */
buzbee9f601a92011-02-11 17:48:20 -0800733 int currTraceRun = ++self->currTraceRun;
734 self->currRunHead = moveResultPC;
Ben Cheng385828e2011-03-04 16:48:33 -0800735 self->trace[currTraceRun].info.frag.startOffset = offset + len;
736 self->trace[currTraceRun].info.frag.numInsts = 1;
737 self->trace[currTraceRun].info.frag.runEnd = false;
738 self->trace[currTraceRun].info.frag.hint = kJitHintNone;
739 self->trace[currTraceRun].isCode = true;
buzbee9f601a92011-02-11 17:48:20 -0800740 self->totalTraceLen++;
Ben Chengd44faf52010-06-02 15:33:51 -0700741
buzbee9f601a92011-02-11 17:48:20 -0800742 self->currRunLen = dexGetWidthFromInstruction(moveResultPC);
Ben Chengd44faf52010-06-02 15:33:51 -0700743}
744
/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted.  This is the primary trace
 * selection function.  NOTE: return instructions are handled a little
 * differently.  In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation.  If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request.  This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected.  However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes.  This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 *
 * pc        - Dalvik PC of the instruction about to be interpreted
 * self      - interpreting thread; carries all trace-selection state
 *             (jitState, trace run array, currRunHead/currRunLen, etc.)
 * thisClass - class of "this" at the current invoke (may be NULL)
 * curMethod - callee of the current invoke
 *
 * Returns non-zero (true) if the caller should switch back to the fast
 * interpreter, zero to stay in the debug/trace-building interpreter.
 */
int dvmCheckJit(const u2* pc, Thread* self, const ClassObject* thisClass,
                const Method* curMethod)
{
    int flags, len;
    int switchInterp = false;
    bool debugOrProfile = dvmDebuggerOrProfilerActive();
    /* Stay in the dbg interpreter for the next instruction */
    bool stayOneMoreInst = false;

    /*
     * Bug 2710533 - dalvik crash when disconnecting debugger
     *
     * Reset the entry point to the default value. If needed it will be set to a
     * specific value in the corresponding case statement (eg kJitSingleStepEnd)
     */
    self->entryPoint = kInterpEntryInstr;

    /*
     * Prepare to handle last PC and stage the current PC.  Note that trace
     * growth operates on lastPC (the instruction that just completed), while
     * pc is the instruction about to execute.
     */
    const u2 *lastPC = self->lastPC;
    self->lastPC = pc;

    switch (self->jitState) {
        int offset;
        DecodedInstruction decInsn;
        /* Actively building a trace: try to append lastPC's instruction */
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(lastPC, &decInsn);

            /*
             * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
             * to the amount of space it takes to generate the chaining
             * cells.
             */
            if (self->totalTraceLen != 0 &&
                (decInsn.opcode == OP_PACKED_SWITCH ||
                 decInsn.opcode == OP_SPARSE_SWITCH)) {
                self->jitState = kJitTSelectEnd;
                break;
            }


#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s", dexGetOpcodeName(decInsn.opcode));
#endif
            flags = dexGetFlagsFromOpcode(decInsn.opcode);
            len = dexGetWidthFromInstruction(lastPC);
            offset = lastPC - self->interpSave.method->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(self->interpSave.method));
            /*
             * A non-contiguous PC means control transferred (taken branch);
             * start a new trace run for the new basic block.
             */
            if (lastPC != self->currRunHead + self->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++self->currTraceRun;
                self->currRunLen = 0;
                self->currRunHead = (u2*)lastPC;
                self->trace[currTraceRun].info.frag.startOffset = offset;
                self->trace[currTraceRun].info.frag.numInsts = 0;
                self->trace[currTraceRun].info.frag.runEnd = false;
                self->trace[currTraceRun].info.frag.hint = kJitHintNone;
                self->trace[currTraceRun].isCode = true;
            }
            self->trace[self->currTraceRun].info.frag.numInsts++;
            self->totalTraceLen++;
            self->currRunLen += len;

            /*
             * If the last instruction is an invoke, we will try to sneak in
             * the move-result* (if existent) into a separate trace run.
             */
            int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;

            /* Will probably never hit this with the current trace builder */
            if (self->currTraceRun ==
                (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
                self->jitState = kJitTSelectEnd;
            }

            /*
             * End the trace at any control-flow instruction other than an
             * unconditional goto (gotos are folded into the trace).
             */
            if (!dexIsGoto(flags) &&
                  ((flags & (kInstrCanBranch |
                             kInstrCanSwitch |
                             kInstrCanReturn |
                             kInstrInvoke)) != 0)) {
                self->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     dexGetOpcodeName(decInsn.opcode));
#endif

                /*
                 * If the current invoke is a {virtual,interface}, get the
                 * current class/method pair into the trace as well.
                 * If the next instruction is a variant of move-result, insert
                 * it to the trace too.
                 */
                if (flags & kInstrInvoke) {
                    insertClassMethodInfo(self, thisClass, curMethod,
                                          &decInsn);
                    insertMoveResult(lastPC, len, offset, self);
                }
            }
            /* Break on throw or self-loop */
            if ((decInsn.opcode == OP_THROW) || (lastPC == pc)){
                self->jitState = kJitTSelectEnd;
            }
            if (self->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                self->jitState = kJitTSelectEnd;
            }
            /* Abandon the trace request if debugger/profiler is attached */
            if (debugOrProfile) {
                self->jitState = kJitDone;
                break;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            else {
                /*
                 * Last instruction is a return - stay in the dbg interpreter
                 * for one more instruction if it is a non-void return, since
                 * we don't want to start a trace with move-result as the first
                 * instruction (which is already included in the trace
                 * containing the invoke).
                 */
                if (decInsn.opcode != OP_RETURN_VOID) {
                    stayOneMoreInst = true;
                }
            }
            /* NOTE: intentional fallthrough for returns */
        /* Trace complete: package the runs and enqueue for compilation */
        case kJitTSelectEnd:
            {
                /* Empty trace - set to bail to interpreter */
                if (self->totalTraceLen == 0) {
                    dvmJitSetCodeAddr(self->currTraceHead,
                                      dvmCompilerGetInterpretTemplate(),
                                      dvmCompilerGetInterpretTemplateSet(),
                                      false /* Not method entry */, 0);
                    self->jitState = kJitDone;
                    switchInterp = true;
                    break;
                }

                int lastTraceDesc = self->currTraceRun;

                /* Extend a new empty desc if the last slot is meta info */
                if (!self->trace[lastTraceDesc].isCode) {
                    lastTraceDesc = ++self->currTraceRun;
                    self->trace[lastTraceDesc].info.frag.startOffset = 0;
                    self->trace[lastTraceDesc].info.frag.numInsts = 0;
                    self->trace[lastTraceDesc].info.frag.hint = kJitHintNone;
                    self->trace[lastTraceDesc].isCode = true;
                }

                /* Mark the end of the trace runs */
                self->trace[lastTraceDesc].info.frag.runEnd = true;

                /* Variable-length: header plus currTraceRun+1 runs */
                JitTraceDescription* desc =
                   (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                     sizeof(JitTraceRun) * (self->currTraceRun+1));

                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    self->jitState = kJitDone;
                    switchInterp = true;
                    break;
                }

                desc->method = self->interpSave.method;
                memcpy((char*)&(desc->trace[0]),
                    (char*)&(self->trace[0]),
                    sizeof(JitTraceRun) * (self->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
#endif
                if (dvmCompilerWorkEnqueue(
                       self->currTraceHead,kWorkOrderTrace,desc)) {
                    /* Work order successfully enqueued */
#if defined(SHOW_TRACE)
                    dvmJitDumpTraceDesc(desc);
#endif
                    if (gDvmJit.blockingMode) {
                        dvmCompilerDrainQueue();
                    }
                } else {
                    /*
                     * Make sure the descriptor for the abandoned work order is
                     * freed.
                     */
                    free(desc);
                }
                self->jitState = kJitDone;
                switchInterp = true;
            }
            break;
        case kJitSingleStep:
            self->jitState = kJitSingleStepEnd;
            break;
        case kJitSingleStepEnd:
            /*
             * Clear the inJitCodeCache flag and abandon the resume attempt if
             * we cannot switch back to the translation due to corner-case
             * conditions. If the flag is not cleared and the code cache is full
             * we will be stuck in the debug interpreter as the code cache
             * cannot be reset.
             */
            if (dvmJitStayInPortableInterpreter()) {
                self->entryPoint = kInterpEntryInstr;
                self->inJitCodeCache = 0;
            } else {
                self->entryPoint = kInterpEntryResume;
            }
            self->jitState = kJitDone;
            switchInterp = true;
            break;
        case kJitDone:
            switchInterp = true;
            break;
#if defined(WITH_SELF_VERIFICATION)
        case kJitSelfVerification:
            if (selfVerificationDebugInterp(pc, self)) {
                /*
                 * If the next state is not single-step end, we can switch
                 * interpreter now.
                 */
                if (self->jitState != kJitSingleStepEnd) {
                    self->jitState = kJitDone;
                    switchInterp = true;
                }
            }
            break;
#endif
        case kJitNot:
            switchInterp = !debugOrProfile;
            break;
        default:
            LOGE("Unexpected JIT state: %d entry point: %d",
                 self->jitState, self->entryPoint);
            dvmAbort();
            break;
    }
    /*
     * Final check to see if we can really switch the interpreter. Make sure
     * the jitState is kJitDone or kJitNot when switchInterp is set to true.
     */
    assert(switchInterp == false || self->jitState == kJitDone ||
           self->jitState == kJitNot);
    return switchInterp && !debugOrProfile && !stayOneMoreInst &&
           !dvmJitStayInPortableInterpreter();
}
1012
Bill Buzbee1b3da592011-02-03 07:38:22 -08001013JitEntry *dvmJitFindEntry(const u2* pc, bool isMethodEntry)
Ben Chengba4fc8b2009-06-01 13:00:29 -07001014{
1015 int idx = dvmJitHash(pc);
1016
1017 /* Expect a high hit rate on 1st shot */
Bill Buzbee1b3da592011-02-03 07:38:22 -08001018 if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
1019 (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == isMethodEntry))
Ben Chengba4fc8b2009-06-01 13:00:29 -07001020 return &gDvmJit.pJitEntryTable[idx];
1021 else {
Bill Buzbee27176222009-06-09 09:20:16 -07001022 int chainEndMarker = gDvmJit.jitTableSize;
Bill Buzbee716f1202009-07-23 13:22:09 -07001023 while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
1024 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
Bill Buzbee1b3da592011-02-03 07:38:22 -08001025 if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
1026 (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
1027 isMethodEntry))
Ben Chengba4fc8b2009-06-01 13:00:29 -07001028 return &gDvmJit.pJitEntryTable[idx];
1029 }
1030 }
1031 return NULL;
1032}
1033
Bill Buzbee27176222009-06-09 09:20:16 -07001034/*
Ben Chengcfdeca32011-01-14 11:36:46 -08001035 * Walk through the JIT profile table and find the corresponding JIT code, in
1036 * the specified format (ie trace vs method). This routine needs to be fast.
Ben Chengba4fc8b2009-06-01 13:00:29 -07001037 */
Ben Chengcfdeca32011-01-14 11:36:46 -08001038void* getCodeAddrCommon(const u2* dPC, bool methodEntry)
Ben Chengba4fc8b2009-06-01 13:00:29 -07001039{
1040 int idx = dvmJitHash(dPC);
Ben Chengcfdeca32011-01-14 11:36:46 -08001041 const u2* pc = gDvmJit.pJitEntryTable[idx].dPC;
1042 if (pc != NULL) {
Ben Cheng1a7b9d72010-09-20 22:20:31 -07001043 bool hideTranslation = dvmJitHideTranslation();
Ben Chengcfdeca32011-01-14 11:36:46 -08001044 if (pc == dPC &&
1045 gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) {
buzbee2e152ba2010-12-15 16:32:35 -08001046 int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ?
1047 0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
1048 intptr_t codeAddress =
1049 (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
Ben Cheng978738d2010-05-13 13:45:57 -07001050#if defined(WITH_JIT_TUNING)
Bill Buzbee9797a232010-01-12 12:20:13 -08001051 gDvmJit.addrLookupsFound++;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001052#endif
buzbee99ddb1e2011-01-28 10:44:30 -08001053 return hideTranslation || !codeAddress ? NULL :
1054 (void *)(codeAddress + offset);
Bill Buzbee9797a232010-01-12 12:20:13 -08001055 } else {
1056 int chainEndMarker = gDvmJit.jitTableSize;
1057 while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
1058 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
Ben Chengcfdeca32011-01-14 11:36:46 -08001059 if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
1060 gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
1061 methodEntry) {
buzbee2e152ba2010-12-15 16:32:35 -08001062 int offset = (gDvmJit.profileMode >=
1063 kTraceProfilingContinuous) ? 0 :
1064 gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
1065 intptr_t codeAddress =
1066 (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
Ben Cheng978738d2010-05-13 13:45:57 -07001067#if defined(WITH_JIT_TUNING)
Bill Buzbee9797a232010-01-12 12:20:13 -08001068 gDvmJit.addrLookupsFound++;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001069#endif
buzbee99ddb1e2011-01-28 10:44:30 -08001070 return hideTranslation || !codeAddress ? NULL :
buzbee2e152ba2010-12-15 16:32:35 -08001071 (void *)(codeAddress + offset);
Bill Buzbee9797a232010-01-12 12:20:13 -08001072 }
Ben Chengba4fc8b2009-06-01 13:00:29 -07001073 }
1074 }
1075 }
Ben Cheng978738d2010-05-13 13:45:57 -07001076#if defined(WITH_JIT_TUNING)
Ben Chengba4fc8b2009-06-01 13:00:29 -07001077 gDvmJit.addrLookupsNotFound++;
1078#endif
1079 return NULL;
1080}
1081
/*
 * If a translated code address, in trace format, exists for the Dalvik byte
 * code pointer, return it; NULL otherwise (see getCodeAddrCommon for the
 * hide-translation and profiling-offset handling).
 */
void* dvmJitGetTraceAddr(const u2* dPC)
{
    return getCodeAddrCommon(dPC, false /* method entry */);
}
1090
/*
 * If a translated code address, in whole-method format, exists for the
 * Dalvik byte code pointer, return it; NULL otherwise (see
 * getCodeAddrCommon for the hide-translation and profiling-offset handling).
 */
void* dvmJitGetMethodAddr(const u2* dPC)
{
    return getCodeAddrCommon(dPC, true /* method entry */);
}
1099
/*
 * Register the translated code pointer into the JitTable.
 * NOTE: Once a codeAddress field transitions from initial state to
 * JIT'd code, it must not be altered without first halting all
 * threads.  This routine should only be called by the compiler
 * thread.  We defer the setting of the profile prefix size until
 * after the new code address is set to ensure that the prefix offset
 * is never applied to the initial interpret-only translation.  All
 * translations with non-zero profile prefixes will still be correct
 * if entered as if the profile offset is 0, but the interpret-only
 * template cannot handle a non-zero prefix.
 *
 * dPC               - Dalvik PC the translation corresponds to
 * nPC               - native entry point of the translation
 * set               - instruction set type of the translation
 * isMethodEntry     - true for whole-method translations, false for traces
 * profilePrefixSize - bytes of profiling prefix ahead of the real entry
 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set,
                       bool isMethodEntry, int profilePrefixSize)
{
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    /*
     * Method-based JIT doesn't go through the normal profiling phase, so use
     * lookupAndAdd here to request a new entry in the table.
     */
    JitEntry *jitEntry = isMethodEntry ?
        lookupAndAdd(dPC, false /* caller locked */, true) :
        dvmJitFindEntry(dPC, isMethodEntry);
    assert(jitEntry);
    /*
     * Note: order of update is important.  The info word is published
     * first via release-CAS (retrying if another thread raced on other
     * packed fields), and only then is codeAddress stored, so a reader
     * that sees the new codeAddress also sees consistent info bits.
     */
    do {
        oldValue = jitEntry->u;
        newValue = oldValue;
        newValue.info.isMethodEntry = isMethodEntry;
        newValue.info.instructionSet = set;
        newValue.info.profileOffset = profilePrefixSize;
    } while (android_atomic_release_cas(
             oldValue.infoWord, newValue.infoWord,
             &jitEntry->u.infoWord) != 0);
    jitEntry->codeAddress = nPC;
}
1137
1138/*
1139 * Determine if valid trace-bulding request is active. Return true
1140 * if we need to abort and switch back to the fast interpreter, false
Ben Chenga4973592010-03-31 11:59:18 -07001141 * otherwise.
Ben Chengba4fc8b2009-06-01 13:00:29 -07001142 */
buzbee9f601a92011-02-11 17:48:20 -08001143bool dvmJitCheckTraceRequest(Thread* self)
Ben Chengba4fc8b2009-06-01 13:00:29 -07001144{
Ben Chenga4973592010-03-31 11:59:18 -07001145 bool switchInterp = false; /* Assume success */
Bill Buzbee48f18242009-06-19 16:02:27 -07001146 int i;
buzbee852aacd2010-06-08 16:24:46 -07001147 /*
1148 * A note on trace "hotness" filtering:
1149 *
1150 * Our first level trigger is intentionally loose - we need it to
1151 * fire easily not just to identify potential traces to compile, but
1152 * also to allow re-entry into the code cache.
1153 *
1154 * The 2nd level filter (done here) exists to be selective about
1155 * what we actually compile. It works by requiring the same
1156 * trace head "key" (defined as filterKey below) to appear twice in
1157 * a relatively short period of time. The difficulty is defining the
1158 * shape of the filterKey. Unfortunately, there is no "one size fits
1159 * all" approach.
1160 *
1161 * For spiky execution profiles dominated by a smallish
1162 * number of very hot loops, we would want the second-level filter
1163 * to be very selective. A good selective filter is requiring an
1164 * exact match of the Dalvik PC. In other words, defining filterKey as:
buzbee9f601a92011-02-11 17:48:20 -08001165 * intptr_t filterKey = (intptr_t)self->interpSave.pc
buzbee852aacd2010-06-08 16:24:46 -07001166 *
1167 * However, for flat execution profiles we do best when aggressively
1168 * translating. A heuristically decent proxy for this is to use
1169 * the value of the method pointer containing the trace as the filterKey.
1170 * Intuitively, this is saying that once any trace in a method appears hot,
1171 * immediately translate any other trace from that same method that
1172 * survives the first-level filter. Here, filterKey would be defined as:
buzbee9f601a92011-02-11 17:48:20 -08001173 * intptr_t filterKey = (intptr_t)self->interpSave.method
buzbee852aacd2010-06-08 16:24:46 -07001174 *
1175 * The problem is that we can't easily detect whether we're dealing
1176 * with a spiky or flat profile. If we go with the "pc" match approach,
1177 * flat profiles perform poorly. If we go with the loose "method" match,
1178 * we end up generating a lot of useless translations. Probably the
1179 * best approach in the future will be to retain profile information
1180 * across runs of each application in order to determine it's profile,
1181 * and then choose once we have enough history.
1182 *
1183 * However, for now we've decided to chose a compromise filter scheme that
1184 * includes elements of both. The high order bits of the filter key
1185 * are drawn from the enclosing method, and are combined with a slice
1186 * of the low-order bits of the Dalvik pc of the trace head. The
1187 * looseness of the filter can be adjusted by changing with width of
1188 * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS). The wider
1189 * the slice, the tighter the filter.
1190 *
1191 * Note: the fixed shifts in the function below reflect assumed word
1192 * alignment for method pointers, and half-word alignment of the Dalvik pc.
1193 * for method pointers and half-word alignment for dalvik pc.
1194 */
buzbee9f601a92011-02-11 17:48:20 -08001195 u4 methodKey = (u4)self->interpSave.method <<
buzbeec35294d2010-06-09 14:22:50 -07001196 (JIT_TRACE_THRESH_FILTER_PC_BITS - 2);
buzbee9f601a92011-02-11 17:48:20 -08001197 u4 pcKey = ((u4)self->interpSave.pc >> 1) &
buzbeec35294d2010-06-09 14:22:50 -07001198 ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1);
1199 intptr_t filterKey = (intptr_t)(methodKey | pcKey);
Ben Chenga4973592010-03-31 11:59:18 -07001200 bool debugOrProfile = dvmDebuggerOrProfilerActive();
Ben Cheng40094c12010-02-24 20:58:44 -08001201
Ben Chenga4973592010-03-31 11:59:18 -07001202 /* Check if the JIT request can be handled now */
1203 if (gDvmJit.pJitEntryTable != NULL && debugOrProfile == false) {
1204 /* Bypass the filter for hot trace requests or during stress mode */
buzbee9f601a92011-02-11 17:48:20 -08001205 if (self->jitState == kJitTSelectRequest &&
Ben Chenga4973592010-03-31 11:59:18 -07001206 gDvmJit.threshold > 6) {
Ben Cheng40094c12010-02-24 20:58:44 -08001207 /* Two-level filtering scheme */
1208 for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
buzbee9f601a92011-02-11 17:48:20 -08001209 if (filterKey == self->threshFilter[i]) {
1210 self->threshFilter[i] = 0; // Reset filter entry
Ben Cheng40094c12010-02-24 20:58:44 -08001211 break;
1212 }
Bill Buzbee48f18242009-06-19 16:02:27 -07001213 }
Ben Cheng40094c12010-02-24 20:58:44 -08001214 if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
1215 /*
1216 * Use random replacement policy - otherwise we could miss a
1217 * large loop that contains more traces than the size of our
1218 * filter array.
1219 */
1220 i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
buzbee9f601a92011-02-11 17:48:20 -08001221 self->threshFilter[i] = filterKey;
1222 self->jitState = kJitDone;
Ben Cheng40094c12010-02-24 20:58:44 -08001223 }
Ben Chenga4973592010-03-31 11:59:18 -07001224 }
Bill Buzbeed7269912009-11-10 14:31:32 -08001225
Ben Chenga4973592010-03-31 11:59:18 -07001226 /* If the compiler is backlogged, cancel any JIT actions */
1227 if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
buzbee9f601a92011-02-11 17:48:20 -08001228 self->jitState = kJitDone;
Ben Cheng40094c12010-02-24 20:58:44 -08001229 }
Bill Buzbeed7269912009-11-10 14:31:32 -08001230
Ben Chengba4fc8b2009-06-01 13:00:29 -07001231 /*
Ben Chenga4973592010-03-31 11:59:18 -07001232 * Check for additional reasons that might force the trace select
1233 * request to be dropped
Ben Chengba4fc8b2009-06-01 13:00:29 -07001234 */
buzbee9f601a92011-02-11 17:48:20 -08001235 if (self->jitState == kJitTSelectRequest ||
1236 self->jitState == kJitTSelectRequestHot) {
1237 if (dvmJitFindEntry(self->interpSave.pc, false)) {
Bill Buzbee1b3da592011-02-03 07:38:22 -08001238 /* In progress - nothing to do */
buzbee9f601a92011-02-11 17:48:20 -08001239 self->jitState = kJitDone;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001240 } else {
buzbee9f601a92011-02-11 17:48:20 -08001241 JitEntry *slot = lookupAndAdd(self->interpSave.pc,
Bill Buzbee1b3da592011-02-03 07:38:22 -08001242 false /* lock */,
1243 false /* method entry */);
1244 if (slot == NULL) {
1245 /*
1246 * Table is full. This should have been
1247 * detected by the compiler thread and the table
1248 * resized before we run into it here. Assume bad things
1249 * are afoot and disable profiling.
1250 */
buzbee9f601a92011-02-11 17:48:20 -08001251 self->jitState = kJitDone;
Bill Buzbee1b3da592011-02-03 07:38:22 -08001252 LOGD("JIT: JitTable full, disabling profiling");
1253 dvmJitStopTranslationRequests();
1254 }
Ben Chengba4fc8b2009-06-01 13:00:29 -07001255 }
1256 }
Ben Chenga4973592010-03-31 11:59:18 -07001257
buzbee9f601a92011-02-11 17:48:20 -08001258 switch (self->jitState) {
Ben Chengba4fc8b2009-06-01 13:00:29 -07001259 case kJitTSelectRequest:
Ben Cheng40094c12010-02-24 20:58:44 -08001260 case kJitTSelectRequestHot:
buzbee9f601a92011-02-11 17:48:20 -08001261 self->jitState = kJitTSelect;
1262 self->currTraceHead = self->interpSave.pc;
1263 self->currTraceRun = 0;
1264 self->totalTraceLen = 0;
1265 self->currRunHead = self->interpSave.pc;
1266 self->currRunLen = 0;
Ben Cheng385828e2011-03-04 16:48:33 -08001267 self->trace[0].info.frag.startOffset =
buzbee9f601a92011-02-11 17:48:20 -08001268 self->interpSave.pc - self->interpSave.method->insns;
Ben Cheng385828e2011-03-04 16:48:33 -08001269 self->trace[0].info.frag.numInsts = 0;
1270 self->trace[0].info.frag.runEnd = false;
1271 self->trace[0].info.frag.hint = kJitHintNone;
1272 self->trace[0].isCode = true;
buzbee9f601a92011-02-11 17:48:20 -08001273 self->lastPC = 0;
Ben Chenga4973592010-03-31 11:59:18 -07001274 break;
1275 /*
1276 * For JIT's perspective there is no need to stay in the debug
1277 * interpreter unless debugger/profiler is attached.
1278 */
1279 case kJitDone:
1280 switchInterp = true;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001281 break;
1282 default:
Ben Chenga4973592010-03-31 11:59:18 -07001283 LOGE("Unexpected JIT state: %d entry point: %d",
buzbee9f601a92011-02-11 17:48:20 -08001284 self->jitState, self->entryPoint);
Ben Chengba4fc8b2009-06-01 13:00:29 -07001285 dvmAbort();
1286 }
Ben Chenga4973592010-03-31 11:59:18 -07001287 } else {
1288 /*
1289 * Cannot build trace this time - ready to leave the dbg interpreter
1290 */
buzbee9f601a92011-02-11 17:48:20 -08001291 self->jitState = kJitDone;
Ben Chenga4973592010-03-31 11:59:18 -07001292 switchInterp = true;
Ben Chengba4fc8b2009-06-01 13:00:29 -07001293 }
Ben Chenga4973592010-03-31 11:59:18 -07001294
1295 /*
1296 * Final check to see if we can really switch the interpreter. Make sure
1297 * the jitState is kJitDone when switchInterp is set to true.
1298 */
buzbee9f601a92011-02-11 17:48:20 -08001299 assert(switchInterp == false || self->jitState == kJitDone);
Ben Cheng1a7b9d72010-09-20 22:20:31 -07001300 return switchInterp && !debugOrProfile &&
1301 !dvmJitStayInPortableInterpreter();
Ben Chengba4fc8b2009-06-01 13:00:29 -07001302}
1303
Bill Buzbee27176222009-06-09 09:20:16 -07001304/*
1305 * Resizes the JitTable. Must be a power of 2, and returns true on failure.
Bill Buzbee964a7b02010-01-28 12:54:19 -08001306 * Stops all threads, and thus is a heavyweight operation. May only be called
1307 * by the compiler thread.
Bill Buzbee27176222009-06-09 09:20:16 -07001308 */
1309bool dvmJitResizeJitTable( unsigned int size )
1310{
Bill Buzbee716f1202009-07-23 13:22:09 -07001311 JitEntry *pNewTable;
1312 JitEntry *pOldTable;
Bill Buzbee964a7b02010-01-28 12:54:19 -08001313 JitEntry tempEntry;
Bill Buzbee27176222009-06-09 09:20:16 -07001314 u4 newMask;
Bill Buzbee716f1202009-07-23 13:22:09 -07001315 unsigned int oldSize;
Bill Buzbee27176222009-06-09 09:20:16 -07001316 unsigned int i;
1317
Ben Cheng3f02aa42009-08-14 13:52:09 -07001318 assert(gDvmJit.pJitEntryTable != NULL);
Bill Buzbee27176222009-06-09 09:20:16 -07001319 assert(size && !(size & (size - 1))); /* Is power of 2? */
1320
Ben Chenga4973592010-03-31 11:59:18 -07001321 LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);
Bill Buzbee27176222009-06-09 09:20:16 -07001322
1323 newMask = size - 1;
1324
1325 if (size <= gDvmJit.jitTableSize) {
1326 return true;
1327 }
1328
Bill Buzbee964a7b02010-01-28 12:54:19 -08001329 /* Make sure requested size is compatible with chain field width */
1330 tempEntry.u.info.chain = size;
1331 if (tempEntry.u.info.chain != size) {
1332 LOGD("Jit: JitTable request of %d too big", size);
1333 return true;
1334 }
1335
Bill Buzbee716f1202009-07-23 13:22:09 -07001336 pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
Bill Buzbee27176222009-06-09 09:20:16 -07001337 if (pNewTable == NULL) {
1338 return true;
1339 }
1340 for (i=0; i< size; i++) {
Bill Buzbee716f1202009-07-23 13:22:09 -07001341 pNewTable[i].u.info.chain = size; /* Initialize chain termination */
Bill Buzbee27176222009-06-09 09:20:16 -07001342 }
1343
1344 /* Stop all other interpreting/jit'ng threads */
Ben Chenga8e64a72009-10-20 13:01:36 -07001345 dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE);
Bill Buzbee27176222009-06-09 09:20:16 -07001346
Bill Buzbee716f1202009-07-23 13:22:09 -07001347 pOldTable = gDvmJit.pJitEntryTable;
1348 oldSize = gDvmJit.jitTableSize;
Bill Buzbee27176222009-06-09 09:20:16 -07001349
1350 dvmLockMutex(&gDvmJit.tableLock);
Bill Buzbee27176222009-06-09 09:20:16 -07001351 gDvmJit.pJitEntryTable = pNewTable;
1352 gDvmJit.jitTableSize = size;
1353 gDvmJit.jitTableMask = size - 1;
Bill Buzbee716f1202009-07-23 13:22:09 -07001354 gDvmJit.jitTableEntriesUsed = 0;
Bill Buzbee27176222009-06-09 09:20:16 -07001355
Bill Buzbee716f1202009-07-23 13:22:09 -07001356 for (i=0; i < oldSize; i++) {
1357 if (pOldTable[i].dPC) {
1358 JitEntry *p;
1359 u2 chain;
Ben Chengcfdeca32011-01-14 11:36:46 -08001360 p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/,
1361 pOldTable[i].u.info.isMethodEntry);
Bill Buzbee964a7b02010-01-28 12:54:19 -08001362 p->codeAddress = pOldTable[i].codeAddress;
Bill Buzbee716f1202009-07-23 13:22:09 -07001363 /* We need to preserve the new chain field, but copy the rest */
Bill Buzbee716f1202009-07-23 13:22:09 -07001364 chain = p->u.info.chain;
1365 p->u = pOldTable[i].u;
1366 p->u.info.chain = chain;
Bill Buzbee716f1202009-07-23 13:22:09 -07001367 }
1368 }
buzbee2e152ba2010-12-15 16:32:35 -08001369
Bill Buzbee964a7b02010-01-28 12:54:19 -08001370 dvmUnlockMutex(&gDvmJit.tableLock);
Bill Buzbee716f1202009-07-23 13:22:09 -07001371
1372 free(pOldTable);
1373
Bill Buzbee27176222009-06-09 09:20:16 -07001374 /* Restart the world */
Ben Chenga8e64a72009-10-20 13:01:36 -07001375 dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE);
Bill Buzbee27176222009-06-09 09:20:16 -07001376
1377 return false;
1378}
1379
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001380/*
Ben Cheng60c24f42010-01-04 12:29:56 -08001381 * Reset the JitTable to the initial clean state.
1382 */
1383void dvmJitResetTable(void)
1384{
1385 JitEntry *jitEntry = gDvmJit.pJitEntryTable;
1386 unsigned int size = gDvmJit.jitTableSize;
1387 unsigned int i;
1388
1389 dvmLockMutex(&gDvmJit.tableLock);
buzbee2e152ba2010-12-15 16:32:35 -08001390
1391 /* Note: If need to preserve any existing counts. Do so here. */
buzbee38c41342011-01-11 15:45:49 -08001392 if (gDvmJit.pJitTraceProfCounters) {
1393 for (i=0; i < JIT_PROF_BLOCK_BUCKETS; i++) {
1394 if (gDvmJit.pJitTraceProfCounters->buckets[i])
1395 memset((void *) gDvmJit.pJitTraceProfCounters->buckets[i],
1396 0, sizeof(JitTraceCounter_t) * JIT_PROF_BLOCK_ENTRIES);
1397 }
1398 gDvmJit.pJitTraceProfCounters->next = 0;
buzbee2e152ba2010-12-15 16:32:35 -08001399 }
buzbee2e152ba2010-12-15 16:32:35 -08001400
Ben Cheng60c24f42010-01-04 12:29:56 -08001401 memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
1402 for (i=0; i< size; i++) {
1403 jitEntry[i].u.info.chain = size; /* Initialize chain termination */
1404 }
1405 gDvmJit.jitTableEntriesUsed = 0;
1406 dvmUnlockMutex(&gDvmJit.tableLock);
1407}
1408
1409/*
buzbee2e152ba2010-12-15 16:32:35 -08001410 * Return the address of the next trace profile counter. This address
1411 * will be embedded in the generated code for the trace, and thus cannot
1412 * change while the trace exists.
1413 */
1414JitTraceCounter_t *dvmJitNextTraceCounter()
1415{
1416 int idx = gDvmJit.pJitTraceProfCounters->next / JIT_PROF_BLOCK_ENTRIES;
1417 int elem = gDvmJit.pJitTraceProfCounters->next % JIT_PROF_BLOCK_ENTRIES;
1418 JitTraceCounter_t *res;
1419 /* Lazily allocate blocks of counters */
1420 if (!gDvmJit.pJitTraceProfCounters->buckets[idx]) {
1421 JitTraceCounter_t *p =
1422 (JitTraceCounter_t*) calloc(JIT_PROF_BLOCK_ENTRIES, sizeof(*p));
1423 if (!p) {
1424 LOGE("Failed to allocate block of trace profile counters");
1425 dvmAbort();
1426 }
1427 gDvmJit.pJitTraceProfCounters->buckets[idx] = p;
1428 }
1429 res = &gDvmJit.pJitTraceProfCounters->buckets[idx][elem];
1430 gDvmJit.pJitTraceProfCounters->next++;
1431 return res;
1432}
1433
1434/*
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001435 * Float/double conversion requires clamping to min and max of integer form. If
1436 * target doesn't support this normally, use these.
1437 */
1438s8 dvmJitd2l(double d)
1439{
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001440 static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
1441 static const double kMinLong = (double)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001442 if (d >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001443 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001444 else if (d <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001445 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001446 else if (d != d) // NaN case
1447 return 0;
1448 else
1449 return (s8)d;
1450}
1451
1452s8 dvmJitf2l(float f)
1453{
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001454 static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
1455 static const float kMinLong = (float)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001456 if (f >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001457 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001458 else if (f <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001459 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001460 else if (f != f) // NaN case
1461 return 0;
1462 else
1463 return (s8)f;
1464}
1465
buzbee2e152ba2010-12-15 16:32:35 -08001466/* Should only be called by the compiler thread */
1467void dvmJitChangeProfileMode(TraceProfilingModes newState)
1468{
1469 if (gDvmJit.profileMode != newState) {
1470 gDvmJit.profileMode = newState;
1471 dvmJitUnchainAll();
1472 }
1473}
1474
1475void dvmJitTraceProfilingOn()
1476{
1477 if (gDvmJit.profileMode == kTraceProfilingPeriodicOff)
Bill Buzbee1b3da592011-02-03 07:38:22 -08001478 dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
1479 (void*) kTraceProfilingPeriodicOn);
buzbee2e152ba2010-12-15 16:32:35 -08001480 else if (gDvmJit.profileMode == kTraceProfilingDisabled)
Bill Buzbee1b3da592011-02-03 07:38:22 -08001481 dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
1482 (void*) kTraceProfilingContinuous);
buzbee2e152ba2010-12-15 16:32:35 -08001483}
1484
1485void dvmJitTraceProfilingOff()
1486{
1487 if (gDvmJit.profileMode == kTraceProfilingPeriodicOn)
Bill Buzbee1b3da592011-02-03 07:38:22 -08001488 dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
1489 (void*) kTraceProfilingPeriodicOff);
buzbee2e152ba2010-12-15 16:32:35 -08001490 else if (gDvmJit.profileMode == kTraceProfilingContinuous)
Bill Buzbee1b3da592011-02-03 07:38:22 -08001491 dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
1492 (void*) kTraceProfilingDisabled);
buzbee2e152ba2010-12-15 16:32:35 -08001493}
1494
Ben Chengba4fc8b2009-06-01 13:00:29 -07001495#endif /* WITH_JIT */