/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifdef WITH_JIT

/*
 * Target independent portion of Android's Jit
 */

#include "Dalvik.h"
#include "Jit.h"

#include "libdex/DexOpcodes.h"
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>
#include <signal.h>
#include "compiler/Compiler.h"
#include "compiler/CompilerUtility.h"
#include "compiler/CompilerIR.h"
#include <errno.h>

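/*
 * A high-level sketch of self-verification, inferred from the functions
 * below: when WITH_SELF_VERIFICATION is defined, a translated trace runs
 * against a shadow copy of the thread's state rather than the real one.
 * dvmSelfVerificationSaveState() snapshots the Dalvik frame into the
 * per-thread ShadowSpace and points the frame pointer at the copy;
 * dvmSelfVerificationRestoreState() then puts the original state back;
 * finally dvmCheckSelfVerification() re-interprets the same instructions
 * against the real state and, at the trace end point, compares the
 * registers, any new frame, and the logged heap writes. A divergence
 * dumps both states and parks the thread in a spin loop.
 */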
#if defined(WITH_SELF_VERIFICATION)
/* Allocate space for per-thread ShadowSpace data structures */
void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
{
    self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
    if (self->shadowSpace == NULL)
        return NULL;

    self->shadowSpace->registerSpaceSize = REG_SPACE;
    self->shadowSpace->registerSpace =
        (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));

    return self->shadowSpace->registerSpace;
}

/* Free per-thread ShadowSpace data structures */
void dvmSelfVerificationShadowSpaceFree(Thread* self)
{
    free(self->shadowSpace->registerSpace);
    free(self->shadowSpace);
}

/*
 * Save out PC, FP, thread state, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 *
 * The set of saved state from the Thread structure is:
 *     pc  (Dalvik PC)
 *     fp  (Dalvik FP)
 *     retval
 *     method
 *     methodClassDex
 *     interpStackEnd
 */
void* dvmSelfVerificationSaveState(const u2* pc, u4* fp,
                                   Thread* self, int targetTrace)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    unsigned preBytes = self->interpSave.method->outsSize*4 +
        sizeof(StackSaveArea);
    unsigned postBytes = self->interpSave.method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    // Dynamically grow shadow register space if necessary
    if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->retval = self->retval;
    shadowSpace->interpStackEnd = self->interpStackEnd;

    /*
     * Store the original method here in case the trace ends with a
     * return/invoke, which would leave a different method current.
     */
    shadowSpace->method = self->interpSave.method;
    shadowSpace->methodClassDex = self->interpSave.methodClassDex;

    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;
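
    /*
     * Shadow frame layout (illustrative, derived from the memcpy below):
     *
     *  registerSpace                    shadowFP        registerSpace+size
     *   |<-------- free -------->|<-- preBytes -->|<---- postBytes ---->|
     *                             (outs+save area)  (method's registers)
     *
     * interpStackEnd is pointed at registerSpace so that stack-overflow
     * checks trip before the shadow area can be overrun.
     */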

    self->interpSave.fp = (u4*)shadowSpace->shadowFP;
    self->interpStackEnd = (u1*)shadowSpace->registerSpace;
    self->curFrame = self->interpSave.fp;

    // Create a copy of the stack
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
           preBytes+postBytes);

    // Set up the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}

/*
 * Save ending PC, FP and compiled code exit point to shadow space.
 * Return a pointer to the shadow space for JIT to restore state.
 */
void* dvmSelfVerificationRestoreState(const u2* pc, u4* fp,
                                      SelfVerificationState exitState,
                                      Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    shadowSpace->endPC = pc;
    shadowSpace->endShadowFP = fp;
    shadowSpace->jitExitState = exitState;

    //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
    //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    //    (int)pc);

    if (shadowSpace->selfVerificationState != kSVSStart) {
        LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
            (int)shadowSpace->endPC);
        LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
        LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
            (int)shadowSpace->endShadowFP);
    }

    // Special case when punting after a single instruction
    if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
        shadowSpace->selfVerificationState = kSVSIdle;
    } else {
        shadowSpace->selfVerificationState = exitState;
    }

    /* Restore state before returning */
    self->interpSave.pc = shadowSpace->startPC;
    self->interpSave.fp = shadowSpace->fp;
    self->curFrame = self->interpSave.fp;
    self->interpSave.method = shadowSpace->method;
    self->interpSave.methodClassDex = shadowSpace->methodClassDex;
    self->retval = shadowSpace->retval;
    self->interpStackEnd = shadowSpace->interpStackEnd;

    return shadowSpace;
}

/* Print contents of virtual registers */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    int i;
    for (i = 0; i < numWords; i++) {
        LOGD("(v%d) 0x%8x%s", i, addr[i], addr[i] != addrRef[i] ? " X" : "");
    }
}

/* Print values maintained in shadowSpace */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
    LOGD("Method: %s", shadowSpace->method->name);
    LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}

/* Print decoded instructions in the current trace */
static void selfVerificationDumpTrace(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int i, addr, offset;
    DecodedInstruction *decInsn;

    LOGD("********** SHADOW TRACE DUMP **********");
    for (i = 0; i < shadowSpace->traceLength; i++) {
        addr = shadowSpace->trace[i].addr;
        offset = (int)((u2*)addr - stackSave->method->insns);
        decInsn = &(shadowSpace->trace[i].decInsn);
        /* The stored decode is only partial; some fields may be garbage */
        LOGD("0x%x: (0x%04x) %s",
             addr, offset, dexGetOpcodeName(decInsn->opcode));
    }
}

/* Code is forced into this spin loop when a divergence is detected */
static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
{
    const u2 *startPC = shadowSpace->startPC;
    JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
    if (desc) {
        dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
        /*
         * This function effectively terminates the VM right here, so not
         * freeing the desc pointer when the enqueuing fails is acceptable.
         */
    }
    gDvmJit.selfVerificationSpin = true;
    while(gDvmJit.selfVerificationSpin) sleep(10);
}
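
/*
 * Note: the loop above spins until something external clears
 * gDvmJit.selfVerificationSpin, presumably a developer clearing the flag
 * from an attached debugger after inspecting the divergence dumps.
 */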

/*
 * If here, we're re-interpreting an instruction that was included
 * in a trace that was just executed. This routine is called for
 * each instruction in the original trace, and compares state
 * when it reaches the end point.
 *
 * TUNING: the interpretation mechanism now supports a counted
 * single-step mechanism. If we were to associate an instruction
 * count with each trace exit, we could just single-step the right
 * number of cycles and then compare. This would improve detection
 * of control divergences, as well as (slightly) simplify this code.
 */
void dvmCheckSelfVerification(const u2* pc, Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    SelfVerificationState state = shadowSpace->selfVerificationState;

    DecodedInstruction decInsn;
    dexDecodeInstruction(pc, &decInsn);

    //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, dexGetOpcodeName(decInsn.opcode));

    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /*
     * Generalize the self verification state to kSVSDebugInterp unless the
     * entry reason is kSVSBackwardBranch or kSVSSingleStep.
     */
    if (state != kSVSBackwardBranch && state != kSVSSingleStep) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /*
     * Check that the current pc is the end of the trace when at least one
     * instruction is interpreted.
     */
    if ((state == kSVSDebugInterp || state == kSVSSingleStep ||
         state == kSVSBackwardBranch) &&
        shadowSpace->traceLength != 0 &&
        pc == shadowSpace->endPC) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            if (state == kSVSBackwardBranch) {
                /* State mismatch on backward branch - try one more iteration */
                shadowSpace->selfVerificationState = kSVSDebugInterp;
                goto log_and_continue;
            }
            LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp,
                                           (int*)shadowSpace->shadowFP,
                                           frameBytes/4);
            LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                                           (int*)shadowSpace->fp,
                                           frameBytes/4);
            selfVerificationSpinLoop(shadowSpace);
        }
        /* Check new frame if it exists (invokes only) */
        if ((uintptr_t)self->curFrame < (uintptr_t)shadowSpace->fp) {
            StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->curFrame - localRegs;
            if (memcmp(((char*)self->curFrame)+localRegs,
                ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
                if (state == kSVSBackwardBranch) {
                    /*
                     * State mismatch on backward branch - try one more
                     * iteration.
                     */
                    shadowSpace->selfVerificationState = kSVSDebugInterp;
                    goto log_and_continue;
                }
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
                    (int)self->curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->curFrame,
                                               (int*)shadowSpace->endShadowFP,
                                               (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                                               (int*)self->curFrame,
                                               (frameBytes2+localRegs)/4);
                selfVerificationSpinLoop(shadowSpace);
            }
        }

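        /*
         * The shadow heap is a log of the word-sized heap stores made
         * while the translation ran: each ShadowHeap entry pairs a target
         * address with the value the JIT'd code wrote there. Replaying
         * the trace in the interpreter must leave identical values in
         * memory, which is what the loop below checks.
         */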
        /* Check memory space */
        bool memDiff = false;
        ShadowHeap* heapSpacePtr;
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int memData = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != memData) {
                if (state == kSVSBackwardBranch) {
                    /*
                     * State mismatch on backward branch - try one more
                     * iteration.
                     */
                    shadowSpace->selfVerificationState = kSVSDebugInterp;
                    goto log_and_continue;
                }
                LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
                LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
                    heapSpacePtr->addr, memData, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                memDiff = true;
            }
        }
        if (memDiff) selfVerificationSpinLoop(shadowSpace);

        /*
         * Success. If this shadowed trace included a single-stepped
         * instruction, we need to stay in the interpreter for one
         * more interpretation before resuming.
         */
        if (state == kSVSSingleStep) {
            assert(self->jitResumeNPC != NULL);
            assert(self->singleStepCount == 0);
            self->singleStepCount = 1;
            dvmUpdateInterpBreak(self, kInterpSingleStep, kSubModeNormal,
                                 true /* enable */);
        }

        /*
         * Switch off shadow replay mode. The next shadowed trace
         * execution will turn it back on.
         */
        dvmUpdateInterpBreak(self, kInterpJitBreak, kSubModeJitSV,
                             false /* disable */);
        self->jitState = kJitDone;
        return;
    }
log_and_continue:
    /* If the end has not been reached, make sure max length not exceeded */
    if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
        selfVerificationSpinLoop(shadowSpace);
        return;
    }
    /* Log the instruction address and decoded instruction for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    shadowSpace->traceLength++;
}
#endif

/*
 * If one of our fixed tables or the translation buffer fills up,
 * call this routine to avoid wasting cycles on future translation requests.
 */
void dvmJitStopTranslationRequests()
{
    /*
     * Note 1: This won't necessarily stop all translation requests, and
     * operates on a delayed mechanism. Running threads look to the copy
     * of this value in their private thread structures and won't see
     * this change until it is refreshed (which happens on interpreter
     * entry).
     * Note 2: This is a one-shot memory leak on this table. Because this is a
     * permanent off switch for Jit profiling, it is a one-time leak of 1K
     * bytes, and no further attempt will be made to re-allocate it. Can't
     * free it because some thread may be holding a reference.
     */
    gDvmJit.pProfTable = NULL;
    dvmJitUpdateThreadStateAll();
}

#if defined(WITH_JIT_TUNING)
/* Convenience function to increment counter from assembly code */
void dvmBumpNoChain(int from)
{
    gDvmJit.noChainExit[from]++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpNormal()
{
    gDvmJit.normalExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
#endif

/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;
    int not_hit;
    int chains;
    int stubs;
    if (gDvmJit.pJitEntryTable) {
        for (i=0, stubs=chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0) {
                hit++;
                if (gDvmJit.pJitEntryTable[i].codeAddress ==
                        dvmCompilerGetInterpretTemplate())
                    stubs++;
            } else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD("JIT: table size is %d, entries used is %d",
             gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed);
        LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
             hit, not_hit + hit, chains, gDvmJit.threshold,
             gDvmJit.blockingMode ? "Blocking" : "Non-blocking");

#if defined(WITH_JIT_TUNING)
        LOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches);

        LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
             gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
             gDvmJit.normalExit, gDvmJit.puntExit);

        LOGD("JIT: ICHits: %d", gDvmICHitCount);

        LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
             "%d switch overflow",
             gDvmJit.noChainExit[kInlineCacheMiss],
             gDvmJit.noChainExit[kCallsiteInterpreted],
             gDvmJit.noChainExit[kSwitchOverflow]);

        LOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, "
             "%d dropped",
             gDvmJit.icPatchInit, gDvmJit.icPatchRejected,
             gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued,
             gDvmJit.icPatchDropped);

        LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
             gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
             gDvmJit.invokeNative, gDvmJit.returnOp);
        LOGD("JIT: Inline: %d mgetter, %d msetter, %d pgetter, %d psetter",
             gDvmJit.invokeMonoGetterInlined, gDvmJit.invokeMonoSetterInlined,
             gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined);
        LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
        LOGD("JIT: Avg unit compilation time: %llu us",
             gDvmJit.numCompilations == 0 ? 0 :
                 gDvmJit.jitTime / gDvmJit.numCompilations);
        LOGD("JIT: Potential GC blocked by compiler: max %llu us / "
             "avg %llu us (%d)",
             gDvmJit.maxCompilerThreadBlockGCTime,
             gDvmJit.numCompilerThreadBlockGC == 0 ?
                 0 : gDvmJit.compilerThreadBlockGCTime /
                     gDvmJit.numCompilerThreadBlockGC,
             gDvmJit.numCompilerThreadBlockGC);
#endif

        LOGD("JIT: %d Translation chains, %d interp stubs",
             gDvmJit.translationChains, stubs);
        if (gDvmJit.profileMode == kTraceProfilingContinuous) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}


/* End current trace now & don't include current instruction */
void dvmJitEndTraceSelect(Thread* self, const u2* dPC)
{
    if (self->jitState == kJitTSelect) {
        self->jitState = kJitTSelectEnd;
    }
    if (self->jitState == kJitTSelectEnd) {
        // Clean up and finish now.
        dvmCheckJit(dPC, self);
    }
}

/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 */
static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked,
                              bool isMethodEntry)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

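    /*
     * JitTable organization, as assumed by the walks below: entries are
     * hashed by dPC, and u.info.chain holds the index of the next entry
     * in the bucket's collision chain. A chain value equal to
     * jitTableSize (chainEndMarker) terminates the chain. Entries for
     * the same dPC may exist in both trace and method-entry flavors,
     * distinguished by u.info.isMethodEntry.
     */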
    /*
     * Walk the bucket chain to find an exact match for our PC and trace/method
     * type
     */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           ((gDvmJit.pJitEntryTable[idx].dPC != dPC) ||
            (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry !=
             isMethodEntry))) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC ||
        gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) {
        /*
         * No match. Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        if (!callerLocked)
            dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case). Otherwise we're going
         * to have to find a free slot and chain it.
         */
        ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                        isMethodEntry) {
                    /* Another thread got there first for this dPC */
                    if (!callerLocked)
                        dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0; /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (android_atomic_release_cas(oldValue.infoWord,
                         newValue.infoWord,
                         &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry;
            /*
             * Initialize codeAddress and allocate the slot. Must
             * happen in this order (once dPC is set, the entry is live).
             */
            android_atomic_release_store((int32_t)dPC,
                (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC);
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        if (!callerLocked)
            dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}

/* Dump a trace description */
void dvmJitDumpTraceDesc(JitTraceDescription *trace)
{
    int i;
    bool done = false;
    const u2* dpc;
    const u2* dpcBase;
    int curFrag = 0;
    LOGD("===========================================");
    LOGD("Trace dump 0x%x, Method %s off 0x%x",(int)trace,
         trace->method->name,trace->trace[curFrag].info.frag.startOffset);
    dpcBase = trace->method->insns;
    while (!done) {
        DecodedInstruction decInsn;
        if (trace->trace[curFrag].isCode) {
            LOGD("Frag[%d]- Insts: %d, start: 0x%x, hint: 0x%x, end: %d",
                 curFrag, trace->trace[curFrag].info.frag.numInsts,
                 trace->trace[curFrag].info.frag.startOffset,
                 trace->trace[curFrag].info.frag.hint,
                 trace->trace[curFrag].info.frag.runEnd);
            dpc = dpcBase + trace->trace[curFrag].info.frag.startOffset;
            for (i=0; i<trace->trace[curFrag].info.frag.numInsts; i++) {
                dexDecodeInstruction(dpc, &decInsn);
                LOGD("  0x%04x - %s 0x%x",(dpc-dpcBase),
                     dexGetOpcodeName(decInsn.opcode),(int)dpc);
                dpc += dexGetWidthFromOpcode(decInsn.opcode);
            }
            if (trace->trace[curFrag].info.frag.runEnd) {
                done = true;
            }
        } else {
            LOGD("Frag[%d]- META info: 0x%08x", curFrag,
                 (int)trace->trace[curFrag].info.meta);
        }
        curFrag++;
    }
    LOGD("-------------------------------------------");
}

/*
 * Append the class ptr of "this" and the current method ptr to the current
 * trace. That is, the trace runs will contain the following components:
 *  + trace run that ends with an invoke (existing entry)
 *  + thisClass descriptor (new)
 *  + thisClass classLoader (new)
 *  + calleeMethod (new)
 */
static void insertClassMethodInfo(Thread* self,
                                  const ClassObject* thisClass,
                                  const Method* calleeMethod,
                                  const DecodedInstruction* insn)
{
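    /*
     * Note: the class is recorded as its descriptor plus classLoader
     * rather than as a raw ClassObject pointer, presumably so that the
     * compiler thread can re-resolve the class when it later consumes
     * this trace description.
     */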
    int currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = thisClass ?
        (void *) thisClass->descriptor : NULL;
    self->trace[currTraceRun].isCode = false;

    currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = thisClass ?
        (void *) thisClass->classLoader : NULL;
    self->trace[currTraceRun].isCode = false;

    currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = (void *) calleeMethod;
    self->trace[currTraceRun].isCode = false;
}

/*
 * Check if the next instruction following the invoke is a move-result and if
 * so add it to the trace. That is, this will add the trace run that includes
 * the move-result to the trace list.
 *
 *  + trace run that ends with an invoke (existing entry)
 *  + thisClass descriptor (existing entry)
 *  + thisClass classLoader (existing entry)
 *  + calleeMethod (existing entry)
 *  + move result (new)
 *
 * lastPC, len, offset are all from the preceding invoke instruction
 */
static void insertMoveResult(const u2 *lastPC, int len, int offset,
                             Thread *self)
{
    DecodedInstruction nextDecInsn;
    const u2 *moveResultPC = lastPC + len;

    dexDecodeInstruction(moveResultPC, &nextDecInsn);
    if ((nextDecInsn.opcode != OP_MOVE_RESULT) &&
        (nextDecInsn.opcode != OP_MOVE_RESULT_WIDE) &&
        (nextDecInsn.opcode != OP_MOVE_RESULT_OBJECT))
        return;

    /* We need to start a new trace run */
    int currTraceRun = ++self->currTraceRun;
    self->currRunHead = moveResultPC;
    self->trace[currTraceRun].info.frag.startOffset = offset + len;
    self->trace[currTraceRun].info.frag.numInsts = 1;
    self->trace[currTraceRun].info.frag.runEnd = false;
    self->trace[currTraceRun].info.frag.hint = kJitHintNone;
    self->trace[currTraceRun].isCode = true;
    self->totalTraceLen++;

    self->currRunLen = dexGetWidthFromInstruction(moveResultPC);
}

/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted. This is the primary trace
 * selection function. NOTE: return instructions are handled a little
 * differently. In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation. If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request. This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected. However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes. This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 */
void dvmCheckJit(const u2* pc, Thread* self)
{
    const ClassObject *thisClass = self->callsiteClass;
    const Method* curMethod = self->methodToCall;
    int flags, len;
    int allDone = false;
    /* Stay in break/single-step mode for the next instruction */
    bool stayOneMoreInst = false;

    /* Prepare to handle last PC and stage the current PC & method */
    const u2 *lastPC = self->lastPC;

    self->lastPC = pc;

    switch (self->jitState) {
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(lastPC, &decInsn);

            /*
             * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
             * to the amount of space it takes to generate the chaining
             * cells.
             */
            if (self->totalTraceLen != 0 &&
                (decInsn.opcode == OP_PACKED_SWITCH ||
                 decInsn.opcode == OP_SPARSE_SWITCH)) {
                self->jitState = kJitTSelectEnd;
                break;
            }

#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s. lpc:0x%x, pc:0x%x",
                 dexGetOpcodeName(decInsn.opcode), (int)lastPC, (int)pc);
#endif
            flags = dexGetFlagsFromOpcode(decInsn.opcode);
            len = dexGetWidthFromInstruction(lastPC);
            offset = lastPC - self->traceMethod->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(self->traceMethod));
            if (lastPC != self->currRunHead + self->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++self->currTraceRun;
                self->currRunLen = 0;
                self->currRunHead = (u2*)lastPC;
                self->trace[currTraceRun].info.frag.startOffset = offset;
                self->trace[currTraceRun].info.frag.numInsts = 0;
                self->trace[currTraceRun].info.frag.runEnd = false;
                self->trace[currTraceRun].info.frag.hint = kJitHintNone;
                self->trace[currTraceRun].isCode = true;
            }
            self->trace[self->currTraceRun].info.frag.numInsts++;
            self->totalTraceLen++;
            self->currRunLen += len;

            /*
             * If the last instruction is an invoke, we will try to sneak in
             * the move-result* (if existent) into a separate trace run.
             */
            {
                int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;

                /* Will probably never hit this with the current trace builder */
                if (self->currTraceRun ==
                    (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
                    self->jitState = kJitTSelectEnd;
                }
            }

            if (!dexIsGoto(flags) &&
                ((flags & (kInstrCanBranch |
                           kInstrCanSwitch |
                           kInstrCanReturn |
                           kInstrInvoke)) != 0)) {
                self->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     dexGetOpcodeName(decInsn.opcode));
#endif

                /*
                 * If the current invoke is a {virtual,interface} one, get the
                 * current class/method pair into the trace as well.
                 * If the next instruction is a variant of move-result, insert
                 * it to the trace too.
                 */
                if (flags & kInstrInvoke) {
                    insertClassMethodInfo(self, thisClass, curMethod,
                                          &decInsn);
                    insertMoveResult(lastPC, len, offset, self);
                }
            }
            /* Break on throw or self-loop */
            if ((decInsn.opcode == OP_THROW) || (lastPC == pc)) {
                self->jitState = kJitTSelectEnd;
            }
            if (self->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                self->jitState = kJitTSelectEnd;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            else {
                /*
                 * Last instruction is a return - stay in the dbg interpreter
                 * for one more instruction if it is a non-void return, since
                 * we don't want to start a trace with move-result as the first
                 * instruction (which is already included in the trace
                 * containing the invoke).
                 */
                if (decInsn.opcode != OP_RETURN_VOID) {
                    stayOneMoreInst = true;
                }
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                /* Empty trace - set to bail to interpreter */
                if (self->totalTraceLen == 0) {
                    dvmJitSetCodeAddr(self->currTraceHead,
                                      dvmCompilerGetInterpretTemplate(),
                                      dvmCompilerGetInterpretTemplateSet(),
                                      false /* Not method entry */, 0);
                    self->jitState = kJitDone;
                    allDone = true;
                    break;
                }

                int lastTraceDesc = self->currTraceRun;

                /* Extend a new empty desc if the last slot is meta info */
                if (!self->trace[lastTraceDesc].isCode) {
                    lastTraceDesc = ++self->currTraceRun;
                    self->trace[lastTraceDesc].info.frag.startOffset = 0;
                    self->trace[lastTraceDesc].info.frag.numInsts = 0;
                    self->trace[lastTraceDesc].info.frag.hint = kJitHintNone;
                    self->trace[lastTraceDesc].isCode = true;
                }

                /* Mark the end of the trace runs */
                self->trace[lastTraceDesc].info.frag.runEnd = true;

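                /*
                 * The descriptor is variable-length: a JitTraceDescription
                 * header followed by one JitTraceRun per run, for runs 0
                 * through currTraceRun inclusive (hence the +1 below).
                 */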
                JitTraceDescription* desc =
                    (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                        sizeof(JitTraceRun) * (self->currTraceRun+1));

                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    self->jitState = kJitDone;
                    allDone = true;
                    break;
                }

                desc->method = self->traceMethod;
                memcpy((char*)&(desc->trace[0]),
                       (char*)&(self->trace[0]),
                       sizeof(JitTraceRun) * (self->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
                dvmJitDumpTraceDesc(desc);
#endif
                if (dvmCompilerWorkEnqueue(
                        self->currTraceHead, kWorkOrderTrace, desc)) {
                    /* Work order successfully enqueued */
                    if (gDvmJit.blockingMode) {
                        dvmCompilerDrainQueue();
                    }
                } else {
                    /*
                     * Make sure the descriptor for the abandoned work order is
                     * freed.
                     */
                    free(desc);
                }
                self->jitState = kJitDone;
                allDone = true;
            }
            break;
        case kJitDone:
            allDone = true;
            break;
        case kJitNot:
            allDone = true;
            break;
        default:
            LOGE("Unexpected JIT state: %d", self->jitState);
            dvmAbort();
            break;
    }

    /*
     * If we're done with trace selection, switch off the control flags.
     */
    if (allDone) {
        dvmUpdateInterpBreak(self, kInterpJitBreak,
                             kSubModeJitTraceBuild, false);
        if (stayOneMoreInst) {
            // Keep going in single-step mode for at least one more inst
            assert(self->jitResumeNPC == NULL);
            self->singleStepCount = MIN(1, self->singleStepCount);
            dvmUpdateInterpBreak(self, kInterpSingleStep, kSubModeNormal,
                                 true /* enable */);
        }
    }
    return;
}

JitEntry *dvmJitFindEntry(const u2* pc, bool isMethodEntry)
{
    int idx = dvmJitHash(pc);

    /* Expect a high hit rate on 1st shot */
    if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
        (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == isMethodEntry))
        return &gDvmJit.pJitEntryTable[idx];
    else {
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
                (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                 isMethodEntry))
                return &gDvmJit.pJitEntryTable[idx];
        }
    }
    return NULL;
}

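
/*
 * A note on profileOffset, inferred from getCodeAddrCommon() below: a
 * translation may begin with a profiling prefix. When continuous trace
 * profiling is active the prefix itself is the intended entry point
 * (offset 0); otherwise the stored profileOffset is added to skip it.
 */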
/*
 * Walk through the JIT profile table and find the corresponding JIT code, in
 * the specified format (i.e., trace vs. method). This routine needs to be
 * fast.
 */
void* getCodeAddrCommon(const u2* dPC, bool methodEntry)
{
    int idx = dvmJitHash(dPC);
    const u2* pc = gDvmJit.pJitEntryTable[idx].dPC;
    if (pc != NULL) {
        bool hideTranslation = dvmJitHideTranslation();
        if (pc == dPC &&
            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) {
            int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ?
                0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
            intptr_t codeAddress =
                (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
#if defined(WITH_JIT_TUNING)
            gDvmJit.addrLookupsFound++;
#endif
            return hideTranslation || !codeAddress ? NULL :
                (void *)(codeAddress + offset);
        } else {
            int chainEndMarker = gDvmJit.jitTableSize;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                        methodEntry) {
                    int offset = (gDvmJit.profileMode >=
                        kTraceProfilingContinuous) ? 0 :
                        gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
                    intptr_t codeAddress =
                        (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
#if defined(WITH_JIT_TUNING)
                    gDvmJit.addrLookupsFound++;
#endif
                    return hideTranslation || !codeAddress ? NULL :
                        (void *)(codeAddress + offset);
                }
            }
        }
    }
#if defined(WITH_JIT_TUNING)
    gDvmJit.addrLookupsNotFound++;
#endif
    return NULL;
}

/*
 * If a translated code address, in trace format, exists for the Dalvik
 * bytecode pointer, return it.
 */
void* dvmJitGetTraceAddr(const u2* dPC)
{
    return getCodeAddrCommon(dPC, false /* method entry */);
}

/*
 * If a translated code address, in whole-method format, exists for the
 * Dalvik bytecode pointer, return it.
 */
void* dvmJitGetMethodAddr(const u2* dPC)
{
    return getCodeAddrCommon(dPC, true /* method entry */);
}

/*
 * Similar to dvmJitGetTraceAddr, but returns NULL if the calling
 * thread has any break flags set (e.g., it is single-stepping).
 */
void* dvmJitGetTraceAddrThread(const u2* dPC, Thread* self)
{
    return (self->interpBreak.ctl.breakFlags != 0) ? NULL :
        getCodeAddrCommon(dPC, false /* method entry */);
}

/*
 * Similar to dvmJitGetMethodAddr, but returns NULL if the calling
 * thread has any break flags set (e.g., it is single-stepping).
 */
void* dvmJitGetMethodAddrThread(const u2* dPC, Thread* self)
{
    return (self->interpBreak.ctl.breakFlags != 0) ? NULL :
        getCodeAddrCommon(dPC, true /* method entry */);
}

/*
 * Register the translated code pointer into the JitTable.
 * NOTE: Once a codeAddress field transitions from initial state to
 * JIT'd code, it must not be altered without first halting all
 * threads. We defer the setting of the profile prefix size until
 * after the new code address is set to ensure that the prefix offset
 * is never applied to the initial interpret-only translation. All
 * translations with non-zero profile prefixes will still be correct
 * if entered as if the profile offset is 0, but the interpret-only
 * template cannot handle a non-zero prefix.
 * NOTE: JitTable must not be in danger of reset while this
 * code is executing. See Issue 4271784 for details.
 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set,
                       bool isMethodEntry, int profilePrefixSize)
{
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    /*
     * Get the JitTable slot for this dPC (or create one if JitTable
     * has been reset between the time the trace was requested and
     * now).
     */
    JitEntry *jitEntry = isMethodEntry ?
        lookupAndAdd(dPC, false /* callerLocked */, isMethodEntry) :
        dvmJitFindEntry(dPC, isMethodEntry);
    assert(jitEntry);
    /* Note: order of update is important */
    do {
        oldValue = jitEntry->u;
        newValue = oldValue;
        newValue.info.isMethodEntry = isMethodEntry;
        newValue.info.instructionSet = set;
        newValue.info.profileOffset = profilePrefixSize;
    } while (android_atomic_release_cas(
             oldValue.infoWord, newValue.infoWord,
             &jitEntry->u.infoWord) != 0);
    jitEntry->codeAddress = nPC;
}
1138
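/*
 * Illustrative sketch (not part of the original source): how the
 * compiler thread might publish a finished trace translation.  The
 * variable names and the instruction-set value are placeholders; a
 * zero profilePrefixSize means the translation carries no profiling
 * prefix and may be entered directly.
 */
#if 0   /* example only */
static void examplePublishTranslation(const u2* traceHeadPC, void* codeStart)
{
    dvmJitSetCodeAddr(traceHeadPC, codeStart, DALVIK_JIT_THUMB2,
                      false /* trace, not whole-method */, 0);
}
#endif
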
/*
 * Determine whether a valid trace-building request is active.  If so,
 * set the proper flags in interpBreak and return.  Trace selection
 * will then begin normally via dvmCheckBefore.
 */
void dvmJitCheckTraceRequest(Thread* self)
{
    int i;
    /*
     * A note on trace "hotness" filtering:
     *
     * Our first level trigger is intentionally loose - we need it to
     * fire easily not just to identify potential traces to compile, but
     * also to allow re-entry into the code cache.
     *
     * The 2nd level filter (done here) exists to be selective about
     * what we actually compile.  It works by requiring the same
     * trace head "key" (defined as filterKey below) to appear twice in
     * a relatively short period of time.  The difficulty is defining the
     * shape of the filterKey.  Unfortunately, there is no "one size fits
     * all" approach.
     *
     * For spiky execution profiles dominated by a smallish
     * number of very hot loops, we would want the second-level filter
     * to be very selective.  A good selective filter is requiring an
     * exact match of the Dalvik PC.  In other words, defining filterKey as:
     *     intptr_t filterKey = (intptr_t)self->interpSave.pc
     *
     * However, for flat execution profiles we do best when aggressively
     * translating.  A heuristically decent proxy for this is to use
     * the value of the method pointer containing the trace as the filterKey.
     * Intuitively, this is saying that once any trace in a method appears
     * hot, immediately translate any other trace from that same method that
     * survives the first-level filter.  Here, filterKey would be defined as:
     *     intptr_t filterKey = (intptr_t)self->interpSave.method
     *
     * The problem is that we can't easily detect whether we're dealing
     * with a spiky or flat profile.  If we go with the "pc" match approach,
     * flat profiles perform poorly.  If we go with the loose "method" match,
     * we end up generating a lot of useless translations.  Probably the
     * best approach in the future will be to retain profile information
     * across runs of each application in order to determine its profile,
     * and then choose once we have enough history.
     *
     * However, for now we've decided to choose a compromise filter scheme
     * that includes elements of both.  The high order bits of the filter
     * key are drawn from the enclosing method, and are combined with a
     * slice of the low-order bits of the Dalvik pc of the trace head.  The
     * looseness of the filter can be adjusted by changing the width of
     * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS).  The wider
     * the slice, the tighter the filter.
     *
     * Note: the fixed shifts in the code below reflect assumed word
     * alignment for method pointers and half-word alignment of the
     * Dalvik pc.
     */
    u4 methodKey = (u4)self->interpSave.method <<
                   (JIT_TRACE_THRESH_FILTER_PC_BITS - 2);
    u4 pcKey = ((u4)self->interpSave.pc >> 1) &
               ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1);
    intptr_t filterKey = (intptr_t)(methodKey | pcKey);
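    /*
     * Worked example (illustrative; assumes JIT_TRACE_THRESH_FILTER_PC_BITS
     * is 4 and 32-bit pointers):
     *     method = 0x002ab0c0  =>  methodKey = 0x002ab0c0 << 2 = 0x00aac300
     *     pc     = 0x002ab156  =>  pcKey = (0x002ab156 >> 1) & 0xf = 0xb
     *     filterKey = 0x00aac300 | 0xb = 0x00aac30b
     * Two requests producing this same key within a short window are
     * required before compilation proceeds.
     */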

    // Shouldn't be here if already building a trace.
    assert((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild)==0);

    /* Check if the JIT request can be handled now */
    if ((gDvmJit.pJitEntryTable != NULL) &&
        ((self->interpBreak.ctl.breakFlags & kInterpSingleStep) == 0)){
        /* Bypass the filter for hot trace requests or during stress mode */
        if (self->jitState == kJitTSelectRequest &&
            gDvmJit.threshold > 6) {
            /* Two-level filtering scheme */
            for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
                if (filterKey == self->threshFilter[i]) {
                    self->threshFilter[i] = 0; // Reset filter entry
                    break;
                }
            }
            if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
                /*
                 * Use random replacement policy - otherwise we could miss a
                 * large loop that contains more traces than the size of our
                 * filter array.
                 */
                i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
                self->threshFilter[i] = filterKey;
                self->jitState = kJitDone;
            }
        }

        /* If the compiler is backlogged, cancel any JIT actions */
        if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
            self->jitState = kJitDone;
        }

        /*
         * Check for additional reasons that might force the trace select
         * request to be dropped
         */
        if (self->jitState == kJitTSelectRequest ||
            self->jitState == kJitTSelectRequestHot) {
            if (dvmJitFindEntry(self->interpSave.pc, false)) {
                /* In progress - nothing to do */
                self->jitState = kJitDone;
            } else {
                JitEntry *slot = lookupAndAdd(self->interpSave.pc,
                                              false /* lock */,
                                              false /* method entry */);
                if (slot == NULL) {
                    /*
                     * Table is full.  This should have been
                     * detected by the compiler thread and the table
                     * resized before we run into it here.  Assume bad things
                     * are afoot and disable profiling.
                     */
                    self->jitState = kJitDone;
                    LOGD("JIT: JitTable full, disabling profiling");
                    dvmJitStopTranslationRequests();
                }
            }
        }

        switch (self->jitState) {
            case kJitTSelectRequest:
            case kJitTSelectRequestHot:
                self->jitState = kJitTSelect;
                self->traceMethod = self->interpSave.method;
                self->currTraceHead = self->interpSave.pc;
                self->currTraceRun = 0;
                self->totalTraceLen = 0;
                self->currRunHead = self->interpSave.pc;
                self->currRunLen = 0;
                self->trace[0].info.frag.startOffset =
                    self->interpSave.pc - self->interpSave.method->insns;
                self->trace[0].info.frag.numInsts = 0;
                self->trace[0].info.frag.runEnd = false;
                self->trace[0].info.frag.hint = kJitHintNone;
                self->trace[0].isCode = true;
                self->lastPC = 0;
                /* Turn on trace selection mode */
                dvmUpdateInterpBreak(self, kInterpJitBreak,
                                     kSubModeJitTraceBuild, true);
#if defined(SHOW_TRACE)
                LOGD("Starting trace for %s at 0x%x",
                     self->interpSave.method->name, (int)self->interpSave.pc);
#endif
                break;
            case kJitDone:
                break;
            default:
                LOGE("Unexpected JIT state: %d", self->jitState);
                dvmAbort();
        }
    } else {
        /* Cannot build trace this time */
        self->jitState = kJitDone;
    }
}

/*
 * Resizes the JitTable.  The requested size must be a power of 2;
 * returns true on failure.  Stops all threads, and thus is a
 * heavyweight operation.  May only be called by the compiler thread.
 */
bool dvmJitResizeJitTable( unsigned int size )
{
    JitEntry *pNewTable;
    JitEntry *pOldTable;
    JitEntry tempEntry;
    u4 newMask;
    unsigned int oldSize;
    unsigned int i;

    assert(gDvmJit.pJitEntryTable != NULL);
    assert(size && !(size & (size - 1)));   /* Is power of 2? */

    LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);

    newMask = size - 1;

    if (size <= gDvmJit.jitTableSize) {
        return true;
    }

    /* Make sure requested size is compatible with chain field width */
    tempEntry.u.info.chain = size;
    if (tempEntry.u.info.chain != size) {
        LOGD("Jit: JitTable request of %d too big", size);
        return true;
    }

    pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
    if (pNewTable == NULL) {
        return true;
    }
    for (i=0; i< size; i++) {
        pNewTable[i].u.info.chain = size;  /* Initialize chain termination */
    }

    /* Stop all other interpreting/jit'ng threads */
    dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE);

    pOldTable = gDvmJit.pJitEntryTable;
    oldSize = gDvmJit.jitTableSize;

    dvmLockMutex(&gDvmJit.tableLock);
    gDvmJit.pJitEntryTable = pNewTable;
    gDvmJit.jitTableSize = size;
    gDvmJit.jitTableMask = size - 1;
    gDvmJit.jitTableEntriesUsed = 0;

    for (i=0; i < oldSize; i++) {
        if (pOldTable[i].dPC) {
            JitEntry *p;
            u2 chain;
            p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/,
                             pOldTable[i].u.info.isMethodEntry);
            p->codeAddress = pOldTable[i].codeAddress;
            /* We need to preserve the new chain field, but copy the rest */
            chain = p->u.info.chain;
            p->u = pOldTable[i].u;
            p->u.info.chain = chain;
        }
    }

    dvmUnlockMutex(&gDvmJit.tableLock);

    free(pOldTable);

    /* Restart the world */
    dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE);

    return false;
}

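/*
 * Illustrative sketch (not part of the original source): a compiler-
 * thread policy that doubles the table when it is roughly 2/3 full.
 * The fullness threshold and helper name are assumptions; doubling
 * preserves the power-of-2 invariant asserted above.
 */
#if 0   /* example only */
static void exampleMaybeGrowJitTable(void)
{
    if (gDvmJit.jitTableEntriesUsed > (gDvmJit.jitTableSize * 2) / 3) {
        if (dvmJitResizeJitTable(gDvmJit.jitTableSize * 2)) {
            LOGE("JIT: JitTable resize to %d failed",
                 gDvmJit.jitTableSize * 2);
        }
    }
}
#endif
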
/*
 * Reset the JitTable to the initial clean state.
 */
void dvmJitResetTable(void)
{
    JitEntry *jitEntry = gDvmJit.pJitEntryTable;
    unsigned int size = gDvmJit.jitTableSize;
    unsigned int i;

    dvmLockMutex(&gDvmJit.tableLock);

    /* Note: if there is a need to preserve any existing counts, do so here. */
    if (gDvmJit.pJitTraceProfCounters) {
        for (i=0; i < JIT_PROF_BLOCK_BUCKETS; i++) {
            if (gDvmJit.pJitTraceProfCounters->buckets[i])
                memset((void *) gDvmJit.pJitTraceProfCounters->buckets[i],
                       0, sizeof(JitTraceCounter_t) * JIT_PROF_BLOCK_ENTRIES);
        }
        gDvmJit.pJitTraceProfCounters->next = 0;
    }

    memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
    for (i=0; i< size; i++) {
        jitEntry[i].u.info.chain = size;  /* Initialize chain termination */
    }
    gDvmJit.jitTableEntriesUsed = 0;
    dvmUnlockMutex(&gDvmJit.tableLock);
}

/*
 * Return the address of the next trace profile counter.  This address
 * will be embedded in the generated code for the trace, and thus cannot
 * change while the trace exists.
 */
JitTraceCounter_t *dvmJitNextTraceCounter()
{
    int idx = gDvmJit.pJitTraceProfCounters->next / JIT_PROF_BLOCK_ENTRIES;
    int elem = gDvmJit.pJitTraceProfCounters->next % JIT_PROF_BLOCK_ENTRIES;
    JitTraceCounter_t *res;
    /* Lazily allocate blocks of counters */
    if (!gDvmJit.pJitTraceProfCounters->buckets[idx]) {
        JitTraceCounter_t *p =
              (JitTraceCounter_t*) calloc(JIT_PROF_BLOCK_ENTRIES, sizeof(*p));
        if (!p) {
            LOGE("Failed to allocate block of trace profile counters");
            dvmAbort();
        }
        gDvmJit.pJitTraceProfCounters->buckets[idx] = p;
    }
    res = &gDvmJit.pJitTraceProfCounters->buckets[idx][elem];
    gDvmJit.pJitTraceProfCounters->next++;
    return res;
}

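/*
 * Worked example (illustrative; supposing JIT_PROF_BLOCK_ENTRIES were
 * 512): with next == 1030, idx = 1030 / 512 = 2 and elem = 1030 % 512
 * = 6, so the counter lives at buckets[2][6].  Bucket 2 is calloc'd on
 * first touch and never moves afterwards, which is what allows its
 * address to be burned into generated code.
 */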
/*
 * Float/double-to-long conversion requires clamping to the min and max
 * of the integer form.  If the target doesn't do this natively, use
 * these helpers.
 */
s8 dvmJitd2l(double d)
{
    static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
    static const double kMinLong = (double)(s8)0x8000000000000000ULL;
    if (d >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (d <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (d != d)  // NaN case
        return 0;
    else
        return (s8)d;
}

s8 dvmJitf2l(float f)
{
    static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
    static const float kMinLong = (float)(s8)0x8000000000000000ULL;
    if (f >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (f <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (f != f)  // NaN case
        return 0;
    else
        return (s8)f;
}

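/*
 * Illustrative checks (not part of the original source) showing the
 * clamping rules above; the helper name and use of <assert.h> are
 * assumptions for the sketch.
 */
#if 0   /* example only */
#include <assert.h>
static void exampleClampChecks(void)
{
    assert(dvmJitd2l(1e300) == (s8)0x7fffffffffffffffULL);   /* clamp high */
    assert(dvmJitd2l(-1e300) == (s8)0x8000000000000000ULL);  /* clamp low */
    assert(dvmJitf2l(0.0f / 0.0f) == 0);                     /* NaN -> 0 */
    assert(dvmJitd2l(-1.5) == -1);                           /* truncation */
}
#endif
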
/* Should only be called by the compiler thread */
void dvmJitChangeProfileMode(TraceProfilingModes newState)
{
    if (gDvmJit.profileMode != newState) {
        gDvmJit.profileMode = newState;
        dvmJitUnchainAll();
    }
}

void dvmJitTraceProfilingOn()
{
    if (gDvmJit.profileMode == kTraceProfilingPeriodicOff)
        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
                                    (void*) kTraceProfilingPeriodicOn);
    else if (gDvmJit.profileMode == kTraceProfilingDisabled)
        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
                                    (void*) kTraceProfilingContinuous);
}

void dvmJitTraceProfilingOff()
{
    if (gDvmJit.profileMode == kTraceProfilingPeriodicOn)
        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
                                    (void*) kTraceProfilingPeriodicOff);
    else if (gDvmJit.profileMode == kTraceProfilingContinuous)
        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
                                    (void*) kTraceProfilingDisabled);
}

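/*
 * Illustrative sketch (not part of the original source): periodic
 * trace profiling could be driven by a short sampling pulse.  The
 * 20ms window is an invented policy; the on/off entry points above do
 * the real work by enqueueing mode changes on the compiler thread.
 */
#if 0   /* example only */
static void exampleProfilingPulse(void)
{
    dvmJitTraceProfilingOn();    /* begin counting trace entries */
    usleep(20 * 1000);           /* sample for ~20ms */
    dvmJitTraceProfilingOff();   /* revert to unprofiled translations */
}
#endif
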
/*
 * Update JIT-specific info in the Thread structure for a single thread
 */
void dvmJitUpdateThreadStateSingle(Thread* thread)
{
    thread->pJitProfTable = gDvmJit.pProfTable;
    thread->jitThreshold = gDvmJit.threshold;
}

/*
 * Walk through the thread list and refresh all local copies of
 * JIT global state (which was placed there for fast access).
 */
void dvmJitUpdateThreadStateAll()
{
    Thread* self = dvmThreadSelf();
    Thread* thread;

    dvmLockThreadList(self);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        dvmJitUpdateThreadStateSingle(thread);
    }
    dvmUnlockThreadList();
}

#endif /* WITH_JIT */