/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifdef WITH_JIT

/*
 * Target independent portion of Android's Jit
 */

#include "Dalvik.h"
#include "Jit.h"


#include "dexdump/OpCodeNames.h"
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>
#include <signal.h>
#include "compiler/Compiler.h"
#include "compiler/CompilerUtility.h"
#include "compiler/CompilerIR.h"
#include <errno.h>

#if defined(WITH_SELF_VERIFICATION)
/* Allocate space for per-thread ShadowSpace data structures */
void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
{
    self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
    if (self->shadowSpace == NULL)
        return NULL;

    self->shadowSpace->registerSpaceSize = REG_SPACE;
    self->shadowSpace->registerSpace =
        (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));

    return self->shadowSpace->registerSpace;
}

/* Free per-thread ShadowSpace data structures */
void dvmSelfVerificationShadowSpaceFree(Thread* self)
{
    free(self->shadowSpace->registerSpace);
    free(self->shadowSpace);
}

/*
 * Save out PC, FP, InterpState, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 */
void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
                                   InterpState* interpState, int targetTrace)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
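    /*
     * preBytes covers the outgoing-argument area and StackSaveArea just below
     * the frame pointer; postBytes covers the method's register frame starting
     * at the frame pointer.  Both regions are copied into the shadow space
     * below.
     */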
    int preBytes = interpState->method->outsSize*4 + sizeof(StackSaveArea);
    int postBytes = interpState->method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    // Dynamically grow shadow register space if necessary
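    // The old register space contents can be discarded when growing since the
    // live frame is re-copied into the shadow space below.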
    while (preBytes + postBytes > shadowSpace->registerSpaceSize) {
        shadowSpace->registerSpaceSize *= 2;
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(int));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->glue = interpState;
    /*
     * Store the original method here in case the trace ends with a
     * return/invoke; the method in effect at the end of the trace may
     * then differ from the one the trace started in.
     */
    shadowSpace->method = interpState->method;
    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;

    // Create a copy of the InterpState
    //shadowSpace->interpState = *interpState;
    memcpy(&(shadowSpace->interpState), interpState, sizeof(InterpState));
    shadowSpace->interpState.fp = shadowSpace->shadowFP;
    shadowSpace->interpState.interpStackEnd = (u1*)shadowSpace->registerSpace;

    // Create a copy of the stack
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;
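    // heapSpace..heapSpaceTail acts as the log of shadowed heap accesses; it
    // is checked against actual memory in selfVerificationDebugInterp().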

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}

/*
 * Save ending PC, FP and compiled code exit point to shadow space.
 * Return a pointer to the shadow space for JIT to restore state.
 */
void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
                                      SelfVerificationState exitPoint)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
    shadowSpace->endPC = pc;
    shadowSpace->endShadowFP = fp;

    //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
    //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    //    (int)pc);

    if (shadowSpace->selfVerificationState != kSVSStart) {
        LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
            (int)shadowSpace->endPC);
        LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
        LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
            (int)shadowSpace->endShadowFP);
    }

    // Special case when punting after a single instruction
    if (exitPoint == kSVSPunt && pc == shadowSpace->startPC) {
        shadowSpace->selfVerificationState = kSVSIdle;
    } else {
        shadowSpace->selfVerificationState = exitPoint;
    }

    return shadowSpace;
}

/* Print contents of virtual registers */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    int i;
    for (i = 0; i < numWords; i++) {
        LOGD("(v%d) 0x%8x%s", i, addr[i], addr[i] != addrRef[i] ? " X" : "");
    }
}

/* Print values maintained in shadowSpace */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    if (self->curFrame < shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
    LOGD("Method: %s", shadowSpace->method->name);
    LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}

/* Print decoded instructions in the current trace */
static void selfVerificationDumpTrace(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int i, addr, offset;
    DecodedInstruction *decInsn;

    LOGD("********** SHADOW TRACE DUMP **********");
    for (i = 0; i < shadowSpace->traceLength; i++) {
        addr = shadowSpace->trace[i].addr;
        offset = (int)((u2*)addr - stackSave->method->insns);
        decInsn = &(shadowSpace->trace[i].decInsn);
        /* The instruction is not fully decoded here, so some register
         * values may be garbage */
        LOGD("0x%x: (0x%04x) %s", addr, offset, getOpcodeName(decInsn->opCode));
    }
}

/* Code is forced into this spin loop when a divergence is detected */
static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
{
    const u2 *startPC = shadowSpace->startPC;
    JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
    if (desc) {
        dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
        /*
         * This function effectively terminates the VM right here, so not
         * freeing the desc pointer when the enqueuing fails is acceptable.
         */
    }
    gDvmJit.selfVerificationSpin = true;
    while(gDvmJit.selfVerificationSpin) sleep(10);
}

/* Manage self verification while in the debug interpreter */
static bool selfVerificationDebugInterp(const u2* pc, Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    SelfVerificationState state = shadowSpace->selfVerificationState;

    DecodedInstruction decInsn;
    dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);

    //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, getOpcodeName(decInsn.opCode));

    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /* Skip endPC once when trace has a backward branch */
    if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
        state != kSVSBackwardBranch) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /* Check that the current pc is the end of the trace */
    if ((state == kSVSSingleStep || state == kSVSDebugInterp) &&
        pc == shadowSpace->endPC) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp,
                                           (int*)shadowSpace->shadowFP,
                                           frameBytes/4);
            LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                                           (int*)shadowSpace->fp,
                                           frameBytes/4);
            selfVerificationSpinLoop(shadowSpace);
        }
        /* Check new frame if it exists (invokes only) */
        if (self->curFrame < shadowSpace->fp) {
            StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->curFrame - localRegs;
            if (memcmp(((char*)self->curFrame)+localRegs,
                ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
                    (int)self->curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->curFrame,
                                               (int*)shadowSpace->endShadowFP,
                                               (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                                               (int*)self->curFrame,
                                               (frameBytes2+localRegs)/4);
                selfVerificationSpinLoop(shadowSpace);
            }
        }

        /* Check memory space */
        bool memDiff = false;
        ShadowHeap* heapSpacePtr;
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int memData = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != memData) {
                LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
                LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
                    heapSpacePtr->addr, memData, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                memDiff = true;
            }
        }
        if (memDiff) selfVerificationSpinLoop(shadowSpace);
        return true;

    /* If the end has not been reached, make sure max length is not exceeded */
    } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
        selfVerificationSpinLoop(shadowSpace);

        return true;
    }
    /* Log the instruction address and decoded instruction for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    shadowSpace->traceLength++;

    return false;
}
#endif

/*
 * If one of our fixed tables or the translation buffer fills up,
 * call this routine to avoid wasting cycles on future translation requests.
 */
void dvmJitStopTranslationRequests()
{
    /*
     * Note 1: This won't necessarily stop all translation requests, and
     * operates on a delayed mechanism.  Running threads look to the copy
     * of this value in their private InterpState structures and won't see
     * this change until it is refreshed (which happens on interpreter
     * entry).
     * Note 2: This is a one-shot memory leak on this table. Because this is a
     * permanent off switch for Jit profiling, it is a one-time leak of 1K
     * bytes, and no further attempt will be made to re-allocate it. Can't
     * free it because some thread may be holding a reference.
     */
    gDvmJit.pProfTable = NULL;
}

#if defined(JIT_STATS)
/* Convenience function to increment counter from assembly code */
void dvmBumpNoChain(int from)
{
    gDvmJit.noChainExit[from]++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpNormal()
{
    gDvmJit.normalExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
#endif

/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;
    int not_hit;
    int chains;
    int stubs;
    if (gDvmJit.pJitEntryTable) {
        for (i=0, stubs=chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0) {
                hit++;
                if (gDvmJit.pJitEntryTable[i].codeAddress ==
                      gDvmJit.interpretTemplate)
                    stubs++;
            } else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD("size is %d, entries used is %d",
             gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed);
        LOGD(
            "JIT: %d traces, %d slots, %d chains, %d thresh, %s",
            hit, not_hit + hit, chains, gDvmJit.threshold,
            gDvmJit.blockingMode ? "Blocking" : "Non-blocking");

#if defined(JIT_STATS)
        LOGD(
            "JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
            gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
            gDvmJit.normalExit, gDvmJit.puntExit);
        LOGD(
            "JIT: noChainExit: %d IC miss, %d interp callsite, %d switch overflow",
            gDvmJit.noChainExit[kInlineCacheMiss],
            gDvmJit.noChainExit[kCallsiteInterpreted],
            gDvmJit.noChainExit[kSwitchOverflow]);

        LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
             gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
             gDvmJit.invokeNative, gDvmJit.returnOp);
        LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
        LOGD("JIT: Avg unit compilation time: %llu us",
             gDvmJit.jitTime / gDvmJit.numCompilations);
#endif

        LOGD("JIT: %d Translation chains, %d interp stubs",
             gDvmJit.translationChains, stubs);
        if (gDvmJit.profile) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}

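/*
 * Atomically update the traceConstruction bit in a JitEntry's info word.
 * A compare-and-swap loop is used because other fields packed into the same
 * word may be updated concurrently by other threads.
 */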
void setTraceConstruction(JitEntry *slot, bool value)
{

    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    do {
        oldValue = slot->u;
        newValue = oldValue;
        newValue.info.traceConstruction = value;
    } while (!ATOMIC_CMP_SWAP( &slot->u.infoWord,
             oldValue.infoWord, newValue.infoWord));
}

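/*
 * Point a trace head back at the interpret-only template and clear its
 * "trace under construction" flag.
 */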
void resetTracehead(InterpState* interpState, JitEntry *slot)
{
    slot->codeAddress = gDvmJit.interpretTemplate;
    setTraceConstruction(slot, false);
}

/* Clean up any pending trace builds */
void dvmJitAbortTraceSelect(InterpState* interpState)
{
    if (interpState->jitState == kJitTSelect)
        interpState->jitState = kJitTSelectAbort;
}

#if defined(WITH_SELF_VERIFICATION)
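/*
 * Instructions with side effects that cannot safely be executed twice under
 * self-verification (monitor operations, allocations, check-cast,
 * move-exception, and invokes) must not be included in a trace.
 */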
static bool selfVerificationPuntOps(DecodedInstruction *decInsn)
{
    OpCode op = decInsn->opCode;
    int flags = dexGetInstrFlags(gDvm.instrFlags, op);
    return (op == OP_MONITOR_ENTER || op == OP_MONITOR_EXIT ||
            op == OP_NEW_INSTANCE || op == OP_NEW_ARRAY ||
            op == OP_CHECK_CAST || op == OP_MOVE_EXCEPTION ||
            (flags & kInstrInvoke));
}
#endif

/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 */
static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /* Walk the bucket chain to find an exact match for our PC */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           (gDvmJit.pJitEntryTable[idx].dPC != dPC)) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
        /*
         * No match.  Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        if (!callerLocked)
            dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case).  Otherwise we're going
         * to have to find a free slot and chain it.
         */
        MEM_BARRIER(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
                    /* Another thread got there first for this dPC */
                    if (!callerLocked)
                        dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0;  /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (!ATOMIC_CMP_SWAP(
                         &gDvmJit.pJitEntryTable[prev].u.infoWord,
                         oldValue.infoWord, newValue.infoWord));
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            /*
             * Initialize codeAddress and allocate the slot.  Must
             * happen in this order (since dPC is set, the entry is live).
             */
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        if (!callerLocked)
            dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}
573/*
Ben Chengba4fc8b2009-06-01 13:00:29 -0700574 * Adds to the current trace request one instruction at a time, just
575 * before that instruction is interpreted. This is the primary trace
576 * selection function. NOTE: return instruction are handled a little
577 * differently. In general, instructions are "proposed" to be added
578 * to the current trace prior to interpretation. If the interpreter
579 * then successfully completes the instruction, is will be considered
580 * part of the request. This allows us to examine machine state prior
581 * to interpretation, and also abort the trace request if the instruction
582 * throws or does something unexpected. However, return instructions
583 * will cause an immediate end to the translation request - which will
584 * be passed to the compiler before the return completes. This is done
585 * in response to special handling of returns by the interpreter (and
586 * because returns cannot throw in a way that causes problems for the
587 * translated code.
588 */
int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
{
    int flags,i,len;
    int switchInterp = false;
    int debugOrProfile = (gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
                          || gDvm.activeProfilers
#endif
                         );

    /* Prepare to handle last PC and stage the current PC */
    const u2 *lastPC = interpState->lastPC;
    interpState->lastPC = pc;

#if defined(WITH_SELF_VERIFICATION)
    /*
     * We can't allow some instructions to be executed twice, and so they
     * must not appear in any translations.  End the trace before they
     * are included.
     */
    if (lastPC && interpState->jitState == kJitTSelect) {
        DecodedInstruction decInsn;
        dexDecodeInstruction(gDvm.instrFormat, lastPC, &decInsn);
        if (selfVerificationPuntOps(&decInsn)) {
            interpState->jitState = kJitTSelectEnd;
        }
    }
#endif

    switch (interpState->jitState) {
        char* nopStr;
        int target;
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(gDvm.instrFormat, lastPC, &decInsn);

            /*
             * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
             * to the amount of space it takes to generate the chaining
             * cells.
             */
            if (interpState->totalTraceLen != 0 &&
                (decInsn.opCode == OP_PACKED_SWITCH ||
                 decInsn.opCode == OP_SPARSE_SWITCH)) {
                interpState->jitState = kJitTSelectEnd;
                break;
            }


#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s",getOpcodeName(decInsn.opCode));
#endif
            flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
            len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, lastPC);
            offset = lastPC - interpState->method->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(interpState->method));
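            /*
             * A new trace run starts whenever the last instruction is not
             * contiguous with the current run, i.e. control arrived here via
             * a taken branch rather than by falling through.
             */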
            if (lastPC != interpState->currRunHead + interpState->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++interpState->currTraceRun;
                interpState->currRunLen = 0;
                interpState->currRunHead = (u2*)lastPC;
                interpState->trace[currTraceRun].frag.startOffset = offset;
                interpState->trace[currTraceRun].frag.numInsts = 0;
                interpState->trace[currTraceRun].frag.runEnd = false;
                interpState->trace[currTraceRun].frag.hint = kJitHintNone;
            }
            interpState->trace[interpState->currTraceRun].frag.numInsts++;
            interpState->totalTraceLen++;
            interpState->currRunLen += len;

            /* Will probably never hit this with the current trace builder */
            if (interpState->currTraceRun == (MAX_JIT_RUN_LEN - 1)) {
                interpState->jitState = kJitTSelectEnd;
            }

            if ( ((flags & kInstrUnconditional) == 0) &&
                 /* don't end trace on INVOKE_DIRECT_EMPTY  */
                 (decInsn.opCode != OP_INVOKE_DIRECT_EMPTY) &&
                 ((flags & (kInstrCanBranch |
                            kInstrCanSwitch |
                            kInstrCanReturn |
                            kInstrInvoke)) != 0)) {
                interpState->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     getOpcodeName(decInsn.opCode));
#endif
            }
            /* Break on throw or self-loop */
            if ((decInsn.opCode == OP_THROW) || (lastPC == pc)){
                interpState->jitState = kJitTSelectEnd;
            }
            if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                interpState->jitState = kJitTSelectEnd;
            }
            if (debugOrProfile) {
                interpState->jitState = kJitTSelectAbort;
                switchInterp = !debugOrProfile;
                break;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                if (interpState->totalTraceLen == 0) {
                    /* Bad trace - mark as untranslatable */
                    dvmJitAbortTraceSelect(interpState);
                    switchInterp = !debugOrProfile;
                    break;
                }
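                /*
                 * Package the method pointer and the accumulated trace runs
                 * into a JitTraceDescription that is handed to the compiler
                 * thread as a work order.
                 */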
                JitTraceDescription* desc =
                   (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                     sizeof(JitTraceRun) * (interpState->currTraceRun+1));
                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    interpState->jitState = kJitTSelectAbort;
                    dvmJitAbortTraceSelect(interpState);
                    switchInterp = !debugOrProfile;
                    break;
                }
                interpState->trace[interpState->currTraceRun].frag.runEnd =
                     true;
                interpState->jitState = kJitNormal;
                desc->method = interpState->method;
                memcpy((char*)&(desc->trace[0]),
                    (char*)&(interpState->trace[0]),
                    sizeof(JitTraceRun) * (interpState->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
#endif
                if (dvmCompilerWorkEnqueue(
                       interpState->currTraceHead,kWorkOrderTrace,desc)) {
                    /* Work order successfully enqueued */
                    if (gDvmJit.blockingMode) {
                        dvmCompilerDrainQueue();
                    }
                } else {
                    /*
                     * Make sure the descriptor for the abandoned work order is
                     * freed.
                     */
                    free(desc);
                }
                /*
                 * Reset "trace in progress" flag whether or not we
                 * successfully entered a work order.
                 */
                JitEntry *jitEntry =
                    lookupAndAdd(interpState->currTraceHead, false);
                if (jitEntry) {
                    setTraceConstruction(jitEntry, false);
                }
                switchInterp = !debugOrProfile;
            }
            break;
        case kJitSingleStep:
            interpState->jitState = kJitSingleStepEnd;
            break;
        case kJitSingleStepEnd:
            interpState->entryPoint = kInterpEntryResume;
            switchInterp = !debugOrProfile;
            break;
        case kJitTSelectRequest:
        case kJitTSelectRequestHot:
        case kJitTSelectAbort:
#if defined(SHOW_TRACE)
            LOGD("TraceGen: trace abort");
#endif
            dvmJitAbortTraceSelect(interpState);
            interpState->jitState = kJitNormal;
            switchInterp = !debugOrProfile;
            break;
        case kJitNormal:
            switchInterp = !debugOrProfile;
            break;
#if defined(WITH_SELF_VERIFICATION)
        case kJitSelfVerification:
            if (selfVerificationDebugInterp(pc, self)) {
                interpState->jitState = kJitNormal;
                switchInterp = !debugOrProfile;
            }
            break;
#endif
        /* If JIT is off stay out of interpreter selections */
        case kJitOff:
            break;
        default:
            if (!debugOrProfile) {
                LOGE("Unexpected JIT state: %d", interpState->jitState);
                dvmAbort();
            }
            break;
    }
    return switchInterp;
}

JitEntry *dvmFindJitEntry(const u2* pc)
{
    int idx = dvmJitHash(pc);

    /* Expect a high hit rate on 1st shot */
    if (gDvmJit.pJitEntryTable[idx].dPC == pc)
        return &gDvmJit.pJitEntryTable[idx];
    else {
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            if (gDvmJit.pJitEntryTable[idx].dPC == pc)
                return &gDvmJit.pJitEntryTable[idx];
        }
    }
    return NULL;
}

/*
 * If a translated code address exists for the Dalvik byte code
 * pointer return it.  This routine needs to be fast.
 */
void* dvmJitGetCodeAddr(const u2* dPC)
{
    int idx = dvmJitHash(dPC);
    const u2* npc = gDvmJit.pJitEntryTable[idx].dPC;
    if (npc != NULL) {
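        /*
         * Hide existing translations (return NULL) while threads are being
         * suspended, when the code cache is full, or when the profiling
         * table has been cleared to shut the JIT off.
         */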
        bool hideTranslation = (gDvm.sumThreadSuspendCount != 0) ||
                               (gDvmJit.codeCacheFull == true) ||
                               (gDvmJit.pProfTable == NULL);

        if (npc == dPC) {
#if defined(JIT_STATS)
            gDvmJit.addrLookupsFound++;
#endif
            return hideTranslation ?
                NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
        } else {
            int chainEndMarker = gDvmJit.jitTableSize;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(JIT_STATS)
                    gDvmJit.addrLookupsFound++;
#endif
                    return hideTranslation ?
                        NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
                }
            }
        }
    }
#if defined(JIT_STATS)
    gDvmJit.addrLookupsNotFound++;
#endif
    return NULL;
}

/*
 * Register the translated code pointer into the JitTable.
 * NOTE: Once a codeAddress field transitions from initial state to
 * JIT'd code, it must not be altered without first halting all
 * threads.  This routine should only be called by the compiler
 * thread.
 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    JitEntry *jitEntry = lookupAndAdd(dPC, false);
    assert(jitEntry);
    /* Note: order of update is important */
    do {
        oldValue = jitEntry->u;
        newValue = oldValue;
        newValue.info.instructionSet = set;
    } while (!ATOMIC_CMP_SWAP(
             &jitEntry->u.infoWord,
             oldValue.infoWord, newValue.infoWord));
    jitEntry->codeAddress = nPC;
}

/*
 * Determine if a valid trace-building request is active.  Return true
 * if we need to abort and switch back to the fast interpreter, false
 * otherwise.  NOTE: may be called even when trace selection is not being
 * requested.
 */
bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
{
    bool res = false;   /* Assume success */
    int i;
    intptr_t filterKey = ((intptr_t) interpState->pc) >>
                         JIT_TRACE_THRESH_FILTER_GRAN_LOG2;

    /*
     * If a previous trace-building attempt failed, force its head to be
     * interpret-only.
     */
    if (gDvmJit.pJitEntryTable != NULL) {
        /* Bypass the filter for hot trace requests */
        if (interpState->jitState != kJitTSelectRequestHot) {
            /* Two-level filtering scheme */
            for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
                if (filterKey == interpState->threshFilter[i]) {
                    break;
                }
            }
            if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
                /*
                 * Use random replacement policy - otherwise we could miss a
                 * large loop that contains more traces than the size of our
                 * filter array.
                 */
                i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
                interpState->threshFilter[i] = filterKey;
                res = true;
            }

            /* If stress mode (threshold <= 6), always translate */
            res &= (gDvmJit.threshold > 6);
        }

        /*
         * If the compiler is backlogged, or if a debugger or profiler is
         * active, cancel any JIT actions
         */
        if (res || (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater)
            || gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
            || gDvm.activeProfilers
#endif
           ) {
            if (interpState->jitState != kJitOff) {
                interpState->jitState = kJitNormal;
            }
        } else if (interpState->jitState == kJitTSelectRequest ||
                   interpState->jitState == kJitTSelectRequestHot) {
            JitEntry *slot = lookupAndAdd(interpState->pc, false);
            if (slot == NULL) {
                /*
                 * Table is full.  This should have been
                 * detected by the compiler thread and the table
                 * resized before we run into it here.  Assume bad things
                 * are afoot and disable profiling.
                 */
                interpState->jitState = kJitTSelectAbort;
                LOGD("JIT: JitTable full, disabling profiling");
                dvmJitStopTranslationRequests();
            } else if (slot->u.info.traceConstruction) {
                /*
                 * Trace request already in progress, but most likely it
                 * aborted without cleaning up.  Assume the worst and
                 * mark trace head as untranslatable.  If we're wrong,
                 * the compiler thread will correct the entry when the
                 * translation is completed.  The downside here is that
                 * some existing translation may chain to the interpret-only
                 * template instead of the real translation during this
                 * window.  Performance, but not correctness, issue.
                 */
                interpState->jitState = kJitTSelectAbort;
                resetTracehead(interpState, slot);
            } else if (slot->codeAddress) {
                /* Nothing to do here - just return */
                interpState->jitState = kJitTSelectAbort;
            } else {
                /*
                 * Mark request.  Note, we are not guaranteed exclusivity
                 * here.  A window exists for another thread to be
                 * attempting to build this same trace.  Rather than
                 * bear the cost of locking, we'll just allow that to
                 * happen.  The compiler thread, if it chooses, can
                 * discard redundant requests.
                 */
                setTraceConstruction(slot, true);
            }
        }
        switch (interpState->jitState) {
            case kJitTSelectRequest:
            case kJitTSelectRequestHot:
                interpState->jitState = kJitTSelect;
                interpState->currTraceHead = interpState->pc;
                interpState->currTraceRun = 0;
                interpState->totalTraceLen = 0;
                interpState->currRunHead = interpState->pc;
                interpState->currRunLen = 0;
                interpState->trace[0].frag.startOffset =
                     interpState->pc - interpState->method->insns;
                interpState->trace[0].frag.numInsts = 0;
                interpState->trace[0].frag.runEnd = false;
                interpState->trace[0].frag.hint = kJitHintNone;
                interpState->lastPC = 0;
                break;
            case kJitTSelect:
            case kJitTSelectAbort:
                res = true;
            case kJitSingleStep:
            case kJitSingleStepEnd:
            case kJitOff:
            case kJitNormal:
#if defined(WITH_SELF_VERIFICATION)
            case kJitSelfVerification:
#endif
                break;
            default:
                LOGE("Unexpected JIT state: %d", interpState->jitState);
                dvmAbort();
        }
    }
    return res;
}

/*
 * Resizes the JitTable.  The requested size must be a power of 2; returns
 * true on failure.  Stops all threads, and thus is a heavyweight operation.
 * May only be called by the compiler thread.
 */
bool dvmJitResizeJitTable( unsigned int size )
{
    JitEntry *pNewTable;
    JitEntry *pOldTable;
    JitEntry tempEntry;
    u4 newMask;
    unsigned int oldSize;
    unsigned int i;

    assert(gDvmJit.pJitEntryTable != NULL);
    assert(size && !(size & (size - 1)));   /* Is power of 2? */

    LOGD("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);

    newMask = size - 1;

    if (size <= gDvmJit.jitTableSize) {
        return true;
    }

    /* Make sure requested size is compatible with chain field width */
    tempEntry.u.info.chain = size;
    if (tempEntry.u.info.chain != size) {
        LOGD("Jit: JitTable request of %d too big", size);
        return true;
    }

    pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
    if (pNewTable == NULL) {
        return true;
    }
    for (i=0; i< size; i++) {
        pNewTable[i].u.info.chain = size;  /* Initialize chain termination */
    }

    /* Stop all other interpreting/jit'ng threads */
    dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE);

    pOldTable = gDvmJit.pJitEntryTable;
    oldSize = gDvmJit.jitTableSize;

    dvmLockMutex(&gDvmJit.tableLock);
    gDvmJit.pJitEntryTable = pNewTable;
    gDvmJit.jitTableSize = size;
    gDvmJit.jitTableMask = size - 1;
    gDvmJit.jitTableEntriesUsed = 0;

    for (i=0; i < oldSize; i++) {
        if (pOldTable[i].dPC) {
            JitEntry *p;
            u2 chain;
            p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/ );
            p->codeAddress = pOldTable[i].codeAddress;
            /* We need to preserve the new chain field, but copy the rest */
            chain = p->u.info.chain;
            p->u = pOldTable[i].u;
            p->u.info.chain = chain;
        }
    }
    dvmUnlockMutex(&gDvmJit.tableLock);

    free(pOldTable);

    /* Restart the world */
    dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE);

    return false;
}

/*
 * Reset the JitTable to the initial clean state.
 */
void dvmJitResetTable(void)
{
    JitEntry *jitEntry = gDvmJit.pJitEntryTable;
    unsigned int size = gDvmJit.jitTableSize;
    unsigned int i;

    dvmLockMutex(&gDvmJit.tableLock);
    memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
    for (i=0; i< size; i++) {
        jitEntry[i].u.info.chain = size;  /* Initialize chain termination */
    }
    gDvmJit.jitTableEntriesUsed = 0;
    dvmUnlockMutex(&gDvmJit.tableLock);
}

/*
 * Float/double conversion requires clamping to min and max of integer form. If
 * target doesn't support this normally, use these.
 */
s8 dvmJitd2l(double d)
{
    static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
    static const double kMinLong = (double)(s8)0x8000000000000000ULL;
    if (d >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (d <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (d != d) // NaN case
        return 0;
    else
        return (s8)d;
}

s8 dvmJitf2l(float f)
{
    static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
    static const float kMinLong = (float)(s8)0x8000000000000000ULL;
    if (f >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (f <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (f != f) // NaN case
        return 0;
    else
        return (s8)f;
}

#endif /* WITH_JIT */