blob: 3dca45bc9bc1d430fc50c6d2fcc27b43f45e4f1a [file] [log] [blame]
Ben Chengba4fc8b2009-06-01 13:00:29 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#ifdef WITH_JIT
17
18/*
19 * Target independent portion of Android's Jit
20 */
21
22#include "Dalvik.h"
23#include "Jit.h"
24
25
26#include "dexdump/OpCodeNames.h"
27#include <unistd.h>
28#include <pthread.h>
29#include <sys/time.h>
30#include <signal.h>
31#include "compiler/Compiler.h"
Bill Buzbee6e963e12009-06-17 16:56:19 -070032#include "compiler/CompilerUtility.h"
33#include "compiler/CompilerIR.h"
Ben Chengba4fc8b2009-06-01 13:00:29 -070034#include <errno.h>
35
Jeff Hao97319a82009-08-12 16:57:15 -070036#if defined(WITH_SELF_VERIFICATION)
37/* Allocate space for per-thread ShadowSpace data structures */
38void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
39{
40 self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
41 if (self->shadowSpace == NULL)
42 return NULL;
43
44 self->shadowSpace->registerSpaceSize = REG_SPACE;
45 self->shadowSpace->registerSpace =
46 (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));
47
48 return self->shadowSpace->registerSpace;
49}
50
51/* Free per-thread ShadowSpace data structures */
52void dvmSelfVerificationShadowSpaceFree(Thread* self)
53{
54 free(self->shadowSpace->registerSpace);
55 free(self->shadowSpace);
56}
57
/*
 * Save out PC, FP, InterpState, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 *
 * Called on entry to a translated region: snapshots the interpreter state
 * and the relevant slice of the Dalvik frame so the debug interpreter can
 * later re-execute the same trace and compare results.
 */
void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
                                   void* interpStatePtr)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
    InterpState *interpState = (InterpState *) interpStatePtr;
    /* Bytes below the FP: outgoing arguments plus the StackSaveArea */
    int preBytes = interpState->method->outsSize*4 + sizeof(StackSaveArea);
    /* Bytes at/above the FP: the method's virtual registers */
    int postBytes = interpState->method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    /* A new save is only legal from the idle state; log state-machine skew */
    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("* PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    // Dynamically grow shadow register space if necessary
    // NOTE(review): registerSpaceSize counts ints elsewhere (see *4 uses)
    // but is compared against a byte count here — conservative (over-
    // allocates) but worth confirming. The calloc result is also unchecked;
    // an OOM here would crash in the memcpy below.
    while (preBytes + postBytes > shadowSpace->registerSpaceSize) {
        shadowSpace->registerSpaceSize *= 2;
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(int));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->glue = interpStatePtr;
    /* Place the shadow FP so the registers end at the top of the buffer */
    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;

    // Create a copy of the InterpState
    memcpy(&(shadowSpace->interpState), interpStatePtr, sizeof(InterpState));
    shadowSpace->interpState.fp = shadowSpace->shadowFP;
    shadowSpace->interpState.interpStackEnd = (u1*)shadowSpace->registerSpace;

    // Create a copy of the stack (both the pre- and post-FP slices)
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}
114
115/*
116 * Save ending PC, FP and compiled code exit point to shadow space.
117 * Return a pointer to the shadow space for JIT to restore state.
118 */
119void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
120 SelfVerificationState exitPoint)
121{
122 Thread *self = dvmThreadSelf();
123 ShadowSpace *shadowSpace = self->shadowSpace;
124 shadowSpace->endPC = pc;
125 shadowSpace->endShadowFP = fp;
126
127 //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
128 // self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
129 // (int)pc);
130
131 if (shadowSpace->selfVerificationState != kSVSStart) {
132 LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
133 self->threadId, shadowSpace->selfVerificationState);
134 LOGD("********** SHADOW STATE DUMP **********");
135 LOGD("* Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
136 (int)shadowSpace->endPC);
137 LOGD("* Interp FP: 0x%x", (int)shadowSpace->fp);
138 LOGD("* Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
139 (int)shadowSpace->endShadowFP);
140 }
141
142 // Special case when punting after a single instruction
143 if (exitPoint == kSVSPunt && pc == shadowSpace->startPC) {
144 shadowSpace->selfVerificationState = kSVSIdle;
145 } else {
146 shadowSpace->selfVerificationState = exitPoint;
147 }
148
149 return shadowSpace;
150}
151
/* Dump numWords virtual-register words starting at addr, one per log line */
static void selfVerificationPrintRegisters(int* addr, int numWords)
{
    int* cursor = addr;
    int* limit = addr + numWords;
    int vreg = 0;

    while (cursor < limit) {
        LOGD("* 0x%x: (v%d) 0x%8x", (int)cursor, vreg, *cursor);
        cursor++;
        vreg++;
    }
}
160
/* Print values maintained in shadowSpace (diagnostic aid on divergence) */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    /* Bytes between the shadow FP and the top of the shadow register space */
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    /* curFrame below the saved fp means an invoke pushed a second frame —
     * NOTE(review): assumes the Dalvik stack grows downward; confirm */
    if (self->curFrame < shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("* CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("* Class: %s Method: %s", stackSave->method->clazz->descriptor,
        stackSave->method->name);
    LOGD("* Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("* Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("* Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("* Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("* Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}
192
193/* Print decoded instructions in the current trace */
194static void selfVerificationDumpTrace(const u2* pc, Thread* self)
195{
196 ShadowSpace* shadowSpace = self->shadowSpace;
197 StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
198 int i;
199 u2 *addr, *offset;
200 OpCode opcode;
201
202 LOGD("********** SHADOW TRACE DUMP **********");
203 for (i = 0; i < shadowSpace->traceLength; i++) {
204 addr = (u2*) shadowSpace->trace[i].addr;
205 offset = (u2*)(addr - stackSave->method->insns);
206 opcode = (OpCode) shadowSpace->trace[i].opcode;
207 LOGD("* 0x%x: (0x%04x) %s", (int)addr, (int)offset,
208 getOpcodeName(opcode));
209 }
210}
211
/*
 * Manage self verification while in the debug interpreter.
 *
 * Returns true when verification of the current trace is complete (either
 * the trace end was reached and state was compared, or control diverged);
 * returns false while the trace is still being re-executed, after logging
 * the current instruction into the shadow trace.
 */
static bool selfVerificationDebugInterp(const u2* pc, Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    /* Low byte of the code unit is the opcode */
    OpCode opcode = *pc & 0xff;
    SelfVerificationState state = shadowSpace->selfVerificationState;
    //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, getOpcodeName(opcode));

    /* We should only get here after a save (i.e. not idle/just-started) */
    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /* Skip endPC once when trace has a backward branch */
    /* NOTE(review): for a backward-branch trace this switches to
     * kSVSDebugInterp only once pc has reached endPC; all other states
     * switch immediately. */
    if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
        state != kSVSBackwardBranch) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /* Check that the current pc is the end of the trace */
    if ((state == kSVSSingleStep || state == kSVSDebugInterp) &&
        pc == shadowSpace->endPC) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space */
        /* Bytes between shadow FP and the top of the shadow buffer */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            LOGD("~~~ DbgIntp(%d): REGISTERS UNEQUAL!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp, frameBytes/4);
            LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                frameBytes/4);
        }
        /* Check new frame if it exists (invokes only) */
        if (self->curFrame < shadowSpace->fp) {
            StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->curFrame - localRegs;
            /* Compare only the non-local slice of the callee frame */
            if (memcmp(((char*)self->curFrame)+localRegs,
                ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) UNEQUAL!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
                    (int)self->curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->curFrame,
                    (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                    (frameBytes2+localRegs)/4);
            }
        }

        /* Check memory space: replay the heap writes recorded by the JIT */
        ShadowHeap* heapSpacePtr;
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int mem_data = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != mem_data) {
                LOGD("~~~ DbgIntp(%d): MEMORY UNEQUAL!", self->threadId);
                LOGD("* Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
                    heapSpacePtr->addr, mem_data, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
            }
        }
        return true;

    /* If end not been reached, make sure max length not exceeded */
    } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("* startPC: 0x%x endPC: 0x%x currPC: 0x%x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);

        return true;
    }
    /* Log the instruction address and opcode for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].opcode = opcode;
    shadowSpace->traceLength++;

    return false;
}
313#endif
314
Ben Chengba4fc8b2009-06-01 13:00:29 -0700315int dvmJitStartup(void)
316{
317 unsigned int i;
318 bool res = true; /* Assume success */
319
320 // Create the compiler thread and setup miscellaneous chores */
321 res &= dvmCompilerStartup();
322
323 dvmInitMutex(&gDvmJit.tableLock);
324 if (res && gDvm.executionMode == kExecutionModeJit) {
Bill Buzbee716f1202009-07-23 13:22:09 -0700325 JitEntry *pJitTable = NULL;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700326 unsigned char *pJitProfTable = NULL;
Ben Cheng3f02aa42009-08-14 13:52:09 -0700327 // Power of 2?
328 assert(gDvmJit.jitTableSize &&
329 !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));
Ben Chengba4fc8b2009-06-01 13:00:29 -0700330 dvmLockMutex(&gDvmJit.tableLock);
Bill Buzbee716f1202009-07-23 13:22:09 -0700331 pJitTable = (JitEntry*)
Bill Buzbee27176222009-06-09 09:20:16 -0700332 calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
Ben Chengba4fc8b2009-06-01 13:00:29 -0700333 if (!pJitTable) {
334 LOGE("jit table allocation failed\n");
335 res = false;
336 goto done;
337 }
Ben Chengba4fc8b2009-06-01 13:00:29 -0700338 /*
339 * NOTE: the profile table must only be allocated once, globally.
340 * Profiling is turned on and off by nulling out gDvm.pJitProfTable
341 * and then restoring its original value. However, this action
342 * is not syncronized for speed so threads may continue to hold
343 * and update the profile table after profiling has been turned
344 * off by null'ng the global pointer. Be aware.
345 */
346 pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
347 if (!pJitProfTable) {
348 LOGE("jit prof table allocation failed\n");
349 res = false;
350 goto done;
351 }
352 memset(pJitProfTable,0,JIT_PROF_SIZE);
Bill Buzbee27176222009-06-09 09:20:16 -0700353 for (i=0; i < gDvmJit.jitTableSize; i++) {
Bill Buzbee716f1202009-07-23 13:22:09 -0700354 pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700355 }
356 /* Is chain field wide enough for termination pattern? */
Ben Cheng3f02aa42009-08-14 13:52:09 -0700357 assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);
Ben Chengba4fc8b2009-06-01 13:00:29 -0700358
359done:
360 gDvmJit.pJitEntryTable = pJitTable;
Bill Buzbee27176222009-06-09 09:20:16 -0700361 gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
362 gDvmJit.jitTableEntriesUsed = 0;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700363 gDvmJit.pProfTableCopy = gDvmJit.pProfTable = pJitProfTable;
364 dvmUnlockMutex(&gDvmJit.tableLock);
365 }
366 return res;
367}
368
369/*
370 * If one of our fixed tables or the translation buffer fills up,
371 * call this routine to avoid wasting cycles on future translation requests.
372 */
373void dvmJitStopTranslationRequests()
374{
375 /*
376 * Note 1: This won't necessarily stop all translation requests, and
377 * operates on a delayed mechanism. Running threads look to the copy
378 * of this value in their private InterpState structures and won't see
379 * this change until it is refreshed (which happens on interpreter
380 * entry).
381 * Note 2: This is a one-shot memory leak on this table. Because this is a
382 * permanent off switch for Jit profiling, it is a one-time leak of 1K
383 * bytes, and no further attempt will be made to re-allocate it. Can't
384 * free it because some thread may be holding a reference.
385 */
386 gDvmJit.pProfTable = gDvmJit.pProfTableCopy = NULL;
387}
388
389#if defined(EXIT_STATS)
390/* Convenience function to increment counter from assembly code */
391void dvmBumpNoChain()
392{
393 gDvm.jitNoChainExit++;
394}
395
396/* Convenience function to increment counter from assembly code */
397void dvmBumpNormal()
398{
399 gDvm.jitNormalExit++;
400}
401
402/* Convenience function to increment counter from assembly code */
403void dvmBumpPunt(int from)
404{
405 gDvm.jitPuntExit++;
406}
407#endif
408
/* Dumps debugging & tuning stats to the log.
 * Walks the whole JitTable counting occupied slots and chained entries,
 * then logs summary lines (plus optional EXIT_STATS / INVOKE_STATS
 * counters and per-trace profiles). */
void dvmJitStats()
{
    int i;
    int hit;       /* slots holding a translation (dPC set) */
    int not_hit;   /* empty slots */
    int chains;    /* slots whose chain field links onward */
    if (gDvmJit.pJitEntryTable) {
        for (i=0, chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0)
                hit++;
            else
                not_hit++;
            /* chain == jitTableSize is the end-of-chain marker */
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD(
         "JIT: %d traces, %d slots, %d chains, %d maxQ, %d thresh, %s",
         hit, not_hit + hit, chains, gDvmJit.compilerMaxQueued,
         gDvmJit.threshold, gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
#if defined(EXIT_STATS)
        LOGD(
         "JIT: Lookups: %d hits, %d misses; %d NoChain, %d normal, %d punt",
         gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
         gDvmJit.noChainExit, gDvmJit.normalExit, gDvmJit.puntExit);
#endif
        LOGD("JIT: %d Translation chains", gDvmJit.translationChains);
#if defined(INVOKE_STATS)
        LOGD("JIT: Invoke: %d chainable, %d pred. chain, %d native, "
             "%d return",
             gDvmJit.invokeChain, gDvmJit.invokePredictedChain,
             gDvmJit.invokeNative, gDvmJit.returnOp);
#endif
        if (gDvmJit.profile) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}
449
Bill Buzbee716f1202009-07-23 13:22:09 -0700450
Ben Chengba4fc8b2009-06-01 13:00:29 -0700451/*
452 * Final JIT shutdown. Only do this once, and do not attempt to restart
453 * the JIT later.
454 */
455void dvmJitShutdown(void)
456{
457 /* Shutdown the compiler thread */
458 dvmCompilerShutdown();
459
460 dvmCompilerDumpStats();
461
462 dvmDestroyMutex(&gDvmJit.tableLock);
463
464 if (gDvmJit.pJitEntryTable) {
465 free(gDvmJit.pJitEntryTable);
466 gDvmJit.pJitEntryTable = NULL;
467 }
468
469 if (gDvmJit.pProfTable) {
470 free(gDvmJit.pProfTable);
471 gDvmJit.pProfTable = NULL;
472 }
473}
474
Ben Chengba4fc8b2009-06-01 13:00:29 -0700475/*
476 * Adds to the current trace request one instruction at a time, just
477 * before that instruction is interpreted. This is the primary trace
478 * selection function. NOTE: return instruction are handled a little
479 * differently. In general, instructions are "proposed" to be added
480 * to the current trace prior to interpretation. If the interpreter
481 * then successfully completes the instruction, is will be considered
482 * part of the request. This allows us to examine machine state prior
483 * to interpretation, and also abort the trace request if the instruction
484 * throws or does something unexpected. However, return instructions
485 * will cause an immediate end to the translation request - which will
486 * be passed to the compiler before the return completes. This is done
487 * in response to special handling of returns by the interpreter (and
488 * because returns cannot throw in a way that causes problems for the
489 * translated code.
490 */
Ben Chengba4fc8b2009-06-01 13:00:29 -0700491int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
492{
493 int flags,i,len;
494 int switchInterp = false;
495 int debugOrProfile = (gDvm.debuggerActive || self->suspendCount
496#if defined(WITH_PROFILER)
497 || gDvm.activeProfilers
498#endif
499 );
500
501 switch (interpState->jitState) {
502 char* nopStr;
503 int target;
504 int offset;
505 DecodedInstruction decInsn;
506 case kJitTSelect:
507 dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);
508#if defined(SHOW_TRACE)
509 LOGD("TraceGen: adding %s",getOpcodeName(decInsn.opCode));
510#endif
511 flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
512 len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, pc);
513 offset = pc - interpState->method->insns;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700514 if (pc != interpState->currRunHead + interpState->currRunLen) {
515 int currTraceRun;
516 /* We need to start a new trace run */
517 currTraceRun = ++interpState->currTraceRun;
518 interpState->currRunLen = 0;
519 interpState->currRunHead = (u2*)pc;
520 interpState->trace[currTraceRun].frag.startOffset = offset;
521 interpState->trace[currTraceRun].frag.numInsts = 0;
522 interpState->trace[currTraceRun].frag.runEnd = false;
523 interpState->trace[currTraceRun].frag.hint = kJitHintNone;
524 }
525 interpState->trace[interpState->currTraceRun].frag.numInsts++;
526 interpState->totalTraceLen++;
527 interpState->currRunLen += len;
528 if ( ((flags & kInstrUnconditional) == 0) &&
Bill Buzbeef4ce16f2009-07-28 13:28:25 -0700529 /* don't end trace on INVOKE_DIRECT_EMPTY */
530 (decInsn.opCode != OP_INVOKE_DIRECT_EMPTY) &&
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700531 ((flags & (kInstrCanBranch |
532 kInstrCanSwitch |
533 kInstrCanReturn |
534 kInstrInvoke)) != 0)) {
535 interpState->jitState = kJitTSelectEnd;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700536#if defined(SHOW_TRACE)
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700537 LOGD("TraceGen: ending on %s, basic block end",
538 getOpcodeName(decInsn.opCode));
Ben Chengba4fc8b2009-06-01 13:00:29 -0700539#endif
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700540 }
541 if (decInsn.opCode == OP_THROW) {
542 interpState->jitState = kJitTSelectEnd;
543 }
544 if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
545 interpState->jitState = kJitTSelectEnd;
546 }
547 if (debugOrProfile) {
548 interpState->jitState = kJitTSelectAbort;
549 switchInterp = !debugOrProfile;
550 break;
551 }
552 if ((flags & kInstrCanReturn) != kInstrCanReturn) {
553 break;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700554 }
555 /* NOTE: intentional fallthrough for returns */
556 case kJitTSelectEnd:
557 {
558 if (interpState->totalTraceLen == 0) {
559 switchInterp = !debugOrProfile;
560 break;
561 }
562 JitTraceDescription* desc =
563 (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
564 sizeof(JitTraceRun) * (interpState->currTraceRun+1));
565 if (desc == NULL) {
566 LOGE("Out of memory in trace selection");
567 dvmJitStopTranslationRequests();
568 interpState->jitState = kJitTSelectAbort;
569 switchInterp = !debugOrProfile;
570 break;
571 }
572 interpState->trace[interpState->currTraceRun].frag.runEnd =
573 true;
574 interpState->jitState = kJitNormal;
575 desc->method = interpState->method;
576 memcpy((char*)&(desc->trace[0]),
577 (char*)&(interpState->trace[0]),
578 sizeof(JitTraceRun) * (interpState->currTraceRun+1));
579#if defined(SHOW_TRACE)
580 LOGD("TraceGen: trace done, adding to queue");
581#endif
582 dvmCompilerWorkEnqueue(
583 interpState->currTraceHead,kWorkOrderTrace,desc);
584 if (gDvmJit.blockingMode) {
585 dvmCompilerDrainQueue();
586 }
587 switchInterp = !debugOrProfile;
588 }
589 break;
590 case kJitSingleStep:
591 interpState->jitState = kJitSingleStepEnd;
592 break;
593 case kJitSingleStepEnd:
594 interpState->entryPoint = kInterpEntryResume;
595 switchInterp = !debugOrProfile;
596 break;
597 case kJitTSelectAbort:
598#if defined(SHOW_TRACE)
599 LOGD("TraceGen: trace abort");
600#endif
601 interpState->jitState = kJitNormal;
602 switchInterp = !debugOrProfile;
603 break;
604 case kJitNormal:
Ben Cheng38329f52009-07-07 14:19:20 -0700605 switchInterp = !debugOrProfile;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700606 break;
Jeff Hao97319a82009-08-12 16:57:15 -0700607#if defined(WITH_SELF_VERIFICATION)
608 case kJitSelfVerification:
609 if (selfVerificationDebugInterp(pc, self)) {
610 interpState->jitState = kJitNormal;
611 switchInterp = !debugOrProfile;
612 }
613 break;
614#endif
Ben Chengba4fc8b2009-06-01 13:00:29 -0700615 default:
616 dvmAbort();
617 }
618 return switchInterp;
619}
620
Bill Buzbee716f1202009-07-23 13:22:09 -0700621static inline JitEntry *findJitEntry(const u2* pc)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700622{
623 int idx = dvmJitHash(pc);
624
625 /* Expect a high hit rate on 1st shot */
626 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
627 return &gDvmJit.pJitEntryTable[idx];
628 else {
Bill Buzbee27176222009-06-09 09:20:16 -0700629 int chainEndMarker = gDvmJit.jitTableSize;
Bill Buzbee716f1202009-07-23 13:22:09 -0700630 while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
631 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700632 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
633 return &gDvmJit.pJitEntryTable[idx];
634 }
635 }
636 return NULL;
637}
638
Bill Buzbee716f1202009-07-23 13:22:09 -0700639JitEntry *dvmFindJitEntry(const u2* pc)
Bill Buzbee27176222009-06-09 09:20:16 -0700640{
641 return findJitEntry(pc);
642}
643
/*
 * If a translated code address exists for the dalvik byte code
 * pointer return it. This routine needs to be fast.
 *
 * Lock-free read of the JitTable; safe because chain fields only grow
 * and entries transition NULL -> valid (see dvmJitLookupAndAdd).
 */
void* dvmJitGetCodeAddr(const u2* dPC)
{
    int idx = dvmJitHash(dPC);

    /* If anything is suspended, don't re-enter the code cache */
    if (gDvm.sumThreadSuspendCount > 0) {
        return NULL;
    }

    /* Expect a high hit rate on 1st shot */
    if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(EXIT_STATS)
        gDvmJit.addrLookupsFound++;
#endif
        return gDvmJit.pJitEntryTable[idx].codeAddress;
    } else {
        /* Walk the collision chain; jitTableSize marks the chain end */
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(EXIT_STATS)
                gDvmJit.addrLookupsFound++;
#endif
                return gDvmJit.pJitEntryTable[idx].codeAddress;
            }
        }
    }
#if defined(EXIT_STATS)
    gDvmJit.addrLookupsNotFound++;
#endif
    return NULL;
}
680
/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 *
 * Fast path is lock-free; the slow (insert) path takes tableLock and uses
 * CAS on the packed info word so concurrent readers of the other bit
 * fields are never corrupted.
 */
JitEntry *dvmJitLookupAndAdd(const u2* dPC)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /* Walk the bucket chain to find an exact match for our PC */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           (gDvmJit.pJitEntryTable[idx].dPC != dPC)) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
        /*
         * No match. Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case). Otherwise we're going
         * to have to find a free slot and chain it.
         */
        MEM_BARRIER(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            /* Re-walk under the lock; another thread may have extended it */
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
                    /* Another thread got there first for this dPC */
                    dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0;  /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;    /* idx == prev means we wrapped: table full */
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (!ATOMIC_CMP_SWAP(
                         &gDvmJit.pJitEntryTable[prev].u.infoWord,
                         oldValue.infoWord, newValue.infoWord));
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            /* Allocate the slot */
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}
/*
 * Register the translated code pointer into the JitTable.
 * NOTE: Once a codeAddress field transitions from NULL to
 * JIT'd code, it must not be altered without first halting all
 * threads. This routine should only be called by the compiler
 * thread.
 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    /* Creates the entry if it doesn't exist yet; NULL only if table full */
    JitEntry *jitEntry = dvmJitLookupAndAdd(dPC);
    assert(jitEntry);
    /* Note: order of update is important — publish the instruction set
     * (via CAS on the shared info word) before exposing codeAddress */
    do {
        oldValue = jitEntry->u;
        newValue = oldValue;
        newValue.info.instructionSet = set;
    } while (!ATOMIC_CMP_SWAP(
             &jitEntry->u.infoWord,
             oldValue.infoWord, newValue.infoWord));
    jitEntry->codeAddress = nPC;
}
785
/*
 * Determine if valid trace-building request is active. Return true
 * if we need to abort and switch back to the fast interpreter, false
 * otherwise. NOTE: may be called even when trace selection is not being
 * requested.
 */
bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
{
    bool res = false;         /* Assume success */
    int i;
    if (gDvmJit.pJitEntryTable != NULL) {
        /* Two-level filtering scheme: only the second sighting of a pc
         * within the small filter window triggers trace selection */
        for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
            if (interpState->pc == interpState->threshFilter[i]) {
                break;
            }
        }
        if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
            /*
             * Use random replacement policy - otherwise we could miss a large
             * loop that contains more traces than the size of our filter array.
             */
            i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
            interpState->threshFilter[i] = interpState->pc;
            res = true;
        }
        /*
         * If the compiler is backlogged, or if a debugger or profiler is
         * active, cancel any JIT actions
         */
        if ( res || (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) ||
              gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
                 || gDvm.activeProfilers
#endif
                                             ) {
            if (interpState->jitState != kJitOff) {
                interpState->jitState = kJitNormal;
            }
        } else if (interpState->jitState == kJitTSelectRequest) {
            JitEntry *slot = dvmJitLookupAndAdd(interpState->pc);
            if (slot == NULL) {
                /*
                 * Table is full.  This should have been
                 * detected by the compiler thread and the table
                 * resized before we run into it here.  Assume bad things
                 * are afoot and disable profiling.
                 */
                interpState->jitState = kJitTSelectAbort;
                LOGD("JIT: JitTable full, disabling profiling");
                dvmJitStopTranslationRequests();
            } else if (slot->u.info.traceRequested) {
                /* Trace already requested - revert to interpreter */
                interpState->jitState = kJitTSelectAbort;
            } else {
                /* Mark request (CAS: other bit fields are shared) */
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                do {
                    oldValue = slot->u;
                    newValue = oldValue;
                    newValue.info.traceRequested = true;
                } while (!ATOMIC_CMP_SWAP( &slot->u.infoWord,
                         oldValue.infoWord, newValue.infoWord));
            }
        }
        switch (interpState->jitState) {
            case kJitTSelectRequest:
                 /* Start a fresh trace: reset all per-trace bookkeeping */
                 interpState->jitState = kJitTSelect;
                 interpState->currTraceHead = interpState->pc;
                 interpState->currTraceRun = 0;
                 interpState->totalTraceLen = 0;
                 interpState->currRunHead = interpState->pc;
                 interpState->currRunLen = 0;
                 interpState->trace[0].frag.startOffset =
                       interpState->pc - interpState->method->insns;
                 interpState->trace[0].frag.numInsts = 0;
                 interpState->trace[0].frag.runEnd = false;
                 interpState->trace[0].frag.hint = kJitHintNone;
                 break;
            case kJitTSelect:
            case kJitTSelectAbort:
                 res = true;
                 /* fallthrough (intentional): these states also just break */
            case kJitSingleStep:
            case kJitSingleStepEnd:
            case kJitOff:
            case kJitNormal:
#if defined(WITH_SELF_VERIFICATION)
            case kJitSelfVerification:
#endif
                break;
            default:
                dvmAbort();
        }
    }
    return res;
}
884
Bill Buzbee27176222009-06-09 09:20:16 -0700885/*
886 * Resizes the JitTable. Must be a power of 2, and returns true on failure.
887 * Stops all threads, and thus is a heavyweight operation.
888 */
889bool dvmJitResizeJitTable( unsigned int size )
890{
Bill Buzbee716f1202009-07-23 13:22:09 -0700891 JitEntry *pNewTable;
892 JitEntry *pOldTable;
Bill Buzbee27176222009-06-09 09:20:16 -0700893 u4 newMask;
Bill Buzbee716f1202009-07-23 13:22:09 -0700894 unsigned int oldSize;
Bill Buzbee27176222009-06-09 09:20:16 -0700895 unsigned int i;
896
Ben Cheng3f02aa42009-08-14 13:52:09 -0700897 assert(gDvmJit.pJitEntryTable != NULL);
Bill Buzbee27176222009-06-09 09:20:16 -0700898 assert(size && !(size & (size - 1))); /* Is power of 2? */
899
900 LOGD("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);
901
902 newMask = size - 1;
903
904 if (size <= gDvmJit.jitTableSize) {
905 return true;
906 }
907
Bill Buzbee716f1202009-07-23 13:22:09 -0700908 pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
Bill Buzbee27176222009-06-09 09:20:16 -0700909 if (pNewTable == NULL) {
910 return true;
911 }
912 for (i=0; i< size; i++) {
Bill Buzbee716f1202009-07-23 13:22:09 -0700913 pNewTable[i].u.info.chain = size; /* Initialize chain termination */
Bill Buzbee27176222009-06-09 09:20:16 -0700914 }
915
916 /* Stop all other interpreting/jit'ng threads */
917 dvmSuspendAllThreads(SUSPEND_FOR_JIT);
918
Bill Buzbee716f1202009-07-23 13:22:09 -0700919 pOldTable = gDvmJit.pJitEntryTable;
920 oldSize = gDvmJit.jitTableSize;
Bill Buzbee27176222009-06-09 09:20:16 -0700921
922 dvmLockMutex(&gDvmJit.tableLock);
Bill Buzbee27176222009-06-09 09:20:16 -0700923 gDvmJit.pJitEntryTable = pNewTable;
924 gDvmJit.jitTableSize = size;
925 gDvmJit.jitTableMask = size - 1;
Bill Buzbee716f1202009-07-23 13:22:09 -0700926 gDvmJit.jitTableEntriesUsed = 0;
Bill Buzbee27176222009-06-09 09:20:16 -0700927 dvmUnlockMutex(&gDvmJit.tableLock);
928
Bill Buzbee716f1202009-07-23 13:22:09 -0700929 for (i=0; i < oldSize; i++) {
930 if (pOldTable[i].dPC) {
931 JitEntry *p;
932 u2 chain;
933 p = dvmJitLookupAndAdd(pOldTable[i].dPC);
934 p->dPC = pOldTable[i].dPC;
935 /*
936 * Compiler thread may have just updated the new entry's
937 * code address field, so don't blindly copy null.
938 */
939 if (pOldTable[i].codeAddress != NULL) {
940 p->codeAddress = pOldTable[i].codeAddress;
941 }
942 /* We need to preserve the new chain field, but copy the rest */
943 dvmLockMutex(&gDvmJit.tableLock);
944 chain = p->u.info.chain;
945 p->u = pOldTable[i].u;
946 p->u.info.chain = chain;
947 dvmUnlockMutex(&gDvmJit.tableLock);
948 }
949 }
950
951 free(pOldTable);
952
Bill Buzbee27176222009-06-09 09:20:16 -0700953 /* Restart the world */
954 dvmResumeAllThreads(SUSPEND_FOR_JIT);
955
956 return false;
957}
958
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700959/*
960 * Float/double conversion requires clamping to min and max of integer form. If
961 * target doesn't support this normally, use these.
962 */
963s8 dvmJitd2l(double d)
964{
Bill Buzbee9727c3d2009-08-01 11:32:36 -0700965 static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
966 static const double kMinLong = (double)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700967 if (d >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -0700968 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700969 else if (d <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -0700970 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700971 else if (d != d) // NaN case
972 return 0;
973 else
974 return (s8)d;
975}
976
977s8 dvmJitf2l(float f)
978{
Bill Buzbee9727c3d2009-08-01 11:32:36 -0700979 static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
980 static const float kMinLong = (float)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700981 if (f >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -0700982 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700983 else if (f <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -0700984 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700985 else if (f != f) // NaN case
986 return 0;
987 else
988 return (s8)f;
989}
990
Bill Buzbee27176222009-06-09 09:20:16 -0700991
Ben Chengba4fc8b2009-06-01 13:00:29 -0700992#endif /* WITH_JIT */