blob: 2d6ac5adb95d80f5e829555ca36fe3e02f355785 [file] [log] [blame]
Ben Chengba4fc8b2009-06-01 13:00:29 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#ifdef WITH_JIT
17
18/*
19 * Target independent portion of Android's Jit
20 */
21
22#include "Dalvik.h"
23#include "Jit.h"
24
25
26#include "dexdump/OpCodeNames.h"
27#include <unistd.h>
28#include <pthread.h>
29#include <sys/time.h>
30#include <signal.h>
31#include "compiler/Compiler.h"
Bill Buzbee6e963e12009-06-17 16:56:19 -070032#include "compiler/CompilerUtility.h"
33#include "compiler/CompilerIR.h"
Ben Chengba4fc8b2009-06-01 13:00:29 -070034#include <errno.h>
35
Ben Chengba4fc8b2009-06-01 13:00:29 -070036int dvmJitStartup(void)
37{
38 unsigned int i;
39 bool res = true; /* Assume success */
40
41 // Create the compiler thread and setup miscellaneous chores */
42 res &= dvmCompilerStartup();
43
44 dvmInitMutex(&gDvmJit.tableLock);
45 if (res && gDvm.executionMode == kExecutionModeJit) {
46 struct JitEntry *pJitTable = NULL;
Ben Chengba4fc8b2009-06-01 13:00:29 -070047 unsigned char *pJitProfTable = NULL;
Bill Buzbee27176222009-06-09 09:20:16 -070048 assert(gDvm.jitTableSize &&
49 !(gDvm.jitTableSize & (gDvmJit.jitTableSize - 1))); // Power of 2?
Ben Chengba4fc8b2009-06-01 13:00:29 -070050 dvmLockMutex(&gDvmJit.tableLock);
Bill Buzbee27176222009-06-09 09:20:16 -070051 pJitTable = (struct JitEntry*)
52 calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
Ben Chengba4fc8b2009-06-01 13:00:29 -070053 if (!pJitTable) {
54 LOGE("jit table allocation failed\n");
55 res = false;
56 goto done;
57 }
Ben Chengba4fc8b2009-06-01 13:00:29 -070058 /*
59 * NOTE: the profile table must only be allocated once, globally.
60 * Profiling is turned on and off by nulling out gDvm.pJitProfTable
61 * and then restoring its original value. However, this action
62 * is not syncronized for speed so threads may continue to hold
63 * and update the profile table after profiling has been turned
64 * off by null'ng the global pointer. Be aware.
65 */
66 pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
67 if (!pJitProfTable) {
68 LOGE("jit prof table allocation failed\n");
69 res = false;
70 goto done;
71 }
72 memset(pJitProfTable,0,JIT_PROF_SIZE);
Bill Buzbee27176222009-06-09 09:20:16 -070073 for (i=0; i < gDvmJit.jitTableSize; i++) {
74 pJitTable[i].chain = gDvmJit.jitTableSize;
Ben Chengba4fc8b2009-06-01 13:00:29 -070075 }
76 /* Is chain field wide enough for termination pattern? */
77 assert(pJitTable[0].chain == gDvm.maxJitTableEntries);
Ben Chengba4fc8b2009-06-01 13:00:29 -070078
79done:
80 gDvmJit.pJitEntryTable = pJitTable;
Bill Buzbee27176222009-06-09 09:20:16 -070081 gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
82 gDvmJit.jitTableEntriesUsed = 0;
Ben Chengba4fc8b2009-06-01 13:00:29 -070083 gDvmJit.pProfTableCopy = gDvmJit.pProfTable = pJitProfTable;
84 dvmUnlockMutex(&gDvmJit.tableLock);
85 }
86 return res;
87}
88
89/*
90 * If one of our fixed tables or the translation buffer fills up,
91 * call this routine to avoid wasting cycles on future translation requests.
92 */
93void dvmJitStopTranslationRequests()
94{
95 /*
96 * Note 1: This won't necessarily stop all translation requests, and
97 * operates on a delayed mechanism. Running threads look to the copy
98 * of this value in their private InterpState structures and won't see
99 * this change until it is refreshed (which happens on interpreter
100 * entry).
101 * Note 2: This is a one-shot memory leak on this table. Because this is a
102 * permanent off switch for Jit profiling, it is a one-time leak of 1K
103 * bytes, and no further attempt will be made to re-allocate it. Can't
104 * free it because some thread may be holding a reference.
105 */
106 gDvmJit.pProfTable = gDvmJit.pProfTableCopy = NULL;
107}
108
#if defined(EXIT_STATS)
/* Convenience helper for assembly code: count a no-chain exit */
void dvmBumpNoChain()
{
    gDvm.jitNoChainExit += 1;
}

/* Convenience helper for assembly code: count a normal exit */
void dvmBumpNormal()
{
    gDvm.jitNormalExit += 1;
}

/* Convenience helper for assembly code: count a punt exit */
void dvmBumpPunt(int from)
{
    gDvm.jitPuntExit += 1;
}
#endif
128
Ben Chengdfc24792009-07-21 10:22:22 -0700129typedef struct jitProfileAddrToLine {
130 u4 lineNum;
131 u4 bytecodeOffset;
132} jitProfileAddrToLine;
133
134
135/* Callback function to track the bytecode offset/line number relationiship */
136static int addrToLineCb (void *cnxt, u4 bytecodeOffset, u4 lineNum)
137{
138 jitProfileAddrToLine *addrToLine = (jitProfileAddrToLine *) cnxt;
139
140 /* Best match so far for this offset */
141 if (addrToLine->bytecodeOffset >= bytecodeOffset) {
142 addrToLine->lineNum = lineNum;
143 }
Ben Chengdfc24792009-07-21 10:22:22 -0700144 return 0;
145}
146
/*
 * Dumps profile info for a single trace to the log and returns that
 * trace's execution count (0 if the entry has no translation).
 */
int dvmCompilerDumpTraceProfile(struct JitEntry *p)
{
    ChainCellCounts* pCellCounts;
    char* traceBase;
    u4* pExecutionCount;
    u2* pCellOffset;
    JitTraceDescription *desc;
    const Method* method;

    /*
     * The codeAddress field has the low bit set to mark thumb
     * mode. We need to strip that off before reconstructing the
     * trace data. See the diagram in Assemble.c for more info
     * on the trace layout in memory.
     */
    /* NOTE(review): computed before the NULL check below; only used for
     * logging in that branch, but pointer arithmetic on NULL is dubious. */
    traceBase = (char*)p->codeAddress - 7;

    if (p->codeAddress == NULL) {
        LOGD("TRACEPROFILE 0x%08x 0 NULL 0 0", (int)traceBase);
        return 0;
    }

    /* Walk the trace header: count word, cell offset, cell counts, then
     * the JitTraceDescription itself */
    pExecutionCount = (u4*) (traceBase);
    pCellOffset = (u2*) (traceBase + 4);
    pCellCounts = (ChainCellCounts*) ((char *)pCellOffset + *pCellOffset);
    desc = (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
    method = desc->method;
    char *methodDesc = dexProtoCopyMethodDescriptor(&method->prototype);
    /* Seed with the trace's start offset; the callback fills in lineNum */
    jitProfileAddrToLine addrToLine = {0, desc->trace[0].frag.startOffset};

    /*
     * We may end up decoding the debug information for the same method
     * multiple times, but the tradeoff is we don't need to allocate extra
     * space to store the addr/line mapping. Since this is a debugging feature
     * and done infrequently so the slower but simpler mechanism should work
     * just fine.
     */
    dexDecodeDebugInfo(method->clazz->pDvmDex->pDexFile,
                       dvmGetMethodCode(method),
                       method->clazz->descriptor,
                       method->prototype.protoIdx,
                       method->accessFlags,
                       addrToLineCb, NULL, &addrToLine);

    LOGD("TRACEPROFILE 0x%08x % 10d [%#x(+%d), %d] %s%s;%s",
         (int)traceBase,
         *pExecutionCount,
         desc->trace[0].frag.startOffset,
         desc->trace[0].frag.numInsts,
         addrToLine.lineNum,
         method->clazz->descriptor, method->name, methodDesc);
    free(methodDesc);

    return *pExecutionCount;
}
203
Ben Chenge80cd942009-07-17 15:54:23 -0700204/* Handy function to retrieve the profile count */
205static inline int getProfileCount(const JitEntry *entry)
206{
207 if (entry->dPC == 0 || entry->codeAddress == 0)
208 return 0;
209 /*
210 * The codeAddress field has the low bit set to mark thumb
211 * mode. We need to strip that off before reconstructing the
212 * trace data. See the diagram in Assemble.c for more info
213 * on the trace layout in memory.
214 */
215 u4 *pExecutionCount = (u4 *) ((char*)entry->codeAddress - 7);
216
217 return *pExecutionCount;
218}
219
220/* qsort callback function */
221static int sortTraceProfileCount(const void *entry1, const void *entry2)
222{
223 const JitEntry *jitEntry1 = entry1;
224 const JitEntry *jitEntry2 = entry2;
225
226 int count1 = getProfileCount(jitEntry1);
227 int count2 = getProfileCount(jitEntry2);
228 return (count1 == count2) ? 0 : ((count1 > count2) ? -1 : 1);
229}
230
/*
 * Sort a snapshot of the JIT table by descending execution count and dump
 * each trace's profile, followed by the average count across all traces.
 * Takes tableLock for the duration so the table can't change underneath us.
 */
static void sortAndPrintTraceProfiles()
{
    JitEntry *sortedEntries;
    int numTraces = 0;
    unsigned long counts = 0;
    unsigned int i;

    /* Make sure that the table is not changing */
    dvmLockMutex(&gDvmJit.tableLock);

    /* Sort a private copy so the live table is never reordered */
    sortedEntries = malloc(sizeof(JitEntry) * gDvmJit.jitTableSize);
    if (sortedEntries == NULL)
        goto done;
    memcpy(sortedEntries, gDvmJit.pJitEntryTable,
           sizeof(JitEntry) * gDvmJit.jitTableSize);
    qsort(sortedEntries, gDvmJit.jitTableSize, sizeof(JitEntry),
          sortTraceProfileCount);

    /* Dump the sorted entries (dPC == 0 marks an unused slot) */
    for (i=0; i < gDvmJit.jitTableSize; i++) {
        if (sortedEntries[i].dPC != 0) {
            counts += dvmCompilerDumpTraceProfile(&sortedEntries[i]);
            numTraces++;
        }
    }
    /* Avoid divide-by-zero when no traces were found */
    if (numTraces == 0)
        numTraces = 1;
    LOGD("JIT: Average execution count -> %d",(int)(counts / numTraces));

    free(sortedEntries);
done:
    dvmUnlockMutex(&gDvmJit.tableLock);
    return;
}
267
/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;        /* slots holding a trace (dPC != 0) */
    int not_hit;    /* empty slots */
    int chains;     /* slots participating in a collision chain */
    if (gDvmJit.pJitEntryTable) {
        /* Single pass: count used/empty slots and chained entries.
         * chain != jitTableSize means this slot links to another. */
        for (i=0, chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0)
                hit++;
            else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD(
         "JIT: %d traces, %d slots, %d chains, %d maxQ, %d thresh, %s",
         hit, not_hit + hit, chains, gDvmJit.compilerMaxQueued,
         gDvmJit.threshold, gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
#if defined(EXIT_STATS)
        LOGD(
         "JIT: Lookups: %d hits, %d misses; %d NoChain, %d normal, %d punt",
         gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
         gDvmJit.noChainExit, gDvmJit.normalExit, gDvmJit.puntExit);
#endif
        LOGD("JIT: %d Translation chains", gDvmJit.translationChains);
#if defined(INVOKE_STATS)
        LOGD("JIT: Invoke: %d chainable, %d pred. chain, %d native, "
             "%d return",
             gDvmJit.invokeChain, gDvmJit.invokePredictedChain,
             gDvmJit.invokeNative, gDvmJit.returnOp);
#endif
        /* Optionally dump per-trace profiles, sorted by execution count */
        if (gDvmJit.profile) {
            sortAndPrintTraceProfiles();
        }
    }
}
308
309/*
310 * Final JIT shutdown. Only do this once, and do not attempt to restart
311 * the JIT later.
312 */
313void dvmJitShutdown(void)
314{
315 /* Shutdown the compiler thread */
316 dvmCompilerShutdown();
317
318 dvmCompilerDumpStats();
319
320 dvmDestroyMutex(&gDvmJit.tableLock);
321
322 if (gDvmJit.pJitEntryTable) {
323 free(gDvmJit.pJitEntryTable);
324 gDvmJit.pJitEntryTable = NULL;
325 }
326
327 if (gDvmJit.pProfTable) {
328 free(gDvmJit.pProfTable);
329 gDvmJit.pProfTable = NULL;
330 }
331}
332
/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted. This is the primary trace
 * selection function. NOTE: return instruction are handled a little
 * differently. In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation. If the interpreter
 * then successfully completes the instruction, is will be considered
 * part of the request. This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected. However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes. This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code.
 *
 * Returns true when the caller should switch back to the fast
 * interpreter, false otherwise.
 */
int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
{
    int flags,i,len;
    int switchInterp = false;
    /* Trace selection is suppressed while debugging/profiling is active */
    int debugOrProfile = (gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
                          || gDvm.activeProfilers
#endif
                         );

    /* State machine driven by interpState->jitState */
    switch (interpState->jitState) {
        char* nopStr;
        int target;
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* Actively building a trace: consider adding this instruction */
            dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);
#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s",getOpcodeName(decInsn.opCode));
#endif
            flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
            len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, pc);
            offset = pc - interpState->method->insns;
            /* Non-contiguous pc means the previous run ended; open a new one */
            if (pc != interpState->currRunHead + interpState->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++interpState->currTraceRun;
                interpState->currRunLen = 0;
                interpState->currRunHead = (u2*)pc;
                interpState->trace[currTraceRun].frag.startOffset = offset;
                interpState->trace[currTraceRun].frag.numInsts = 0;
                interpState->trace[currTraceRun].frag.runEnd = false;
                interpState->trace[currTraceRun].frag.hint = kJitHintNone;
            }
            interpState->trace[interpState->currTraceRun].frag.numInsts++;
            interpState->totalTraceLen++;
            interpState->currRunLen += len;
            /* Conditional control-flow instruction ends the basic block */
            if ( ((flags & kInstrUnconditional) == 0) &&
                 ((flags & (kInstrCanBranch |
                            kInstrCanSwitch |
                            kInstrCanReturn |
                            kInstrInvoke)) != 0)) {
                interpState->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     getOpcodeName(decInsn.opCode));
#endif
            }
            /* Throws always terminate trace selection */
            if (decInsn.opCode == OP_THROW) {
                interpState->jitState = kJitTSelectEnd;
            }
            /* Cap the trace length */
            if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                interpState->jitState = kJitTSelectEnd;
            }
            /* Debugger/profiler became active mid-trace: abort selection */
            if (debugOrProfile) {
                interpState->jitState = kJitTSelectAbort;
                switchInterp = !debugOrProfile;
                break;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                /* Empty trace: nothing to compile, just switch back */
                if (interpState->totalTraceLen == 0) {
                    switchInterp = !debugOrProfile;
                    break;
                }
                /* Package the accumulated runs into a work order for the
                 * compiler thread (it owns and frees desc) */
                JitTraceDescription* desc =
                   (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                     sizeof(JitTraceRun) * (interpState->currTraceRun+1));
                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    interpState->jitState = kJitTSelectAbort;
                    switchInterp = !debugOrProfile;
                    break;
                }
                interpState->trace[interpState->currTraceRun].frag.runEnd =
                     true;
                interpState->jitState = kJitNormal;
                desc->method = interpState->method;
                memcpy((char*)&(desc->trace[0]),
                    (char*)&(interpState->trace[0]),
                    sizeof(JitTraceRun) * (interpState->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
#endif
                dvmCompilerWorkEnqueue(
                       interpState->currTraceHead,kWorkOrderTrace,desc);
                if (gDvmJit.blockingMode) {
                    dvmCompilerDrainQueue();
                }
                switchInterp = !debugOrProfile;
            }
            break;
        case kJitSingleStep:
            /* One more instruction to step, then resume */
            interpState->jitState = kJitSingleStepEnd;
            break;
        case kJitSingleStepEnd:
            interpState->entryPoint = kInterpEntryResume;
            switchInterp = !debugOrProfile;
            break;
        case kJitTSelectAbort:
#if defined(SHOW_TRACE)
            LOGD("TraceGen: trace abort");
#endif
            interpState->jitState = kJitNormal;
            switchInterp = !debugOrProfile;
            break;
        case kJitNormal:
            switchInterp = !debugOrProfile;
            break;
        default:
            /* Unknown state - should never happen */
            dvmAbort();
    }
    return switchInterp;
}
468
469static inline struct JitEntry *findJitEntry(const u2* pc)
470{
471 int idx = dvmJitHash(pc);
472
473 /* Expect a high hit rate on 1st shot */
474 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
475 return &gDvmJit.pJitEntryTable[idx];
476 else {
Bill Buzbee27176222009-06-09 09:20:16 -0700477 int chainEndMarker = gDvmJit.jitTableSize;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700478 while (gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) {
479 idx = gDvmJit.pJitEntryTable[idx].chain;
480 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
481 return &gDvmJit.pJitEntryTable[idx];
482 }
483 }
484 return NULL;
485}
486
/* Public wrapper around findJitEntry(); returns NULL if pc has no entry */
struct JitEntry *dvmFindJitEntry(const u2* pc)
{
    return findJitEntry(pc);
}
491
492/*
493 * Allocate an entry in a JitTable. Assumes caller holds lock, if
494 * applicable. Normally used for table resizing. Will complain (die)
495 * if entry already exists in the table or if table is full.
496 */
497static struct JitEntry *allocateJitEntry(const u2* pc, struct JitEntry *table,
498 u4 size)
499{
500 struct JitEntry *p;
501 unsigned int idx;
502 unsigned int prev;
503 idx = dvmJitHashMask(pc, size-1);
504 while ((table[idx].chain != size) && (table[idx].dPC != pc)) {
505 idx = table[idx].chain;
506 }
507 assert(table[idx].dPC != pc); /* Already there */
508 if (table[idx].dPC == NULL) {
509 /* use this slot */
510 return &table[idx];
511 }
512 /* Find a free entry and chain it in */
513 prev = idx;
514 while (true) {
515 idx++;
516 if (idx == size)
517 idx = 0; /* Wraparound */
518 if ((table[idx].dPC == NULL) || (idx == prev))
519 break;
520 }
521 assert(idx != prev);
522 table[prev].chain = idx;
523 assert(table[idx].dPC == NULL);
524 return &table[idx];
525}
526
/*
 * If a translated code address exists for the davik byte code
 * pointer return it. This routine needs to be fast.
 * Returns NULL when no translation exists or any thread is suspended.
 * NOTE(review): reads the table without taking tableLock - presumably
 * safe by design on this hot path; verify against writer protocol.
 */
void* dvmJitGetCodeAddr(const u2* dPC)
{
    int idx = dvmJitHash(dPC);

    /* If anything is suspended, don't re-enter the code cache */
    if (gDvm.sumThreadSuspendCount > 0) {
        return NULL;
    }

    /* Expect a high hit rate on 1st shot */
    if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(EXIT_STATS)
        gDvmJit.addrLookupsFound++;
#endif
        return gDvmJit.pJitEntryTable[idx].codeAddress;
    } else {
        /* Miss on the primary slot: follow the collision chain */
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].chain;
            if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(EXIT_STATS)
                gDvmJit.addrLookupsFound++;
#endif
                return gDvmJit.pJitEntryTable[idx].codeAddress;
            }
        }
    }
#if defined(EXIT_STATS)
    gDvmJit.addrLookupsNotFound++;
#endif
    return NULL;
}
563
564/*
565 * Register the translated code pointer into the JitTable.
566 * NOTE: Once a codeAddress field transitions from NULL to
567 * JIT'd code, it must not be altered without first halting all
568 * threads.
569 */
570void dvmJitSetCodeAddr(const u2* dPC, void *nPC) {
571 struct JitEntry *jitEntry = findJitEntry(dPC);
572 assert(jitEntry);
573 /* Thumb code has odd PC */
574 jitEntry->codeAddress = (void *) ((intptr_t) nPC |1);
575}
576
/*
 * Determine if valid trace-bulding request is active. Return true
 * if we need to abort and switch back to the fast interpreter, false
 * otherwise. NOTE: may be called even when trace selection is not being
 * requested
 */

#define PROFILE_STALENESS_THRESHOLD 100000LL
bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
{
    bool res = false;         /* No trace request handled yet */
    int i;
    if (gDvmJit.pJitEntryTable != NULL) {
        /* Two-level filtering scheme: a small per-thread filter of recently
         * seen PCs must see a PC twice before a trace is considered */
        for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
            if (interpState->pc == interpState->threshFilter[i]) {
                break;
            }
        }
        if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
            /*
             * Use random replacement policy - otherwise we could miss a large
             * loop that contains more traces than the size of our filter array.
             */
            i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
            interpState->threshFilter[i] = interpState->pc;
            res = true;
        }
        /*
         * If the compiler is backlogged, or if a debugger or profiler is
         * active, cancel any JIT actions
         */
        if ( res || (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) ||
              gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
                 || gDvm.activeProfilers
#endif
                                             ) {
            if (interpState->jitState != kJitOff) {
                interpState->jitState = kJitNormal;
            }
        } else if (interpState->jitState == kJitTSelectRequest) {
            u4 chainEndMarker = gDvmJit.jitTableSize;
            u4 idx = dvmJitHash(interpState->pc);

            /* Walk the bucket chain to find an exact match for our PC */
            while ((gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) &&
                   (gDvmJit.pJitEntryTable[idx].dPC != interpState->pc)) {
                idx = gDvmJit.pJitEntryTable[idx].chain;
            }

            if (gDvmJit.pJitEntryTable[idx].dPC == interpState->pc) {
                /*
                 * Got a match. This means a trace has already
                 * been requested for this address. Bail back to
                 * mterp, which will check if the translation is ready
                 * for execution
                 */
                interpState->jitState = kJitTSelectAbort;
            } else {
                /*
                 * No match. Aquire jitTableLock and find the last
                 * slot in the chain. Possibly continue the chain walk in case
                 * some other thread allocated the slot we were looking
                 * at previuosly
                 */
                dvmLockMutex(&gDvmJit.tableLock);
                /*
                 * At this point, if .dPC is NULL, then the slot we're
                 * looking at is the target slot from the primary hash
                 * (the simple, and expected case). Otherwise we're going
                 * to have to find a free slot and chain it.
                 */
                MEM_BARRIER();
                if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
                    u4 prev;
                    /* Re-walk: another thread may have extended the chain */
                    while (gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) {
                        idx = gDvmJit.pJitEntryTable[idx].chain;
                    }
                    /* Here, idx should be pointing to the last cell of an
                     * active chain whose last member contains a valid dPC */
                    assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
                    /* Now, do a linear walk to find a free cell and add it to
                     * end of this chain */
                    prev = idx;
                    while (true) {
                        idx++;
                        if (idx == chainEndMarker)
                            idx = 0; /* Wraparound */
                        if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                            (idx == prev))
                            break;
                    }
                    if (idx != prev) {
                        /* Got it - chain */
                        gDvmJit.pJitEntryTable[prev].chain = idx;
                    }
                }
                if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
                    /* Allocate the slot */
                    gDvmJit.pJitEntryTable[idx].dPC = interpState->pc;
                    gDvmJit.jitTableEntriesUsed++;
                } else {
                    /*
                     * Table is full. We could resize it, but that would
                     * be better handled by the translator thread. It
                     * will be aware of how full the table is getting.
                     * Disable further profiling and continue.
                     */
                    interpState->jitState = kJitTSelectAbort;
                    LOGD("JIT: JitTable full, disabling profiling");
                    dvmJitStopTranslationRequests();
                }
                dvmUnlockMutex(&gDvmJit.tableLock);
            }
        }
        /* Second phase: act on the (possibly updated) jitState */
        switch (interpState->jitState) {
            case kJitTSelectRequest:
                 /* Begin trace selection: reset all per-trace bookkeeping */
                 interpState->jitState = kJitTSelect;
                 interpState->currTraceHead = interpState->pc;
                 interpState->currTraceRun = 0;
                 interpState->totalTraceLen = 0;
                 interpState->currRunHead = interpState->pc;
                 interpState->currRunLen = 0;
                 interpState->trace[0].frag.startOffset =
                       interpState->pc - interpState->method->insns;
                 interpState->trace[0].frag.numInsts = 0;
                 interpState->trace[0].frag.runEnd = false;
                 interpState->trace[0].frag.hint = kJitHintNone;
                 break;
            case kJitTSelect:
            case kJitTSelectAbort:
                 res = true;
                 /* intentional fallthrough */
            case kJitSingleStep:
            case kJitSingleStepEnd:
            case kJitOff:
            case kJitNormal:
                break;
            default:
                dvmAbort();
        }
    }
    return res;
}
721
Bill Buzbee27176222009-06-09 09:20:16 -0700722/*
723 * Resizes the JitTable. Must be a power of 2, and returns true on failure.
724 * Stops all threads, and thus is a heavyweight operation.
725 */
726bool dvmJitResizeJitTable( unsigned int size )
727{
728 struct JitEntry *pNewTable;
729 u4 newMask;
730 unsigned int i;
731
732 assert(gDvm.pJitEntryTable != NULL);
733 assert(size && !(size & (size - 1))); /* Is power of 2? */
734
735 LOGD("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);
736
737 newMask = size - 1;
738
739 if (size <= gDvmJit.jitTableSize) {
740 return true;
741 }
742
743 pNewTable = (struct JitEntry*)calloc(size, sizeof(*pNewTable));
744 if (pNewTable == NULL) {
745 return true;
746 }
747 for (i=0; i< size; i++) {
748 pNewTable[i].chain = size; /* Initialize chain termination */
749 }
750
751 /* Stop all other interpreting/jit'ng threads */
752 dvmSuspendAllThreads(SUSPEND_FOR_JIT);
753
754 /*
755 * At this point, only the compiler thread may be in contention
756 * for the jitEntryTable (it is not affected by the thread suspension).
757 * Aquire the lock.
758 */
759
760 dvmLockMutex(&gDvmJit.tableLock);
761
762 for (i=0; i < gDvmJit.jitTableSize; i++) {
763 if (gDvmJit.pJitEntryTable[i].dPC) {
764 struct JitEntry *p;
765 p = allocateJitEntry(gDvmJit.pJitEntryTable[i].dPC,
766 pNewTable, size);
767 p->dPC = gDvmJit.pJitEntryTable[i].dPC;
768 p->codeAddress = gDvmJit.pJitEntryTable[i].codeAddress;
769 }
770 }
771
772 free(gDvmJit.pJitEntryTable);
773 gDvmJit.pJitEntryTable = pNewTable;
774 gDvmJit.jitTableSize = size;
775 gDvmJit.jitTableMask = size - 1;
776
777 dvmUnlockMutex(&gDvmJit.tableLock);
778
779 /* Restart the world */
780 dvmResumeAllThreads(SUSPEND_FOR_JIT);
781
782 return false;
783}
784
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700785/*
786 * Float/double conversion requires clamping to min and max of integer form. If
787 * target doesn't support this normally, use these.
788 */
789s8 dvmJitd2l(double d)
790{
791 static const double kMaxLong = (double)0x7fffffffffffffffULL;
792 static const double kMinLong = (double)0x8000000000000000ULL;
793 if (d >= kMaxLong)
794 return 0x7fffffffffffffffULL;
795 else if (d <= kMinLong)
796 return 0x8000000000000000ULL;
797 else if (d != d) // NaN case
798 return 0;
799 else
800 return (s8)d;
801}
802
803s8 dvmJitf2l(float f)
804{
805 static const float kMaxLong = (float)0x7fffffffffffffffULL;
806 static const float kMinLong = (float)0x8000000000000000ULL;
807 if (f >= kMaxLong)
808 return 0x7fffffffffffffffULL;
809 else if (f <= kMinLong)
810 return 0x8000000000000000ULL;
811 else if (f != f) // NaN case
812 return 0;
813 else
814 return (s8)f;
815}
816
Bill Buzbee27176222009-06-09 09:20:16 -0700817
Ben Chengba4fc8b2009-06-01 13:00:29 -0700818#endif /* WITH_JIT */