/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifdef WITH_JIT

/*
 * Target independent portion of Android's Jit
 */

#include "Dalvik.h"
#include "Jit.h"

#include "dexdump/OpCodeNames.h"
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>
#include <signal.h>
#include "compiler/Compiler.h"
#include <errno.h>

/*
 * Reset profile counts.  Note that we could easily lose one or more
 * of these writes because of threading.  Because these counts are
 * considered hints, absolute correctness is not a problem and the
 * cost of synchronizing would be prohibitive.
 * NOTE: Experimental - 5/21/09.  Keep rough track of the last time
 * the counts were reset to allow the trace builder to ignore stale
 * thresholds.  This is just a hint, and the only penalty for getting
 * it wrong is a slight performance hit (far less than the cost of
 * synchronization).
 */
static u8 lastProfileResetTimeUsec;
static void resetProfileCounts() {
    int i;
    unsigned char *pJitProfTable = gDvmJit.pProfTable;
    lastProfileResetTimeUsec = dvmGetRelativeTimeUsec();
    if (pJitProfTable != NULL) {
        for (i=0; i < JIT_PROF_SIZE; i++) {
            pJitProfTable[i] = gDvmJit.threshold;
        }
    }
}
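
/*
 * Note (illustrative, derived from this file): lastProfileResetTimeUsec
 * is consumed in dvmJitCheckTraceRequest() below, where a delta greater
 * than PROFILE_STALENESS_THRESHOLD (250000 usec, i.e. 0.25 seconds)
 * triggers another resetProfileCounts() and abandons the pending trace
 * request as stale.
 */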

int dvmJitStartup(void)
{
    unsigned int i;
    bool res = true;                 /* Assume success */

    /* Create the compiler thread and set up miscellaneous chores */
    res &= dvmCompilerStartup();

    dvmInitMutex(&gDvmJit.tableLock);
    if (res && gDvm.executionMode == kExecutionModeJit) {
        struct JitEntry *pJitTable = NULL;
        unsigned char *pJitProfTable = NULL;
        assert(gDvmJit.jitTableSize &&
               !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1))); // Power of 2?
        dvmLockMutex(&gDvmJit.tableLock);
        pJitTable = (struct JitEntry*)
                    calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
        if (!pJitTable) {
            LOGE("jit table allocation failed\n");
            res = false;
            goto done;
        }
        /*
         * NOTE: the profile table must only be allocated once, globally.
         * Profiling is turned on and off by nulling out gDvmJit.pProfTable
         * and then restoring its original value.  However, this action
         * is not synchronized for speed so threads may continue to hold
         * and update the profile table after profiling has been turned
         * off by nulling the global pointer.  Be aware.
         */
        pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
        if (!pJitProfTable) {
            LOGE("jit prof table allocation failed\n");
            res = false;
            goto done;
        }
        memset(pJitProfTable, 0, JIT_PROF_SIZE);
        for (i=0; i < gDvmJit.jitTableSize; i++) {
            pJitTable[i].chain = gDvmJit.jitTableSize;
        }
        /* Is chain field wide enough for termination pattern? */
        assert(pJitTable[0].chain == gDvmJit.jitTableSize);
        resetProfileCounts();

done:
        gDvmJit.pJitEntryTable = pJitTable;
        gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
        gDvmJit.jitTableEntriesUsed = 0;
        gDvmJit.pProfTableCopy = gDvmJit.pProfTable = pJitProfTable;
        dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return res;
}
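
/*
 * Sizing example (hypothetical numbers): because jitTableSize is
 * asserted above to be a power of 2, a table of 512 entries yields
 * jitTableMask == 0x1ff, so a hash can be reduced with a cheap AND
 * rather than a modulo, and the out-of-range index 512 doubles as the
 * chain-termination marker stored in each entry's .chain field.
 */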

/*
 * If one of our fixed tables or the translation buffer fills up,
 * call this routine to avoid wasting cycles on future translation requests.
 */
void dvmJitStopTranslationRequests()
{
    /*
     * Note 1: This won't necessarily stop all translation requests, and
     * operates on a delayed mechanism.  Running threads look to the copy
     * of this value in their private InterpState structures and won't see
     * this change until it is refreshed (which happens on interpreter
     * entry).
     * Note 2: This is a one-shot memory leak on this table.  Because this is
     * a permanent off switch for Jit profiling, it is a one-time leak of 1K
     * bytes, and no further attempt will be made to re-allocate it.  Can't
     * free it because some thread may be holding a reference.
     */
    gDvmJit.pProfTable = gDvmJit.pProfTableCopy = NULL;
}
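
/*
 * Clarifying note (inferred from the startup code above): pProfTable is
 * the live pointer that threads copy into their InterpState, and
 * pProfTableCopy appears to hold the original value used to restore it
 * when profiling is toggled.  Nulling both, as done here, makes the off
 * switch permanent.
 */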

#if defined(EXIT_STATS)
/* Convenience function to increment counter from assembly code */
void dvmBumpNoChain()
{
    gDvmJit.noChainExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpNormal()
{
    gDvmJit.normalExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
#endif

/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;
    int not_hit;
    int chains;
    if (gDvmJit.pJitEntryTable) {
        for (i=0, chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0)
                hit++;
            else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD(
         "JIT: %d traces, %d slots, %d chains, %d maxQ, %d thresh, %s",
         hit, not_hit + hit, chains, gDvmJit.compilerMaxQueued,
         gDvmJit.threshold, gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
#if defined(EXIT_STATS)
        LOGD(
         "JIT: Lookups: %d hits, %d misses; %d NoChain, %d normal, %d punt",
         gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
         gDvmJit.noChainExit, gDvmJit.normalExit, gDvmJit.puntExit);
#endif
        LOGD("JIT: %d Translation chains", gDvmJit.translationChains);
#if defined(INVOKE_STATS)
        LOGD("JIT: Invoke: %d noOpt, %d chainable, %d return",
             gDvmJit.invokeNoOpt, gDvmJit.invokeChain, gDvmJit.returnOp);
#endif
    }
}
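
/*
 * Example of the summary line this produces (hypothetical numbers):
 *
 *   JIT: 120 traces, 512 slots, 13 chains, 16 maxQ, 200 thresh, Blocking
 *
 * i.e. 120 of 512 slots hold a trace, and 13 entries chain onward past
 * their primary hash slot.
 */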

/*
 * Final JIT shutdown.  Only do this once, and do not attempt to restart
 * the JIT later.
 */
void dvmJitShutdown(void)
{
    /* Shutdown the compiler thread */
    dvmCompilerShutdown();

    dvmCompilerDumpStats();

    dvmDestroyMutex(&gDvmJit.tableLock);

    if (gDvmJit.pJitEntryTable) {
        free(gDvmJit.pJitEntryTable);
        gDvmJit.pJitEntryTable = NULL;
    }

    if (gDvmJit.pProfTable) {
        free(gDvmJit.pProfTable);
        gDvmJit.pProfTable = NULL;
    }
}

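/*
 * Sketch of the jitState transitions driven by dvmCheckJit() below and
 * by dvmJitCheckTraceRequest() (derived from their switch statements;
 * kJitTSelectRequest is set elsewhere when a profile counter trips):
 *
 *   kJitNormal --(hot dPC)--> kJitTSelectRequest --> kJitTSelect
 *   kJitTSelect --(branch/switch/invoke/return, OP_THROW, kInstrNoJit,
 *                  or max trace length)--> kJitTSelectEnd --> kJitNormal
 *   kJitTSelect --(debugger/profiler active)--> kJitTSelectAbort --> kJitNormal
 *   kJitSingleStep --> kJitSingleStepEnd --> resume the full interpreter
 */
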
/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted.  This is the primary trace
 * selection function.  NOTE: return instructions are handled a little
 * differently.  In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation.  If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request.  This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected.  However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes.  This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 */
int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
{
    int flags,i,len;
    int switchInterp = false;
    int debugOrProfile = (gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
                          || gDvm.activeProfilers
#endif
                         );

    switch (interpState->jitState) {
        char* nopStr;
        int target;
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);
#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s", getOpcodeName(decInsn.opCode));
#endif
            flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
            len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, pc);
            offset = pc - interpState->method->insns;
            if ((flags & kInstrNoJit) == kInstrNoJit) {
                interpState->jitState = kJitTSelectEnd;
                break;
            } else {
                if (pc != interpState->currRunHead + interpState->currRunLen) {
                    int currTraceRun;
                    /* We need to start a new trace run */
                    currTraceRun = ++interpState->currTraceRun;
                    interpState->currRunLen = 0;
                    interpState->currRunHead = (u2*)pc;
                    interpState->trace[currTraceRun].frag.startOffset = offset;
                    interpState->trace[currTraceRun].frag.numInsts = 0;
                    interpState->trace[currTraceRun].frag.runEnd = false;
                    interpState->trace[currTraceRun].frag.hint = kJitHintNone;
                }
                interpState->trace[interpState->currTraceRun].frag.numInsts++;
                interpState->totalTraceLen++;
                interpState->currRunLen += len;
                if ( ((flags & kInstrUnconditional) == 0) &&
                     ((flags & (kInstrCanBranch |
                                kInstrCanSwitch |
                                kInstrCanReturn |
                                kInstrInvoke)) != 0)) {
                    interpState->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                    LOGD("TraceGen: ending on %s, basic block end",
                         getOpcodeName(decInsn.opCode));
#endif
                }
                if (decInsn.opCode == OP_THROW) {
                    interpState->jitState = kJitTSelectEnd;
                }
                if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                    interpState->jitState = kJitTSelectEnd;
                }
                if (debugOrProfile) {
                    interpState->jitState = kJitTSelectAbort;
                    switchInterp = !debugOrProfile;
                    break;
                }
                if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                    break;
                }
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                if (interpState->totalTraceLen == 0) {
                    switchInterp = !debugOrProfile;
                    break;
                }
                JitTraceDescription* desc =
                    (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                        sizeof(JitTraceRun) * (interpState->currTraceRun+1));
                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    interpState->jitState = kJitTSelectAbort;
                    switchInterp = !debugOrProfile;
                    break;
                }
                interpState->trace[interpState->currTraceRun].frag.runEnd =
                    true;
                interpState->jitState = kJitNormal;
                desc->method = interpState->method;
                memcpy((char*)&(desc->trace[0]),
                       (char*)&(interpState->trace[0]),
                       sizeof(JitTraceRun) * (interpState->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
#endif
                dvmCompilerWorkEnqueue(
                    interpState->currTraceHead, kWorkOrderTrace, desc);
                if (gDvmJit.blockingMode) {
                    dvmCompilerDrainQueue();
                }
                switchInterp = !debugOrProfile;
            }
            break;
        case kJitSingleStep:
            interpState->jitState = kJitSingleStepEnd;
            break;
        case kJitSingleStepEnd:
            interpState->entryPoint = kInterpEntryResume;
            switchInterp = !debugOrProfile;
            break;
        case kJitTSelectAbort:
#if defined(SHOW_TRACE)
            LOGD("TraceGen: trace abort");
#endif
            interpState->jitState = kJitNormal;
            switchInterp = !debugOrProfile;
            break;
        case kJitNormal:
            break;
        default:
            dvmAbort();
    }
    return switchInterp;
}
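
/*
 * Illustrative example (hypothetical offsets and sizes): a trace that
 * runs straight through instructions at code-unit offsets 0x10..0x16
 * and then branches to 0x30, ending on a return, would be recorded as
 * two JitTraceRuns:
 *
 *   trace[0]: frag = { startOffset=0x10, numInsts=4, runEnd=false }
 *   trace[1]: frag = { startOffset=0x30, numInsts=1, runEnd=true }
 *
 * A new run is opened whenever the incoming pc is not contiguous with
 * the current run (the currRunHead + currRunLen check above).
 */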

static inline struct JitEntry *findJitEntry(const u2* pc)
{
    int idx = dvmJitHash(pc);

    /* Expect a high hit rate on 1st shot */
    if (gDvmJit.pJitEntryTable[idx].dPC == pc)
        return &gDvmJit.pJitEntryTable[idx];
    else {
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].chain;
            if (gDvmJit.pJitEntryTable[idx].dPC == pc)
                return &gDvmJit.pJitEntryTable[idx];
        }
    }
    return NULL;
}
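
/*
 * Chain-walk example (hypothetical, jitTableSize == 8): entries hashing
 * to slot 2 might be linked as
 *
 *   table[2].chain == 5, table[5].chain == 8
 *
 * where 8 (== jitTableSize) is the end-of-chain marker.  Lookups probe
 * the primary slot first, then follow .chain indices until a match or
 * the marker is reached.
 */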

struct JitEntry *dvmFindJitEntry(const u2* pc)
{
    return findJitEntry(pc);
}

/*
 * Allocate an entry in a JitTable.  Assumes caller holds lock, if
 * applicable.  Normally used for table resizing.  Will complain (die)
 * if entry already exists in the table or if table is full.
 */
static struct JitEntry *allocateJitEntry(const u2* pc, struct JitEntry *table,
                                         u4 size)
{
    struct JitEntry *p;
    unsigned int idx;
    unsigned int prev;
    idx = dvmJitHashMask(pc, size-1);
    while ((table[idx].chain != size) && (table[idx].dPC != pc)) {
        idx = table[idx].chain;
    }
    assert(table[idx].dPC != pc);  /* Already there */
    if (table[idx].dPC == NULL) {
        /* use this slot */
        return &table[idx];
    }
    /* Find a free entry and chain it in */
    prev = idx;
    while (true) {
        idx++;
        if (idx == size)
            idx = 0;  /* Wraparound */
        if ((table[idx].dPC == NULL) || (idx == prev))
            break;
    }
    assert(idx != prev);
    table[prev].chain = idx;
    assert(table[idx].dPC == NULL);
    return &table[idx];
}

/*
 * If a translated code address exists for the Dalvik byte code
 * pointer, return it.  This routine needs to be fast.
 */
void* dvmJitGetCodeAddr(const u2* dPC)
{
    int idx = dvmJitHash(dPC);

    /* If anything is suspended, don't re-enter the code cache */
    if (gDvm.sumThreadSuspendCount > 0) {
        return NULL;
    }

    /* Expect a high hit rate on 1st shot */
    if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(EXIT_STATS)
        gDvmJit.addrLookupsFound++;
#endif
        return gDvmJit.pJitEntryTable[idx].codeAddress;
    } else {
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].chain;
            if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(EXIT_STATS)
                gDvmJit.addrLookupsFound++;
#endif
                return gDvmJit.pJitEntryTable[idx].codeAddress;
            }
        }
    }
#if defined(EXIT_STATS)
    gDvmJit.addrLookupsNotFound++;
#endif
    return NULL;
}

/*
 * Register the translated code pointer into the JitTable.
 * NOTE: Once a codeAddress field transitions from NULL to
 * JIT'd code, it must not be altered without first halting all
 * threads.
 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC) {
    struct JitEntry *jitEntry = findJitEntry(dPC);
    assert(jitEntry);
    /* Thumb code has odd PC */
    jitEntry->codeAddress = (void *) ((intptr_t) nPC | 1);
}
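
/*
 * Background for the "| 1" above (an ARM convention, noted here for
 * clarity): branching via BX to an address with the low bit set puts
 * the core in Thumb mode, so a Thumb translation at, say, 0x40001000
 * would be recorded as 0x40001001.  The code address itself is assumed
 * to be at least halfword-aligned.
 */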

/*
 * Determine if a valid trace-building request is active.  Return true
 * if we need to abort and switch back to the fast interpreter, false
 * otherwise.  NOTE: may be called even when trace selection is not being
 * requested.
 */

#define PROFILE_STALENESS_THRESHOLD 250000LL
bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
{
    bool res = false;         /* Assume no need to abort */
    if (gDvmJit.pJitEntryTable != NULL) {
        u8 delta = dvmGetRelativeTimeUsec() - lastProfileResetTimeUsec;
        /*
         * If the compiler is backlogged, or if a debugger or profiler is
         * active, cancel any JIT actions
         */
        if ( (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) ||
              gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
              || gDvm.activeProfilers
#endif
            ) {
            if (interpState->jitState != kJitOff) {
                interpState->jitState = kJitNormal;
            }
        } else if (delta > PROFILE_STALENESS_THRESHOLD) {
            resetProfileCounts();
            res = true;       /* Stale profile - abort */
        } else if (interpState->jitState == kJitTSelectRequest) {
            u4 chainEndMarker = gDvmJit.jitTableSize;
            u4 idx = dvmJitHash(interpState->pc);

            /* Walk the bucket chain to find an exact match for our PC */
            while ((gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) &&
                   (gDvmJit.pJitEntryTable[idx].dPC != interpState->pc)) {
                idx = gDvmJit.pJitEntryTable[idx].chain;
            }

            if (gDvmJit.pJitEntryTable[idx].dPC == interpState->pc) {
                /*
                 * Got a match.  This means a trace has already
                 * been requested for this address.  Bail back to
                 * mterp, which will check if the translation is ready
                 * for execution
                 */
                interpState->jitState = kJitTSelectAbort;
            } else {
                /*
                 * No match.  Acquire jitTableLock and find the last
                 * slot in the chain.  Possibly continue the chain walk in
                 * case some other thread allocated the slot we were looking
                 * at previously
                 */
                dvmLockMutex(&gDvmJit.tableLock);
                /*
                 * At this point, if .dPC is NULL, then the slot we're
                 * looking at is the target slot from the primary hash
                 * (the simple, and expected case).  Otherwise we're going
                 * to have to find a free slot and chain it.
                 */
                MEM_BARRIER();
                if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
                    u4 prev;
                    while (gDvmJit.pJitEntryTable[idx].chain != chainEndMarker) {
                        idx = gDvmJit.pJitEntryTable[idx].chain;
                    }
                    /* Here, idx should be pointing to the last cell of an
                     * active chain whose last member contains a valid dPC */
                    assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
                    /* Now, do a linear walk to find a free cell and add it to
                     * the end of this chain */
                    prev = idx;
                    while (true) {
                        idx++;
                        if (idx == chainEndMarker)
                            idx = 0;  /* Wraparound */
                        if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                            (idx == prev))
                            break;
                    }
                    if (idx != prev) {
                        /* Got it - chain */
                        gDvmJit.pJitEntryTable[prev].chain = idx;
                    }
                }
                if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
                    /* Allocate the slot */
                    gDvmJit.pJitEntryTable[idx].dPC = interpState->pc;
                    gDvmJit.jitTableEntriesUsed++;
                } else {
                    /*
                     * Table is full.  We could resize it, but that would
                     * be better handled by the translator thread.  It
                     * will be aware of how full the table is getting.
                     * Disable further profiling and continue.
                     */
                    interpState->jitState = kJitTSelectAbort;
                    LOGD("JIT: JitTable full, disabling profiling");
                    dvmJitStopTranslationRequests();
                }
                dvmUnlockMutex(&gDvmJit.tableLock);
            }
        }
        switch (interpState->jitState) {
            case kJitTSelectRequest:
                interpState->jitState = kJitTSelect;
                interpState->currTraceHead = interpState->pc;
                interpState->currTraceRun = 0;
                interpState->totalTraceLen = 0;
                interpState->currRunHead = interpState->pc;
                interpState->currRunLen = 0;
                interpState->trace[0].frag.startOffset =
                    interpState->pc - interpState->method->insns;
                interpState->trace[0].frag.numInsts = 0;
                interpState->trace[0].frag.runEnd = false;
                interpState->trace[0].frag.hint = kJitHintNone;
                break;
            case kJitTSelect:
            case kJitTSelectAbort:
                res = true;
                /* NOTE: intentional fallthrough */
            case kJitSingleStep:
            case kJitSingleStepEnd:
            case kJitOff:
            case kJitNormal:
                break;
            default:
                dvmAbort();
        }
    }
    return res;
}

/*
 * Resizes the JitTable.  The requested size must be a power of 2.
 * Returns true on failure.  Stops all threads, and thus is a
 * heavyweight operation.
 */
bool dvmJitResizeJitTable( unsigned int size )
{
    struct JitEntry *pNewTable;
    u4 newMask;
    unsigned int i;

    assert(gDvmJit.pJitEntryTable != NULL);
    assert(size && !(size & (size - 1)));   /* Is power of 2? */

    LOGD("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);

    newMask = size - 1;

    if (size <= gDvmJit.jitTableSize) {
        return true;
    }

    pNewTable = (struct JitEntry*)calloc(size, sizeof(*pNewTable));
    if (pNewTable == NULL) {
        return true;
    }
    for (i=0; i < size; i++) {
        pNewTable[i].chain = size;  /* Initialize chain termination */
    }

    /* Stop all other interpreting/jit'ng threads */
    dvmSuspendAllThreads(SUSPEND_FOR_JIT);

    /*
     * At this point, only the compiler thread may be in contention
     * for the jitEntryTable (it is not affected by the thread suspension).
     * Acquire the lock.
     */

    dvmLockMutex(&gDvmJit.tableLock);

    for (i=0; i < gDvmJit.jitTableSize; i++) {
        if (gDvmJit.pJitEntryTable[i].dPC) {
            struct JitEntry *p;
            p = allocateJitEntry(gDvmJit.pJitEntryTable[i].dPC,
                                 pNewTable, size);
            p->dPC = gDvmJit.pJitEntryTable[i].dPC;
            p->codeAddress = gDvmJit.pJitEntryTable[i].codeAddress;
        }
    }

    free(gDvmJit.pJitEntryTable);
    gDvmJit.pJitEntryTable = pNewTable;
    gDvmJit.jitTableSize = size;
    gDvmJit.jitTableMask = size - 1;

    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Restart the world */
    dvmResumeAllThreads(SUSPEND_FOR_JIT);

    return false;
}
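
/*
 * Usage sketch (hypothetical caller; doubling on overflow would be a
 * natural policy for the compiler thread, which can see how full the
 * table is getting):
 *
 *   if (dvmJitResizeJitTable(gDvmJit.jitTableSize * 2)) {
 *       LOGE("Jit: JitTable resize failed");
 *   }
 *
 * Note the inverted return convention: true means failure.
 */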

#endif /* WITH_JIT */