blob: 68137a090b89ff135340739c9042ffab4f16275d [file] [log] [blame]
Ben Chengba4fc8b2009-06-01 13:00:29 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#ifdef WITH_JIT
17
18/*
19 * Target independent portion of Android's Jit
20 */
21
22#include "Dalvik.h"
23#include "Jit.h"
24
25
26#include "dexdump/OpCodeNames.h"
27#include <unistd.h>
28#include <pthread.h>
29#include <sys/time.h>
30#include <signal.h>
31#include "compiler/Compiler.h"
Bill Buzbee6e963e12009-06-17 16:56:19 -070032#include "compiler/CompilerUtility.h"
33#include "compiler/CompilerIR.h"
Ben Chengba4fc8b2009-06-01 13:00:29 -070034#include <errno.h>
35
Jeff Hao97319a82009-08-12 16:57:15 -070036#if defined(WITH_SELF_VERIFICATION)
37/* Allocate space for per-thread ShadowSpace data structures */
38void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
39{
40 self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
41 if (self->shadowSpace == NULL)
42 return NULL;
43
44 self->shadowSpace->registerSpaceSize = REG_SPACE;
45 self->shadowSpace->registerSpace =
46 (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));
47
48 return self->shadowSpace->registerSpace;
49}
50
51/* Free per-thread ShadowSpace data structures */
52void dvmSelfVerificationShadowSpaceFree(Thread* self)
53{
54 free(self->shadowSpace->registerSpace);
55 free(self->shadowSpace);
56}
57
/*
 * Save out PC, FP, InterpState, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 *
 * Called when entering a translated region with self-verification enabled:
 * the interpreter state and the relevant slice of the Dalvik stack are
 * copied aside so the trace can later be re-executed by the debug
 * interpreter and the two results compared.
 */
void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
                                   void* interpStatePtr)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
    InterpState *interpState = (InterpState *) interpStatePtr;
    /* Bytes copied from below the FP (save area + outgoing args, 4 bytes
     * per virtual register) and at/above the FP (this method's registers). */
    int preBytes = interpState->method->outsSize*4 + sizeof(StackSaveArea);
    int postBytes = interpState->method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    /* A new trace should only start from the idle state; anything else
     * means the previous trace never reached its matching Restore. */
    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("* PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    // Dynamically grow shadow register space if necessary
    // NOTE(review): this calloc result is unchecked; an OOM here would
    // crash in the memcpy below -- TODO confirm whether that's acceptable.
    while (preBytes + postBytes > shadowSpace->registerSpaceSize) {
        shadowSpace->registerSpaceSize *= 2;
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(int));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->glue = interpStatePtr;
    /* Place the shadow frame at the top end of the shadow register space */
    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;

    // Create a copy of the InterpState, redirected at the shadow frame
    memcpy(&(shadowSpace->interpState), interpStatePtr, sizeof(InterpState));
    shadowSpace->interpState.fp = shadowSpace->shadowFP;
    shadowSpace->interpState.interpStackEnd = (u1*)shadowSpace->registerSpace;

    // Create a copy of the stack (save area + args + registers)
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space (empty: tail == head)
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}
114
115/*
116 * Save ending PC, FP and compiled code exit point to shadow space.
117 * Return a pointer to the shadow space for JIT to restore state.
118 */
119void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
120 SelfVerificationState exitPoint)
121{
122 Thread *self = dvmThreadSelf();
123 ShadowSpace *shadowSpace = self->shadowSpace;
124 shadowSpace->endPC = pc;
125 shadowSpace->endShadowFP = fp;
126
127 //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
128 // self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
129 // (int)pc);
130
131 if (shadowSpace->selfVerificationState != kSVSStart) {
132 LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
133 self->threadId, shadowSpace->selfVerificationState);
134 LOGD("********** SHADOW STATE DUMP **********");
135 LOGD("* Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
136 (int)shadowSpace->endPC);
137 LOGD("* Interp FP: 0x%x", (int)shadowSpace->fp);
138 LOGD("* Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
139 (int)shadowSpace->endShadowFP);
140 }
141
142 // Special case when punting after a single instruction
143 if (exitPoint == kSVSPunt && pc == shadowSpace->startPC) {
144 shadowSpace->selfVerificationState = kSVSIdle;
145 } else {
146 shadowSpace->selfVerificationState = exitPoint;
147 }
148
149 return shadowSpace;
150}
151
/*
 * Dump a range of virtual registers to the log, one word per line.
 * addr points at the first register (v0); numWords registers are printed.
 */
static void selfVerificationPrintRegisters(int* addr, int numWords)
{
    int i;
    for (i = 0; i < numWords; i++) {
        /* Fix: zero-pad the value ("%08x") -- the original "0x%8x" printed
         * spaces between the "0x" prefix and the hex digits. */
        LOGD("* 0x%x: (v%d) 0x%08x", (int)(addr+i), i, *(addr+i));
    }
}
160
/*
 * Print values maintained in shadowSpace: current/start/end PCs, interp
 * and shadow frame pointers, frame sizes, and trace length/state.
 * Pointer arithmetic is done via int casts -- assumes 32-bit pointers.
 */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    /* Size of the shadowed frame: shadowFP to the end of the register space */
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    /* curFrame below the saved fp means an invoke pushed a second frame */
    if (self->curFrame < shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("* CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("* Class: %s Method: %s", stackSave->method->clazz->descriptor,
        stackSave->method->name);
    LOGD("* Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("* Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("* Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("* Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("* Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}
192
193/* Print decoded instructions in the current trace */
194static void selfVerificationDumpTrace(const u2* pc, Thread* self)
195{
196 ShadowSpace* shadowSpace = self->shadowSpace;
197 StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
Ben Chengbcdc1de2009-08-21 16:18:46 -0700198 int i, addr, offset;
199 DecodedInstruction *decInsn;
Jeff Hao97319a82009-08-12 16:57:15 -0700200
201 LOGD("********** SHADOW TRACE DUMP **********");
202 for (i = 0; i < shadowSpace->traceLength; i++) {
Ben Chengbcdc1de2009-08-21 16:18:46 -0700203 addr = shadowSpace->trace[i].addr;
204 offset = (int)((u2*)addr - stackSave->method->insns);
205 decInsn = &(shadowSpace->trace[i].decInsn);
206 /* Not properly decoding instruction, some registers may be garbage */
207 LOGD("* 0x%x: (0x%04x) %s v%d, v%d, v%d", addr, offset,
208 getOpcodeName(decInsn->opCode), decInsn->vA, decInsn->vB,
209 decInsn->vC);
Jeff Hao97319a82009-08-12 16:57:15 -0700210 }
211}
212
Ben Chengbcdc1de2009-08-21 16:18:46 -0700213/* Code is forced into this spin loop when a divergence is detected */
214static void selfVerificationSpinLoop()
215{
216 gDvmJit.selfVerificationSpin = true;
217 while(gDvmJit.selfVerificationSpin) sleep(10);
218}
219
/*
 * Manage self verification while in the debug interpreter.
 *
 * Called once per instruction while re-executing a saved trace.  When the
 * end of the trace is reached, compares the debug interpreter's registers,
 * any newly pushed frame, and the shadowed heap writes against what the
 * JIT-compiled code produced; any mismatch parks the thread in
 * selfVerificationSpinLoop() after dumping diagnostics.
 *
 * Returns true when verification of this trace is finished (match checked
 * or divergence detected), false while still stepping through the trace.
 */
static bool selfVerificationDebugInterp(const u2* pc, Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    SelfVerificationState state = shadowSpace->selfVerificationState;

    DecodedInstruction decInsn;
    dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);

    //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, getOpcodeName(decInsn.opCode));

    /* SaveState/RestoreState should have run before we get here */
    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /* Skip endPC once when trace has a backward branch */
    if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
        state != kSVSBackwardBranch) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /* Check that the current pc is the end of the trace */
    if ((state == kSVSSingleStep || state == kSVSDebugInterp) &&
        pc == shadowSpace->endPC) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space: interp frame vs shadow frame */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            LOGD("~~~ DbgIntp(%d): REGISTERS UNEQUAL!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp, frameBytes/4);
            LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                frameBytes/4);
            selfVerificationSpinLoop();
        }
        /* Check new frame if it exists (invokes only) */
        if (self->curFrame < shadowSpace->fp) {
            StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->curFrame - localRegs;
            /* Skip the (uninitialized) locals; compare ins/save area only */
            if (memcmp(((char*)self->curFrame)+localRegs,
                ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) UNEQUAL!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
                    (int)self->curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->curFrame,
                    (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                    (frameBytes2+localRegs)/4);
                selfVerificationSpinLoop();
            }
        }

        /* Check memory space: re-read each shadowed heap address and compare
         * against the value the JIT-compiled code recorded */
        bool memDiff = false;
        ShadowHeap* heapSpacePtr;
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int memData = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != memData) {
                LOGD("~~~ DbgIntp(%d): MEMORY UNEQUAL!", self->threadId);
                LOGD("* Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
                    heapSpacePtr->addr, memData, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                memDiff = true;
            }
        }
        /* Report all mismatches before parking */
        if (memDiff) selfVerificationSpinLoop();
        return true;

    /* If end not been reached, make sure max length not exceeded */
    } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("* startPC: 0x%x endPC: 0x%x currPC: 0x%x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
        selfVerificationSpinLoop();

        return true;
    }
    /* Log the instruction address and decoded instruction for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    shadowSpace->traceLength++;

    return false;
}
330#endif
331
/*
 * One-time JIT initialization: starts the compiler thread and, when
 * execution mode is kExecutionModeJit, allocates the JitEntry hash table
 * and the global profile table.
 *
 * Returns true on success.  On an allocation failure, res is false but the
 * partially-initialized globals are still published under the table lock
 * (a NULL pJitProfTable disables profiling).
 */
int dvmJitStartup(void)
{
    unsigned int i;
    bool res = true;  /* Assume success */

    // Create the compiler thread and set up miscellaneous chores
    res &= dvmCompilerStartup();

    dvmInitMutex(&gDvmJit.tableLock);
    if (res && gDvm.executionMode == kExecutionModeJit) {
        JitEntry *pJitTable = NULL;
        unsigned char *pJitProfTable = NULL;
        // Table size must be a power of 2 (mask-based hashing below)
        assert(gDvmJit.jitTableSize &&
               !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));
        dvmLockMutex(&gDvmJit.tableLock);
        pJitTable = (JitEntry*)
                    calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
        if (!pJitTable) {
            LOGE("jit table allocation failed\n");
            res = false;
            goto done;
        }
        /*
         * NOTE: the profile table must only be allocated once, globally.
         * Profiling is turned on and off by nulling out gDvm.pJitProfTable
         * and then restoring its original value. However, this action
         * is not synchronized for speed so threads may continue to hold
         * and update the profile table after profiling has been turned
         * off by nulling the global pointer. Be aware.
         */
        pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
        if (!pJitProfTable) {
            /* NOTE(review): pJitTable is still published below in this
             * case -- the table exists but profiling stays disabled. */
            LOGE("jit prof table allocation failed\n");
            res = false;
            goto done;
        }
        memset(pJitProfTable,0,JIT_PROF_SIZE);
        /* jitTableSize is the out-of-band "end of chain" marker */
        for (i=0; i < gDvmJit.jitTableSize; i++) {
           pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
        }
        /* Is chain field wide enough for termination pattern? */
        assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

done:
        gDvmJit.pJitEntryTable = pJitTable;
        gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
        gDvmJit.jitTableEntriesUsed = 0;
        gDvmJit.pProfTableCopy = gDvmJit.pProfTable = pJitProfTable;
        dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return res;
}
385
386/*
387 * If one of our fixed tables or the translation buffer fills up,
388 * call this routine to avoid wasting cycles on future translation requests.
389 */
390void dvmJitStopTranslationRequests()
391{
392 /*
393 * Note 1: This won't necessarily stop all translation requests, and
394 * operates on a delayed mechanism. Running threads look to the copy
395 * of this value in their private InterpState structures and won't see
396 * this change until it is refreshed (which happens on interpreter
397 * entry).
398 * Note 2: This is a one-shot memory leak on this table. Because this is a
399 * permanent off switch for Jit profiling, it is a one-time leak of 1K
400 * bytes, and no further attempt will be made to re-allocate it. Can't
401 * free it because some thread may be holding a reference.
402 */
403 gDvmJit.pProfTable = gDvmJit.pProfTableCopy = NULL;
404}
405
406#if defined(EXIT_STATS)
407/* Convenience function to increment counter from assembly code */
408void dvmBumpNoChain()
409{
410 gDvm.jitNoChainExit++;
411}
412
413/* Convenience function to increment counter from assembly code */
414void dvmBumpNormal()
415{
416 gDvm.jitNormalExit++;
417}
418
419/* Convenience function to increment counter from assembly code */
420void dvmBumpPunt(int from)
421{
422 gDvm.jitPuntExit++;
423}
424#endif
425
/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;      /* slots holding a trace head */
    int not_hit;  /* empty slots */
    int chains;   /* slots linked into a collision chain */
    if (gDvmJit.pJitEntryTable) {
        /* Single pass over the table; jitTableSize doubles as the
         * end-of-chain marker, so any other chain value means "chained". */
        for (i=0, chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0)
                hit++;
            else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD(
         "JIT: %d traces, %d slots, %d chains, %d maxQ, %d thresh, %s",
         hit, not_hit + hit, chains, gDvmJit.compilerMaxQueued,
         gDvmJit.threshold, gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
#if defined(EXIT_STATS)
        LOGD(
         "JIT: Lookups: %d hits, %d misses; %d NoChain, %d normal, %d punt",
         gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
         gDvmJit.noChainExit, gDvmJit.normalExit, gDvmJit.puntExit);
#endif
        LOGD("JIT: %d Translation chains", gDvmJit.translationChains);
#if defined(INVOKE_STATS)
        LOGD("JIT: Invoke: %d chainable, %d pred. chain, %d native, "
             "%d return",
             gDvmJit.invokeChain, gDvmJit.invokePredictedChain,
             gDvmJit.invokeNative, gDvmJit.returnOp);
#endif
        if (gDvmJit.profile) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}
466
Bill Buzbee716f1202009-07-23 13:22:09 -0700467
Ben Chengba4fc8b2009-06-01 13:00:29 -0700468/*
469 * Final JIT shutdown. Only do this once, and do not attempt to restart
470 * the JIT later.
471 */
472void dvmJitShutdown(void)
473{
474 /* Shutdown the compiler thread */
475 dvmCompilerShutdown();
476
477 dvmCompilerDumpStats();
478
479 dvmDestroyMutex(&gDvmJit.tableLock);
480
481 if (gDvmJit.pJitEntryTable) {
482 free(gDvmJit.pJitEntryTable);
483 gDvmJit.pJitEntryTable = NULL;
484 }
485
486 if (gDvmJit.pProfTable) {
487 free(gDvmJit.pProfTable);
488 gDvmJit.pProfTable = NULL;
489 }
490}
491
/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted. This is the primary trace
 * selection function. NOTE: return instructions are handled a little
 * differently. In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation. If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request. This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected. However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes. This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 */
int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
{
    int flags,i,len;
    int switchInterp = false;  /* return value: leave the debug interp? */
    /* Trace building is suppressed while a debugger/profiler is attached
     * or while any thread-suspend request is pending */
    int debugOrProfile = (gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
                          || gDvm.activeProfilers
#endif
                         );
    /* Prepare to handle last PC and stage the current PC */
    const u2 *lastPC = interpState->lastPC;
    interpState->lastPC = pc;

    switch (interpState->jitState) {
        char* nopStr;   /* NOTE(review): unused */
        int target;     /* NOTE(review): unused */
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(gDvm.instrFormat, lastPC, &decInsn);
#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s",getOpcodeName(decInsn.opCode));
#endif
            flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
            len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, lastPC);
            offset = lastPC - interpState->method->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(interpState->method));
            /* Non-contiguous with the current run: open a new trace run */
            if (lastPC != interpState->currRunHead + interpState->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++interpState->currTraceRun;
                interpState->currRunLen = 0;
                interpState->currRunHead = (u2*)lastPC;
                interpState->trace[currTraceRun].frag.startOffset = offset;
                interpState->trace[currTraceRun].frag.numInsts = 0;
                interpState->trace[currTraceRun].frag.runEnd = false;
                interpState->trace[currTraceRun].frag.hint = kJitHintNone;
            }
            interpState->trace[interpState->currTraceRun].frag.numInsts++;
            interpState->totalTraceLen++;
            interpState->currRunLen += len;

            /* Will probably never hit this with the current trace builder */
            if (interpState->currTraceRun == (MAX_JIT_RUN_LEN - 1)) {
                interpState->jitState = kJitTSelectEnd;
            }

            /* End the trace at a basic-block-ending instruction... */
            if ( ((flags & kInstrUnconditional) == 0) &&
                 /* don't end trace on INVOKE_DIRECT_EMPTY  */
                 (decInsn.opCode != OP_INVOKE_DIRECT_EMPTY) &&
                 ((flags & (kInstrCanBranch |
                            kInstrCanSwitch |
                            kInstrCanReturn |
                            kInstrInvoke)) != 0)) {
                interpState->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     getOpcodeName(decInsn.opCode));
#endif
            }
            /* ...or on a throw... */
            if (decInsn.opCode == OP_THROW) {
                interpState->jitState = kJitTSelectEnd;
            }
            /* ...or when the maximum trace length is reached */
            if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                interpState->jitState = kJitTSelectEnd;
            }
            /* Abort outright if a debugger/profiler attached mid-trace */
            if (debugOrProfile) {
                interpState->jitState = kJitTSelectAbort;
                switchInterp = !debugOrProfile;
                break;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                /* Empty trace - nothing to compile */
                if (interpState->totalTraceLen == 0) {
                    switchInterp = !debugOrProfile;
                    break;
                }
                /* Trailing flexible array holds one JitTraceRun per run */
                JitTraceDescription* desc =
                   (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                     sizeof(JitTraceRun) * (interpState->currTraceRun+1));
                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    interpState->jitState = kJitTSelectAbort;
                    switchInterp = !debugOrProfile;
                    break;
                }
                interpState->trace[interpState->currTraceRun].frag.runEnd =
                     true;
                interpState->jitState = kJitNormal;
                desc->method = interpState->method;
                memcpy((char*)&(desc->trace[0]),
                    (char*)&(interpState->trace[0]),
                    sizeof(JitTraceRun) * (interpState->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
#endif
                /* Hand the completed request to the compiler thread;
                 * ownership of desc transfers with the work order */
                dvmCompilerWorkEnqueue(
                       interpState->currTraceHead,kWorkOrderTrace,desc);
                if (gDvmJit.blockingMode) {
                    dvmCompilerDrainQueue();
                }
                switchInterp = !debugOrProfile;
            }
            break;
        case kJitSingleStep:
            interpState->jitState = kJitSingleStepEnd;
            break;
        case kJitSingleStepEnd:
            interpState->entryPoint = kInterpEntryResume;
            switchInterp = !debugOrProfile;
            break;
        case kJitTSelectAbort:
#if defined(SHOW_TRACE)
            LOGD("TraceGen: trace abort");
#endif
            interpState->jitState = kJitNormal;
            switchInterp = !debugOrProfile;
            break;
        case kJitNormal:
            switchInterp = !debugOrProfile;
            break;
#if defined(WITH_SELF_VERIFICATION)
        case kJitSelfVerification:
            /* Stay in the debug interpreter until the trace is verified */
            if (selfVerificationDebugInterp(pc, self)) {
                interpState->jitState = kJitNormal;
                switchInterp = !debugOrProfile;
            }
            break;
#endif
        default:
            /* An unexpected state is only tolerated while debugging */
            if (!debugOrProfile) {
                LOGE("Unexpected JIT state: %d", interpState->jitState);
                dvmAbort();
            }
            break;
    }
    return switchInterp;
}
655
Bill Buzbee716f1202009-07-23 13:22:09 -0700656static inline JitEntry *findJitEntry(const u2* pc)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700657{
658 int idx = dvmJitHash(pc);
659
660 /* Expect a high hit rate on 1st shot */
661 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
662 return &gDvmJit.pJitEntryTable[idx];
663 else {
Bill Buzbee27176222009-06-09 09:20:16 -0700664 int chainEndMarker = gDvmJit.jitTableSize;
Bill Buzbee716f1202009-07-23 13:22:09 -0700665 while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
666 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700667 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
668 return &gDvmJit.pJitEntryTable[idx];
669 }
670 }
671 return NULL;
672}
673
Bill Buzbee716f1202009-07-23 13:22:09 -0700674JitEntry *dvmFindJitEntry(const u2* pc)
Bill Buzbee27176222009-06-09 09:20:16 -0700675{
676 return findJitEntry(pc);
677}
678
/*
 * If a translated code address exists for the dalvik byte code
 * pointer, return it. This routine needs to be fast.
 */
683void* dvmJitGetCodeAddr(const u2* dPC)
684{
685 int idx = dvmJitHash(dPC);
686
Bill Buzbee46cd5b62009-06-05 15:36:06 -0700687 /* If anything is suspended, don't re-enter the code cache */
688 if (gDvm.sumThreadSuspendCount > 0) {
689 return NULL;
690 }
691
Ben Chengba4fc8b2009-06-01 13:00:29 -0700692 /* Expect a high hit rate on 1st shot */
693 if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
694#if defined(EXIT_STATS)
695 gDvmJit.addrLookupsFound++;
696#endif
697 return gDvmJit.pJitEntryTable[idx].codeAddress;
698 } else {
Bill Buzbee27176222009-06-09 09:20:16 -0700699 int chainEndMarker = gDvmJit.jitTableSize;
Bill Buzbee716f1202009-07-23 13:22:09 -0700700 while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
701 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700702 if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
703#if defined(EXIT_STATS)
704 gDvmJit.addrLookupsFound++;
705#endif
706 return gDvmJit.pJitEntryTable[idx].codeAddress;
707 }
708 }
709 }
710#if defined(EXIT_STATS)
711 gDvmJit.addrLookupsNotFound++;
712#endif
713 return NULL;
714}
715
716/*
Bill Buzbee716f1202009-07-23 13:22:09 -0700717 * Find an entry in the JitTable, creating if necessary.
718 * Returns null if table is full.
719 */
JitEntry *dvmJitLookupAndAdd(const u2* dPC)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /* Walk the bucket chain to find an exact match for our PC
     * (lock-free: safe because slots/chains are only ever appended) */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           (gDvmJit.pJitEntryTable[idx].dPC != dPC)) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
        /*
         * No match. Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case). Otherwise we're going
         * to have to find a free slot and chain it.
         */
        MEM_BARRIER(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            /* Re-walk the chain under the lock in case it grew */
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
                    /* Another thread got there first for this dPC */
                    dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0; /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (!ATOMIC_CMP_SWAP(
                         &gDvmJit.pJitEntryTable[prev].u.infoWord,
                         oldValue.infoWord, newValue.infoWord));
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            /* Allocate the slot */
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full (walk wrapped all the way around) */
            idx = chainEndMarker;
        }
        dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}
798/*
Ben Chengba4fc8b2009-06-01 13:00:29 -0700799 * Register the translated code pointer into the JitTable.
800 * NOTE: Once a codeAddress field transitions from NULL to
801 * JIT'd code, it must not be altered without first halting all
Bill Buzbee716f1202009-07-23 13:22:09 -0700802 * threads. This routine should only be called by the compiler
803 * thread.
Ben Chengba4fc8b2009-06-01 13:00:29 -0700804 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    /* Find the existing JitTable entry for dPC, or allocate one */
    JitEntry *jitEntry = dvmJitLookupAndAdd(dPC);
    assert(jitEntry);
    /*
     * Note: order of update is important.  The instructionSet bit-field is
     * committed first, and codeAddress is stored only afterwards, so that
     * a reader observing a non-NULL codeAddress also sees the instruction
     * set that goes with it (see routine header: codeAddress transitions
     * from NULL to JIT'd code exactly once).
     *
     * The CAS retry loop is needed because instructionSet is packed into
     * infoWord together with other fields (chain, traceRequested) that
     * other threads may update concurrently.
     */
    do {
        oldValue = jitEntry->u;
        newValue = oldValue;
        newValue.info.instructionSet = set;
    } while (!ATOMIC_CMP_SWAP(
                 &jitEntry->u.infoWord,
                 oldValue.infoWord, newValue.infoWord));
    /* Publish the translated code address last */
    jitEntry->codeAddress = nPC;
}
820
821/*
 * Determine if a valid trace-building request is active.  Return true
823 * if we need to abort and switch back to the fast interpreter, false
824 * otherwise. NOTE: may be called even when trace selection is not being
825 * requested
826 */
827
bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
{
    bool res = false;         /* Assume no need to abort to fast interpreter */
    int i;
    if (gDvmJit.pJitEntryTable != NULL) {
        /*
         * Two-level filtering scheme: a trace request only proceeds for a
         * pc already present in threshFilter (i.e. one that has tripped
         * the threshold before).  A first sighting is merely recorded in
         * the filter; res == true below forces the cancel path this time.
         */
        for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
            if (interpState->pc == interpState->threshFilter[i]) {
                break;
            }
        }
        if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
            /*
             * Use random replacement policy - otherwise we could miss a large
             * loop that contains more traces than the size of our filter array.
             */
            i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
            interpState->threshFilter[i] = interpState->pc;
            res = true;
        }
        /*
         * If the compiler is backlogged, or if a debugger or profiler is
         * active, cancel any JIT actions
         */
        if ( res || (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) ||
              gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
                 || gDvm.activeProfilers
#endif
                                             ) {
            if (interpState->jitState != kJitOff) {
                /* Demote any pending trace-selection request */
                interpState->jitState = kJitNormal;
            }
        } else if (interpState->jitState == kJitTSelectRequest) {
            /* Find or allocate the JitTable slot for this trace head */
            JitEntry *slot = dvmJitLookupAndAdd(interpState->pc);
            if (slot == NULL) {
                /*
                 * Table is full.  This should have been
                 * detected by the compiler thread and the table
                 * resized before we run into it here.  Assume bad things
                 * are afoot and disable profiling.
                 */
                interpState->jitState = kJitTSelectAbort;
                LOGD("JIT: JitTable full, disabling profiling");
                dvmJitStopTranslationRequests();
            } else if (slot->u.info.traceRequested) {
                /* Trace already requested - revert to interpreter */
                interpState->jitState = kJitTSelectAbort;
            } else {
                /*
                 * Mark request.  CAS loop is required because the other
                 * bit-fields sharing infoWord (e.g. chain) may be updated
                 * concurrently by other threads.
                 */
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                do {
                    oldValue = slot->u;
                    newValue = oldValue;
                    newValue.info.traceRequested = true;
                } while (!ATOMIC_CMP_SWAP( &slot->u.infoWord,
                         oldValue.infoWord, newValue.infoWord));
            }
        }
        /* Act on the (possibly updated) JIT state */
        switch (interpState->jitState) {
            case kJitTSelectRequest:
                /* Begin trace selection: initialize per-trace state */
                interpState->jitState = kJitTSelect;
                interpState->currTraceHead = interpState->pc;
                interpState->currTraceRun = 0;
                interpState->totalTraceLen = 0;
                interpState->currRunHead = interpState->pc;
                interpState->currRunLen = 0;
                interpState->trace[0].frag.startOffset =
                     interpState->pc - interpState->method->insns;
                interpState->trace[0].frag.numInsts = 0;
                interpState->trace[0].frag.runEnd = false;
                interpState->trace[0].frag.hint = kJitHintNone;
                interpState->lastPC = 0;
                break;
            case kJitTSelect:
            case kJitTSelectAbort:
                res = true;
                /* fallthrough - these states also just break */
            case kJitSingleStep:
            case kJitSingleStepEnd:
            case kJitOff:
            case kJitNormal:
#if defined(WITH_SELF_VERIFICATION)
            case kJitSelfVerification:
#endif
                break;
            default:
                LOGE("Unexpected JIT state: %d", interpState->jitState);
                dvmAbort();
        }
    }
    return res;
}
921
Bill Buzbee27176222009-06-09 09:20:16 -0700922/*
923 * Resizes the JitTable. Must be a power of 2, and returns true on failure.
924 * Stops all threads, and thus is a heavyweight operation.
925 */
926bool dvmJitResizeJitTable( unsigned int size )
927{
Bill Buzbee716f1202009-07-23 13:22:09 -0700928 JitEntry *pNewTable;
929 JitEntry *pOldTable;
Bill Buzbee27176222009-06-09 09:20:16 -0700930 u4 newMask;
Bill Buzbee716f1202009-07-23 13:22:09 -0700931 unsigned int oldSize;
Bill Buzbee27176222009-06-09 09:20:16 -0700932 unsigned int i;
933
Ben Cheng3f02aa42009-08-14 13:52:09 -0700934 assert(gDvmJit.pJitEntryTable != NULL);
Bill Buzbee27176222009-06-09 09:20:16 -0700935 assert(size && !(size & (size - 1))); /* Is power of 2? */
936
937 LOGD("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);
938
939 newMask = size - 1;
940
941 if (size <= gDvmJit.jitTableSize) {
942 return true;
943 }
944
Bill Buzbee716f1202009-07-23 13:22:09 -0700945 pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
Bill Buzbee27176222009-06-09 09:20:16 -0700946 if (pNewTable == NULL) {
947 return true;
948 }
949 for (i=0; i< size; i++) {
Bill Buzbee716f1202009-07-23 13:22:09 -0700950 pNewTable[i].u.info.chain = size; /* Initialize chain termination */
Bill Buzbee27176222009-06-09 09:20:16 -0700951 }
952
953 /* Stop all other interpreting/jit'ng threads */
954 dvmSuspendAllThreads(SUSPEND_FOR_JIT);
955
Bill Buzbee716f1202009-07-23 13:22:09 -0700956 pOldTable = gDvmJit.pJitEntryTable;
957 oldSize = gDvmJit.jitTableSize;
Bill Buzbee27176222009-06-09 09:20:16 -0700958
959 dvmLockMutex(&gDvmJit.tableLock);
Bill Buzbee27176222009-06-09 09:20:16 -0700960 gDvmJit.pJitEntryTable = pNewTable;
961 gDvmJit.jitTableSize = size;
962 gDvmJit.jitTableMask = size - 1;
Bill Buzbee716f1202009-07-23 13:22:09 -0700963 gDvmJit.jitTableEntriesUsed = 0;
Bill Buzbee27176222009-06-09 09:20:16 -0700964 dvmUnlockMutex(&gDvmJit.tableLock);
965
Bill Buzbee716f1202009-07-23 13:22:09 -0700966 for (i=0; i < oldSize; i++) {
967 if (pOldTable[i].dPC) {
968 JitEntry *p;
969 u2 chain;
970 p = dvmJitLookupAndAdd(pOldTable[i].dPC);
971 p->dPC = pOldTable[i].dPC;
972 /*
973 * Compiler thread may have just updated the new entry's
974 * code address field, so don't blindly copy null.
975 */
976 if (pOldTable[i].codeAddress != NULL) {
977 p->codeAddress = pOldTable[i].codeAddress;
978 }
979 /* We need to preserve the new chain field, but copy the rest */
980 dvmLockMutex(&gDvmJit.tableLock);
981 chain = p->u.info.chain;
982 p->u = pOldTable[i].u;
983 p->u.info.chain = chain;
984 dvmUnlockMutex(&gDvmJit.tableLock);
985 }
986 }
987
988 free(pOldTable);
989
Bill Buzbee27176222009-06-09 09:20:16 -0700990 /* Restart the world */
991 dvmResumeAllThreads(SUSPEND_FOR_JIT);
992
993 return false;
994}
995
Bill Buzbee50a6bf22009-07-08 13:08:04 -0700996/*
997 * Float/double conversion requires clamping to min and max of integer form. If
998 * target doesn't support this normally, use these.
999 */
1000s8 dvmJitd2l(double d)
1001{
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001002 static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
1003 static const double kMinLong = (double)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001004 if (d >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001005 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001006 else if (d <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001007 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001008 else if (d != d) // NaN case
1009 return 0;
1010 else
1011 return (s8)d;
1012}
1013
1014s8 dvmJitf2l(float f)
1015{
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001016 static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
1017 static const float kMinLong = (float)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001018 if (f >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001019 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001020 else if (f <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001021 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001022 else if (f != f) // NaN case
1023 return 0;
1024 else
1025 return (s8)f;
1026}
1027
Bill Buzbee27176222009-06-09 09:20:16 -07001028
Ben Chengba4fc8b2009-06-01 13:00:29 -07001029#endif /* WITH_JIT */