blob: e920cfc1b62b398e18b83588ecdfdfc5d887dcd1 [file] [log] [blame]
Ben Chengba4fc8b2009-06-01 13:00:29 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#ifdef WITH_JIT
17
18/*
19 * Target independent portion of Android's Jit
20 */
21
22#include "Dalvik.h"
23#include "Jit.h"
24
25
Andy McFaddenc6b25c72010-06-22 11:01:20 -070026#include "libdex/OpCodeNames.h"
Ben Chengba4fc8b2009-06-01 13:00:29 -070027#include <unistd.h>
28#include <pthread.h>
29#include <sys/time.h>
30#include <signal.h>
31#include "compiler/Compiler.h"
Bill Buzbee6e963e12009-06-17 16:56:19 -070032#include "compiler/CompilerUtility.h"
33#include "compiler/CompilerIR.h"
Ben Chengba4fc8b2009-06-01 13:00:29 -070034#include <errno.h>
35
Jeff Hao97319a82009-08-12 16:57:15 -070036#if defined(WITH_SELF_VERIFICATION)
37/* Allocate space for per-thread ShadowSpace data structures */
38void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
39{
40 self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
41 if (self->shadowSpace == NULL)
42 return NULL;
43
44 self->shadowSpace->registerSpaceSize = REG_SPACE;
45 self->shadowSpace->registerSpace =
46 (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));
47
48 return self->shadowSpace->registerSpace;
49}
50
51/* Free per-thread ShadowSpace data structures */
52void dvmSelfVerificationShadowSpaceFree(Thread* self)
53{
54 free(self->shadowSpace->registerSpace);
55 free(self->shadowSpace);
56}
57
/*
 * Save out PC, FP, InterpState, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 *
 * Called on entry to a translation when self-verification is enabled.
 * Captures a snapshot of the interpreter state (PC, FP, InterpState, and
 * the relevant slice of the Dalvik stack) so the debug interpreter can
 * later replay the trace and compare results.  The JIT then runs against
 * the *shadow* copy, leaving the real frame untouched for the replay.
 */
void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
                                   InterpState* interpState, int targetTrace)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
    /* Bytes below FP: outgoing args + save area; bytes above: local regs */
    unsigned preBytes = interpState->method->outsSize*4 + sizeof(StackSaveArea);
    unsigned postBytes = interpState->method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    /* A save while not idle means the previous trace never completed the
     * save/restore/replay cycle -- log it but carry on. */
    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    /* Normalize resume entries so the copied InterpState replays cleanly */
    if (interpState->entryPoint == kInterpEntryResume) {
        interpState->entryPoint = kInterpEntryInstr;
#if 0
        /* Tracking the success rate of resume after single-stepping */
        if (interpState->jitResumeDPC == pc) {
            LOGD("SV single step resumed at %p", pc);
        }
        else {
            LOGD("real %p DPC %p NPC %p", pc, interpState->jitResumeDPC,
                 interpState->jitResumeNPC);
        }
#endif
    }

    // Dynamically grow shadow register space if necessary
    // NOTE(review): the calloc result is not checked here; an OOM would
    // crash in the memcpy below -- confirm whether that is acceptable for
    // this debug-only build.
    if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->glue = interpState;
    /*
     * Store the original method here in case the trace ends with a
     * return/invoke, the last method.
     */
    shadowSpace->method = interpState->method;
    /* Place the shadow frame at the top of the shadow register space */
    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;

    // Create a copy of the InterpState, redirected at the shadow frame
    memcpy(&(shadowSpace->interpState), interpState, sizeof(InterpState));
    shadowSpace->interpState.fp = shadowSpace->shadowFP;
    shadowSpace->interpState.interpStackEnd = (u1*)shadowSpace->registerSpace;

    // Create a copy of the stack (outs + save area below FP, regs above)
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}
132
/*
 * Save ending PC, FP and compiled code exit point to shadow space.
 * Return a pointer to the shadow space for JIT to restore state.
 *
 * Called when a translation exits (punt, single-step, normal, backward
 * branch).  Records where the JIT ended up so the debug interpreter can
 * replay the trace from the saved state and compare against these values.
 */
void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
                                      SelfVerificationState exitPoint)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
    // Official InterpState structure (the one saved in SaveState)
    InterpState *realGlue = shadowSpace->glue;
    shadowSpace->endPC = pc;
    shadowSpace->endShadowFP = fp;

    //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
    //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    //    (int)pc);

    /* Restore without a preceding save indicates a broken SV cycle */
    if (shadowSpace->selfVerificationState != kSVSStart) {
        LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
            (int)shadowSpace->endPC);
        LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
        LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
            (int)shadowSpace->endShadowFP);
    }

    // Move the resume [ND]PC from the shadow space to the real space so that
    // the debug interpreter can return to the translation
    if (exitPoint == kSVSSingleStep) {
        realGlue->jitResumeNPC = shadowSpace->interpState.jitResumeNPC;
        realGlue->jitResumeDPC = shadowSpace->interpState.jitResumeDPC;
    } else {
        realGlue->jitResumeNPC = NULL;
        realGlue->jitResumeDPC = NULL;
    }

    // Special case when punting after a single instruction: nothing was
    // executed in the translation, so there is nothing to verify
    if (exitPoint == kSVSPunt && pc == shadowSpace->startPC) {
        shadowSpace->selfVerificationState = kSVSIdle;
    } else {
        shadowSpace->selfVerificationState = exitPoint;
    }

    return shadowSpace;
}
181
/*
 * Print contents of virtual registers, one per line.  A register whose
 * value differs from the corresponding entry in the reference array is
 * flagged with a trailing " X".
 */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    int idx;
    for (idx = 0; idx < numWords; idx++) {
        const char *divergedMark = (addr[idx] != addrRef[idx]) ? " X" : "";
        LOGD("(v%d) 0x%8x%s", idx, addr[idx], divergedMark);
    }
}
191
/*
 * Print values maintained in shadowSpace.
 *
 * Diagnostic dump used when a divergence or bad SV state transition is
 * detected.  frameBytes is the size of the shadowed frame (register space
 * top minus shadow FP); if a callee frame was pushed during the trace
 * (curFrame below the saved FP), its local-register and frame sizes are
 * reported as well.
 */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    /* Bytes from shadow FP to the top of the shadow register space */
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    /* Dalvik stacks grow down: a lower curFrame means an invoke occurred */
    if (self->curFrame < shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
    LOGD("Method: %s", shadowSpace->method->name);
    LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}
223
/*
 * Print decoded instructions in the current trace.
 *
 * Walks the per-instruction log recorded by selfVerificationDebugInterp
 * and prints each instruction's address, method-relative offset, and
 * opcode name.
 */
static void selfVerificationDumpTrace(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int i, addr, offset;
    DecodedInstruction *decInsn;

    LOGD("********** SHADOW TRACE DUMP **********");
    for (i = 0; i < shadowSpace->traceLength; i++) {
        addr = shadowSpace->trace[i].addr;
        offset = (int)((u2*)addr - stackSave->method->insns);
        decInsn = &(shadowSpace->trace[i].decInsn);
        /* Not properly decoding instruction, some registers may be garbage */
        LOGD("0x%x: (0x%04x) %s",
            addr, offset, dexGetOpcodeName(decInsn->opCode));
    }
}
242
/*
 * Code is forced into this spin loop when a divergence is detected.
 *
 * Before parking the thread forever, enqueue a debug work order for the
 * offending trace so the compiler thread can dump its IR/codegen for
 * inspection.  The loop is exited only if a debugger manually clears
 * gDvmJit.selfVerificationSpin.
 */
static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
{
    const u2 *startPC = shadowSpace->startPC;
    JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
    if (desc) {
        dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
        /*
         * This function effectively terminates the VM right here, so not
         * freeing the desc pointer when the enqueuing fails is acceptable.
         */
    }
    gDvmJit.selfVerificationSpin = true;
    while(gDvmJit.selfVerificationSpin) sleep(10);
}
258
/*
 * Manage self verification while in the debug interpreter.
 *
 * Called once per instruction during the replay of a trace.  Returns true
 * when verification for the current trace is finished (either the end PC
 * was reached and all comparisons ran, or a control divergence was
 * declared); returns false while the replay should continue.  On any
 * register/frame/memory divergence the thread enters
 * selfVerificationSpinLoop and never returns.
 */
static bool selfVerificationDebugInterp(const u2* pc, Thread* self,
                                        InterpState *interpState)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    SelfVerificationState state = shadowSpace->selfVerificationState;

    DecodedInstruction decInsn;
    dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);

    //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, dexGetOpcodeName(decInsn.opCode));

    /* Replaying without a completed save/restore pair is a protocol error */
    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /*
     * Skip endPC once when trace has a backward branch. If the SV state is
     * single step, keep it that way.
     */
    if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
        (state != kSVSBackwardBranch && state != kSVSSingleStep)) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /* Check that the current pc is the end of the trace */
    if ((state == kSVSDebugInterp || state == kSVSSingleStep) &&
        pc == shadowSpace->endPC) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space: compare the replayed (real) frame against
         * the frame the translation produced in shadow space */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp,
                                           (int*)shadowSpace->shadowFP,
                                           frameBytes/4);
            LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                                           (int*)shadowSpace->fp,
                                           frameBytes/4);
            selfVerificationSpinLoop(shadowSpace);
        }
        /* Check new frame if it exists (invokes only) */
        if (self->curFrame < shadowSpace->fp) {
            StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
            /* Uninitialized locals are skipped; only ins are compared */
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->curFrame - localRegs;
            if (memcmp(((char*)self->curFrame)+localRegs,
                ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
                    (int)self->curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->curFrame,
                                               (int*)shadowSpace->endShadowFP,
                                               (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                                               (int*)self->curFrame,
                                               (frameBytes2+localRegs)/4);
                selfVerificationSpinLoop(shadowSpace);
            }
        }

        /* Check memory space: each heap write logged by the translation
         * must match what the replay wrote at the same address */
        bool memDiff = false;
        ShadowHeap* heapSpacePtr;
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int memData = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != memData) {
                LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
                LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
                    heapSpacePtr->addr, memData, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                memDiff = true;
            }
        }
        if (memDiff) selfVerificationSpinLoop(shadowSpace);

        /*
         * Switch to JIT single step mode to stay in the debug interpreter for
         * one more instruction
         */
        if (state == kSVSSingleStep) {
            interpState->jitState = kJitSingleStepEnd;
        }
        return true;

    /* If end not been reached, make sure max length not exceeded */
    } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
        selfVerificationSpinLoop(shadowSpace);

        return true;
    }
    /* Log the instruction address and decoded instruction for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    shadowSpace->traceLength++;

    return false;
}
386#endif
387
/*
 * If one of our fixed tables or the translation buffer fills up,
 * call this routine to avoid wasting cycles on future translation requests.
 *
 * Clearing pProfTable disables profiling-driven trace requests globally.
 */
void dvmJitStopTranslationRequests()
{
    /*
     * Note 1: This won't necessarily stop all translation requests, and
     * operates on a delayed mechanism. Running threads look to the copy
     * of this value in their private InterpState structures and won't see
     * this change until it is refreshed (which happens on interpreter
     * entry).
     * Note 2: This is a one-shot memory leak on this table. Because this is a
     * permanent off switch for Jit profiling, it is a one-time leak of 1K
     * bytes, and no further attempt will be made to re-allocate it. Can't
     * free it because some thread may be holding a reference.
     */
    gDvmJit.pProfTable = NULL;
}
407
Ben Cheng978738d2010-05-13 13:45:57 -0700408#if defined(WITH_JIT_TUNING)
/* Convenience function to increment counter from assembly code.
 * 'from' selects which no-chain exit kind is being counted. */
void dvmBumpNoChain(int from)
{
    gDvmJit.noChainExit[from]++;
}
414
/* Convenience function to increment counter from assembly code.
 * Counts normal (chained) translation exits. */
void dvmBumpNormal()
{
    gDvmJit.normalExit++;
}
420
/* Convenience function to increment counter from assembly code.
 * NOTE(review): the 'from' argument is currently unused -- only a single
 * aggregate punt counter is kept; presumably retained to match the
 * assembly-side calling convention. */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
426#endif
427
428/* Dumps debugging & tuning stats to the log */
429void dvmJitStats()
430{
431 int i;
432 int hit;
433 int not_hit;
434 int chains;
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800435 int stubs;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700436 if (gDvmJit.pJitEntryTable) {
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800437 for (i=0, stubs=chains=hit=not_hit=0;
Bill Buzbee27176222009-06-09 09:20:16 -0700438 i < (int) gDvmJit.jitTableSize;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700439 i++) {
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800440 if (gDvmJit.pJitEntryTable[i].dPC != 0) {
Ben Chengba4fc8b2009-06-01 13:00:29 -0700441 hit++;
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800442 if (gDvmJit.pJitEntryTable[i].codeAddress ==
Bill Buzbeebd047242010-05-13 13:02:53 -0700443 dvmCompilerGetInterpretTemplate())
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800444 stubs++;
445 } else
Ben Chengba4fc8b2009-06-01 13:00:29 -0700446 not_hit++;
Bill Buzbee716f1202009-07-23 13:22:09 -0700447 if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700448 chains++;
449 }
Ben Cheng72621c92010-03-10 13:12:55 -0800450 LOGD("JIT: table size is %d, entries used is %d",
Ben Cheng86717f72010-03-05 15:27:21 -0800451 gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed);
Ben Cheng72621c92010-03-10 13:12:55 -0800452 LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
453 hit, not_hit + hit, chains, gDvmJit.threshold,
454 gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
Ben Cheng86717f72010-03-05 15:27:21 -0800455
Ben Cheng978738d2010-05-13 13:45:57 -0700456#if defined(WITH_JIT_TUNING)
457 LOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches);
458
Ben Cheng72621c92010-03-10 13:12:55 -0800459 LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
460 gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
461 gDvmJit.normalExit, gDvmJit.puntExit);
Ben Cheng452efba2010-04-30 15:14:00 -0700462
Ben Cheng978738d2010-05-13 13:45:57 -0700463 LOGD("JIT: ICHits: %d", gDvmICHitCount);
464
Ben Cheng72621c92010-03-10 13:12:55 -0800465 LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
466 "%d switch overflow",
467 gDvmJit.noChainExit[kInlineCacheMiss],
468 gDvmJit.noChainExit[kCallsiteInterpreted],
469 gDvmJit.noChainExit[kSwitchOverflow]);
Ben Cheng86717f72010-03-05 15:27:21 -0800470
Ben Chengb88ec3c2010-05-17 12:50:33 -0700471 LOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, "
472 "%d dropped",
473 gDvmJit.icPatchInit, gDvmJit.icPatchRejected,
474 gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued,
Ben Cheng452efba2010-04-30 15:14:00 -0700475 gDvmJit.icPatchDropped);
476
Ben Cheng86717f72010-03-05 15:27:21 -0800477 LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
478 gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
479 gDvmJit.invokeNative, gDvmJit.returnOp);
480 LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
481 LOGD("JIT: Avg unit compilation time: %llu us",
482 gDvmJit.jitTime / gDvmJit.numCompilations);
Ben Chengba4fc8b2009-06-01 13:00:29 -0700483#endif
Ben Cheng86717f72010-03-05 15:27:21 -0800484
Bill Buzbee9a8c75a2009-11-08 14:31:20 -0800485 LOGD("JIT: %d Translation chains, %d interp stubs",
486 gDvmJit.translationChains, stubs);
Ben Chenge80cd942009-07-17 15:54:23 -0700487 if (gDvmJit.profile) {
Bill Buzbee716f1202009-07-23 13:22:09 -0700488 dvmCompilerSortAndPrintTraceProfiles();
Bill Buzbee6e963e12009-06-17 16:56:19 -0700489 }
Ben Chengba4fc8b2009-06-01 13:00:29 -0700490 }
491}
492
Bill Buzbee716f1202009-07-23 13:22:09 -0700493
/*
 * Atomically set the traceConstruction flag in a JitEntry's packed info
 * word.  A CAS retry loop is used because the other bit-fields packed into
 * the same word may be updated concurrently by other threads.
 */
void setTraceConstruction(JitEntry *slot, bool value)
{

    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    do {
        /* Snapshot the whole word, modify only our field, then CAS it in */
        oldValue = slot->u;
        newValue = oldValue;
        newValue.info.traceConstruction = value;
    } while (android_atomic_release_cas(oldValue.infoWord, newValue.infoWord,
             &slot->u.infoWord) != 0);
}
506
/*
 * Revert a trace head to the interpret-only template and clear its
 * under-construction flag, abandoning any pending compilation for it.
 * NOTE(review): interpState is currently unused here -- presumably kept
 * for signature symmetry with related helpers; confirm with callers.
 */
void resetTracehead(InterpState* interpState, JitEntry *slot)
{
    slot->codeAddress = dvmCompilerGetInterpretTemplate();
    setTraceConstruction(slot, false);
}
512
513/* Clean up any pending trace builds */
514void dvmJitAbortTraceSelect(InterpState* interpState)
515{
516 if (interpState->jitState == kJitTSelect)
Ben Chenga4973592010-03-31 11:59:18 -0700517 interpState->jitState = kJitDone;
Bill Buzbeed7269912009-11-10 14:31:32 -0800518}
519
/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 *
 * Lock-free fast path: walk the hash chain looking for dPC.  On a miss,
 * take gDvmJit.tableLock (unless the caller already holds it), re-walk to
 * handle racing inserters, then either claim the primary slot or chain a
 * free cell onto the end of the bucket.  Statement order is significant:
 * an entry becomes live the moment its dPC is published.
 */
static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /* Walk the bucket chain to find an exact match for our PC */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           (gDvmJit.pJitEntryTable[idx].dPC != dPC)) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
        /*
         * No match. Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        if (!callerLocked)
            dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case). Otherwise we're going
         * to have to find a free slot and chain it.
         */
        ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
                    /* Another thread got there first for this dPC */
                    if (!callerLocked)
                        dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0; /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (android_atomic_release_cas(oldValue.infoWord,
                         newValue.infoWord,
                         &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            /*
             * Initialize codeAddress and allocate the slot. Must
             * happen in this order (since dPC is set, the entry is live.
             */
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        if (!callerLocked)
            dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}
Ben Chenga4973592010-03-31 11:59:18 -0700608
/*
 * Check if the next instruction following the invoke is a move-result and if
 * so add it to the trace.
 *
 * lastPC, len, offset are all from the preceding invoke instruction.
 * When a move-result* follows, a fresh one-instruction trace run is opened
 * at moveResultPC so the result consumer is compiled with the invoke.
 */
static void insertMoveResult(const u2 *lastPC, int len, int offset,
                             InterpState *interpState)
{
    DecodedInstruction nextDecInsn;
    const u2 *moveResultPC = lastPC + len;

    dexDecodeInstruction(gDvm.instrFormat, moveResultPC, &nextDecInsn);
    /* Only the three move-result variants are swallowed into the trace */
    if ((nextDecInsn.opCode != OP_MOVE_RESULT) &&
        (nextDecInsn.opCode != OP_MOVE_RESULT_WIDE) &&
        (nextDecInsn.opCode != OP_MOVE_RESULT_OBJECT))
        return;

    /* We need to start a new trace run */
    int currTraceRun = ++interpState->currTraceRun;
    interpState->currRunHead = moveResultPC;
    interpState->trace[currTraceRun].frag.startOffset = offset + len;
    interpState->trace[currTraceRun].frag.numInsts = 1;
    interpState->trace[currTraceRun].frag.runEnd = false;
    interpState->trace[currTraceRun].frag.hint = kJitHintNone;
    interpState->totalTraceLen++;

    interpState->currRunLen = dexGetInstrOrTableWidthAbs(gDvm.instrWidth,
                                                         moveResultPC);
}
639
640/*
Ben Chengba4fc8b2009-06-01 13:00:29 -0700641 * Adds to the current trace request one instruction at a time, just
642 * before that instruction is interpreted. This is the primary trace
643 * selection function. NOTE: return instruction are handled a little
644 * differently. In general, instructions are "proposed" to be added
645 * to the current trace prior to interpretation. If the interpreter
646 * then successfully completes the instruction, is will be considered
647 * part of the request. This allows us to examine machine state prior
648 * to interpretation, and also abort the trace request if the instruction
649 * throws or does something unexpected. However, return instructions
650 * will cause an immediate end to the translation request - which will
651 * be passed to the compiler before the return completes. This is done
652 * in response to special handling of returns by the interpreter (and
653 * because returns cannot throw in a way that causes problems for the
654 * translated code.
655 */
/*
 * Trace-selection state machine.  Inspects the instruction most recently
 * interpreted (interpState->lastPC) and either grows the current trace,
 * ends it and ships it to the compiler, or handles the various
 * single-step / self-verification / done states.
 *
 * Returns nonzero (true) when the caller should switch out of the
 * trace-building interpreter; a pending debugger/profiler always forces
 * a false return.
 */
int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
{
    int flags, len;
    int switchInterp = false;
    bool debugOrProfile = dvmDebuggerOrProfilerActive();

    /*
     * Prepare to handle last PC and stage the current PC.  Trace growth
     * below operates on lastPC (the instruction just interpreted), not pc.
     */
    const u2 *lastPC = interpState->lastPC;
    interpState->lastPC = pc;

    switch (interpState->jitState) {
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(gDvm.instrFormat, lastPC, &decInsn);

            /*
             * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
             * to the amount of space it takes to generate the chaining
             * cells.
             */
            if (interpState->totalTraceLen != 0 &&
                (decInsn.opCode == OP_PACKED_SWITCH ||
                 decInsn.opCode == OP_SPARSE_SWITCH)) {
                interpState->jitState = kJitTSelectEnd;
                break;
            }


#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s", dexGetOpcodeName(decInsn.opCode));
#endif
            flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
            len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, lastPC);
            offset = lastPC - interpState->method->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(interpState->method));
            /*
             * lastPC is not contiguous with the current run - start a new
             * trace run (e.g. after a taken branch).
             */
            if (lastPC != interpState->currRunHead + interpState->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++interpState->currTraceRun;
                interpState->currRunLen = 0;
                interpState->currRunHead = (u2*)lastPC;
                interpState->trace[currTraceRun].frag.startOffset = offset;
                interpState->trace[currTraceRun].frag.numInsts = 0;
                interpState->trace[currTraceRun].frag.runEnd = false;
                interpState->trace[currTraceRun].frag.hint = kJitHintNone;
            }
            interpState->trace[interpState->currTraceRun].frag.numInsts++;
            interpState->totalTraceLen++;
            interpState->currRunLen += len;

            /*
             * If the last instruction is an invoke, we will try to sneak in
             * the move-result* (if existent) into a separate trace run.
             */
            int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;

            /* Will probably never hit this with the current trace builder */
            if (interpState->currTraceRun ==
                (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
                interpState->jitState = kJitTSelectEnd;
            }

            /* End the trace at any control-flow instruction (basic block
             * end), except unconditional flow and INVOKE_DIRECT_EMPTY. */
            if ( ((flags & kInstrUnconditional) == 0) &&
                 /* don't end trace on INVOKE_DIRECT_EMPTY */
                 (decInsn.opCode != OP_INVOKE_DIRECT_EMPTY) &&
                 ((flags & (kInstrCanBranch |
                            kInstrCanSwitch |
                            kInstrCanReturn |
                            kInstrInvoke)) != 0)) {
                interpState->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     dexGetOpcodeName(decInsn.opCode));
#endif

                /*
                 * If the next instruction is a variant of move-result, insert
                 * it to the trace as well.
                 */
                if (flags & kInstrInvoke) {
                    insertMoveResult(lastPC, len, offset, interpState);
                }
            }
            /* Break on throw or self-loop */
            if ((decInsn.opCode == OP_THROW) || (lastPC == pc)){
                interpState->jitState = kJitTSelectEnd;
            }
            if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                interpState->jitState = kJitTSelectEnd;
            }
            /* Abandon the trace request if debugger/profiler is attached */
            if (debugOrProfile) {
                interpState->jitState = kJitDone;
                break;
            }
            /* Non-return instructions stay in kJitTSelect for next time */
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                /* Bad trace */
                if (interpState->totalTraceLen == 0) {
                    /* Bad trace - mark as untranslatable */
                    interpState->jitState = kJitDone;
                    switchInterp = true;
                    break;
                }
                /* Package the accumulated runs into a work-order payload */
                JitTraceDescription* desc =
                   (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                     sizeof(JitTraceRun) * (interpState->currTraceRun+1));
                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    interpState->jitState = kJitDone;
                    switchInterp = true;
                    break;
                }
                interpState->trace[interpState->currTraceRun].frag.runEnd =
                     true;
                desc->method = interpState->method;
                memcpy((char*)&(desc->trace[0]),
                    (char*)&(interpState->trace[0]),
                    sizeof(JitTraceRun) * (interpState->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
#endif
                if (dvmCompilerWorkEnqueue(
                       interpState->currTraceHead,kWorkOrderTrace,desc)) {
                    /* Work order successfully enqueued */
                    if (gDvmJit.blockingMode) {
                        dvmCompilerDrainQueue();
                    }
                } else {
                    /*
                     * Make sure the descriptor for the abandoned work order is
                     * freed.
                     */
                    free(desc);
                }
                /*
                 * Reset "trace in progress" flag whether or not we
                 * successfully entered a work order.
                 */
                JitEntry *jitEntry =
                    lookupAndAdd(interpState->currTraceHead, false);
                if (jitEntry) {
                    setTraceConstruction(jitEntry, false);
                }
                interpState->jitState = kJitDone;
                switchInterp = true;
            }
            break;
        case kJitSingleStep:
            interpState->jitState = kJitSingleStepEnd;
            break;
        case kJitSingleStepEnd:
            interpState->entryPoint = kInterpEntryResume;
            interpState->jitState = kJitDone;
            switchInterp = true;
            break;
        case kJitDone:
            switchInterp = true;
            break;
#if defined(WITH_SELF_VERIFICATION)
        case kJitSelfVerification:
            if (selfVerificationDebugInterp(pc, self, interpState)) {
                /*
                 * If the next state is not single-step end, we can switch
                 * interpreter now.
                 */
                if (interpState->jitState != kJitSingleStepEnd) {
                    interpState->jitState = kJitDone;
                    switchInterp = true;
                }
            }
            break;
#endif
        /*
         * If the debug interpreter was entered for non-JIT reasons, check if
         * the original reason still holds. If not, we have to force the
         * interpreter switch here and use dvmDebuggerOrProfilerActive instead
         * of dvmJitDebuggerOrProfilerActive since the latter will always
         * return true when the debugger/profiler is already detached and the
         * JIT profiling table is restored.
         */
        case kJitNot:
            switchInterp = !dvmDebuggerOrProfilerActive();
            break;
        default:
            LOGE("Unexpected JIT state: %d entry point: %d",
                 interpState->jitState, interpState->entryPoint);
            dvmAbort();
            break;
    }
    /*
     * Final check to see if we can really switch the interpreter. Make sure
     * the jitState is kJitDone or kJitNot when switchInterp is set to true.
     */
    assert(switchInterp == false || interpState->jitState == kJitDone ||
           interpState->jitState == kJitNot);
    return switchInterp && !debugOrProfile;
}
864
Ben Chengccd6c012009-10-15 14:52:45 -0700865JitEntry *dvmFindJitEntry(const u2* pc)
Ben Chengba4fc8b2009-06-01 13:00:29 -0700866{
867 int idx = dvmJitHash(pc);
868
869 /* Expect a high hit rate on 1st shot */
870 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
871 return &gDvmJit.pJitEntryTable[idx];
872 else {
Bill Buzbee27176222009-06-09 09:20:16 -0700873 int chainEndMarker = gDvmJit.jitTableSize;
Bill Buzbee716f1202009-07-23 13:22:09 -0700874 while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
875 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
Ben Chengba4fc8b2009-06-01 13:00:29 -0700876 if (gDvmJit.pJitEntryTable[idx].dPC == pc)
877 return &gDvmJit.pJitEntryTable[idx];
878 }
879 }
880 return NULL;
881}
882
/*
 * If a translated code address exists for the dalvik byte code
 * pointer return it. This routine needs to be fast.
 *
 * Translations are "hidden" (NULL is returned even when one exists)
 * while any thread-suspend is pending, the code cache is full, or the
 * profiling table has been removed.
 */
void* dvmJitGetCodeAddr(const u2* dPC)
{
    int idx = dvmJitHash(dPC);
    const u2* npc = gDvmJit.pJitEntryTable[idx].dPC;
    if (npc != NULL) {
        bool hideTranslation = (gDvm.sumThreadSuspendCount != 0) ||
                               (gDvmJit.codeCacheFull == true) ||
                               (gDvmJit.pProfTable == NULL);

        /* Fast path: direct hit on the first probe */
        if (npc == dPC) {
#if defined(WITH_JIT_TUNING)
            gDvmJit.addrLookupsFound++;
#endif
            return hideTranslation ?
                NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
        } else {
            /* Slow path: walk the collision chain (terminated by an
             * index equal to the table size). */
            int chainEndMarker = gDvmJit.jitTableSize;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(WITH_JIT_TUNING)
                    gDvmJit.addrLookupsFound++;
#endif
                    return hideTranslation ?
                        NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
                }
            }
        }
    }
#if defined(WITH_JIT_TUNING)
    gDvmJit.addrLookupsNotFound++;
#endif
    return NULL;
}
921
/*
 * Register the translated code pointer into the JitTable.
 * NOTE: Once a codeAddress field transitions from initial state to
 * JIT'd code, it must not be altered without first halting all
 * threads. This routine should only be called by the compiler
 * thread.
 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    JitEntry *jitEntry = lookupAndAdd(dPC, false);
    assert(jitEntry);
    /*
     * Note: order of update is important - the instruction-set field is
     * published via compare-and-swap (with release semantics) BEFORE the
     * code address becomes visible, so other threads that read a non-NULL
     * codeAddress also see a consistent info word.
     */
    do {
        oldValue = jitEntry->u;
        newValue = oldValue;
        newValue.info.instructionSet = set;
    } while (android_atomic_release_cas(
             oldValue.infoWord, newValue.infoWord,
             &jitEntry->u.infoWord) != 0);
    jitEntry->codeAddress = nPC;
}
944
/*
 * Determine if a valid trace-building request is active. Return true
 * if we need to abort and switch back to the fast interpreter, false
 * otherwise.
 *
 * NOTE(review): the 'self' parameter is currently unused in this body.
 */
bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
{
    bool switchInterp = false;         /* Assume success */
    int i;
    /*
     * A note on trace "hotness" filtering:
     *
     * Our first level trigger is intentionally loose - we need it to
     * fire easily not just to identify potential traces to compile, but
     * also to allow re-entry into the code cache.
     *
     * The 2nd level filter (done here) exists to be selective about
     * what we actually compile.  It works by requiring the same
     * trace head "key" (defined as filterKey below) to appear twice in
     * a relatively short period of time.  The difficulty is defining the
     * shape of the filterKey.  Unfortunately, there is no "one size fits
     * all" approach.
     *
     * For spiky execution profiles dominated by a smallish
     * number of very hot loops, we would want the second-level filter
     * to be very selective.  A good selective filter is requiring an
     * exact match of the Dalvik PC.  In other words, defining filterKey as:
     *     intptr_t filterKey = (intptr_t)interpState->pc
     *
     * However, for flat execution profiles we do best when aggressively
     * translating.  A heuristically decent proxy for this is to use
     * the value of the method pointer containing the trace as the filterKey.
     * Intuitively, this is saying that once any trace in a method appears hot,
     * immediately translate any other trace from that same method that
     * survives the first-level filter.  Here, filterKey would be defined as:
     *     intptr_t filterKey = (intptr_t)interpState->method
     *
     * The problem is that we can't easily detect whether we're dealing
     * with a spiky or flat profile.  If we go with the "pc" match approach,
     * flat profiles perform poorly.  If we go with the loose "method" match,
     * we end up generating a lot of useless translations.  Probably the
     * best approach in the future will be to retain profile information
     * across runs of each application in order to determine its profile,
     * and then choose once we have enough history.
     *
     * However, for now we've decided to choose a compromise filter scheme that
     * includes elements of both.  The high order bits of the filter key
     * are drawn from the enclosing method, and are combined with a slice
     * of the low-order bits of the Dalvik pc of the trace head.  The
     * looseness of the filter can be adjusted by changing with width of
     * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS).  The wider
     * the slice, the tighter the filter.
     *
     * Note: the fixed shifts in the function below reflect assumed word
     * alignment for method pointers, and half-word alignment of the Dalvik pc.
     */
    u4 methodKey = (u4)interpState->method <<
                   (JIT_TRACE_THRESH_FILTER_PC_BITS - 2);
    u4 pcKey = ((u4)interpState->pc >> 1) &
               ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1);
    intptr_t filterKey = (intptr_t)(methodKey | pcKey);
    bool debugOrProfile = dvmDebuggerOrProfilerActive();

    /* Check if the JIT request can be handled now */
    if (gDvmJit.pJitEntryTable != NULL && debugOrProfile == false) {
        /* Bypass the filter for hot trace requests or during stress mode */
        if (interpState->jitState == kJitTSelectRequest &&
            gDvmJit.threshold > 6) {
            /* Two-level filtering scheme */
            for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
                if (filterKey == interpState->threshFilter[i]) {
                    interpState->threshFilter[i] = 0; // Reset filter entry
                    break;
                }
            }
            if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
                /*
                 * Use random replacement policy - otherwise we could miss a
                 * large loop that contains more traces than the size of our
                 * filter array.
                 */
                i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
                interpState->threshFilter[i] = filterKey;
                /* First sighting of this key - drop the request for now */
                interpState->jitState = kJitDone;
            }
        }

        /* If the compiler is backlogged, cancel any JIT actions */
        if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
            interpState->jitState = kJitDone;
        }

        /*
         * Check for additional reasons that might force the trace select
         * request to be dropped
         */
        if (interpState->jitState == kJitTSelectRequest ||
            interpState->jitState == kJitTSelectRequestHot) {
            JitEntry *slot = lookupAndAdd(interpState->pc, false);
            if (slot == NULL) {
                /*
                 * Table is full.  This should have been
                 * detected by the compiler thread and the table
                 * resized before we run into it here.  Assume bad things
                 * are afoot and disable profiling.
                 */
                interpState->jitState = kJitDone;
                LOGD("JIT: JitTable full, disabling profiling");
                dvmJitStopTranslationRequests();
            } else if (slot->u.info.traceConstruction) {
                /*
                 * Trace request already in progress, but most likely it
                 * aborted without cleaning up.  Assume the worst and
                 * mark trace head as untranslatable.  If we're wrong,
                 * the compiler thread will correct the entry when the
                 * translation is completed.  The downside here is that
                 * some existing translation may chain to the interpret-only
                 * template instead of the real translation during this
                 * window.  Performance, but not correctness, issue.
                 */
                interpState->jitState = kJitDone;
                resetTracehead(interpState, slot);
            } else if (slot->codeAddress) {
                /* Nothing to do here - just return */
                interpState->jitState = kJitDone;
            } else {
                /*
                 * Mark request.  Note, we are not guaranteed exclusivity
                 * here.  A window exists for another thread to be
                 * attempting to build this same trace.  Rather than
                 * bear the cost of locking, we'll just allow that to
                 * happen.  The compiler thread, if it chooses, can
                 * discard redundant requests.
                 */
                setTraceConstruction(slot, true);
            }
        }

        switch (interpState->jitState) {
            case kJitTSelectRequest:
            case kJitTSelectRequestHot:
                /* Request survived filtering - initialize trace state */
                interpState->jitState = kJitTSelect;
                interpState->currTraceHead = interpState->pc;
                interpState->currTraceRun = 0;
                interpState->totalTraceLen = 0;
                interpState->currRunHead = interpState->pc;
                interpState->currRunLen = 0;
                interpState->trace[0].frag.startOffset =
                     interpState->pc - interpState->method->insns;
                interpState->trace[0].frag.numInsts = 0;
                interpState->trace[0].frag.runEnd = false;
                interpState->trace[0].frag.hint = kJitHintNone;
                interpState->lastPC = 0;
                break;
            /*
             * For JIT's perspective there is no need to stay in the debug
             * interpreter unless debugger/profiler is attached.
             */
            case kJitDone:
                switchInterp = true;
                break;
            default:
                LOGE("Unexpected JIT state: %d entry point: %d",
                     interpState->jitState, interpState->entryPoint);
                dvmAbort();
        }
    } else {
        /*
         * Cannot build trace this time - ready to leave the dbg interpreter
         */
        interpState->jitState = kJitDone;
        switchInterp = true;
    }

    /*
     * Final check to see if we can really switch the interpreter. Make sure
     * the jitState is kJitDone when switchInterp is set to true.
     */
    assert(switchInterp == false || interpState->jitState == kJitDone);
    return switchInterp && !debugOrProfile;
}
1127
Bill Buzbee27176222009-06-09 09:20:16 -07001128/*
1129 * Resizes the JitTable. Must be a power of 2, and returns true on failure.
Bill Buzbee964a7b02010-01-28 12:54:19 -08001130 * Stops all threads, and thus is a heavyweight operation. May only be called
1131 * by the compiler thread.
Bill Buzbee27176222009-06-09 09:20:16 -07001132 */
1133bool dvmJitResizeJitTable( unsigned int size )
1134{
Bill Buzbee716f1202009-07-23 13:22:09 -07001135 JitEntry *pNewTable;
1136 JitEntry *pOldTable;
Bill Buzbee964a7b02010-01-28 12:54:19 -08001137 JitEntry tempEntry;
Bill Buzbee27176222009-06-09 09:20:16 -07001138 u4 newMask;
Bill Buzbee716f1202009-07-23 13:22:09 -07001139 unsigned int oldSize;
Bill Buzbee27176222009-06-09 09:20:16 -07001140 unsigned int i;
1141
Ben Cheng3f02aa42009-08-14 13:52:09 -07001142 assert(gDvmJit.pJitEntryTable != NULL);
Bill Buzbee27176222009-06-09 09:20:16 -07001143 assert(size && !(size & (size - 1))); /* Is power of 2? */
1144
Ben Chenga4973592010-03-31 11:59:18 -07001145 LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);
Bill Buzbee27176222009-06-09 09:20:16 -07001146
1147 newMask = size - 1;
1148
1149 if (size <= gDvmJit.jitTableSize) {
1150 return true;
1151 }
1152
Bill Buzbee964a7b02010-01-28 12:54:19 -08001153 /* Make sure requested size is compatible with chain field width */
1154 tempEntry.u.info.chain = size;
1155 if (tempEntry.u.info.chain != size) {
1156 LOGD("Jit: JitTable request of %d too big", size);
1157 return true;
1158 }
1159
Bill Buzbee716f1202009-07-23 13:22:09 -07001160 pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
Bill Buzbee27176222009-06-09 09:20:16 -07001161 if (pNewTable == NULL) {
1162 return true;
1163 }
1164 for (i=0; i< size; i++) {
Bill Buzbee716f1202009-07-23 13:22:09 -07001165 pNewTable[i].u.info.chain = size; /* Initialize chain termination */
Bill Buzbee27176222009-06-09 09:20:16 -07001166 }
1167
1168 /* Stop all other interpreting/jit'ng threads */
Ben Chenga8e64a72009-10-20 13:01:36 -07001169 dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE);
Bill Buzbee27176222009-06-09 09:20:16 -07001170
Bill Buzbee716f1202009-07-23 13:22:09 -07001171 pOldTable = gDvmJit.pJitEntryTable;
1172 oldSize = gDvmJit.jitTableSize;
Bill Buzbee27176222009-06-09 09:20:16 -07001173
1174 dvmLockMutex(&gDvmJit.tableLock);
Bill Buzbee27176222009-06-09 09:20:16 -07001175 gDvmJit.pJitEntryTable = pNewTable;
1176 gDvmJit.jitTableSize = size;
1177 gDvmJit.jitTableMask = size - 1;
Bill Buzbee716f1202009-07-23 13:22:09 -07001178 gDvmJit.jitTableEntriesUsed = 0;
Bill Buzbee27176222009-06-09 09:20:16 -07001179
Bill Buzbee716f1202009-07-23 13:22:09 -07001180 for (i=0; i < oldSize; i++) {
1181 if (pOldTable[i].dPC) {
1182 JitEntry *p;
1183 u2 chain;
Bill Buzbee964a7b02010-01-28 12:54:19 -08001184 p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/ );
1185 p->codeAddress = pOldTable[i].codeAddress;
Bill Buzbee716f1202009-07-23 13:22:09 -07001186 /* We need to preserve the new chain field, but copy the rest */
Bill Buzbee716f1202009-07-23 13:22:09 -07001187 chain = p->u.info.chain;
1188 p->u = pOldTable[i].u;
1189 p->u.info.chain = chain;
Bill Buzbee716f1202009-07-23 13:22:09 -07001190 }
1191 }
Bill Buzbee964a7b02010-01-28 12:54:19 -08001192 dvmUnlockMutex(&gDvmJit.tableLock);
Bill Buzbee716f1202009-07-23 13:22:09 -07001193
1194 free(pOldTable);
1195
Bill Buzbee27176222009-06-09 09:20:16 -07001196 /* Restart the world */
Ben Chenga8e64a72009-10-20 13:01:36 -07001197 dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE);
Bill Buzbee27176222009-06-09 09:20:16 -07001198
1199 return false;
1200}
1201
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001202/*
Ben Cheng60c24f42010-01-04 12:29:56 -08001203 * Reset the JitTable to the initial clean state.
1204 */
1205void dvmJitResetTable(void)
1206{
1207 JitEntry *jitEntry = gDvmJit.pJitEntryTable;
1208 unsigned int size = gDvmJit.jitTableSize;
1209 unsigned int i;
1210
1211 dvmLockMutex(&gDvmJit.tableLock);
1212 memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
1213 for (i=0; i< size; i++) {
1214 jitEntry[i].u.info.chain = size; /* Initialize chain termination */
1215 }
1216 gDvmJit.jitTableEntriesUsed = 0;
1217 dvmUnlockMutex(&gDvmJit.tableLock);
1218}
1219
1220/*
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001221 * Float/double conversion requires clamping to min and max of integer form. If
1222 * target doesn't support this normally, use these.
1223 */
1224s8 dvmJitd2l(double d)
1225{
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001226 static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
1227 static const double kMinLong = (double)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001228 if (d >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001229 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001230 else if (d <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001231 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001232 else if (d != d) // NaN case
1233 return 0;
1234 else
1235 return (s8)d;
1236}
1237
1238s8 dvmJitf2l(float f)
1239{
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001240 static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
1241 static const float kMinLong = (float)(s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001242 if (f >= kMaxLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001243 return (s8)0x7fffffffffffffffULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001244 else if (f <= kMinLong)
Bill Buzbee9727c3d2009-08-01 11:32:36 -07001245 return (s8)0x8000000000000000ULL;
Bill Buzbee50a6bf22009-07-08 13:08:04 -07001246 else if (f != f) // NaN case
1247 return 0;
1248 else
1249 return (s8)f;
1250}
1251
Ben Chengba4fc8b2009-06-01 13:00:29 -07001252#endif /* WITH_JIT */