/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>
#include <errno.h>
#include <cutils/ashmem.h>

#include "Dalvik.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"

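/* Return the number of work orders currently pending in the compiler queue */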
static inline int workQueueLength(void)
{
    return gDvmJit.compilerQueueLength;
}

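/*
 * Remove the oldest work order from the circular work queue and invalidate
 * its slot.  The caller must hold compilerLock.
 */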
static CompilerWorkOrder workDequeue(void)
{
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    if (gDvmJit.compilerQueueLength == 0) {
        dvmSignalCond(&gDvmJit.compilerQueueEmpty);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}

/*
 * Enqueue a work order - retrying until successful.  If attempts to enqueue
 * are repeatedly unsuccessful, assume the JIT is in a bad state and force a
 * code cache reset.
 */
#define ENQUEUE_MAX_RETRIES 20
void dvmCompilerForceWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    bool success;
    int retries = 0;
    do {
        success = dvmCompilerWorkEnqueue(pc, kind, info);
        if (!success) {
            retries++;
            if (retries > ENQUEUE_MAX_RETRIES) {
                LOGE("JIT: compiler queue wedged - forcing reset");
                gDvmJit.codeCacheFull = true;  // Force reset
                success = true;  // Because we'll drop the order now anyway
            } else {
                dvmLockMutex(&gDvmJit.compilerLock);
                pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                                  &gDvmJit.compilerLock);
                dvmUnlockMutex(&gDvmJit.compilerLock);
            }
        }
    } while (!success);
}

/*
 * Attempt to enqueue a work order, returning true if successful.
 *
 * NOTE: Make sure that the caller frees the info pointer if the return value
 * is false.
 */
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    dvmLockMutex(&gDvmJit.compilerLock);

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        result = false;
        goto unlockAndExit;
    }

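    /* Scan the pending orders so the same trace request is not enqueued twice */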
    for (numWork = gDvmJit.compilerQueueLength,
         i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc)
            goto unlockAndExit;
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.methodCompilationAborted = NULL;
    newOrder->result.codeAddress = NULL;
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    newOrder->result.cacheVersion = gDvmJit.cacheVersion;
    newOrder->result.requestingThread = dvmThreadSelf();

    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

unlockAndExit:
    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}

/* Block until the queue length is 0, or there is a pending suspend request */
void dvmCompilerDrainQueue(void)
{
    Thread *self = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
           self->interpBreak.ctl.suspendCount == 0) {
        /*
         * Use a timed wait here - more than one mutator thread may be blocked
         * but the compiler thread will only signal once when the queue is
         * emptied.  Furthermore, the compiler thread may have been shut down,
         * in which case the blocked thread would never get the wakeup signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock,
                            1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}

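/*
 * Create the JIT code cache: an ashmem-backed region mapped executable,
 * seeded with the compiler templates and then write-protected.
 */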
bool dvmCompilerSetupCodeCache(void)
{
    extern void dvmCompilerTemplateStart(void);
    extern void dmvCompilerTemplateEnd(void);
    int fd;

    /* Allocate the code cache */
    fd = ashmem_create_region("dalvik-jit-code-cache", gDvmJit.codeCacheSize);
    if (fd < 0) {
        LOGE("Could not create %u-byte ashmem region for the JIT code cache",
             gDvmJit.codeCacheSize);
        return false;
    }
    gDvmJit.codeCache = mmap(NULL, gDvmJit.codeCacheSize,
                             PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE, fd, 0);
    close(fd);
    if (gDvmJit.codeCache == MAP_FAILED) {
        LOGE("Failed to mmap the JIT code cache: %s\n", strerror(errno));
        return false;
    }

    gDvmJit.pageSizeMask = getpagesize() - 1;

    /* This can be found through "dalvik-jit-code-cache" in /proc/<pid>/maps */
    // LOGD("Code cache starts at %p", gDvmJit.codeCache);

    /* Copy the template code into the beginning of the code cache */
    int templateSize = (intptr_t) dmvCompilerTemplateEnd -
                       (intptr_t) dvmCompilerTemplateStart;
    memcpy((void *) gDvmJit.codeCache,
           (void *) dvmCompilerTemplateStart,
           templateSize);

    /*
     * Work around a CPU bug by keeping the 32-bit ARM handler code in its own
     * page.
     */
    if (dvmCompilerInstructionSet() == DALVIK_JIT_THUMB2) {
        templateSize = (templateSize + 4095) & ~4095;
    }

    gDvmJit.templateSize = templateSize;
    gDvmJit.codeCacheByteUsed = templateSize;

    /* Only flush the part in the code cache that is being used now */
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache + templateSize, 0);

    int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
                          PROTECT_CODE_CACHE_ATTRS);

    if (result == -1) {
        LOGE("Failed to remove the write permission for the code cache");
        dvmAbort();
    }

    return true;
}

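/*
 * Walk a thread's Dalvik stack, optionally logging each frame, and clear
 * every saved returnAddr so that no frame can return into translations that
 * are about to be discarded.
 */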
static void crawlDalvikStack(Thread *thread, bool print)
{
    void *fp = thread->curFrame;
    StackSaveArea* saveArea = NULL;
    int stackLevel = 0;

    if (print) {
        LOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
             dvmGetThreadStatusStr(thread->status),
             thread->inJitCodeCache,
             thread->inJitCodeCache ? "jit" : "interp");
    }
    /* Crawl the Dalvik stack frames to clear the returnAddr field */
    while (fp != NULL) {
        saveArea = SAVEAREA_FROM_FP(fp);

        if (print) {
            if (dvmIsBreakFrame((u4*)fp)) {
                LOGD("  #%d: break frame (%p)",
                     stackLevel, saveArea->returnAddr);
            }
            else {
                LOGD("  #%d: %s.%s%s (%p)",
                     stackLevel,
                     saveArea->method->clazz->descriptor,
                     saveArea->method->name,
                     dvmIsNativeMethod(saveArea->method) ?
                         " (native)" : "",
                     saveArea->returnAddr);
            }
        }
        stackLevel++;
        saveArea->returnAddr = NULL;
        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    /* Make sure the stack is fully unwound to the bottom */
    assert(saveArea == NULL ||
           (u1 *) (saveArea+1) == thread->interpStackStart);
}

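/*
 * Discard all translations and restore the code cache to its post-startup
 * state.  Called at a safe point with all mutator threads suspended; the
 * reset is deferred if any thread is still executing inside the code cache.
 */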
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    dvmLockThreadList(NULL);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used, and
         * 2) any thread stuck in JIT'ed code will soon return to the
         *    interpreter
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
        /* Cancel any ongoing trace selection */
        dvmUpdateInterpBreak(thread, kInterpJitBreak, kSubModeJitTraceBuild,
                             false /* clear */);
    }
    dvmUnlockThreadList();

    if (inJit) {
        LOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Update the translation cache version */
    gDvmJit.cacheVersion++;

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
           0,
           gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache +
                          gDvmJit.codeCacheByteUsed, 0);

    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /*
     * Reset the inflight compilation address (can only be done in safe points
     * or by the compiler thread when its thread state is RUNNING).
     */
    gDvmJit.inflightBaseAddr = NULL;

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    LOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}

/*
 * Perform actions that are only safe when all threads are suspended. Currently
 * we do:
 * 1) Check if the code cache is full. If so reset it and restart populating it
 *    from scratch.
 * 2) Patch predicted chaining cells by consuming recorded work orders.
 */
void dvmCompilerPerformSafePointChecks(void)
{
    if (gDvmJit.codeCacheFull) {
        resetCodeCache();
    }
    dvmCompilerPatchInlineCache();
}

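/*
 * One-time initialization run on the compiler thread once it decides to start
 * JIT'ing: code cache, compiler arena, JitTable, and profiling tables.
 */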
static bool compilerThreadStartup(void)
{
    JitEntry *pJitTable = NULL;
    unsigned char *pJitProfTable = NULL;
    JitTraceProfCounters *pJitTraceProfCounters = NULL;
    unsigned int i;

    if (!dvmCompilerArchInit())
        goto fail;

    /*
     * Set up the code cache if we have not inherited a valid code cache
     * from the zygote.
     */
    if (gDvmJit.codeCache == NULL) {
        if (!dvmCompilerSetupCodeCache())
            goto fail;
    }

    /* Allocate the initial arena block */
    if (dvmCompilerHeapInit() == false) {
        goto fail;
    }

    /* Cache the thread pointer */
    gDvmJit.compilerThread = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);

    /* Track method-level compilation statistics */
    gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);

#if defined(WITH_JIT_TUNING)
    gDvm.verboseShutdown = true;
#endif

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /* Set up the JitTable */

    /* Power of 2? */
    assert(gDvmJit.jitTableSize &&
           !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));

    dvmInitMutex(&gDvmJit.tableLock);
    dvmLockMutex(&gDvmJit.tableLock);
    pJitTable = (JitEntry*)
                calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
    if (!pJitTable) {
        LOGE("jit table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /*
     * NOTE: the profile table must only be allocated once, globally.
     * Profiling is turned on and off by nulling out gDvm.pJitProfTable
     * and then restoring its original value. However, this action
     * is not synchronized for speed so threads may continue to hold
     * and update the profile table after profiling has been turned
     * off by nulling the global pointer. Be aware.
     */
    pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
    if (!pJitProfTable) {
        LOGE("jit prof table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
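    /* Seed each profile counter with the JIT threshold value */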
    memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
    for (i = 0; i < gDvmJit.jitTableSize; i++) {
        pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
    }
    /* Is chain field wide enough for termination pattern? */
    assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

    /* Allocate the trace profiling structure */
    pJitTraceProfCounters = (JitTraceProfCounters*)
                            calloc(1, sizeof(*pJitTraceProfCounters));
    if (!pJitTraceProfCounters) {
        LOGE("jit trace prof counters allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }

    gDvmJit.pJitEntryTable = pJitTable;
    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
    gDvmJit.jitTableEntriesUsed = 0;
    gDvmJit.compilerHighWater =
        COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
    /*
     * If the VM is launched with wait-on-the-debugger, we will need to hide
     * the profile table here
     */
    gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
    gDvmJit.pProfTableCopy = pJitProfTable;
    gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
    dvmJitUpdateThreadStateAll();
    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Signal running threads to refresh their cached pJitTable pointers */
    dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
    dvmResumeAllThreads(SUSPEND_FOR_REFRESH);

    /* Enable signature breakpoints by customizing the following code */
#if defined(SIGNATURE_BREAKPOINT)
    /*
     * Suppose one sees the following native crash in the bugreport:
     * I/DEBUG ( 1638): Build fingerprint: 'unknown'
     * I/DEBUG ( 1638): pid: 2468, tid: 2507 >>> com.google.android.gallery3d
     * I/DEBUG ( 1638): signal 11 (SIGSEGV), fault addr 00001400
     * I/DEBUG ( 1638): r0 44ea7190 r1 44e4f7b8 r2 44ebc710 r3 00000000
     * I/DEBUG ( 1638): r4 00000a00 r5 41862dec r6 4710dc10 r7 00000280
     * I/DEBUG ( 1638): r8 ad010f40 r9 46a37a12 10 001116b0 fp 42a78208
     * I/DEBUG ( 1638): ip 00000090 sp 4710dbc8 lr ad060e67 pc 46b90682
     * cpsr 00000030
     * I/DEBUG ( 1638): #00 pc 46b90682 /dev/ashmem/dalvik-jit-code-cache
     * I/DEBUG ( 1638): #01 pc 00060e62 /system/lib/libdvm.so
     *
     * I/DEBUG ( 1638): code around pc:
     * I/DEBUG ( 1638): 46b90660 6888d01c 34091dcc d2174287 4a186b68
     * I/DEBUG ( 1638): 46b90670 d0052800 68006809 28004790 6b68d00e
     * I/DEBUG ( 1638): 46b90680 512000bc 37016eaf 6ea866af 6f696028
     * I/DEBUG ( 1638): 46b90690 682a6069 429a686b e003da08 6df1480b
     * I/DEBUG ( 1638): 46b906a0 1c2d4788 47806d70 46a378fa 47806d70
     *
     * Clearly it is a JIT bug. To find out which translation contains the
     * offending code, the content of the memory dump around the faulting PC
     * can be pasted into the gDvmJit.signatureBreakpoint[] array and next time
     * when a similar compilation is being created, the JIT compiler replays
     * the trace in verbose mode and one can investigate the instruction
     * sequence in detail.
     *
     * The length of the signature may need additional experiments to determine.
     * The rule of thumb is don't include PC-relative instructions in the
     * signature since it may be affected by the alignment of the compiled code.
     * However, a signature that's too short might increase the chance of false
     * positive matches. Using gdbjithelper to disassemble the memory content
     * first might be a good companion approach.
     *
     * For example, if the next 4 words starting from 46b90680 are pasted into
     * the data structure:
     */

    gDvmJit.signatureBreakpointSize = 4;
    gDvmJit.signatureBreakpoint =
        malloc(sizeof(u4) * gDvmJit.signatureBreakpointSize);
    gDvmJit.signatureBreakpoint[0] = 0x512000bc;
    gDvmJit.signatureBreakpoint[1] = 0x37016eaf;
    gDvmJit.signatureBreakpoint[2] = 0x6ea866af;
    gDvmJit.signatureBreakpoint[3] = 0x6f696028;

    /*
     * The following log will be printed when a match is found in subsequent
     * testing:
     *
     * D/dalvikvm( 2468): Signature match starting from offset 0x34 (4 words)
     * D/dalvikvm( 2468): --------
     * D/dalvikvm( 2468): Compiler: Building trace for computeVisibleItems,
     * offset 0x1f7
     * D/dalvikvm( 2468): 0x46a37a12: 0x0090 add-int v42, v5, v26
     * D/dalvikvm( 2468): 0x46a37a16: 0x004d aput-object v13, v14, v42
     * D/dalvikvm( 2468): 0x46a37a1a: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a3794e: 0x00d8 add-int/lit8 v26, v26, (#1)
     * D/dalvikvm( 2468): 0x46a37952: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a378ee: 0x0002 move/from16 v0, v26, (#0)
     * D/dalvikvm( 2468): 0x46a378f2: 0x0002 move/from16 v1, v29, (#0)
     * D/dalvikvm( 2468): 0x46a378f6: 0x0035 if-ge v0, v1, (#10)
     * D/dalvikvm( 2468): TRACEINFO (554): 0x46a37624
     * Lcom/cooliris/media/GridLayer;computeVisibleItems 0x1f7 14 of 934, 8
     * blocks
     *     :
     *     :
     * D/dalvikvm( 2468): 0x20 (0020): ldr r0, [r5, #52]
     * D/dalvikvm( 2468): 0x22 (0022): ldr r2, [pc, #96]
     * D/dalvikvm( 2468): 0x24 (0024): cmp r0, #0
     * D/dalvikvm( 2468): 0x26 (0026): beq 0x00000034
     * D/dalvikvm( 2468): 0x28 (0028): ldr r1, [r1, #0]
     * D/dalvikvm( 2468): 0x2a (002a): ldr r0, [r0, #0]
     * D/dalvikvm( 2468): 0x2c (002c): blx r2
     * D/dalvikvm( 2468): 0x2e (002e): cmp r0, #0
     * D/dalvikvm( 2468): 0x30 (0030): beq 0x00000050
     * D/dalvikvm( 2468): 0x32 (0032): ldr r0, [r5, #52]
     * D/dalvikvm( 2468): 0x34 (0034): lsls r4, r7, #2
     * D/dalvikvm( 2468): 0x36 (0036): str r0, [r4, r4]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x01fb @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0195:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0195 @ add-int/lit8 v26,
     * v26, (#1)
     * D/dalvikvm( 2468): 0x38 (0038): ldr r7, [r5, #104]
     * D/dalvikvm( 2468): 0x3a (003a): adds r7, r7, #1
     * D/dalvikvm( 2468): 0x3c (003c): str r7, [r5, #104]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0197 @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0165:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0165 @ move/from16 v0, v26,
     * (#0)
     * D/dalvikvm( 2468): 0x3e (003e): ldr r0, [r5, #104]
     * D/dalvikvm( 2468): 0x40 (0040): str r0, [r5, #0]
     *
     * The "str r0, [r4, r4]" is indeed the culprit of the native crash.
     */
#endif

    return true;

fail:
    return false;

}

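/*
 * Entry point of the compiler thread: optionally delay startup until the
 * framework signals that the first screen draw has happened, perform the
 * one-time initialization, then loop servicing work orders until halted.
 */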
static void *compilerThreadStart(void *arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);

    /*
     * If we're not running stand-alone, wait a little before
     * receiving translation requests on the assumption that process start-up
     * code isn't worth compiling. We'll resume when the framework
     * signals us that the first screen draw has happened, or the timer
     * below expires (to catch daemons).
     *
     * There is a theoretical race between the callback to
     * VMRuntime.startJitCompilation and when the compiler thread reaches this
     * point. In case the callback happens earlier, in order not to permanently
     * hold the system_server (which is not using the timed wait) in
     * interpreter-only mode we bypass the delay here.
     */
    if (gDvmJit.runningInAndroidFramework &&
        !gDvmJit.alreadyEnabledViaFramework) {
        /*
         * If the current VM instance is the system server (detected by having
         * 0 in gDvm.systemServerPid), we will use the indefinite wait on the
         * conditional variable to determine whether to start the JIT or not.
         * If the system server detects that the whole system is booted in
         * safe mode, the conditional variable will never be signaled and the
         * system server will remain in the interpreter-only mode. All
         * subsequent apps will be started with the --enable-safemode flag
         * explicitly appended.
         */
        if (gDvm.systemServerPid == 0) {
            dvmLockMutex(&gDvmJit.compilerLock);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            dvmUnlockMutex(&gDvmJit.compilerLock);
            LOGD("JIT started for system_server");
        } else {
            dvmLockMutex(&gDvmJit.compilerLock);
            /*
             * TUNING: experiment with the delay & perhaps make it
             * target-specific
             */
            dvmRelativeCondWait(&gDvmJit.compilerQueueActivity,
                                &gDvmJit.compilerLock, 3000, 0);
            dvmUnlockMutex(&gDvmJit.compilerLock);
        }
        if (gDvmJit.haltCompilerThread) {
            return NULL;
        }
    }

    compilerThreadStartup();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Since the compiler thread will not touch any objects on the heap once
     * being created, we just fake its state as VMWAIT so that it can be a
     * bit late when there is a suspend request pending.
     */
    while (!gDvmJit.haltCompilerThread) {
        if (workQueueLength() == 0) {
            int cc;
            cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
            assert(cc == 0);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            continue;
        } else {
            do {
                CompilerWorkOrder work = workDequeue();
                dvmUnlockMutex(&gDvmJit.compilerLock);
#if defined(WITH_JIT_TUNING)
                u8 startTime = dvmGetRelativeTimeUsec();
#endif
                /*
                 * Check whether there is a suspend request on me. This
                 * is necessary to allow a clean shutdown.
                 *
                 * However, in the blocking stress testing mode, let the
                 * compiler thread continue doing compilations to unblock
                 * other requesting threads. This may occasionally cause
                 * shutdown from proceeding cleanly in the standalone invocation
                 * of the vm but this should be acceptable.
                 */
                if (!gDvmJit.blockingMode)
                    dvmCheckSuspendPending(dvmThreadSelf());
                /* Is the JitTable filling up? */
                if (gDvmJit.jitTableEntriesUsed >
                    (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
                    bool resizeFail =
                        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
                    /*
                     * If the jit table is full, consider it time to reset
                     * the code cache too.
                     */
                    gDvmJit.codeCacheFull |= resizeFail;
                }
                if (gDvmJit.haltCompilerThread) {
                    LOGD("Compiler shutdown in progress - discarding request");
                } else if (!gDvmJit.codeCacheFull) {
                    jmp_buf jmpBuf;
                    work.bailPtr = &jmpBuf;
                    bool aborted = setjmp(jmpBuf);
                    if (!aborted) {
                        bool codeCompiled = dvmCompilerDoWork(&work);
                        /*
                         * Make sure we are still operating with the
                         * same translation cache version. See
                         * Issue 4271784 for details.
                         */
                        dvmLockMutex(&gDvmJit.compilerLock);
                        if ((work.result.cacheVersion ==
                             gDvmJit.cacheVersion) &&
                            codeCompiled &&
                            !work.result.discardResult &&
                            work.result.codeAddress) {
                            dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
                                              work.result.instructionSet,
                                              false, /* not method entry */
                                              work.result.profileCodeSize);
                        }
                        dvmUnlockMutex(&gDvmJit.compilerLock);
                    }
                    dvmCompilerArenaReset();
                }
                free(work.info);
#if defined(WITH_JIT_TUNING)
                gDvmJit.jitTime += dvmGetRelativeTimeUsec() - startTime;
#endif
                dvmLockMutex(&gDvmJit.compilerLock);
            } while (workQueueLength() != 0);
        }
    }
    pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * As part of detaching the thread we need to call into Java code to update
     * the ThreadGroup, and we should not be in VMWAIT state while executing
     * interpreted code.
     */
    dvmChangeStatus(NULL, THREAD_RUNNING);

    if (gDvm.verboseShutdown)
        LOGD("Compiler thread shutting down\n");
    return NULL;
}

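/* Set up the compiler locks and queues and launch the compiler thread */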
bool dvmCompilerStartup(void)
{
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer the rest of initialization until we're sure JIT'ing makes sense.
     * Launch the compiler thread, which will do the real initialization if
     * and when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}

void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;
    dvmJitUpdateThreadStateAll();

    if (gDvm.verboseShutdown ||
        gDvmJit.profileMode == kTraceProfilingContinuous) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
            sleep(5);
    }

    if (gDvmJit.compilerHandle) {

        gDvmJit.haltCompilerThread = true;

        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            LOGW("Compiler thread join failed\n");
        else if (gDvm.verboseShutdown)
            LOGD("Compiler thread has shut down\n");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow for the compiler
     * thread to be restarted after it exits here. We aren't freeing
     * the JitTable or the ProfTable because threads which still may be
     * running or in the process of shutting down may hold references to
     * them.
     */
}

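/*
 * Re-evaluate, based on current debugger/profiler activity, whether the JIT
 * should accept new trace requests, and propagate the new state to all
 * threads.  Also enables method-trace support in the compiler the first time
 * method tracing is turned on.
 */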
void dvmCompilerUpdateGlobalState(void)
{
    bool jitActive;
    bool jitActivate;
    bool needUnchain = false;

    /*
     * The tableLock might not be initialized yet by the compiler thread if
     * a debugger is attached from the very beginning of the VM launch. If
     * pProfTableCopy is NULL, the lock is not initialized yet and we don't
     * need to refresh anything either.
     */
    if (gDvmJit.pProfTableCopy == NULL) {
        return;
    }

    /*
     * On the first enabling of method tracing, switch the compiler
     * into a mode that includes trace support for invokes and returns.
     * If there are any existing translations, flush them. NOTE: we
     * can't blindly flush the translation cache because this code
     * may be executed before the compiler thread has finished
     * initialization.
     */
    if ((gDvm.activeProfilers != 0) &&
        !gDvmJit.methodTraceSupport) {
        bool resetRequired;
        /*
         * compilerLock will prevent new compilations from being
         * installed while we are working.
         */
        dvmLockMutex(&gDvmJit.compilerLock);
        gDvmJit.cacheVersion++; // invalidate compilations in flight
        gDvmJit.methodTraceSupport = true;
        resetRequired = (gDvmJit.numCompilations != 0);
        dvmUnlockMutex(&gDvmJit.compilerLock);
        if (resetRequired) {
            dvmSuspendAllThreads(SUSPEND_FOR_CC_RESET);
            resetCodeCache();
            dvmResumeAllThreads(SUSPEND_FOR_CC_RESET);
        }
    }

    dvmLockMutex(&gDvmJit.tableLock);
    jitActive = gDvmJit.pProfTable != NULL;
    jitActivate = !dvmDebuggerOrProfilerActive();

    if (jitActivate && !jitActive) {
        gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
    } else if (!jitActivate && jitActive) {
        gDvmJit.pProfTable = NULL;
        needUnchain = true;
    }
    dvmUnlockMutex(&gDvmJit.tableLock);
    if (needUnchain)
        dvmJitUnchainAll();
    // Make sure all threads have current values
    dvmJitUpdateThreadStateAll();
}