/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>
#include <errno.h>
#include <cutils/ashmem.h>

#include "Dalvik.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"

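/*
 * Return the number of work orders currently in the compiler queue.
 * The caller is expected to hold gDvmJit.compilerLock.
 */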
static inline int workQueueLength(void)
{
    return gDvmJit.compilerQueueLength;
}

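/*
 * Remove and return the work order at the head of the circular work queue.
 * Called with gDvmJit.compilerLock held.
 */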
static CompilerWorkOrder workDequeue(void)
{
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    if (gDvmJit.compilerQueueLength == 0) {
        dvmSignalCond(&gDvmJit.compilerQueueEmpty);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}


/*
 * Enqueue a work order - retrying until successful.  If attempts to enqueue
 * are repeatedly unsuccessful, assume the JIT is in a bad state and force a
 * code cache reset.
 */
#define ENQUEUE_MAX_RETRIES 20
void dvmCompilerForceWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    bool success;
    int retries = 0;
    do {
        success = dvmCompilerWorkEnqueue(pc, kind, info);
        if (!success) {
            retries++;
            if (retries > ENQUEUE_MAX_RETRIES) {
                LOGE("JIT: compiler queue wedged - forcing reset");
                gDvmJit.codeCacheFull = true;  // Force reset
                success = true;  // Because we'll drop the order now anyway
            } else {
                dvmLockMutex(&gDvmJit.compilerLock);
                pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                                  &gDvmJit.compilerLock);
                dvmUnlockMutex(&gDvmJit.compilerLock);
            }
        }
    } while (!success);
}

/*
 * Attempt to enqueue a work order, returning true if successful.
 *
 * NOTE: Make sure that the caller frees the info pointer if the return value
 * is false.
 */
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    dvmLockMutex(&gDvmJit.compilerLock);

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        result = false;
        goto unlockAndExit;
    }

    for (numWork = gDvmJit.compilerQueueLength,
           i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc)
            goto unlockAndExit;
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.methodCompilationAborted = NULL;
    newOrder->result.codeAddress = NULL;
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    newOrder->result.cacheVersion = gDvmJit.cacheVersion;
    newOrder->result.requestingThread = dvmThreadSelf();

    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

unlockAndExit:
    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}

/* Block until the queue length is 0, or there is a pending suspend request */
void dvmCompilerDrainQueue(void)
{
    Thread *self = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
           self->suspendCount == 0) {
        /*
         * Use a timed wait here - more than one mutator thread may be blocked
         * but the compiler thread will only signal once when the queue is
         * emptied. Furthermore, the compiler thread may have been shut down
         * so the blocked thread may never get the wakeup signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock, 1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}

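/*
 * Create the JIT code cache in an ashmem region, copy the compiler templates
 * into its beginning, and then write-protect the mapping.
 */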
bool dvmCompilerSetupCodeCache(void)
{
    extern void dvmCompilerTemplateStart(void);
    extern void dmvCompilerTemplateEnd(void);
    int fd;

    /* Allocate the code cache */
    fd = ashmem_create_region("dalvik-jit-code-cache", gDvmJit.codeCacheSize);
    if (fd < 0) {
        LOGE("Could not create %u-byte ashmem region for the JIT code cache",
             gDvmJit.codeCacheSize);
        return false;
    }
    gDvmJit.codeCache = mmap(NULL, gDvmJit.codeCacheSize,
                             PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE, fd, 0);
    close(fd);
    if (gDvmJit.codeCache == MAP_FAILED) {
        LOGE("Failed to mmap the JIT code cache: %s\n", strerror(errno));
        return false;
    }

    gDvmJit.pageSizeMask = getpagesize() - 1;

    /* This can be found through "dalvik-jit-code-cache" in /proc/<pid>/maps */
    // LOGD("Code cache starts at %p", gDvmJit.codeCache);

    /* Copy the template code into the beginning of the code cache */
    int templateSize = (intptr_t) dmvCompilerTemplateEnd -
                       (intptr_t) dvmCompilerTemplateStart;
    memcpy((void *) gDvmJit.codeCache,
           (void *) dvmCompilerTemplateStart,
           templateSize);

    /*
     * Work around a CPU bug by keeping the 32-bit ARM handler code in its own
     * page.
     */
    if (dvmCompilerInstructionSet() == DALVIK_JIT_THUMB2) {
        templateSize = (templateSize + 4095) & ~4095;
    }

    gDvmJit.templateSize = templateSize;
    gDvmJit.codeCacheByteUsed = templateSize;

    /* Only flush the part in the code cache that is being used now */
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache + templateSize, 0);

    int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
                          PROTECT_CODE_CACHE_ATTRS);

    if (result == -1) {
        LOGE("Failed to remove the write permission for the code cache");
        dvmAbort();
    }

    return true;
}

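/*
 * Walk a thread's Dalvik stack, optionally logging each frame, and clear
 * every returnAddr so the thread cannot return into translations that are
 * about to be discarded.
 */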
static void crawlDalvikStack(Thread *thread, bool print)
{
    void *fp = thread->curFrame;
    StackSaveArea* saveArea = NULL;
    int stackLevel = 0;

    if (print) {
        LOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
             dvmGetThreadStatusStr(thread->status),
             thread->inJitCodeCache,
             thread->inJitCodeCache ? "jit" : "interp");
    }
    /* Crawl the Dalvik stack frames to clear the returnAddr field */
    while (fp != NULL) {
        saveArea = SAVEAREA_FROM_FP(fp);

        if (print) {
            if (dvmIsBreakFrame((u4*)fp)) {
                LOGD("  #%d: break frame (%p)",
                     stackLevel, saveArea->returnAddr);
            }
            else {
                LOGD("  #%d: %s.%s%s (%p)",
                     stackLevel,
                     saveArea->method->clazz->descriptor,
                     saveArea->method->name,
                     dvmIsNativeMethod(saveArea->method) ?
                         " (native)" : "",
                     saveArea->returnAddr);
            }
        }
        stackLevel++;
        saveArea->returnAddr = NULL;
        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    /* Make sure the stack is fully unwound to the bottom */
    assert(saveArea == NULL ||
           (u1 *) (saveArea+1) == thread->interpStackStart);
}

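/*
 * Wipe the code cache and all bookkeeping that refers to it. The reset is
 * deferred if any thread is still executing inside the cache.
 */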
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used, and
         * 2) any thread stuck in JIT'ed code will soon return
         *    to the interpreter.
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
    }

    if (inJit) {
        LOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Update the translation cache version */
    gDvmJit.cacheVersion++;

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
           0,
           gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache +
                          gDvmJit.codeCacheByteUsed, 0);

    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    LOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}

/*
 * Perform actions that are only safe when all threads are suspended. Currently
 * we do:
 * 1) Check if the code cache is full. If so reset it and restart populating it
 *    from scratch.
 * 2) Patch predicted chaining cells by consuming recorded work orders.
 */
void dvmCompilerPerformSafePointChecks(void)
{
    if (gDvmJit.codeCacheFull) {
        resetCodeCache();
    }
    dvmCompilerPatchInlineCache();
}

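/*
 * One-time initialization performed by the compiler thread: code cache,
 * compiler arena, JitTable, and the profiling tables.
 */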
static bool compilerThreadStartup(void)
{
    JitEntry *pJitTable = NULL;
    unsigned char *pJitProfTable = NULL;
    JitTraceProfCounters *pJitTraceProfCounters = NULL;
    unsigned int i;

    if (!dvmCompilerArchInit())
        goto fail;

    /*
     * Set up the code cache if we have not inherited a valid code cache
     * from the zygote.
     */
    if (gDvmJit.codeCache == NULL) {
        if (!dvmCompilerSetupCodeCache())
            goto fail;
    }

    /* Allocate the initial arena block */
    if (dvmCompilerHeapInit() == false) {
        goto fail;
    }

    dvmLockMutex(&gDvmJit.compilerLock);

    /* Track method-level compilation statistics */
    gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);

#if defined(WITH_JIT_TUNING)
    gDvm.verboseShutdown = true;
#endif

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /* Set up the JitTable */

    /* Power of 2? */
    assert(gDvmJit.jitTableSize &&
           !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));

    dvmInitMutex(&gDvmJit.tableLock);
    dvmLockMutex(&gDvmJit.tableLock);
    pJitTable = (JitEntry*)
                calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
    if (!pJitTable) {
        LOGE("jit table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /*
     * NOTE: the profile table must only be allocated once, globally.
     * Profiling is turned on and off by nulling out gDvm.pJitProfTable
     * and then restoring its original value. However, this action
     * is not synchronized for speed so threads may continue to hold
     * and update the profile table after profiling has been turned
     * off by nulling the global pointer. Be aware.
     */
    pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
    if (!pJitProfTable) {
        LOGE("jit prof table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
    for (i=0; i < gDvmJit.jitTableSize; i++) {
        pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
    }
    /* Is chain field wide enough for termination pattern? */
    assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

    /* Allocate the trace profiling structure */
    pJitTraceProfCounters = (JitTraceProfCounters*)
                             calloc(1, sizeof(*pJitTraceProfCounters));
    if (!pJitTraceProfCounters) {
        LOGE("jit trace prof counters allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }

    gDvmJit.pJitEntryTable = pJitTable;
    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
    gDvmJit.jitTableEntriesUsed = 0;
    gDvmJit.compilerHighWater =
        COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
    /*
     * If the VM is launched with wait-on-the-debugger, we will need to hide
     * the profile table here
     */
    gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
    gDvmJit.pProfTableCopy = pJitProfTable;
    gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Signal running threads to refresh their cached pJitTable pointers */
    dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
    dvmResumeAllThreads(SUSPEND_FOR_REFRESH);

    /* Enable signature breakpoints by customizing the following code */
#if defined(SIGNATURE_BREAKPOINT)
    /*
     * Suppose one sees the following native crash in the bugreport:
     * I/DEBUG ( 1638): Build fingerprint: 'unknown'
     * I/DEBUG ( 1638): pid: 2468, tid: 2507 >>> com.google.android.gallery3d
     * I/DEBUG ( 1638): signal 11 (SIGSEGV), fault addr 00001400
     * I/DEBUG ( 1638): r0 44ea7190 r1 44e4f7b8 r2 44ebc710 r3 00000000
     * I/DEBUG ( 1638): r4 00000a00 r5 41862dec r6 4710dc10 r7 00000280
     * I/DEBUG ( 1638): r8 ad010f40 r9 46a37a12 10 001116b0 fp 42a78208
     * I/DEBUG ( 1638): ip 00000090 sp 4710dbc8 lr ad060e67 pc 46b90682
     * cpsr 00000030
     * I/DEBUG ( 1638): #00 pc 46b90682 /dev/ashmem/dalvik-jit-code-cache
     * I/DEBUG ( 1638): #01 pc 00060e62 /system/lib/libdvm.so
     *
     * I/DEBUG ( 1638): code around pc:
     * I/DEBUG ( 1638): 46b90660 6888d01c 34091dcc d2174287 4a186b68
     * I/DEBUG ( 1638): 46b90670 d0052800 68006809 28004790 6b68d00e
     * I/DEBUG ( 1638): 46b90680 512000bc 37016eaf 6ea866af 6f696028
     * I/DEBUG ( 1638): 46b90690 682a6069 429a686b e003da08 6df1480b
     * I/DEBUG ( 1638): 46b906a0 1c2d4788 47806d70 46a378fa 47806d70
     *
     * Clearly it is a JIT bug. To find out which translation contains the
     * offending code, the content of the memory dump around the faulting PC
     * can be pasted into the gDvmJit.signatureBreakpoint[] array. The next
     * time a similar compilation is created, the JIT compiler will replay the
     * trace in verbose mode so that the instruction sequence can be
     * investigated in detail.
     *
     * The length of the signature may need additional experiments to
     * determine. The rule of thumb is: don't include PC-relative instructions
     * in the signature, since they may be affected by the alignment of the
     * compiled code. However, a signature that's too short might increase the
     * chance of false-positive matches. Using gdbjithelper to disassemble the
     * memory content first might be a good companion approach.
     *
     * For example, if the next 4 words starting from 46b90680 are pasted into
     * the data structure:
     */

    gDvmJit.signatureBreakpointSize = 4;
    gDvmJit.signatureBreakpoint =
        malloc(sizeof(u4) * gDvmJit.signatureBreakpointSize);
    gDvmJit.signatureBreakpoint[0] = 0x512000bc;
    gDvmJit.signatureBreakpoint[1] = 0x37016eaf;
    gDvmJit.signatureBreakpoint[2] = 0x6ea866af;
    gDvmJit.signatureBreakpoint[3] = 0x6f696028;

    /*
     * The following log will be printed when a match is found in subsequent
     * testing:
     *
     * D/dalvikvm( 2468): Signature match starting from offset 0x34 (4 words)
     * D/dalvikvm( 2468): --------
     * D/dalvikvm( 2468): Compiler: Building trace for computeVisibleItems,
     * offset 0x1f7
     * D/dalvikvm( 2468): 0x46a37a12: 0x0090 add-int v42, v5, v26
     * D/dalvikvm( 2468): 0x46a37a16: 0x004d aput-object v13, v14, v42
     * D/dalvikvm( 2468): 0x46a37a1a: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a3794e: 0x00d8 add-int/lit8 v26, v26, (#1)
     * D/dalvikvm( 2468): 0x46a37952: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a378ee: 0x0002 move/from16 v0, v26, (#0)
     * D/dalvikvm( 2468): 0x46a378f2: 0x0002 move/from16 v1, v29, (#0)
     * D/dalvikvm( 2468): 0x46a378f6: 0x0035 if-ge v0, v1, (#10)
     * D/dalvikvm( 2468): TRACEINFO (554): 0x46a37624
     * Lcom/cooliris/media/GridLayer;computeVisibleItems 0x1f7 14 of 934, 8
     * blocks
     *     :
     *     :
     * D/dalvikvm( 2468): 0x20 (0020): ldr r0, [r5, #52]
     * D/dalvikvm( 2468): 0x22 (0022): ldr r2, [pc, #96]
     * D/dalvikvm( 2468): 0x24 (0024): cmp r0, #0
     * D/dalvikvm( 2468): 0x26 (0026): beq 0x00000034
     * D/dalvikvm( 2468): 0x28 (0028): ldr r1, [r1, #0]
     * D/dalvikvm( 2468): 0x2a (002a): ldr r0, [r0, #0]
     * D/dalvikvm( 2468): 0x2c (002c): blx r2
     * D/dalvikvm( 2468): 0x2e (002e): cmp r0, #0
     * D/dalvikvm( 2468): 0x30 (0030): beq 0x00000050
     * D/dalvikvm( 2468): 0x32 (0032): ldr r0, [r5, #52]
     * D/dalvikvm( 2468): 0x34 (0034): lsls r4, r7, #2
     * D/dalvikvm( 2468): 0x36 (0036): str r0, [r4, r4]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x01fb @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0195:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0195 @ add-int/lit8 v26,
     * v26, (#1)
     * D/dalvikvm( 2468): 0x38 (0038): ldr r7, [r5, #104]
     * D/dalvikvm( 2468): 0x3a (003a): adds r7, r7, #1
     * D/dalvikvm( 2468): 0x3c (003c): str r7, [r5, #104]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0197 @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0165:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0165 @ move/from16 v0, v26,
     * (#0)
     * D/dalvikvm( 2468): 0x3e (003e): ldr r0, [r5, #104]
     * D/dalvikvm( 2468): 0x40 (0040): str r0, [r5, #0]
     *
     * The "str r0, [r4, r4]" is indeed the culprit of the native crash.
     */
#endif

    return true;

fail:
    return false;
}

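/*
 * Main loop of the compiler thread: optionally delay startup while the
 * framework boots, run one-time initialization, then dequeue and compile
 * work orders until asked to halt.
 */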
static void *compilerThreadStart(void *arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);

    /*
     * If we're not running stand-alone, wait a little before receiving
     * translation requests on the assumption that process start-up code
     * isn't worth compiling. We'll resume when the framework signals us
     * that the first screen draw has happened, or the timer below expires
     * (to catch daemons).
     *
     * There is a theoretical race between the callback to
     * VMRuntime.startJitCompilation and when the compiler thread reaches this
     * point. In case the callback happens earlier, in order not to permanently
     * hold the system_server (which is not using the timed wait) in
     * interpreter-only mode we bypass the delay here.
     */
    if (gDvmJit.runningInAndroidFramework &&
        !gDvmJit.alreadyEnabledViaFramework) {
        /*
         * If the current VM instance is the system server (detected by having
         * 0 in gDvm.systemServerPid), we will use the indefinite wait on the
         * condition variable to determine whether to start the JIT or not.
         * If the system server detects that the whole system is booted in
         * safe mode, the condition variable will never be signaled and the
         * system server will remain in the interpreter-only mode. All
         * subsequent apps will be started with the --enable-safemode flag
         * explicitly appended.
         */
        if (gDvm.systemServerPid == 0) {
            dvmLockMutex(&gDvmJit.compilerLock);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            dvmUnlockMutex(&gDvmJit.compilerLock);
            LOGD("JIT started for system_server");
        } else {
            dvmLockMutex(&gDvmJit.compilerLock);
            /*
             * TUNING: experiment with the delay & perhaps make it
             * target-specific
             */
            dvmRelativeCondWait(&gDvmJit.compilerQueueActivity,
                                &gDvmJit.compilerLock, 3000, 0);
            dvmUnlockMutex(&gDvmJit.compilerLock);
        }
        if (gDvmJit.haltCompilerThread) {
            return NULL;
        }
    }

    compilerThreadStartup();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Since the compiler thread will not touch any objects on the heap once
     * being created, we just fake its state as VMWAIT so that it can be a
     * bit late when there is a suspend request pending.
     */
    while (!gDvmJit.haltCompilerThread) {
        if (workQueueLength() == 0) {
            int cc;
            cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
            assert(cc == 0);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            continue;
        } else {
            do {
                CompilerWorkOrder work = workDequeue();
                dvmUnlockMutex(&gDvmJit.compilerLock);
#if defined(WITH_JIT_TUNING)
                u8 startTime = dvmGetRelativeTimeUsec();
#endif
                /*
                 * Check whether there is a suspend request on me. This
                 * is necessary to allow a clean shutdown.
                 *
                 * However, in the blocking stress testing mode, let the
                 * compiler thread continue doing compilations to unblock
                 * other requesting threads. This may occasionally prevent
                 * shutdown from proceeding cleanly in the standalone
                 * invocation of the vm but this should be acceptable.
                 */
                if (!gDvmJit.blockingMode)
                    dvmCheckSuspendPending(dvmThreadSelf());
                /* Is JitTable filling up? */
                if (gDvmJit.jitTableEntriesUsed >
                    (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
                    bool resizeFail =
                        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
                    /*
                     * If the jit table is full, consider it time to reset
                     * the code cache too.
                     */
                    gDvmJit.codeCacheFull |= resizeFail;
                }
                if (gDvmJit.haltCompilerThread) {
                    LOGD("Compiler shutdown in progress - discarding request");
                } else if (!gDvmJit.codeCacheFull) {
                    jmp_buf jmpBuf;
                    work.bailPtr = &jmpBuf;
                    bool aborted = setjmp(jmpBuf);
                    if (!aborted) {
                        bool codeCompiled = dvmCompilerDoWork(&work);
                        if (codeCompiled && !work.result.discardResult &&
                                work.result.codeAddress) {
                            dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
                                              work.result.instructionSet,
                                              false, /* not method entry */
                                              work.result.profileCodeSize);
                        }
                    }
                    dvmCompilerArenaReset();
                }
                free(work.info);
#if defined(WITH_JIT_TUNING)
                gDvmJit.jitTime += dvmGetRelativeTimeUsec() - startTime;
#endif
                dvmLockMutex(&gDvmJit.compilerLock);
            } while (workQueueLength() != 0);
        }
    }
    pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * As part of detaching the thread we need to call into Java code to update
     * the ThreadGroup, and we should not be in VMWAIT state while executing
     * interpreted code.
     */
    dvmChangeStatus(NULL, THREAD_RUNNING);

    if (gDvm.verboseShutdown)
        LOGD("Compiler thread shutting down\n");
    return NULL;
}

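/* Initialize the compiler synchronization objects and launch the compiler thread */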
bool dvmCompilerStartup(void)
{
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer the rest of initialization until we're sure JIT'ing makes sense.
     * Launch the compiler thread, which will do the real initialization if
     * and when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}

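/* Disable new translation requests and shut the compiler thread down */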
void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;

    if (gDvm.verboseShutdown ||
        gDvmJit.profileMode == kTraceProfilingContinuous) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
            sleep(5);
    }

    if (gDvmJit.compilerHandle) {

        gDvmJit.haltCompilerThread = true;

        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            LOGW("Compiler thread join failed\n");
        else if (gDvm.verboseShutdown)
            LOGD("Compiler thread has shut down\n");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow for the compiler
     * thread to be restarted after it exits here. We aren't freeing
     * the JitTable or the ProfTable because threads which still may be
     * running or in the process of shutting down may hold references to
     * them.
     */
}

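/*
 * Called when the debugger or profiler state changes. Enables or disables
 * translation requests and, on the first enabling of method tracing,
 * switches the compiler into trace-support mode (resetting the code cache
 * if translations already exist).
 */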
void dvmCompilerStateRefresh()
{
    bool jitActive;
    bool jitActivate;
    bool needUnchain = false;

    /*
     * The tableLock might not be initialized yet by the compiler thread if
     * the debugger is attached from the very beginning of the VM launch. If
     * pProfTableCopy is NULL, the lock is not initialized yet and we don't
     * need to refresh anything either.
     */
    if (gDvmJit.pProfTableCopy == NULL) {
        return;
    }

    /*
     * On the first enabling of method tracing, switch the compiler
     * into a mode that includes trace support for invokes and returns.
     * If there are any existing translations, flush them. NOTE: we
     * can't blindly flush the translation cache because this code
     * may be executed before the compiler thread has finished
     * initialization.
     */
    if ((gDvm.interpBreak & kSubModeMethodTrace) &&
        !gDvmJit.methodTraceSupport) {
        bool resetRequired;
        /*
         * compilerLock will prevent new compilations from being
         * installed while we are working.
         */
        dvmLockMutex(&gDvmJit.compilerLock);
        gDvmJit.cacheVersion++; // invalidate compilations in flight
        gDvmJit.methodTraceSupport = true;
        resetRequired = (gDvmJit.numCompilations != 0);
        dvmUnlockMutex(&gDvmJit.compilerLock);
        if (resetRequired) {
            dvmSuspendAllThreads(SUSPEND_FOR_CC_RESET);
            resetCodeCache();
            dvmResumeAllThreads(SUSPEND_FOR_CC_RESET);
        }
    }

    dvmLockMutex(&gDvmJit.tableLock);
    jitActive = gDvmJit.pProfTable != NULL;
    jitActivate = !dvmDebuggerOrProfilerActive();

    if (jitActivate && !jitActive) {
        gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
    } else if (!jitActivate && jitActive) {
        gDvmJit.pProfTable = NULL;
        needUnchain = true;
    }
    dvmUnlockMutex(&gDvmJit.tableLock);
    if (needUnchain)
        dvmJitUnchainAll();
}