blob: 817b7e6491cee21375c26ec1d20124e82e91f919 [file] [log] [blame]
Ben Chengba4fc8b2009-06-01 13:00:29 -07001/*
2 * Copyright (C) 2009 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <sys/mman.h>
18#include <errno.h>
Ben Cheng7c4afdb2010-02-11 15:03:00 -080019#include <cutils/ashmem.h>
Ben Chengba4fc8b2009-06-01 13:00:29 -070020
21#include "Dalvik.h"
22#include "interp/Jit.h"
23#include "CompilerInternals.h"
24
Ben Chengba4fc8b2009-06-01 13:00:29 -070025static inline bool workQueueLength(void)
26{
27 return gDvmJit.compilerQueueLength;
28}
29
/*
 * Remove and return the work order at the head of the compiler work queue.
 *
 * The queue is a fixed-size circular buffer indexed by
 * compilerWorkDequeueIndex; a consumed slot is marked kWorkOrderInvalid so
 * stale entries can be caught by the assert below.  Caller must hold
 * gDvmJit.compilerLock.
 */
static CompilerWorkOrder workDequeue(void)
{
    /* The head slot must contain a live work order */
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    /* Invalidate the consumed slot and advance the head index */
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    /* Wrap around the circular buffer */
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    /* Wake any thread blocked in dvmCompilerDrainQueue() */
    if (gDvmJit.compilerQueueLength == 0) {
        dvmSignalCond(&gDvmJit.compilerQueueEmpty);
    }

    /* Remember the high water mark of the queue length */
    /*
     * NOTE(review): this check runs after the decrement above, so the
     * recorded maximum lags the true peak by one - confirm whether that
     * is intentional.
     */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}
52
Bill Buzbee1b3da592011-02-03 07:38:22 -080053/*
54 * Enqueue a work order - retrying until successful. If attempt to enqueue
55 * is repeatedly unsuccessful, assume the JIT is in a bad state and force a
56 * code cache reset.
57 */
58#define ENQUEUE_MAX_RETRIES 20
59void dvmCompilerForceWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
60{
61 bool success;
62 int retries = 0;
63 do {
64 success = dvmCompilerWorkEnqueue(pc, kind, info);
65 if (!success) {
66 retries++;
67 if (retries > ENQUEUE_MAX_RETRIES) {
68 LOGE("JIT: compiler queue wedged - forcing reset");
69 gDvmJit.codeCacheFull = true; // Force reset
70 success = true; // Because we'll drop the order now anyway
71 } else {
72 dvmLockMutex(&gDvmJit.compilerLock);
73 pthread_cond_wait(&gDvmJit.compilerQueueActivity,
74 &gDvmJit.compilerLock);
75 dvmUnlockMutex(&gDvmJit.compilerLock);
76
77 }
78 }
79 } while (!success);
80}
81
/*
 * Attempt to enqueue a work order, returning true if successful.
 *
 * Fails (returns false) when the queue is full or the code cache is full.
 * A request for a pc that is already queued is treated as success.
 *
 * NOTE: Make sure that the caller frees the info pointer if the return value
 * is false.
 */
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    dvmLockMutex(&gDvmJit.compilerLock);

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        result = false;
        goto unlockAndExit;
    }

    /* Scan the live entries (head to tail) for a duplicate request */
    for (numWork = gDvmJit.compilerQueueLength,
           i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        /*
         * NOTE(review): this path returns true without storing info, so the
         * caller will not free it per the contract above - confirm whether
         * info can leak when a duplicate pc is requested.
         */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc)
            goto unlockAndExit;
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    /* Fill in the tail slot */
    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.methodCompilationAborted = NULL;
    newOrder->result.codeAddress = NULL;
    /* Debug-only trace requests produce no installable code */
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    /* Stamp with the current cache version so stale results can be dropped */
    newOrder->result.cacheVersion = gDvmJit.cacheVersion;
    newOrder->result.requestingThread = dvmThreadSelf();

    /* Advance the tail index with wrap-around */
    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    /* Wake the compiler thread */
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

unlockAndExit:
    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}
141
/* Block until the queue length is 0, or there is a pending suspend request */
void dvmCompilerDrainQueue(void)
{
    Thread *self = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Also bail out when a suspend request is pending or the compiler is
     * halting, so this thread can reach a safe point promptly instead of
     * waiting for the queue to drain.
     */
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
           self->suspendCount == 0) {
        /*
         * Use timed wait here - more than one mutator threads may be blocked
         * but the compiler thread will only signal once when the queue is
         * emptied. Furthermore, the compiler thread may have been shutdown
         * so the blocked thread may never get the wakeup signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock, 1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}
160
/*
 * Create and initialize the JIT code cache.
 *
 * The cache is backed by an ashmem region (named so it can be identified in
 * /proc/<pid>/maps), mapped read/write/execute, seeded with the assembly
 * handler templates, and finally re-protected with PROTECT_CODE_CACHE_ATTRS.
 *
 * Returns true on success; false if the ashmem region or the mapping cannot
 * be created.  Aborts the VM if the final mprotect fails.
 */
bool dvmCompilerSetupCodeCache(void)
{
    /* Template code boundaries provided by the assembly source */
    extern void dvmCompilerTemplateStart(void);
    extern void dmvCompilerTemplateEnd(void); /* (sic) - symbol really is "dmv..." */
    int fd;

    /* Allocate the code cache */
    fd = ashmem_create_region("dalvik-jit-code-cache", gDvmJit.codeCacheSize);
    if (fd < 0) {
        LOGE("Could not create %u-byte ashmem region for the JIT code cache",
             gDvmJit.codeCacheSize);
        return false;
    }
    gDvmJit.codeCache = mmap(NULL, gDvmJit.codeCacheSize,
                             PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE , fd, 0);
    close(fd);
    if (gDvmJit.codeCache == MAP_FAILED) {
        LOGE("Failed to mmap the JIT code cache: %s\n", strerror(errno));
        return false;
    }

    /* Cache the page mask for quick page-alignment computations */
    gDvmJit.pageSizeMask = getpagesize() - 1;

    /* This can be found through "dalvik-jit-code-cache" in /proc/<pid>/maps */
    // LOGD("Code cache starts at %p", gDvmJit.codeCache);

    /* Copy the template code into the beginning of the code cache */
    int templateSize = (intptr_t) dmvCompilerTemplateEnd -
                       (intptr_t) dvmCompilerTemplateStart;
    memcpy((void *) gDvmJit.codeCache,
           (void *) dvmCompilerTemplateStart,
           templateSize);

    /*
     * Work around a CPU bug by keeping the 32-bit ARM handler code in its own
     * page.
     */
    if (dvmCompilerInstructionSet() == DALVIK_JIT_THUMB2) {
        /* Round the template region up to the next 4K page boundary */
        templateSize = (templateSize + 4095) & ~4095;
    }

    gDvmJit.templateSize = templateSize;
    gDvmJit.codeCacheByteUsed = templateSize;

    /* Only flush the part in the code cache that is being used now */
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache + templateSize, 0);

    /* Drop the write permission until a translation needs to be installed */
    int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
                          PROTECT_CODE_CACHE_ATTRS);

    if (result == -1) {
        LOGE("Failed to remove the write permission for the code cache");
        dvmAbort();
    }

    return true;
}
220
/*
 * Walk the Dalvik stack of the given thread, clearing the returnAddr field
 * of every frame so no frame can return into soon-to-be-invalidated JIT
 * code.  When "print" is true, also log each frame for diagnostics.
 *
 * Called during a code cache reset (see resetCodeCache) while threads are
 * suspended.
 */
static void crawlDalvikStack(Thread *thread, bool print)
{
    void *fp = thread->curFrame;
    StackSaveArea* saveArea = NULL;
    int stackLevel = 0;

    if (print) {
        LOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
             dvmGetThreadStatusStr(thread->status),
             thread->inJitCodeCache,
             thread->inJitCodeCache ? "jit" : "interp");
    }
    /* Crawl the Dalvik stack frames to clear the returnAddr field */
    while (fp != NULL) {
        saveArea = SAVEAREA_FROM_FP(fp);

        if (print) {
            if (dvmIsBreakFrame((u4*)fp)) {
                LOGD(" #%d: break frame (%p)",
                     stackLevel, saveArea->returnAddr);
            }
            else {
                LOGD(" #%d: %s.%s%s (%p)",
                     stackLevel,
                     saveArea->method->clazz->descriptor,
                     saveArea->method->name,
                     dvmIsNativeMethod(saveArea->method) ?
                     " (native)" : "",
                     saveArea->returnAddr);
            }
        }
        stackLevel++;
        saveArea->returnAddr = NULL;
        /* A frame must never link to itself */
        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    /* Make sure the stack is fully unwound to the bottom */
    assert(saveArea == NULL ||
           (u1 *) (saveArea+1) == thread->interpStackStart);
}
261
/*
 * Reset the JIT code cache and all associated state.
 *
 * Must only be called at a safe point (see
 * dvmCompilerPerformSafePointChecks).  If any thread is still executing
 * inside the code cache, the reset is postponed and the delayed-reset
 * counter is bumped; the next safe point will retry.
 */
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    /* Snapshot for the final log message (reset below) */
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used
         * 2) or the thread stuck in the JIT land will soon return
         *    to the interpreter land
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
    }

    if (inJit) {
        LOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Update the translation cache version */
    gDvmJit.cacheVersion++;

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
           0,
           gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache +
                          gDvmJit.codeCacheByteUsed, 0);

    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /*
     * Reset the inflight compilation address (can only be done in safe points
     * or by the compiler thread when its thread state is RUNNING).
     */
    gDvmJit.inflightBaseAddr = NULL;

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    LOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}
350
351/*
352 * Perform actions that are only safe when all threads are suspended. Currently
353 * we do:
354 * 1) Check if the code cache is full. If so reset it and restart populating it
355 * from scratch.
356 * 2) Patch predicted chaining cells by consuming recorded work orders.
357 */
358void dvmCompilerPerformSafePointChecks(void)
359{
360 if (gDvmJit.codeCacheFull) {
361 resetCodeCache();
362 }
363 dvmCompilerPatchInlineCache();
Ben Cheng60c24f42010-01-04 12:29:56 -0800364}
365
Andy McFadden953a0ed2010-09-17 15:48:38 -0700366static bool compilerThreadStartup(void)
Bill Buzbee964a7b02010-01-28 12:54:19 -0800367{
368 JitEntry *pJitTable = NULL;
369 unsigned char *pJitProfTable = NULL;
buzbee2e152ba2010-12-15 16:32:35 -0800370 JitTraceProfCounters *pJitTraceProfCounters = NULL;
Bill Buzbee964a7b02010-01-28 12:54:19 -0800371 unsigned int i;
372
373 if (!dvmCompilerArchInit())
374 goto fail;
375
376 /*
377 * Setup the code cache if we have not inherited a valid code cache
378 * from the zygote.
379 */
380 if (gDvmJit.codeCache == NULL) {
381 if (!dvmCompilerSetupCodeCache())
382 goto fail;
383 }
384
385 /* Allocate the initial arena block */
386 if (dvmCompilerHeapInit() == false) {
387 goto fail;
388 }
389
Ben Cheng385828e2011-03-04 16:48:33 -0800390 /* Cache the thread pointer */
391 gDvmJit.compilerThread = dvmThreadSelf();
392
Bill Buzbee964a7b02010-01-28 12:54:19 -0800393 dvmLockMutex(&gDvmJit.compilerLock);
394
Bill Buzbee964a7b02010-01-28 12:54:19 -0800395 /* Track method-level compilation statistics */
396 gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);
Ben Cheng7a2697d2010-06-07 13:44:23 -0700397
398#if defined(WITH_JIT_TUNING)
Ben Cheng452efba2010-04-30 15:14:00 -0700399 gDvm.verboseShutdown = true;
Ben Cheng1357e942010-02-10 17:21:39 -0800400#endif
Bill Buzbee964a7b02010-01-28 12:54:19 -0800401
402 dvmUnlockMutex(&gDvmJit.compilerLock);
403
404 /* Set up the JitTable */
405
406 /* Power of 2? */
407 assert(gDvmJit.jitTableSize &&
408 !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));
409
410 dvmInitMutex(&gDvmJit.tableLock);
411 dvmLockMutex(&gDvmJit.tableLock);
412 pJitTable = (JitEntry*)
413 calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
414 if (!pJitTable) {
415 LOGE("jit table allocation failed\n");
416 dvmUnlockMutex(&gDvmJit.tableLock);
417 goto fail;
418 }
419 /*
420 * NOTE: the profile table must only be allocated once, globally.
421 * Profiling is turned on and off by nulling out gDvm.pJitProfTable
422 * and then restoring its original value. However, this action
423 * is not syncronized for speed so threads may continue to hold
424 * and update the profile table after profiling has been turned
425 * off by null'ng the global pointer. Be aware.
426 */
427 pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
428 if (!pJitProfTable) {
429 LOGE("jit prof table allocation failed\n");
430 dvmUnlockMutex(&gDvmJit.tableLock);
431 goto fail;
432 }
433 memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
434 for (i=0; i < gDvmJit.jitTableSize; i++) {
435 pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
436 }
437 /* Is chain field wide enough for termination pattern? */
438 assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);
439
buzbee2e152ba2010-12-15 16:32:35 -0800440 /* Allocate the trace profiling structure */
441 pJitTraceProfCounters = (JitTraceProfCounters*)
442 calloc(1, sizeof(*pJitTraceProfCounters));
443 if (!pJitTraceProfCounters) {
444 LOGE("jit trace prof counters allocation failed\n");
445 dvmUnlockMutex(&gDvmJit.tableLock);
446 goto fail;
447 }
448
Bill Buzbee964a7b02010-01-28 12:54:19 -0800449 gDvmJit.pJitEntryTable = pJitTable;
450 gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
451 gDvmJit.jitTableEntriesUsed = 0;
452 gDvmJit.compilerHighWater =
453 COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
Ben Chenga4973592010-03-31 11:59:18 -0700454 /*
455 * If the VM is launched with wait-on-the-debugger, we will need to hide
456 * the profile table here
457 */
458 gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
Bill Buzbee06bb8392010-01-31 18:53:15 -0800459 gDvmJit.pProfTableCopy = pJitProfTable;
buzbee2e152ba2010-12-15 16:32:35 -0800460 gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
Bill Buzbee964a7b02010-01-28 12:54:19 -0800461 dvmUnlockMutex(&gDvmJit.tableLock);
462
463 /* Signal running threads to refresh their cached pJitTable pointers */
464 dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
465 dvmResumeAllThreads(SUSPEND_FOR_REFRESH);
Ben Chengdca71432010-03-16 16:04:11 -0700466
467 /* Enable signature breakpoints by customizing the following code */
468#if defined(SIGNATURE_BREAKPOINT)
469 /*
470 * Suppose one sees the following native crash in the bugreport:
471 * I/DEBUG ( 1638): Build fingerprint: 'unknown'
472 * I/DEBUG ( 1638): pid: 2468, tid: 2507 >>> com.google.android.gallery3d
473 * I/DEBUG ( 1638): signal 11 (SIGSEGV), fault addr 00001400
474 * I/DEBUG ( 1638): r0 44ea7190 r1 44e4f7b8 r2 44ebc710 r3 00000000
475 * I/DEBUG ( 1638): r4 00000a00 r5 41862dec r6 4710dc10 r7 00000280
476 * I/DEBUG ( 1638): r8 ad010f40 r9 46a37a12 10 001116b0 fp 42a78208
477 * I/DEBUG ( 1638): ip 00000090 sp 4710dbc8 lr ad060e67 pc 46b90682
478 * cpsr 00000030
479 * I/DEBUG ( 1638): #00 pc 46b90682 /dev/ashmem/dalvik-jit-code-cache
480 * I/DEBUG ( 1638): #01 pc 00060e62 /system/lib/libdvm.so
481 *
482 * I/DEBUG ( 1638): code around pc:
483 * I/DEBUG ( 1638): 46b90660 6888d01c 34091dcc d2174287 4a186b68
484 * I/DEBUG ( 1638): 46b90670 d0052800 68006809 28004790 6b68d00e
485 * I/DEBUG ( 1638): 46b90680 512000bc 37016eaf 6ea866af 6f696028
486 * I/DEBUG ( 1638): 46b90690 682a6069 429a686b e003da08 6df1480b
487 * I/DEBUG ( 1638): 46b906a0 1c2d4788 47806d70 46a378fa 47806d70
488 *
489 * Clearly it is a JIT bug. To find out which translation contains the
490 * offending code, the content of the memory dump around the faulting PC
491 * can be pasted into the gDvmJit.signatureBreakpoint[] array and next time
492 * when a similar compilation is being created, the JIT compiler replay the
493 * trace in the verbose mode and one can investigate the instruction
494 * sequence in details.
495 *
496 * The length of the signature may need additional experiments to determine.
497 * The rule of thumb is don't include PC-relative instructions in the
498 * signature since it may be affected by the alignment of the compiled code.
499 * However, a signature that's too short might increase the chance of false
500 * positive matches. Using gdbjithelper to disassembly the memory content
501 * first might be a good companion approach.
502 *
503 * For example, if the next 4 words starting from 46b90680 is pasted into
504 * the data structure:
505 */
506
507 gDvmJit.signatureBreakpointSize = 4;
508 gDvmJit.signatureBreakpoint =
509 malloc(sizeof(u4) * gDvmJit.signatureBreakpointSize);
510 gDvmJit.signatureBreakpoint[0] = 0x512000bc;
511 gDvmJit.signatureBreakpoint[1] = 0x37016eaf;
512 gDvmJit.signatureBreakpoint[2] = 0x6ea866af;
513 gDvmJit.signatureBreakpoint[3] = 0x6f696028;
514
515 /*
516 * The following log will be printed when a match is found in subsequent
517 * testings:
518 *
519 * D/dalvikvm( 2468): Signature match starting from offset 0x34 (4 words)
520 * D/dalvikvm( 2468): --------
521 * D/dalvikvm( 2468): Compiler: Building trace for computeVisibleItems,
522 * offset 0x1f7
523 * D/dalvikvm( 2468): 0x46a37a12: 0x0090 add-int v42, v5, v26
524 * D/dalvikvm( 2468): 0x46a37a16: 0x004d aput-object v13, v14, v42
525 * D/dalvikvm( 2468): 0x46a37a1a: 0x0028 goto, (#0), (#0)
526 * D/dalvikvm( 2468): 0x46a3794e: 0x00d8 add-int/lit8 v26, v26, (#1)
527 * D/dalvikvm( 2468): 0x46a37952: 0x0028 goto, (#0), (#0)
528 * D/dalvikvm( 2468): 0x46a378ee: 0x0002 move/from16 v0, v26, (#0)
529 * D/dalvikvm( 2468): 0x46a378f2: 0x0002 move/from16 v1, v29, (#0)
530 * D/dalvikvm( 2468): 0x46a378f6: 0x0035 if-ge v0, v1, (#10)
531 * D/dalvikvm( 2468): TRACEINFO (554): 0x46a37624
532 * Lcom/cooliris/media/GridLayer;computeVisibleItems 0x1f7 14 of 934, 8
533 * blocks
534 * :
535 * :
536 * D/dalvikvm( 2468): 0x20 (0020): ldr r0, [r5, #52]
537 * D/dalvikvm( 2468): 0x22 (0022): ldr r2, [pc, #96]
538 * D/dalvikvm( 2468): 0x24 (0024): cmp r0, #0
539 * D/dalvikvm( 2468): 0x26 (0026): beq 0x00000034
540 * D/dalvikvm( 2468): 0x28 (0028): ldr r1, [r1, #0]
541 * D/dalvikvm( 2468): 0x2a (002a): ldr r0, [r0, #0]
542 * D/dalvikvm( 2468): 0x2c (002c): blx r2
543 * D/dalvikvm( 2468): 0x2e (002e): cmp r0, #0
544 * D/dalvikvm( 2468): 0x30 (0030): beq 0x00000050
545 * D/dalvikvm( 2468): 0x32 (0032): ldr r0, [r5, #52]
546 * D/dalvikvm( 2468): 0x34 (0034): lsls r4, r7, #2
547 * D/dalvikvm( 2468): 0x36 (0036): str r0, [r4, r4]
548 * D/dalvikvm( 2468): -------- dalvik offset: 0x01fb @ goto, (#0), (#0)
549 * D/dalvikvm( 2468): L0x0195:
550 * D/dalvikvm( 2468): -------- dalvik offset: 0x0195 @ add-int/lit8 v26,
551 * v26, (#1)
552 * D/dalvikvm( 2468): 0x38 (0038): ldr r7, [r5, #104]
553 * D/dalvikvm( 2468): 0x3a (003a): adds r7, r7, #1
554 * D/dalvikvm( 2468): 0x3c (003c): str r7, [r5, #104]
555 * D/dalvikvm( 2468): -------- dalvik offset: 0x0197 @ goto, (#0), (#0)
556 * D/dalvikvm( 2468): L0x0165:
557 * D/dalvikvm( 2468): -------- dalvik offset: 0x0165 @ move/from16 v0, v26,
558 * (#0)
559 * D/dalvikvm( 2468): 0x3e (003e): ldr r0, [r5, #104]
560 * D/dalvikvm( 2468): 0x40 (0040): str r0, [r5, #0]
561 *
562 * The "str r0, [r4, r4]" is indeed the culprit of the native crash.
563 */
564#endif
565
Bill Buzbee964a7b02010-01-28 12:54:19 -0800566 return true;
567
568fail:
569 return false;
570
571}
572
/*
 * Entry point of the compiler thread.  Optionally delays startup while the
 * Android framework boots, performs second-stage initialization, then loops
 * consuming work orders until haltCompilerThread is set.
 *
 * Locking protocol: gDvmJit.compilerLock is held while inspecting the queue
 * and released around the actual compilation of each work order.
 */
static void *compilerThreadStart(void *arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);

    /*
     * If we're not running stand-alone, wait a little before
     * receiving translation requests on the assumption that process start
     * up code isn't worth compiling. We'll resume when the framework
     * signals us that the first screen draw has happened, or the timer
     * below expires (to catch daemons).
     *
     * There is a theoretical race between the callback to
     * VMRuntime.startJitCompiation and when the compiler thread reaches this
     * point. In case the callback happens earlier, in order not to permanently
     * hold the system_server (which is not using the timed wait) in
     * interpreter-only mode we bypass the delay here.
     */
    if (gDvmJit.runningInAndroidFramework &&
        !gDvmJit.alreadyEnabledViaFramework) {
        /*
         * If the current VM instance is the system server (detected by having
         * 0 in gDvm.systemServerPid), we will use the indefinite wait on the
         * conditional variable to determine whether to start the JIT or not.
         * If the system server detects that the whole system is booted in
         * safe mode, the conditional variable will never be signaled and the
         * system server will remain in the interpreter-only mode. All
         * subsequent apps will be started with the --enable-safemode flag
         * explicitly appended.
         */
        if (gDvm.systemServerPid == 0) {
            dvmLockMutex(&gDvmJit.compilerLock);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            dvmUnlockMutex(&gDvmJit.compilerLock);
            LOGD("JIT started for system_server");
        } else {
            dvmLockMutex(&gDvmJit.compilerLock);
            /*
             * TUNING: experiment with the delay & perhaps make it
             * target-specific
             */
            dvmRelativeCondWait(&gDvmJit.compilerQueueActivity,
                                 &gDvmJit.compilerLock, 3000, 0);
            dvmUnlockMutex(&gDvmJit.compilerLock);
        }
        /* Shutdown may have been requested while we were waiting */
        if (gDvmJit.haltCompilerThread) {
            return NULL;
        }
    }

    compilerThreadStartup();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Since the compiler thread will not touch any objects on the heap once
     * being created, we just fake its state as VMWAIT so that it can be a
     * bit late when there is suspend request pending.
     */
    while (!gDvmJit.haltCompilerThread) {
        if (workQueueLength() == 0) {
            int cc;
            /* Nothing to do - tell any drainers and sleep for more work */
            cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
            assert(cc == 0);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            continue;
        } else {
            do {
                CompilerWorkOrder work = workDequeue();
                /* Compile without holding the lock so mutators can enqueue */
                dvmUnlockMutex(&gDvmJit.compilerLock);
#if defined(WITH_JIT_TUNING)
                /* Start timing the compilation of this work order */
                u8 startTime = dvmGetRelativeTimeUsec();
#endif
                /*
                 * Check whether there is a suspend request on me.  This
                 * is necessary to allow a clean shutdown.
                 *
                 * However, in the blocking stress testing mode, let the
                 * compiler thread continue doing compilations to unblock
                 * other requesting threads. This may occasionally cause
                 * shutdown from proceeding cleanly in the standalone invocation
                 * of the vm but this should be acceptable.
                 */
                if (!gDvmJit.blockingMode)
                    dvmCheckSuspendPending(dvmThreadSelf());
                /* Is JitTable filling up? */
                if (gDvmJit.jitTableEntriesUsed >
                    (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
                    bool resizeFail =
                        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
                    /*
                     * If the jit table is full, consider it's time to reset
                     * the code cache too.
                     */
                    gDvmJit.codeCacheFull |= resizeFail;
                }
                if (gDvmJit.haltCompilerThread) {
                    LOGD("Compiler shutdown in progress - discarding request");
                } else if (!gDvmJit.codeCacheFull) {
                    /* The back end longjmps here to abort a compilation */
                    jmp_buf jmpBuf;
                    work.bailPtr = &jmpBuf;
                    bool aborted = setjmp(jmpBuf);
                    if (!aborted) {
                        bool codeCompiled = dvmCompilerDoWork(&work);
                        /* Install the translation unless it was discarded */
                        if (codeCompiled && !work.result.discardResult &&
                                work.result.codeAddress) {
                            dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
                                              work.result.instructionSet,
                                              false, /* not method entry */
                                              work.result.profileCodeSize);
                        }
                    }
                    /* Reclaim arena memory whether or not we bailed */
                    dvmCompilerArenaReset();
                }
                free(work.info);
#if defined(WITH_JIT_TUNING)
                gDvmJit.jitTime += dvmGetRelativeTimeUsec() - startTime;
#endif
                dvmLockMutex(&gDvmJit.compilerLock);
            } while (workQueueLength() != 0);
        }
    }
    /* Final wakeup for anyone blocked in dvmCompilerDrainQueue() */
    pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * As part of detaching the thread we need to call into Java code to update
     * the ThreadGroup, and we should not be in VMWAIT state while executing
     * interpreted code.
     */
    dvmChangeStatus(NULL, THREAD_RUNNING);

    if (gDvm.verboseShutdown)
        LOGD("Compiler thread shutting down\n");
    return NULL;
}
709
/*
 * First-stage JIT initialization: set up the compiler locks, condition
 * variables, and an empty work queue, then launch the compiler thread.
 * The heavyweight initialization is deferred to compilerThreadStartup(),
 * which runs on the compiler thread itself.
 *
 * Returns true if the compiler thread was created successfully.
 */
bool dvmCompilerStartup(void)
{

    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer rest of initialization until we're sure JIT'ng makes sense. Launch
     * the compiler thread, which will do the real initialization if and
     * when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}
733
/*
 * Shut down the JIT compiler thread and quiesce the translation cache.
 *
 * Sequencing matters here:
 *   1. Clear the profiling table pointers so no new translation requests
 *      can be enqueued.
 *   2. Set the halt flag and signal the compiler thread (under
 *      compilerLock) so it wakes up and exits its work loop.
 *   3. Join the thread before touching the translation cache.
 *   4. Unchain translations so no chained branches remain live.
 */
void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;

    if (gDvm.verboseShutdown ||
            gDvmJit.profileMode == kTraceProfilingContinuous) {
        dvmCompilerDumpStats();
        /* Give the compiler thread a chance to drain its queue first */
        while (gDvmJit.compilerQueueLength)
            sleep(5);
    }

    if (gDvmJit.compilerHandle) {

        /* Tell the compiler thread's work loop to terminate */
        gDvmJit.haltCompilerThread = true;

        /* Wake the thread in case it is blocked waiting for work */
        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            LOGW("Compiler thread join failed\n");
        else if (gDvm.verboseShutdown)
            LOGD("Compiler thread has shut down\n");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow for the compiler
     * thread to be restarted after it exits here. We aren't freeing
     * the JitTable or the ProfTable because threads which still may be
     * running or in the process of shutting down may hold references to
     * them.
     */
}
Bill Buzbee06bb8392010-01-31 18:53:15 -0800774
775void dvmCompilerStateRefresh()
776{
777 bool jitActive;
778 bool jitActivate;
Bill Buzbee3e392682010-02-03 18:13:57 -0800779 bool needUnchain = false;
Bill Buzbee06bb8392010-01-31 18:53:15 -0800780
Ben Chenga4973592010-03-31 11:59:18 -0700781 /*
782 * The tableLock might not be initialized yet by the compiler thread if
783 * debugger is attached from the very beginning of the VM launch. If
784 * pProfTableCopy is NULL, the lock is not initialized yet and we don't
785 * need to refresh anything either.
786 */
787 if (gDvmJit.pProfTableCopy == NULL) {
788 return;
789 }
790
buzbee18fba342011-01-19 15:31:15 -0800791 /*
792 * On the first enabling of method tracing, switch the compiler
793 * into a mode that includes trace support for invokes and returns.
794 * If there are any existing translations, flush them. NOTE: we
795 * can't blindly flush the translation cache because this code
796 * may be executed before the compiler thread has finished
797 * initialization.
798 */
799 if ((gDvm.interpBreak & kSubModeMethodTrace) &&
800 !gDvmJit.methodTraceSupport) {
801 bool resetRequired;
802 /*
803 * compilerLock will prevent new compilations from being
804 * installed while we are working.
805 */
806 dvmLockMutex(&gDvmJit.compilerLock);
807 gDvmJit.cacheVersion++; // invalidate compilations in flight
808 gDvmJit.methodTraceSupport = true;
809 resetRequired = (gDvmJit.numCompilations != 0);
810 dvmUnlockMutex(&gDvmJit.compilerLock);
811 if (resetRequired) {
812 dvmSuspendAllThreads(SUSPEND_FOR_CC_RESET);
813 resetCodeCache();
814 dvmResumeAllThreads(SUSPEND_FOR_CC_RESET);
815 }
816 }
817
Bill Buzbee06bb8392010-01-31 18:53:15 -0800818 dvmLockMutex(&gDvmJit.tableLock);
819 jitActive = gDvmJit.pProfTable != NULL;
buzbeecb3081f2011-01-14 13:37:31 -0800820 jitActivate = !dvmDebuggerOrProfilerActive();
Bill Buzbee06bb8392010-01-31 18:53:15 -0800821
822 if (jitActivate && !jitActive) {
823 gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
Bill Buzbee06bb8392010-01-31 18:53:15 -0800824 } else if (!jitActivate && jitActive) {
825 gDvmJit.pProfTable = NULL;
Bill Buzbee3e392682010-02-03 18:13:57 -0800826 needUnchain = true;
Bill Buzbee06bb8392010-01-31 18:53:15 -0800827 }
Bill Buzbee3e392682010-02-03 18:13:57 -0800828 dvmUnlockMutex(&gDvmJit.tableLock);
829 if (needUnchain)
830 dvmJitUnchainAll();
Bill Buzbee06bb8392010-01-31 18:53:15 -0800831}