/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>
#include <errno.h>
#include <cutils/ashmem.h>

#include "Dalvik.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"

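/* Return the number of pending work orders; callers hold compilerLock */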
static inline int workQueueLength(void)
{
    return gDvmJit.compilerQueueLength;
}

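/*
 * Remove and return the work order at the dequeue index, marking its slot
 * invalid and signaling compilerQueueEmpty when the queue drains. Callers
 * hold compilerLock.
 */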
static CompilerWorkOrder workDequeue(void)
{
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    if (gDvmJit.compilerQueueLength == 0) {
        int cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
        assert(cc == 0);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}

/*
 * Attempt to enqueue a work order, returning true if successful.
 * This routine will not block, but simply return if it couldn't
 * acquire the lock or if the queue is full.
 *
 * NOTE: Make sure that the caller frees the info pointer if the return value
 * is false.
 */
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    if (dvmTryLockMutex(&gDvmJit.compilerLock)) {
        return false;  // Couldn't acquire the lock
    }

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        result = false;
        goto unlockAndExit;
    }

    for (numWork = gDvmJit.compilerQueueLength,
           i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc)
            goto unlockAndExit;
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.codeAddress = NULL;
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    newOrder->result.requestingThread = dvmThreadSelf();

    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

unlockAndExit:
    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}

/* Block until the queue length reaches 0, or the compiler thread is halting */
void dvmCompilerDrainQueue(void)
{
    int oldStatus = dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gDvmJit.compilerLock);
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread) {
        /*
         * Use a timed wait here - more than one mutator thread may be blocked
         * but the compiler thread will only signal once when the queue is
         * emptied. Furthermore, the compiler thread may have been shut down
         * so the blocked thread may never get the wakeup signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty,
                            &gDvmJit.compilerLock, 1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
    dvmChangeStatus(NULL, oldStatus);
}

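/*
 * Create the JIT code cache as an ashmem region, copy the compiler templates
 * into its beginning, and flush the instruction cache for the portion in use.
 */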
bool dvmCompilerSetupCodeCache(void)
{
    extern void dvmCompilerTemplateStart(void);
    extern void dmvCompilerTemplateEnd(void);
    int fd;

    /* Allocate the code cache */
    fd = ashmem_create_region("dalvik-jit-code-cache", gDvmJit.codeCacheSize);
    if (fd < 0) {
        LOGE("Could not create %u-byte ashmem region for the JIT code cache",
             gDvmJit.codeCacheSize);
        return false;
    }
    gDvmJit.codeCache = mmap(NULL, gDvmJit.codeCacheSize,
                             PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE, fd, 0);
    close(fd);
    if (gDvmJit.codeCache == MAP_FAILED) {
        LOGE("Failed to mmap the JIT code cache: %s\n", strerror(errno));
        return false;
    }

    /* This can be found through "dalvik-jit-code-cache" in /proc/<pid>/maps */
    // LOGD("Code cache starts at %p", gDvmJit.codeCache);

    /* Copy the template code into the beginning of the code cache */
    int templateSize = (intptr_t) dmvCompilerTemplateEnd -
                       (intptr_t) dvmCompilerTemplateStart;
    memcpy((void *) gDvmJit.codeCache,
           (void *) dvmCompilerTemplateStart,
           templateSize);

    /*
     * Work around a CPU bug by keeping the 32-bit ARM handler code in its own
     * page.
     */
    if (dvmCompilerInstructionSet() == DALVIK_JIT_THUMB2) {
        templateSize = (templateSize + 4095) & ~4095;
    }

    gDvmJit.templateSize = templateSize;
    gDvmJit.codeCacheByteUsed = templateSize;

    /* Only flush the part in the code cache that is being used now */
    cacheflush((intptr_t) gDvmJit.codeCache,
               (intptr_t) gDvmJit.codeCache + templateSize, 0);
    return true;
}

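/*
 * Walk the Dalvik stack of the given thread and clear each frame's returnAddr
 * so that control cannot return into translations that are about to be wiped.
 * Optionally log each frame when "print" is true.
 */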
static void crawlDalvikStack(Thread *thread, bool print)
{
    void *fp = thread->curFrame;
    StackSaveArea* saveArea = NULL;
    int stackLevel = 0;

    if (print) {
        LOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
             dvmGetThreadStatusStr(thread->status),
             thread->inJitCodeCache,
             thread->inJitCodeCache ? "jit" : "interp");
    }
    /* Crawl the Dalvik stack frames to clear the returnAddr field */
    while (fp != NULL) {
        saveArea = SAVEAREA_FROM_FP(fp);

        if (print) {
            if (dvmIsBreakFrame(fp)) {
                LOGD("  #%d: break frame (%p)",
                     stackLevel, saveArea->returnAddr);
            }
            else {
                LOGD("  #%d: %s.%s%s (%p)",
                     stackLevel,
                     saveArea->method->clazz->descriptor,
                     saveArea->method->name,
                     dvmIsNativeMethod(saveArea->method) ?
                         " (native)" : "",
                     saveArea->returnAddr);
            }
        }
        stackLevel++;
        saveArea->returnAddr = NULL;
        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    /* Make sure the stack is fully unwound to the bottom */
    assert(saveArea == NULL ||
           (u1 *) (saveArea+1) == thread->interpStackStart);
}

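/*
 * Reset the code cache and the compiler work queues. Only called from
 * dvmCompilerPerformSafePointChecks with all threads suspended; the reset is
 * deferred if any thread is still executing inside the code cache.
 */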
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used, and
         * 2) any thread stuck in JIT'ed code will soon return to the
         *    interpreter
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
    }

    if (inJit) {
        LOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
           0,
           gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
    cacheflush((intptr_t) gDvmJit.codeCache,
               (intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed, 0);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    LOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}

/*
 * Perform actions that are only safe when all threads are suspended. Currently
 * we do:
 * 1) Check if the code cache is full. If so reset it and restart populating it
 *    from scratch.
 * 2) Patch predicted chaining cells by consuming recorded work orders.
 */
void dvmCompilerPerformSafePointChecks(void)
{
    if (gDvmJit.codeCacheFull) {
        resetCodeCache();
    }
    dvmCompilerPatchInlineCache();
}

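/*
 * One-time initialization performed on the compiler thread: architecture
 * setup, code cache and arena allocation, and creation of the JitTable and
 * profiling table.
 */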
bool compilerThreadStartup(void)
{
    JitEntry *pJitTable = NULL;
    unsigned char *pJitProfTable = NULL;
    unsigned int i;

    if (!dvmCompilerArchInit())
        goto fail;

    /*
     * Set up the code cache if we have not inherited a valid code cache
     * from the zygote.
     */
    if (gDvmJit.codeCache == NULL) {
        if (!dvmCompilerSetupCodeCache())
            goto fail;
    }

    /* Allocate the initial arena block */
    if (dvmCompilerHeapInit() == false) {
        goto fail;
    }

    dvmLockMutex(&gDvmJit.compilerLock);

#if defined(WITH_JIT_TUNING)
    /* Track method-level compilation statistics */
    gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);
#endif

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /* Set up the JitTable */

    /* Power of 2? */
    assert(gDvmJit.jitTableSize &&
           !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));

    dvmInitMutex(&gDvmJit.tableLock);
    dvmLockMutex(&gDvmJit.tableLock);
    pJitTable = (JitEntry*)
                calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
    if (!pJitTable) {
        LOGE("jit table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /*
     * NOTE: the profile table must only be allocated once, globally.
     * Profiling is turned on and off by nulling out gDvmJit.pProfTable
     * and then restoring its original value. However, this action
     * is not synchronized for speed so threads may continue to hold
     * and update the profile table after profiling has been turned
     * off by nulling the global pointer. Be aware.
     */
    pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
    if (!pJitProfTable) {
        LOGE("jit prof table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
    for (i = 0; i < gDvmJit.jitTableSize; i++) {
        pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
    }
    /* Is chain field wide enough for termination pattern? */
    assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

    gDvmJit.pJitEntryTable = pJitTable;
    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
    gDvmJit.jitTableEntriesUsed = 0;
    gDvmJit.compilerHighWater =
        COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
    gDvmJit.pProfTable = pJitProfTable;
    gDvmJit.pProfTableCopy = pJitProfTable;
    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Signal running threads to refresh their cached pJitTable pointers */
    dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
    dvmResumeAllThreads(SUSPEND_FOR_REFRESH);
    return true;

fail:
    return false;
}

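/*
 * Compiler thread main loop: optionally delay startup when running inside the
 * Android framework, perform one-time initialization, then service the work
 * queue until gDvmJit.haltCompilerThread is set.
 */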
static void *compilerThreadStart(void *arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);

    /*
     * If we're not running stand-alone, wait a little before
     * receiving translation requests on the assumption that process start
     * up code isn't worth compiling. We'll resume when the framework
     * signals us that the first screen draw has happened, or the timer
     * below expires (to catch daemons).
     *
     * There is a theoretical race between the callback to
     * VMRuntime.startJitCompilation and when the compiler thread reaches this
     * point. In case the callback happens earlier, in order not to permanently
     * hold the system_server (which is not using the timed wait) in
     * interpreter-only mode we bypass the delay here.
     */
    if (gDvmJit.runningInAndroidFramework &&
        !gDvmJit.alreadyEnabledViaFramework) {
        /*
         * If the current VM instance is the system server (detected by having
         * 0 in gDvm.systemServerPid), we will use the indefinite wait on the
         * conditional variable to determine whether to start the JIT or not.
         * If the system server detects that the whole system is booted in
         * safe mode, the conditional variable will never be signaled and the
         * system server will remain in the interpreter-only mode. All
         * subsequent apps will be started with the --enable-safemode flag
         * explicitly appended.
         */
        if (gDvm.systemServerPid == 0) {
            dvmLockMutex(&gDvmJit.compilerLock);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            dvmUnlockMutex(&gDvmJit.compilerLock);
            LOGD("JIT started for system_server");
        } else {
            dvmLockMutex(&gDvmJit.compilerLock);
            /*
             * TUNING: experiment with the delay & perhaps make it
             * target-specific
             */
            dvmRelativeCondWait(&gDvmJit.compilerQueueActivity,
                                &gDvmJit.compilerLock, 3000, 0);
            dvmUnlockMutex(&gDvmJit.compilerLock);
        }
        if (gDvmJit.haltCompilerThread) {
            return NULL;
        }
    }

    compilerThreadStartup();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Since the compiler thread will not touch any objects on the heap once
     * it is created, we just fake its state as VMWAIT so that it can be a
     * bit late when there is a suspend request pending.
     */
    while (!gDvmJit.haltCompilerThread) {
        if (workQueueLength() == 0) {
            int cc;
            cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
            assert(cc == 0);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            continue;
        } else {
            do {
                CompilerWorkOrder work = workDequeue();
                dvmUnlockMutex(&gDvmJit.compilerLock);
#if defined(JIT_STATS)
                u8 startTime = dvmGetRelativeTimeUsec();
#endif
                /*
                 * Check whether there is a suspend request on me. This
                 * is necessary to allow a clean shutdown.
                 */
                dvmCheckSuspendPending(NULL);
                /* Is JitTable filling up? */
                if (gDvmJit.jitTableEntriesUsed >
                    (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
                    bool resizeFail =
                        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
                    /*
                     * If the jit table is full, consider it's time to reset
                     * the code cache too.
                     */
                    gDvmJit.codeCacheFull |= resizeFail;
                }
                if (gDvmJit.haltCompilerThread) {
                    LOGD("Compiler shutdown in progress - discarding request");
                } else if (!gDvmJit.codeCacheFull) {
                    bool compileOK = false;
                    jmp_buf jmpBuf;
                    work.bailPtr = &jmpBuf;
                    bool aborted = setjmp(jmpBuf);
                    if (!aborted) {
                        compileOK = dvmCompilerDoWork(&work);
                    }
                    if (aborted || !compileOK) {
                        dvmCompilerArenaReset();
                        work.result.codeAddress = gDvmJit.interpretTemplate;
                    } else if (!work.result.discardResult) {
                        dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
                                          work.result.instructionSet);
                    }
                }
                free(work.info);
#if defined(JIT_STATS)
                gDvmJit.jitTime += dvmGetRelativeTimeUsec() - startTime;
#endif
                dvmLockMutex(&gDvmJit.compilerLock);
            } while (workQueueLength() != 0);
        }
    }
    pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * As part of detaching the thread we need to call into Java code to update
     * the ThreadGroup, and we should not be in VMWAIT state while executing
     * interpreted code.
     */
    dvmChangeStatus(NULL, THREAD_RUNNING);

    if (gDvm.verboseShutdown)
        LOGD("Compiler thread shutting down\n");
    return NULL;
}

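/*
 * Initialize the compiler locks, condition variables, and work queue, then
 * launch the compiler thread. Returns true if the thread was created.
 */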
bool dvmCompilerStartup(void)
{
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer rest of initialization until we're sure JIT'ing makes sense.
     * Launch the compiler thread, which will do the real initialization if
     * and when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}

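/*
 * Shut down the compiler: disable new translation requests, wake and join the
 * compiler thread, and unchain all existing translations.
 */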
void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;

    if (gDvm.verboseShutdown) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
            sleep(5);
    }

    if (gDvmJit.compilerHandle) {

        gDvmJit.haltCompilerThread = true;

        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            LOGW("Compiler thread join failed\n");
        else if (gDvm.verboseShutdown)
            LOGD("Compiler thread has shut down\n");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow for the compiler
     * thread to be restarted after it exits here. We aren't freeing
     * the JitTable or the ProfTable because threads which still may be
     * running or in the process of shutting down may hold references to
     * them.
     */
}

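/*
 * Install or remove the profiling table depending on whether a debugger or
 * profiler is active, unchaining existing translations when the JIT is
 * being disabled.
 */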
void dvmCompilerStateRefresh(void)
{
    bool jitActive;
    bool jitActivate;
    bool needUnchain = false;

    dvmLockMutex(&gDvmJit.tableLock);
    jitActive = gDvmJit.pProfTable != NULL;
    jitActivate = !(gDvm.debuggerActive || (gDvm.activeProfilers > 0));

    if (jitActivate && !jitActive) {
        gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
    } else if (!jitActivate && jitActive) {
        gDvmJit.pProfTable = NULL;
        needUnchain = true;
    }
    dvmUnlockMutex(&gDvmJit.tableLock);
    if (needUnchain)
        dvmJitUnchainAll();
}