/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>
#include <errno.h>
#include <cutils/ashmem.h>

#include "Dalvik.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"

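/* Return the number of work orders currently pending in the compiler queue */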
static inline int workQueueLength(void)
{
    return gDvmJit.compilerQueueLength;
}

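/*
 * Remove the next work order from the queue and return it to the caller.
 * The compilerLock must be held. Signals compilerQueueEmpty when the queue
 * drains and tracks the high water mark of the queue length.
 */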
static CompilerWorkOrder workDequeue(void)
{
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    if (gDvmJit.compilerQueueLength == 0) {
        int cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
        assert(cc == 0);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}

/*
 * Attempt to enqueue a work order, returning true if successful.
 * This routine will not block, but simply return if it couldn't
 * acquire the lock or if the queue is full.
 *
 * NOTE: Make sure that the caller frees the info pointer if the return value
 * is false.
 */
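/*
 * Illustrative caller sketch (not part of this file): a typical producer
 * hands off a heap-allocated trace description and reclaims it when the
 * enqueue is rejected. kWorkOrderTrace, dPC, and desc are assumed names
 * used only for illustration here.
 *
 *     void *desc = ...;                           // trace description
 *     if (!dvmCompilerWorkEnqueue(dPC, kWorkOrderTrace, desc)) {
 *         free(desc);                             // rejected - caller still owns it
 *     }
 */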
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    if (dvmTryLockMutex(&gDvmJit.compilerLock)) {
        return false;  // Couldn't acquire the lock
    }

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        result = false;
        goto unlockAndExit;
    }

    for (numWork = gDvmJit.compilerQueueLength,
         i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc)
            goto unlockAndExit;
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.codeAddress = NULL;
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    newOrder->result.requestingThread = dvmThreadSelf();

    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

unlockAndExit:
    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}

/* Block until queue length is 0 */
void dvmCompilerDrainQueue(void)
{
    int oldStatus = dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gDvmJit.compilerLock);
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread) {
        pthread_cond_wait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
    dvmChangeStatus(NULL, oldStatus);
}

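/*
 * Create the JIT code cache: allocate an ashmem region, map it
 * readable/writable/executable, copy the compiler templates to its start,
 * and flush the instruction cache for the portion in use.
 */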
bool dvmCompilerSetupCodeCache(void)
{
    extern void dvmCompilerTemplateStart(void);
    extern void dmvCompilerTemplateEnd(void);
    int fd;

    /* Allocate the code cache */
    fd = ashmem_create_region("dalvik-jit-code-cache", gDvmJit.codeCacheSize);
    if (fd < 0) {
        LOGE("Could not create %u-byte ashmem region for the JIT code cache",
             gDvmJit.codeCacheSize);
        return false;
    }
    gDvmJit.codeCache = mmap(NULL, gDvmJit.codeCacheSize,
                             PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE, fd, 0);
    close(fd);
    if (gDvmJit.codeCache == MAP_FAILED) {
        LOGE("Failed to mmap the JIT code cache: %s\n", strerror(errno));
        return false;
    }

    /* This can be found through "dalvik-jit-code-cache" in /proc/<pid>/maps */
    // LOGD("Code cache starts at %p", gDvmJit.codeCache);

    /* Copy the template code into the beginning of the code cache */
    int templateSize = (intptr_t) dmvCompilerTemplateEnd -
                       (intptr_t) dvmCompilerTemplateStart;
    memcpy((void *) gDvmJit.codeCache,
           (void *) dvmCompilerTemplateStart,
           templateSize);

    gDvmJit.templateSize = templateSize;
    gDvmJit.codeCacheByteUsed = templateSize;

    /* Only flush the part in the code cache that is being used now */
    cacheflush((intptr_t) gDvmJit.codeCache,
               (intptr_t) gDvmJit.codeCache + templateSize, 0);
    return true;
}

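/*
 * Walk the Dalvik stack of the given thread and null out the returnAddr
 * field in every frame so that chained JIT'ed code returns to the
 * interpreter instead. Optionally logs each frame for debugging.
 */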
static void crawlDalvikStack(Thread *thread, bool print)
{
    void *fp = thread->curFrame;
    StackSaveArea* saveArea = NULL;
    int stackLevel = 0;

    if (print) {
        LOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
             dvmGetThreadStatusStr(thread->status),
             thread->inJitCodeCache,
             thread->inJitCodeCache ? "jit" : "interp");
    }
    /* Crawl the Dalvik stack frames to clear the returnAddr field */
    while (fp != NULL) {
        saveArea = SAVEAREA_FROM_FP(fp);

        if (print) {
            if (dvmIsBreakFrame(fp)) {
                LOGD("  #%d: break frame (%p)",
                     stackLevel, saveArea->returnAddr);
            }
            else {
                LOGD("  #%d: %s.%s%s (%p)",
                     stackLevel,
                     saveArea->method->clazz->descriptor,
                     saveArea->method->name,
                     dvmIsNativeMethod(saveArea->method) ?
                         " (native)" : "",
                     saveArea->returnAddr);
            }
        }
        stackLevel++;
        saveArea->returnAddr = NULL;
        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    /* Make sure the stack is fully unwound to the bottom */
    assert(saveArea == NULL ||
           (u1 *) (saveArea+1) == thread->interpStackStart);
}

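/*
 * Discard all translations and return the code cache to its post-template
 * state. The reset is deferred if any thread is still executing inside the
 * code cache. Expected to run while all threads are suspended (see
 * dvmCompilerPerformSafePointChecks below).
 */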
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr fields so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used, and
         * 2) any thread stuck in JIT'ed code will soon return to the
         *    interpreter.
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
    }

    if (inJit) {
        LOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
           0,
           gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
    cacheflush((intptr_t) gDvmJit.codeCache,
               (intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed, 0);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    LOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}

/*
 * Perform actions that are only safe when all threads are suspended. Currently
 * we do:
 * 1) Check if the code cache is full. If so, reset it and restart populating
 *    it from scratch.
 * 2) Patch predicted chaining cells by consuming recorded work orders.
 */
void dvmCompilerPerformSafePointChecks(void)
{
    if (gDvmJit.codeCacheFull) {
        resetCodeCache();
    }
    dvmCompilerPatchInlineCache();
}

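/*
 * One-time initialization performed on the compiler thread: architecture
 * setup, code cache creation (unless inherited from the zygote), arena
 * allocation, and construction of the JitTable and profiling table.
 */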
bool compilerThreadStartup(void)
{
    JitEntry *pJitTable = NULL;
    unsigned char *pJitProfTable = NULL;
    unsigned int i;

    if (!dvmCompilerArchInit())
        goto fail;

    /*
     * Set up the code cache if we have not inherited a valid code cache
     * from the zygote.
     */
    if (gDvmJit.codeCache == NULL) {
        if (!dvmCompilerSetupCodeCache())
            goto fail;
    }

    /* Allocate the initial arena block */
    if (dvmCompilerHeapInit() == false) {
        goto fail;
    }

    dvmLockMutex(&gDvmJit.compilerLock);

#if defined(WITH_JIT_TUNING)
    /* Track method-level compilation statistics */
    gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);
#endif

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /* Set up the JitTable */

    /* Power of 2? */
    assert(gDvmJit.jitTableSize &&
           !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));

    dvmInitMutex(&gDvmJit.tableLock);
    dvmLockMutex(&gDvmJit.tableLock);
    pJitTable = (JitEntry*)
                calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
    if (!pJitTable) {
        LOGE("jit table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /*
     * NOTE: the profile table must only be allocated once, globally.
     * Profiling is turned on and off by nulling out gDvmJit.pProfTable
     * and then restoring its original value. However, this action
     * is not synchronized for speed, so threads may continue to hold
     * and update the profile table after profiling has been turned
     * off by nulling the global pointer. Be aware.
     */
    pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
    if (!pJitProfTable) {
        LOGE("jit prof table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
    for (i = 0; i < gDvmJit.jitTableSize; i++) {
        pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
    }
    /* Is chain field wide enough for termination pattern? */
    assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

    gDvmJit.pJitEntryTable = pJitTable;
    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
    gDvmJit.jitTableEntriesUsed = 0;
    gDvmJit.compilerHighWater =
        COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
    gDvmJit.pProfTable = pJitProfTable;
    gDvmJit.pProfTableCopy = pJitProfTable;
    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Signal running threads to refresh their cached pJitTable pointers */
    dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
    dvmResumeAllThreads(SUSPEND_FOR_REFRESH);
    return true;

fail:
    return false;
}

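/*
 * Entry point of the compiler thread: optionally delay startup until the
 * framework signals that the first screen draw has happened, run the
 * deferred initialization, then loop draining the work queue until asked
 * to halt.
 */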
static void *compilerThreadStart(void *arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);

    /*
     * If we're not running stand-alone, wait a little before
     * receiving translation requests on the assumption that process start-up
     * code isn't worth compiling. We'll resume when the framework
     * signals us that the first screen draw has happened, or the timer
     * below expires (to catch daemons).
     *
     * There is a theoretical race between the callback to
     * VMRuntime.startJitCompilation and when the compiler thread reaches this
     * point. If the callback happens first, we bypass the delay here so that
     * the system_server (which does not use the timed wait) is not left in
     * interpreter-only mode permanently.
     */
    if (gDvmJit.runningInAndroidFramework &&
        !gDvmJit.alreadyEnabledViaFramework) {
        /*
         * If the current VM instance is the system server (detected by having
         * 0 in gDvm.systemServerPid), we will use an indefinite wait on the
         * condition variable to determine whether to start the JIT or not.
         * If the system server detects that the whole system is booted in
         * safe mode, the condition variable will never be signaled and the
         * system server will remain in interpreter-only mode. All
         * subsequent apps will be started with the --enable-safemode flag
         * explicitly appended.
         */
        if (gDvm.systemServerPid == 0) {
            dvmLockMutex(&gDvmJit.compilerLock);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            dvmUnlockMutex(&gDvmJit.compilerLock);
            LOGD("JIT started for system_server");
        } else {
            dvmLockMutex(&gDvmJit.compilerLock);
            /*
             * TUNING: experiment with the delay & perhaps make it
             * target-specific
             */
            dvmRelativeCondWait(&gDvmJit.compilerQueueActivity,
                                &gDvmJit.compilerLock, 3000, 0);
            dvmUnlockMutex(&gDvmJit.compilerLock);
        }
        if (gDvmJit.haltCompilerThread) {
            return NULL;
        }
    }

    compilerThreadStartup();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Since the compiler thread will not touch any objects on the heap once
     * it is created, we just fake its state as VMWAIT so that it can be a
     * bit late in responding when a suspend request is pending.
     */
    while (!gDvmJit.haltCompilerThread) {
        if (workQueueLength() == 0) {
            int cc;
            cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
            assert(cc == 0);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            continue;
        } else {
            do {
                CompilerWorkOrder work = workDequeue();
                dvmUnlockMutex(&gDvmJit.compilerLock);
                /*
                 * Check whether there is a suspend request pending on this
                 * thread. This is necessary to allow a clean shutdown.
                 */
                dvmCheckSuspendPending(NULL);
                /* Is JitTable filling up? */
                if (gDvmJit.jitTableEntriesUsed >
                    (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
                    bool resizeFail =
                        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
                    /*
                     * If the jit table is full, it's also time to reset
                     * the code cache.
                     */
                    gDvmJit.codeCacheFull |= resizeFail;
                }
                if (gDvmJit.haltCompilerThread) {
                    LOGD("Compiler shutdown in progress - discarding request");
                } else if (!gDvmJit.codeCacheFull) {
                    /* If compilation failed, use interpret-template */
                    if (!dvmCompilerDoWork(&work)) {
                        work.result.codeAddress = gDvmJit.interpretTemplate;
                    }
                    if (!work.result.discardResult) {
                        dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
                                          work.result.instructionSet);
                    }
                }
                free(work.info);
                dvmLockMutex(&gDvmJit.compilerLock);
            } while (workQueueLength() != 0);
        }
    }
    pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * As part of detaching the thread we need to call into Java code to update
     * the ThreadGroup, and we should not be in VMWAIT state while executing
     * interpreted code.
     */
    dvmChangeStatus(NULL, THREAD_RUNNING);

    if (gDvm.verboseShutdown)
        LOGD("Compiler thread shutting down\n");
    return NULL;
}

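/*
 * Initialize the compiler locks, condition variables, and work queue, then
 * launch the compiler thread. The heavyweight initialization is deferred to
 * the thread itself (see compilerThreadStartup).
 */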
bool dvmCompilerStartup(void)
{
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer the rest of initialization until we're sure JIT'ing makes sense.
     * Launch the compiler thread, which will do the real initialization if
     * and when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}

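/*
 * Signal the compiler thread to halt, join it, and release the JIT tables
 * and synchronization objects.
 */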
void dvmCompilerShutdown(void)
{
    void *threadReturn;

    if (gDvm.verboseShutdown) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
            sleep(5);
    }

    if (gDvmJit.compilerHandle) {

        gDvmJit.haltCompilerThread = true;

        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            LOGW("Compiler thread join failed\n");
        else if (gDvm.verboseShutdown)
            LOGD("Compiler thread has shut down\n");
    }

    dvmDestroyMutex(&gDvmJit.tableLock);
    dvmDestroyMutex(&gDvmJit.compilerLock);
    dvmDestroyMutex(&gDvmJit.compilerICPatchLock);

    if (gDvmJit.pJitEntryTable) {
        free(gDvmJit.pJitEntryTable);
        gDvmJit.pJitEntryTable = NULL;
    }

    if (gDvmJit.pProfTable) {
        free(gDvmJit.pProfTable);
        gDvmJit.pProfTable = NULL;
    }
}

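/*
 * Enable or disable JIT profiling based on the debugger/profiler state.
 * When disabling, also unchain all translations so threads fall back to
 * the interpreter.
 */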
void dvmCompilerStateRefresh()
{
    bool jitActive;
    bool jitActivate;
    bool needUnchain = false;

    dvmLockMutex(&gDvmJit.tableLock);
    jitActive = gDvmJit.pProfTable != NULL;
    jitActivate = !(gDvm.debuggerActive || (gDvm.activeProfilers > 0));

    if (jitActivate && !jitActive) {
        gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
    } else if (!jitActivate && jitActive) {
        gDvmJit.pProfTable = NULL;
        needUnchain = true;
    }
    dvmUnlockMutex(&gDvmJit.tableLock);
    if (needUnchain)
        dvmJitUnchainAll();
}