/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Fundamental synchronization mechanisms.
 *
 * The top part of the file has operations on "monitor" structs; the
 * next part has the native calls on objects.
 *
 * The current implementation uses "thin locking" to avoid allocating
 * an Object's full Monitor struct until absolutely necessary (i.e.,
 * during contention or a call to wait()).
 *
 * TODO: make improvements to thin locking
 * We may be able to improve performance and reduce memory requirements by:
 *  - reverting to a thin lock once the Monitor is no longer necessary
 *  - using a pool of monitor objects, with some sort of recycling scheme
 *
 * TODO: recycle native-level monitors when objects are garbage collected.
 *
 * NOTE: if we broadcast a notify, and somebody sneaks in a Thread.interrupt
 * before the notify finishes (i.e. before all threads sleeping on the
 * condition variable have awoken), we could end up with a nonzero value for
 * "notifying" after everybody is gone, because one of the notified threads
 * will actually exit via the "interrupted" path.  This can be detected as
 * (notifying + interrupting > waiting), i.e. the number of threads that need
 * to be woken is greater than the number waiting.  The fix is to test and
 * adjust "notifying" at the start of the wait() call.
 *  -> This is probably not a problem if we notify less than the full set
 *     before the interrupt comes in.  If we have four waiters, two pending
 *     notifies, and an interrupt hits, we will interrupt one thread and notify
 *     two others.  Doesn't matter if the interrupted thread would have been
 *     one of the notified.  Count is only screwed up if we have two waiters,
 *     in which case it's safe to fix it at the start of the next wait().
 */
#include "Dalvik.h"

#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <time.h>
#include <sys/time.h>
#include <errno.h>

#define LOG_THIN    LOGV

#ifdef WITH_DEADLOCK_PREDICTION     /* fwd */
static const char* kStartBanner =
    "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
static const char* kEndBanner =
    "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";

/*
 * Unsorted, expanding list of objects.
 *
 * This is very similar to PointerSet (which came into existence after this),
 * but these are unsorted, uniqueness is not enforced by the "add" function,
 * and the base object isn't allocated on the heap.
 */
typedef struct ExpandingObjectList {
    u2          alloc;
    u2          count;
    Object**    list;
} ExpandingObjectList;
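
/*
 * Illustrative sketch (not part of the original code): the typical
 * lifecycle of an ExpandingObjectList, using the expandObj* helpers
 * defined later in this file.  Kept inside "#if 0" so it is never
 * compiled; it exists only to document the intended usage pattern.
 */
#if 0
static void exampleExpandingObjectListUsage(Object* objA, Object* objB)
{
    ExpandingObjectList list;

    memset(&list, 0, sizeof(list));     /* zero-init; no heap alloc yet */
    expandObjAddEntry(&list, objA);     /* grows backing array on demand */
    expandObjAddEntry(&list, objB);     /* duplicates are NOT rejected */
    assert(expandObjHas(&list, objA));
    expandObjRemoveEntry(&list, objA);  /* order not preserved on remove */
    expandObjClear(&list);              /* frees the backing array */
}
#endif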

/* fwd */
static void updateDeadlockPrediction(Thread* self, Object* obj);
static void removeCollectedObject(Object* obj);
static void expandObjClear(ExpandingObjectList* pList);
#endif

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked.  Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Dalvik, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998).  Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states that an Object's lock may have are referred to as
 * "thin" and "fat".  The lock may transition between the two states
 * for various reasons.
 *
 * The lock value itself is stored in Object.lock, which is a union of
 * the form:
 *
 *     typedef union Lock {
 *         u4          thin;
 *         Monitor*    mon;
 *     } Lock;
 *
 * It is possible to tell the current state of the lock from the actual
 * value, so we do not need to store any additional state.  When the
 * lock is "thin", it has the form:
 *
 *     [31 ---- 16] [15 ---- 1] [0]
 *      lock count   thread id   1
 *
 * When it is "fat", the field is simply a (Monitor *).  Since the pointer
 * will always be 4-byte-aligned, bits 1 and 0 will always be zero when
 * the field holds a pointer.  Hence, we can tell the current fat-vs-thin
 * state by checking the least-significant bit.
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 *
 * To reduce the amount of work when attempting a compare and exchange,
 * Thread.threadId is guaranteed to have bit 0 set, and all new Objects
 * have their lock fields initialized to the value 0x1, or
 * DVM_LOCK_INITIAL_THIN_VALUE, via DVM_OBJECT_INIT().
 */
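
/*
 * Illustrative sketch (not part of the original code): decoding a lock
 * word according to the layout described above.  The helper names here
 * are made up for this example; the real code uses IS_LOCK_FAT() and
 * inline bit operations instead.
 */
#if 0
static inline bool exampleLockIsThin(u4 thin)
{
    return (thin & 1) != 0;         /* LSB set --> thin lock */
}
static inline u4 exampleThinLockOwner(u4 thin)
{
    return thin & 0xffff;           /* bits [15:0]; matches threadId, which
                                       is guaranteed to have bit 0 set */
}
static inline u4 exampleThinLockCount(u4 thin)
{
    return thin >> 16;              /* recursion count in bits [31:16] */
}
#endif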

/*
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time.  There may be several
 * threads waiting on it (the wait call unlocks it).  One or more waiting
 * threads may be getting interrupted or notified at any given time.
 */
struct Monitor {
    Thread*     owner;          /* which thread currently owns the lock? */
    int         lockCount;      /* owner's recursive lock depth */
    Object*     obj;            /* what object are we part of [debug only] */

    int         waiting;        /* total #of threads waiting on this */
    int         notifying;      /* #of threads being notified */
    int         interrupting;   /* #of threads being interrupted */

    pthread_mutex_t lock;
    pthread_cond_t  cond;

    Monitor*    next;

#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * Objects that have been locked immediately after this one in the
     * past.  We use an expanding flat array, allocated on first use, to
     * minimize allocations.  Deletions from the list, expected to be
     * infrequent, are crunched down.
     */
    ExpandingObjectList historyChildren;

    /*
     * We also track parents.  This isn't strictly necessary, but it makes
     * the cleanup at GC time significantly faster.
     */
    ExpandingObjectList historyParents;

    /* used during cycle detection */
    bool        historyMark;

    /* stack trace, established the first time we locked the object */
    int         historyStackDepth;
    int*        historyRawStackTrace;
#endif
};
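
/*
 * Illustrative sketch (not part of the original code): how the
 * object-level entry points below pair up in practice.  This mirrors
 * what the interpreter does for a "synchronized" block containing
 * Object.wait(); all of the functions called here are defined later in
 * this file (signatures as in the THIN_LOCKING build).
 */
#if 0
static void exampleSynchronizedWait(Thread* self, Object* obj)
{
    dvmLockObject(self, obj);               /* monitorenter */
    dvmObjectWait(self, obj, 0, 0, true);   /* wait forever, interruptible */
    dvmUnlockObject(self, obj);             /* monitorexit */
}
#endif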


/*
 * Create and initialize a monitor.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        LOGE("Unable to allocate monitor\n");
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);
    pthread_cond_init(&mon->cond, NULL);

    /* replace the head of the list with the new monitor */
    do {
        mon->next = gDvm.monitorList;
    } while (!ATOMIC_CMP_SWAP((int32_t*)(void*)&gDvm.monitorList,
                              (int32_t)mon->next, (int32_t)mon));

    return mon;
}

/*
 * Release a Monitor.
 */
static void releaseMonitor(Monitor* mon)
{
    // TODO
}

/*
 * Free the monitor list.  Only used when shutting the VM down.
 */
void dvmFreeMonitorList(void)
{
    Monitor* mon;
    Monitor* nextMon;

    mon = gDvm.monitorList;
    while (mon != NULL) {
        nextMon = mon->next;

#ifdef WITH_DEADLOCK_PREDICTION
        expandObjClear(&mon->historyChildren);
        expandObjClear(&mon->historyParents);
        free(mon->historyRawStackTrace);
#endif
        free(mon);
        mon = nextMon;
    }
}

/*
 * Log some info about our monitors.
 */
void dvmDumpMonitorInfo(const char* msg)
{
#if QUIET_ZYGOTE_MONITOR
    if (gDvm.zygote) {
        return;
    }
#endif

    int totalCount;
    int liveCount;

    totalCount = liveCount = 0;
    Monitor* mon = gDvm.monitorList;
    while (mon != NULL) {
        totalCount++;
        if (mon->obj != NULL)
            liveCount++;
        mon = mon->next;
    }

    LOGD("%s: monitor list has %d entries (%d live)\n",
        msg, totalCount, liveCount);
}

/*
 * Get the object that a monitor is part of.
 */
Object* dvmGetMonitorObject(Monitor* mon)
{
    if (mon == NULL)
        return NULL;
    else
        return mon->obj;
}

/*
 * Checks whether the given thread holds the given
 * object's lock.
 */
bool dvmHoldsLock(Thread* thread, Object* obj)
{
    if (thread == NULL || obj == NULL) {
        return false;
    }

    /* Since we're reading the lock value multiple times,
     * latch it so that it doesn't change out from under
     * us if we get preempted.
     */
    Lock lock = obj->lock;
    if (IS_LOCK_FAT(&lock)) {
        return thread == lock.mon->owner;
    } else {
        return thread->threadId == (lock.thin & 0xffff);
    }
}

/*
 * Free the monitor associated with an object and make the object's lock
 * thin again.  This is called during garbage collection.
 */
void dvmFreeObjectMonitor_internal(Lock *lock)
{
    Monitor *mon;

    /* The macro that wraps this function checks IS_LOCK_FAT() first.
     */
    assert(IS_LOCK_FAT(lock));

#ifdef WITH_DEADLOCK_PREDICTION
    if (gDvm.deadlockPredictMode != kDPOff)
        removeCollectedObject(lock->mon->obj);
#endif

    mon = lock->mon;
    lock->thin = DVM_LOCK_INITIAL_THIN_VALUE;

    /* This lock is associated with an object
     * that's being swept.  The only possible way
     * anyone could be holding this lock would be
     * if some JNI code locked but didn't unlock
     * the object, in which case we've got some bad
     * native code somewhere.
     */
    assert(pthread_mutex_trylock(&mon->lock) == 0);
    pthread_mutex_destroy(&mon->lock);
    pthread_cond_destroy(&mon->cond);
#if 1
//TODO: unlink from the monitor list (would require a lock)
// (might not -- the GC suspension may be enough)
    {
        Monitor *next;
        next = mon->next;
#ifdef WITH_DEADLOCK_PREDICTION
        expandObjClear(&mon->historyChildren);
        expandObjClear(&mon->historyParents);
        free(mon->historyRawStackTrace);
#endif
        memset(mon, 0, sizeof (*mon));
        mon->next = next;
    }
//free(mon);
#endif
}


/*
 * Lock a monitor.
 */
static void lockMonitor(Thread* self, Monitor* mon)
{
    int cc;

    if (mon->owner == self) {
        mon->lockCount++;
    } else {
        ThreadStatus oldStatus;

        if (pthread_mutex_trylock(&mon->lock) != 0) {
            /* mutex is locked, switch to wait status and sleep on it */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            cc = pthread_mutex_lock(&mon->lock);
            assert(cc == 0);
            dvmChangeStatus(self, oldStatus);
        }

        mon->owner = self;
        assert(mon->lockCount == 0);

        /*
         * "waiting", "notifying", and "interrupting" could all be nonzero
         * if we're locking an object on which other threads are waiting.
         * Nothing worth assert()ing about here.
         */
    }
}

/*
 * Try to lock a monitor.
 *
 * Returns "true" on success.
 */
static bool tryLockMonitor(Thread* self, Monitor* mon)
{
    int cc;

    if (mon->owner == self) {
        mon->lockCount++;
        return true;
    } else {
        cc = pthread_mutex_trylock(&mon->lock);
        if (cc == 0) {
            mon->owner = self;
            assert(mon->lockCount == 0);
            return true;
        } else {
            return false;
        }
    }
}


/*
 * Unlock a monitor.
 *
 * Returns true if the unlock succeeded.
 * If the unlock failed, an exception will be pending.
 */
static bool unlockMonitor(Thread* self, Monitor* mon)
{
    assert(mon != NULL);        // can this happen?

    if (mon->owner == self) {
        /*
         * We own the monitor, so nobody else can be in here.
         */
        if (mon->lockCount == 0) {
            int cc;
            mon->owner = NULL;
            cc = pthread_mutex_unlock(&mon->lock);
            assert(cc == 0);
        } else {
            mon->lockCount--;
        }
    } else {
        /*
         * We don't own this, so we're not allowed to unlock it.
         * The JNI spec says that we should throw IllegalMonitorStateException
         * in this case.
         */
        if (mon->owner == NULL) {
            //LOGW("Unlock fat %p: not owned\n", mon->obj);
        } else {
            //LOGW("Unlock fat %p: id %d vs %d\n",
            //    mon->obj, mon->owner->threadId, self->threadId);
        }
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "unlock of unowned monitor");
        return false;
    }
    return true;
}

/*
 * Wait on a monitor until timeout, interrupt, or notification.  Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "nsec" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop.  This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int cc;

    /* Make sure that the lock is fat and that we hold it. */
    if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowException("Ljava/lang/IllegalArgumentException;",
            "timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        s8 endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);
        endSec = now.tv_sec + msec / 1000;
        if (endSec >= 0x7fffffff) {
            LOGV("NOTE: end time exceeds epoch\n");
            endSec = 0x7ffffffe;
        }
        ts.tv_sec = endSec;
        ts.tv_nsec = (now.tv_nsec + (msec % 1000) * 1000 * 1000) + nsec;
#else
        struct timeval now;
        gettimeofday(&now, NULL);
        endSec = now.tv_sec + msec / 1000;
        if (endSec >= 0x7fffffff) {
            LOGV("NOTE: end time exceeds epoch\n");
            endSec = 0x7ffffffe;
        }
        ts.tv_sec = endSec;
        ts.tv_nsec = (now.tv_usec + (msec % 1000) * 1000) * 1000 + nsec;
#endif

        /* catch rollover */
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }
        timed = true;
    }

    /*
     * Make sure "notifying" wasn't screwed up by earlier activity.  If this
     * is wrong we could end up waking up too many people.  (This is a rare
     * situation, but we need to handle it correctly.)
     */
    if (mon->notifying + mon->interrupting > mon->waiting) {
        LOGD("threadid=%d: bogus mon %d+%d>%d; adjusting\n",
            self->threadId, mon->notifying, mon->interrupting,
            mon->waiting);

        assert(mon->waiting >= mon->interrupting);
        mon->notifying = mon->waiting - mon->interrupting;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold.  We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * The order of operations here isn't significant, because we still
     * hold the pthread mutex.
     */
    int prevLockCount;

    prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->waiting++;
    mon->owner = NULL;

    /*
     * Update thread status.  If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    /*
     * Tell the thread which monitor we're waiting on.  This is necessary
     * so that Thread.interrupt() can wake us up.  Thread.interrupt needs
     * to gain ownership of the monitor mutex before it can signal us, so
     * we're still not worried about race conditions.
     */
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().
     */
    if (self->interrupted) {
        wasInterrupted = true;
        goto done;
    }

    LOGVV("threadid=%d: waiting on %p\n", self->threadId, mon);

    while (true) {
        if (!timed) {
            cc = pthread_cond_wait(&mon->cond, &mon->lock);
            assert(cc == 0);
        } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&mon->cond, &mon->lock, &ts);
#else
            cc = pthread_cond_timedwait(&mon->cond, &mon->lock, &ts);
#endif
            if (cc == ETIMEDOUT) {
                LOGVV("threadid=%d wakeup: timeout\n", self->threadId);
                break;
            }
        }

        /*
         * We woke up because of an interrupt (which does a broadcast) or
         * a notification (which might be a signal or a broadcast).  Figure
         * out what we need to do.
         */
        if (self->interruptingWait) {
            /*
             * The other thread successfully gained the monitor lock, and
             * has confirmed that we were waiting on it.  If this is an
             * interruptible wait, we bail out immediately.  If not, we
             * continue on.
             */
            self->interruptingWait = false;
            mon->interrupting--;
            assert(self->interrupted);
            if (interruptShouldThrow) {
                wasInterrupted = true;
                LOGD("threadid=%d wakeup: interrupted\n", self->threadId);
                break;
            } else {
                LOGD("threadid=%d wakeup: not interruptible\n", self->threadId);
            }
        }
        if (mon->notifying) {
            /*
             * One or more threads are being notified.  Remove ourselves
             * from the set.
             */
            mon->notifying--;
            LOGVV("threadid=%d wakeup: notified\n", self->threadId);
            break;
        } else {
            /*
             * Looks like we were woken unnecessarily, probably as a
             * result of another thread being interrupted.  Go back to
             * sleep.
             */
            LOGVV("threadid=%d wakeup: going back to sleep\n", self->threadId);
        }
    }

done:
    //if (wasInterrupted) {
    //    LOGW("threadid=%d: throwing InterruptedException:\n", self->threadId);
    //    dvmDumpThread(self, false);
    //}

    /*
     * Put everything back.  Again, we hold the pthread mutex, so the order
     * here isn't significant.
     */
    self->waitMonitor = NULL;
    mon->owner = self;
    mon->waiting--;
    mon->lockCount = prevLockCount;

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptable thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow)
            dvmThrowException("Ljava/lang/InterruptedException;", NULL);
    }
}

/*
 * Notify one thread waiting on this monitor.
 */
static void notifyMonitor(Thread* self, Monitor* mon)
{
    /* Make sure that the lock is fat and that we hold it. */
    if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notify()");
        return;
    }

    /*
     * Check to see if anybody is there to notify.  We subtract off
     * threads that are being interrupted and anything that has
     * potentially already been notified.
     */
    if (mon->notifying + mon->interrupting < mon->waiting) {
        /* wake up one thread */
        int cc;

        LOGVV("threadid=%d: signaling on %p\n", self->threadId, mon);

        mon->notifying++;
        cc = pthread_cond_signal(&mon->cond);
        assert(cc == 0);
    } else {
        LOGVV("threadid=%d: nobody to signal on %p\n", self->threadId, mon);
    }
}

/*
 * Notify all threads waiting on this monitor.
 *
 * We keep a count of how many threads we notified, so that our various
 * counts remain accurate.
 */
static void notifyAllMonitor(Thread* self, Monitor* mon)
{
    /* Make sure that the lock is fat and that we hold it. */
    if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notifyAll()");
        return;
    }

    mon->notifying = mon->waiting - mon->interrupting;
    if (mon->notifying > 0) {
        int cc;

        LOGVV("threadid=%d: broadcasting to %d threads on %p\n",
            self->threadId, mon->notifying, mon);

        cc = pthread_cond_broadcast(&mon->cond);
        assert(cc == 0);
    } else {
        LOGVV("threadid=%d: nobody to broadcast to on %p\n", self->threadId, mon);
    }
}

#if THIN_LOCKING
/*
 * Thin locking support
 */

/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp = &obj->lock.thin;
    u4 threadId = self->threadId;

    /* First, try to grab the lock as if it's thin;
     * this is the common case and will usually succeed.
     */
    if (!ATOMIC_CMP_SWAP((int32_t *)thinp,
                         (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
                         (int32_t)threadId)) {
        /* The lock is either a thin lock held by someone (possibly 'self'),
         * or a fat lock.
         */
        if ((*thinp & 0xffff) == threadId) {
            /* 'self' is already holding the thin lock; we can just
             * bump the count.  Atomic operations are not necessary
             * because only the thread holding the lock is allowed
             * to modify the Lock field.
             */
            *thinp += 1<<16;
        } else {
            /* If this is a thin lock we need to spin on it, if it's fat
             * we need to acquire the monitor.
             */
            if ((*thinp & 1) != 0) {
                ThreadStatus oldStatus;
                static const unsigned long maxSleepDelay = 1 * 1024 * 1024;
                unsigned long sleepDelay;

                LOG_THIN("(%d) spin on lock 0x%08x: 0x%08x (0x%08x) 0x%08x\n",
                         threadId, (uint)&obj->lock,
                         DVM_LOCK_INITIAL_THIN_VALUE, *thinp, threadId);

                /* The lock is still thin, but some other thread is
                 * holding it.  Let the VM know that we're about
                 * to wait on another thread.
                 */
                oldStatus = dvmChangeStatus(self, THREAD_MONITOR);

                /* Spin until the other thread lets go.
                 */
                sleepDelay = 0;
                do {
                    /* In addition to looking for an unlock,
                     * we need to watch out for some other thread
                     * fattening the lock behind our back.
                     */
                    while (*thinp != DVM_LOCK_INITIAL_THIN_VALUE) {
                        if ((*thinp & 1) == 0) {
                            /* The lock has been fattened already.
                             */
                            LOG_THIN("(%d) lock 0x%08x surprise-fattened\n",
                                     threadId, (uint)&obj->lock);
                            dvmChangeStatus(self, oldStatus);
                            goto fat_lock;
                        }

                        if (sleepDelay == 0) {
                            sched_yield();
                            sleepDelay = 1 * 1000;
                        } else {
                            usleep(sleepDelay);
                            if (sleepDelay < maxSleepDelay / 2) {
                                sleepDelay *= 2;
                            }
                        }
                    }
                } while (!ATOMIC_CMP_SWAP((int32_t *)thinp,
                                          (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
                                          (int32_t)threadId));
                LOG_THIN("(%d) spin on lock done 0x%08x: "
                         "0x%08x (0x%08x) 0x%08x\n",
                         threadId, (uint)&obj->lock,
                         DVM_LOCK_INITIAL_THIN_VALUE, *thinp, threadId);

                /* We've got the thin lock; let the VM know that we're
                 * done waiting.
                 */
                dvmChangeStatus(self, oldStatus);

                /* Fatten the lock.  Note this relinquishes ownership.
                 * We could also create the monitor in an "owned" state
                 * to avoid "re-locking" it in fat_lock.
                 */
                obj->lock.mon = dvmCreateMonitor(obj);
                LOG_THIN("(%d) lock 0x%08x fattened\n",
                         threadId, (uint)&obj->lock);

                /* Fall through to acquire the newly fat lock.
                 */
            }

            /* The lock is already fat, which means
             * that obj->lock.mon is a regular (Monitor *).
             */
        fat_lock:
            assert(obj->lock.mon != NULL);
            lockMonitor(self, obj->lock.mon);
        }
    }
    // else, the lock was acquired with the ATOMIC_CMP_SWAP().

#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * See if we were allowed to grab the lock at this time.  We do it
     * *after* acquiring the lock, rather than before, so that we can
     * freely update the Monitor struct.  This seems counter-intuitive,
     * but our goal is deadlock *prediction* not deadlock *prevention*.
     * (If we actually deadlock, the situation is easy to diagnose from
     * a thread dump, so there's no point making a special effort to do
     * the checks before the lock is held.)
     *
     * This needs to happen before we add the object to the thread's
     * monitor list, so we can tell the difference between first-lock and
     * re-lock.
     *
     * It's also important that we do this while in THREAD_RUNNING, so
     * that we don't interfere with cleanup operations in the GC.
     */
    if (gDvm.deadlockPredictMode != kDPOff) {
        if (self->status != THREAD_RUNNING) {
            LOGE("Bad thread status (%d) in DP\n", self->status);
            dvmDumpThread(self, false);
            dvmAbort();
        }
        assert(!dvmCheckException(self));
        updateDeadlockPrediction(self, obj);
        if (dvmCheckException(self)) {
            /*
             * If we're throwing an exception here, we need to free the
             * lock.  We add the object to the thread's monitor list so the
             * "unlock" code can remove it.
             */
            dvmAddToMonitorList(self, obj, false);
            dvmUnlockObject(self, obj);
            LOGV("--- unlocked, pending is '%s'\n",
                dvmGetException(self)->clazz->descriptor);
        }
    }

    /*
     * Add the locked object, and the current stack trace, to the list
     * held by the Thread object.  If deadlock prediction isn't on,
     * don't capture the stack trace.
     */
    dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
#elif defined(WITH_MONITOR_TRACKING)
    /*
     * Add the locked object to the list held by the Thread object.
     */
    dvmAddToMonitorList(self, obj, false);
#endif
}

/*
 * Implements monitorexit for "synchronized" stuff.
 *
 * On failure, throws an exception and returns "false".
 */
bool dvmUnlockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp = &obj->lock.thin;
    u4 threadId = self->threadId;

    /* Check the common case, where 'self' has locked 'obj' once, first.
     */
    if (*thinp == threadId) {
        /* Unlock 'obj' by clearing our threadId from 'thin'.
         * The lock protects the lock field itself, so it's
         * safe to update non-atomically.
         */
        *thinp = DVM_LOCK_INITIAL_THIN_VALUE;
    } else if ((*thinp & 1) != 0) {
        /* If the object is locked, it had better be locked by us.
         */
        if ((*thinp & 0xffff) != threadId) {
            /* The JNI spec says that we should throw an exception
             * in this case.
             */
            //LOGW("Unlock thin %p: id %d vs %d\n",
            //    obj, (*thinp & 0xffff), threadId);
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "unlock of unowned monitor");
            return false;
        }

        /* It's a thin lock, but 'self' has locked 'obj'
         * more than once.  Decrement the count.
         */
        *thinp -= 1<<16;
    } else {
        /* It's a fat lock.
         */
        assert(obj->lock.mon != NULL);
        if (!unlockMonitor(self, obj->lock.mon)) {
            /* exception has been raised */
            return false;
        }
    }

#ifdef WITH_MONITOR_TRACKING
    /*
     * Remove the object from the Thread's list.
     */
    dvmRemoveFromMonitorList(self, obj);
#endif

    return true;
}

/*
 * Object.wait().  Also called for class init.
 */
void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    Monitor* mon = obj->lock.mon;
    u4 thin = obj->lock.thin;

    /* If the lock is still thin, we need to fatten it.
     */
    if ((thin & 1) != 0) {
        /* Make sure that 'self' holds the lock.
         */
        if ((thin & 0xffff) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before wait()");
            return;
        }

        /* This thread holds the lock.  We need to fatten the lock
         * so 'self' can block on it.  Don't update the object lock
         * field yet, because 'self' needs to acquire the lock before
         * any other thread gets a chance.
         */
        mon = dvmCreateMonitor(obj);

        /* 'self' has actually locked the object one or more times;
         * make sure that the monitor reflects this.
         */
        lockMonitor(self, mon);
        mon->lockCount = thin >> 16;
        LOG_THIN("(%d) lock 0x%08x fattened by wait() to count %d\n",
                 self->threadId, (uint)&obj->lock, mon->lockCount);

        /* Make the monitor public now that it's in the right state.
         */
        MEM_BARRIER();
        obj->lock.mon = mon;
    }

    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
}

/*
 * Object.notify().
 */
void dvmObjectNotify(Thread* self, Object *obj)
{
    Monitor* mon = obj->lock.mon;
    u4 thin = obj->lock.thin;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if ((thin & 1) != 0) {
        /* Make sure that 'self' holds the lock.
         */
        if ((thin & 0xffff) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notify()");
            return;
        }

        /* no-op; there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyMonitor(self, mon);
    }
}

/*
 * Object.notifyAll().
 */
void dvmObjectNotifyAll(Thread* self, Object *obj)
{
    u4 thin = obj->lock.thin;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if ((thin & 1) != 0) {
        /* Make sure that 'self' holds the lock.
         */
        if ((thin & 0xffff) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notifyAll()");
            return;
        }

        /* no-op; there are no waiters to notify.
         */
    } else {
        Monitor* mon = obj->lock.mon;

        /* It's a fat lock.
         */
        notifyAllMonitor(self, mon);
    }
}

#else  // not THIN_LOCKING

/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception.
 */
void dvmLockObject(Thread* self, Object* obj)
{
    Monitor* mon = obj->lock.mon;

    if (mon == NULL) {
        mon = dvmCreateMonitor(obj);
        if (!ATOMIC_CMP_SWAP((int32_t *)&obj->lock.mon,
                             (int32_t)NULL, (int32_t)mon)) {
            /* somebody else beat us to it */
            releaseMonitor(mon);
            mon = obj->lock.mon;
        }
    }

    lockMonitor(self, mon);
}

/*
 * Implements monitorexit for "synchronized" stuff.
 */
bool dvmUnlockObject(Thread* self, Object* obj)
{
    Monitor* mon = obj->lock.mon;

    return unlockMonitor(self, mon);
}


/*
 * Object.wait().
 */
void dvmObjectWait(Thread* self, Object* obj, u8 msec, u4 nsec)
{
    Monitor* mon = obj->lock.mon;

    waitMonitor(self, mon, msec, nsec);
}

/*
 * Object.notify().
 */
void dvmObjectNotify(Thread* self, Object* obj)
{
    Monitor* mon = obj->lock.mon;

    notifyMonitor(self, mon);
}

/*
 * Object.notifyAll().
 */
void dvmObjectNotifyAll(Thread* self, Object* obj)
{
    Monitor* mon = obj->lock.mon;

    notifyAllMonitor(self, mon);
}

#endif  // not THIN_LOCKING


/*
 * This implements java.lang.Thread.sleep(long msec, int nsec).
 *
 * The sleep is interruptible by other threads, which means we can't just
 * plop into an OS sleep call.  (We probably could if we wanted to send
 * signals around and rely on EINTR, but that's inefficient and relies
 * on native code respecting our signal mask.)
 *
 * We have to do all of this stuff for Object.wait() as well, so it's
 * easiest to just sleep on a private Monitor.
 *
 * It appears that we want sleep(0,0) to go through the motions of sleeping
 * for a very short duration, rather than just returning.
 */
void dvmThreadSleep(u8 msec, u4 nsec)
{
    Thread* self = dvmThreadSelf();
    Monitor* mon = gDvm.threadSleepMon;

    /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
    if (msec == 0 && nsec == 0)
        nsec++;

    lockMonitor(self, mon);
    waitMonitor(self, mon, msec, nsec, true);
    unlockMonitor(self, mon);
}

/*
 * Implement java.lang.Thread.interrupt().
 *
 * We need to increment the monitor's "interrupting" count, and set the
 * interrupted status for the thread in question.  Doing so requires
 * gaining the monitor's lock, which may not happen in a timely fashion.
 * We are left with a decision between failing to interrupt the thread
 * and stalling the interrupting thread.
 *
 * We must take some care to ensure that we don't try to interrupt the same
 * thread on the same mutex twice.  Doing so would leave us with an
 * incorrect value for Monitor.interrupting.
 */
void dvmThreadInterrupt(volatile Thread* thread)
{
    Monitor* mon;

    /*
     * Raise the "interrupted" flag.  This will cause it to bail early out
     * of the next wait() attempt, if it's not currently waiting on
     * something.
     */
    thread->interrupted = true;
    MEM_BARRIER();

    /*
     * Is the thread waiting?
     *
     * Note that fat vs. thin doesn't matter here; waitMonitor
     * is only set when a thread actually waits on a monitor,
     * which implies that the monitor has already been fattened.
     */
    mon = thread->waitMonitor;
    if (mon == NULL)
        return;

    /*
     * Try to acquire the monitor, if we don't already own it.  We need
     * to hold the same mutex as the thread in order to signal the
     * condition it's waiting on.  When the thread goes to sleep it will
     * release the monitor's mutex, allowing us to signal it.
     *
     * TODO: we may be able to get rid of the explicit lock by coordinating
     * this more closely with waitMonitor.
     */
    Thread* self = dvmThreadSelf();
    if (!tryLockMonitor(self, mon)) {
        /*
         * Failed to get the monitor the thread is waiting on; most likely
         * the other thread is in the middle of doing something.
         */
        const int kSpinSleepTime = 500*1000;        /* 0.5s */
        u8 startWhen = dvmGetRelativeTimeUsec();
        int sleepIter = 0;

        while (dvmIterativeSleep(sleepIter++, kSpinSleepTime, startWhen)) {
            /*
             * Still time left on the clock, try to grab it again.
             */
            if (tryLockMonitor(self, mon))
                goto gotit;

            /*
             * If the target thread is no longer waiting on the same monitor,
             * the "interrupted" flag we set earlier will have caused the
             * interrupt when the thread woke up, so we can stop now.
             */
            if (thread->waitMonitor != mon)
                return;
        }

        /*
         * We have to give up or risk deadlock.
         */
        LOGW("threadid=%d: unable to interrupt threadid=%d\n",
            self->threadId, thread->threadId);
        return;
    }

gotit:
    /*
     * We've got the monitor lock, which means nobody can be added or
     * removed from the wait list.  This also means that the Thread's
     * waitMonitor/interruptingWait fields can't be modified by anyone
     * else.
     *
     * If things look good, raise flags and wake the threads sleeping
     * on the monitor's condition variable.
     */
    if (thread->waitMonitor == mon &&       // still on same monitor?
        thread->interrupted &&              // interrupt still pending?
        !thread->interruptingWait)          // nobody else is interrupting too?
    {
        int cc;

        LOGVV("threadid=%d: interrupting threadid=%d waiting on %p\n",
            self->threadId, thread->threadId, mon);

        thread->interruptingWait = true;    // prevent re-interrupt...
        mon->interrupting++;                // ...so we only do this once
        cc = pthread_cond_broadcast(&mon->cond);
        assert(cc == 0);
    }

    unlockMonitor(self, mon);
}


#ifdef WITH_DEADLOCK_PREDICTION
/*
 * ===========================================================================
 *      Deadlock prediction
 * ===========================================================================
 */
/*
The idea is to predict the possibility of deadlock by recording the order
in which monitors are acquired.  If we see an attempt to acquire a lock
out of order, we can identify the locks and offending code.

To make this work, we need to keep track of the locks held by each thread,
and create history trees for each lock.  When a thread tries to acquire
a new lock, we walk through the "history children" of the lock, looking
for a match with locks the thread already holds.  If we find a match,
it means the thread has made a request that could result in a deadlock.

To support recursive locks, we always allow re-locking a currently-held
lock, and maintain a recursion depth count.

An ASCII-art example, where letters represent Objects:

        A
       /|\
      / | \
     B  |  D
      \ |
       \|
        C

The above is the tree we'd have after handling Object synchronization
sequences "ABC", "AC", "AD".  A has three children, {B, C, D}.  C is also
a child of B.  (The lines represent pointers between parent and child.
Every node can have multiple parents and multiple children.)

If we hold AC, and want to lock B, we recursively search through B's
children to see if A or C appears.  It does, so we reject the attempt.
(A straightforward way to implement it: add a link from C to B, then
determine whether the graph starting at B contains a cycle.)

If we hold AC and want to lock D, we would succeed, creating a new link
from C to D.

The lock history and a stack trace is attached to the Object's Monitor
struct, which means we need to fatten every Object we lock (thin locking
is effectively disabled).  If we don't need the stack trace we can
avoid fattening the leaf nodes, only fattening objects that need to hold
history trees.

Updates to Monitor structs are only allowed for the thread that holds
the Monitor, so we actually do most of our deadlock prediction work after
the lock has been acquired.

When an object with a monitor is GCed, we need to remove it from the
history trees.  There are two basic approaches:
 (1) Go through the entire set of known monitors, search all child
     lists for the object in question.  This is rather slow, resulting
     in GC passes that take upwards of 10 seconds to complete.
 (2) Maintain "parent" pointers in each node.  Remove the entries as
     required.  This requires additional storage and maintenance for
     every operation, but is significantly faster at GC time.
For each GCed object, we merge all of the object's children into each of
the object's parents.
*/
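
/*
 * Illustrative sketch (not part of the original code): the cycle check
 * described in the comment above, reduced to a plain depth-first search
 * over a hypothetical node type.  The real implementation is
 * traverseTree() below, which walks Monitor.historyChildren and uses
 * historyMark the same way.
 */
#if 0
typedef struct ExampleNode {
    bool                 mark;       /* corresponds to Monitor.historyMark */
    int                  childCount;
    struct ExampleNode** children;   /* corresponds to historyChildren */
} ExampleNode;

/* Returns true if a cycle is reachable from "node". */
static bool exampleHasCycle(ExampleNode* node)
{
    int i;

    if (node->mark)
        return true;                 /* revisited on the current path: cycle */
    node->mark = true;
    for (i = 0; i < node->childCount; i++) {
        if (exampleHasCycle(node->children[i])) {
            node->mark = false;
            return true;
        }
    }
    node->mark = false;              /* clear on exit, as traverseTree does */
    return false;
}
#endif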

#if !defined(WITH_MONITOR_TRACKING)
# error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
#endif

/*
 * Clear out the contents of an ExpandingObjectList, freeing any
 * dynamic allocations.
 */
static void expandObjClear(ExpandingObjectList* pList)
{
    if (pList->list != NULL) {
        free(pList->list);
        pList->list = NULL;
    }
    pList->alloc = pList->count = 0;
}

/*
 * Get the number of objects currently stored in the list.
 */
static inline int expandBufGetCount(const ExpandingObjectList* pList)
{
    return pList->count;
}

/*
 * Get the Nth entry from the list.
 */
static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
    int i)
{
    return pList->list[i];
}

/*
 * Add a new entry to the list.
 *
 * We don't check for or try to enforce uniqueness.  It's expected that
 * the higher-level code does this for us.
 */
static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
{
    if (pList->count == pList->alloc) {
        /* time to expand */
        Object** newList;

        if (pList->alloc == 0)
            pList->alloc = 4;
        else
            pList->alloc *= 2;
        LOGVV("expanding %p to %d\n", pList, pList->alloc);
        newList = realloc(pList->list, pList->alloc * sizeof(Object*));
        if (newList == NULL) {
            LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
            dvmAbort();
        }
        pList->list = newList;
    }

    pList->list[pList->count++] = obj;
}

/*
 * Returns "true" if the element was successfully removed.
 */
static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
{
    int i;

    for (i = pList->count-1; i >= 0; i--) {
        if (pList->list[i] == obj)
            break;
    }
    if (i < 0)
        return false;

    if (i != pList->count-1) {
        /*
         * The order of elements is not important, so we just copy the
         * last entry into the new slot.
         */
        //memmove(&pList->list[i], &pList->list[i+1],
        //    (pList->count-1 - i) * sizeof(pList->list[0]));
        pList->list[i] = pList->list[pList->count-1];
    }

    pList->count--;
    pList->list[pList->count] = (Object*) 0xdecadead;
    return true;
}

/*
 * Returns "true" if "obj" appears in the list.
 */
static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
{
    int i;

    for (i = 0; i < pList->count; i++) {
        if (pList->list[i] == obj)
            return true;
    }
    return false;
}

/*
 * Print the list contents to stdout.  For debugging.
 */
static void expandObjDump(const ExpandingObjectList* pList)
{
    int i;
    for (i = 0; i < pList->count; i++)
        printf(" %p", pList->list[i]);
}

/*
 * Check for duplicate entries.  Returns the index of the first instance
 * of the duplicated value, or -1 if no duplicates were found.
 */
static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
{
    int i, j;
    for (i = 0; i < pList->count-1; i++) {
        for (j = i + 1; j < pList->count; j++) {
            if (pList->list[i] == pList->list[j]) {
                return i;
            }
        }
    }

    return -1;
}


/*
 * Determine whether "child" appears in the list of objects associated
 * with the Monitor in "parent".  If "parent" is a thin lock, we return
 * false immediately.
 */
static bool objectInChildList(const Object* parent, Object* child)
{
    Lock lock = parent->lock;
    if (!IS_LOCK_FAT(&lock)) {
        //LOGI("on thin\n");
        return false;
    }

    return expandObjHas(&lock.mon->historyChildren, child);
}

/*
 * Print the child list.
 */
static void dumpKids(Object* parent)
{
    Monitor* mon = parent->lock.mon;

    printf("Children of %p:", parent);
    expandObjDump(&mon->historyChildren);
    printf("\n");
}

/*
 * Add "child" to the list of children in "parent", and add "parent" to
 * the list of parents in "child".
 */
static void linkParentToChild(Object* parent, Object* child)
{
    //assert(parent->lock.mon->owner == dvmThreadSelf());   // !owned for merge
    assert(IS_LOCK_FAT(&parent->lock));
    assert(IS_LOCK_FAT(&child->lock));
    assert(parent != child);
    Monitor* mon;

    mon = parent->lock.mon;
    assert(!expandObjHas(&mon->historyChildren, child));
    expandObjAddEntry(&mon->historyChildren, child);

    mon = child->lock.mon;
    assert(!expandObjHas(&mon->historyParents, parent));
    expandObjAddEntry(&mon->historyParents, parent);
}


/*
 * Remove "child" from the list of children in "parent".
 */
static void unlinkParentFromChild(Object* parent, Object* child)
{
    //assert(parent->lock.mon->owner == dvmThreadSelf());   // !owned for GC
    assert(IS_LOCK_FAT(&parent->lock));
    assert(IS_LOCK_FAT(&child->lock));
    assert(parent != child);
    Monitor* mon;

    mon = parent->lock.mon;
    if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
        LOGW("WARNING: child %p not found in parent %p\n", child, parent);
    }
    assert(!expandObjHas(&mon->historyChildren, child));
    assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);

    mon = child->lock.mon;
    if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
        LOGW("WARNING: parent %p not found in child %p\n", parent, child);
    }
    assert(!expandObjHas(&mon->historyParents, parent));
    assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
}


/*
 * Log the monitors held by the current thread.  This is done as part of
 * flagging an error.
 */
static void logHeldMonitors(Thread* self)
{
    char* name = NULL;

    name = dvmGetThreadName(self);
    LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
        self->threadId, name);
    LOGW("(most-recently-acquired on top):\n");
    free(name);

    LockedObjectData* lod = self->pLockedObjects;
    while (lod != NULL) {
        LOGW("--- object %p[%d] (%s)\n",
            lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
        dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);

        lod = lod->next;
    }
}

/*
 * Recursively traverse the object hierarchy starting at "obj".  We mark
 * ourselves on entry and clear the mark on exit.  If we ever encounter
 * a marked object, we have a cycle.
 *
 * Returns "true" if all is well, "false" if we found a cycle.
 */
static bool traverseTree(Thread* self, const Object* obj)
{
    assert(IS_LOCK_FAT(&obj->lock));
    Monitor* mon = obj->lock.mon;

    /*
     * Have we been here before?
     */
    if (mon->historyMark) {
        int* rawStackTrace;
        int stackDepth;

        LOGW("%s\n", kStartBanner);
        LOGW("Illegal lock attempt:\n");
        LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);

        rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
        dvmLogRawStackTrace(rawStackTrace, stackDepth);
        free(rawStackTrace);

        LOGW(" ");
        logHeldMonitors(self);

        LOGW(" ");
        LOGW("Earlier, the following lock order (from last to first) was\n");
        LOGW("established -- stack trace is from first successful lock):\n");
        return false;
    }
    mon->historyMark = true;

    /*
     * Examine the children.  We do NOT hold these locks, so they might
     * very well transition from thin to fat or change ownership while
     * we work.
     *
     * NOTE: we rely on the fact that they cannot revert from fat to thin
     * while we work.  This is currently a safe assumption.
     *
     * We can safely ignore thin-locked children, because by definition
     * they have no history and are leaf nodes.  In the current
     * implementation we always fatten the locks to provide a place to
     * hang the stack trace.
     */
    ExpandingObjectList* pList = &mon->historyChildren;
    int i;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        const Object* child = expandBufGetEntry(pList, i);
        Lock lock = child->lock;
        if (!IS_LOCK_FAT(&lock))
            continue;
        if (!traverseTree(self, child)) {
            LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
            dvmLogRawStackTrace(mon->historyRawStackTrace,
                mon->historyStackDepth);
            mon->historyMark = false;
            return false;
        }
    }

    mon->historyMark = false;

    return true;
}

/*
 * Update the deadlock prediction tree, based on the current thread
 * acquiring "acqObj".  This must be called before the object is added to
 * the thread's list of held monitors.
 *
 * If the thread already holds the lock (recursion), or this is a known
 * lock configuration, we return without doing anything.  Otherwise, we add
 * a link from the most-recently-acquired lock in this thread to "acqObj"
 * after ensuring that the parent lock is "fat".
 *
 * This MUST NOT be called while a GC is in progress in another thread,
 * because we assume exclusive access to history trees in owned monitors.
 */
static void updateDeadlockPrediction(Thread* self, Object* acqObj)
{
    LockedObjectData* lod;
    LockedObjectData* mrl;

    /*
     * Quick check for recursive access.
     */
    lod = dvmFindInMonitorList(self, acqObj);
    if (lod != NULL) {
        LOGV("+++ DP: recursive %p\n", acqObj);
        return;
    }

    /*
     * Make the newly-acquired object's monitor "fat".  In some ways this
     * isn't strictly necessary, but we need the GC to tell us when
     * "interesting" objects go away, and right now the only way to make
     * an object look interesting is to give it a monitor.
     *
     * This also gives us a place to hang a stack trace.
     *
     * Our thread holds the lock, so we're allowed to rewrite the lock
     * without worrying that something will change out from under us.
     */
    if (!IS_LOCK_FAT(&acqObj->lock)) {
        LOGVV("fattening lockee %p (recur=%d)\n",
            acqObj, acqObj->lock.thin >> 16);
        Monitor* newMon = dvmCreateMonitor(acqObj);
        lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
        newMon->lockCount += acqObj->lock.thin >> 16;
        acqObj->lock.mon = newMon;
    }

    /* if we don't have a stack trace for this monitor, establish one */
    if (acqObj->lock.mon->historyRawStackTrace == NULL) {
        Monitor* mon = acqObj->lock.mon;
        mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
            &mon->historyStackDepth);
    }

    /*
     * We need to examine and perhaps modify the most-recently-locked
     * monitor.  We own that, so there's no risk of another thread
     * stepping on us.
     *
     * Retrieve the most-recently-locked entry from our thread.
     */
    mrl = self->pLockedObjects;
    if (mrl == NULL)
        return;         /* no other locks held */

    /*
     * Do a quick check to see if "acqObj" is a direct descendant.  We can do
     * this without holding the global lock because of our assertion that
     * a GC is not running in parallel -- nobody except the GC can
     * modify a history list in a Monitor they don't own, and we own "mrl".
     * (There might be concurrent *reads*, but no concurrent *writes*.)
     *
     * If we find it, this is a known good configuration, and we're done.
     */
    if (objectInChildList(mrl->obj, acqObj))
        return;

    /*
     * "mrl" is going to need to have a history tree.  If it's currently
     * a thin lock, we make it fat now.  The thin lock might have a
     * nonzero recursive lock count, which we need to carry over.
     *
     * Our thread holds the lock, so we're allowed to rewrite the lock
     * without worrying that something will change out from under us.
     */
    if (!IS_LOCK_FAT(&mrl->obj->lock)) {
        LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
            mrl->obj, acqObj, mrl->obj->lock.thin >> 16);
        Monitor* newMon = dvmCreateMonitor(mrl->obj);
        lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
        newMon->lockCount += mrl->obj->lock.thin >> 16;
        mrl->obj->lock.mon = newMon;
    }

    /*
     * We haven't seen this configuration before.  We need to scan down
     * acqObj's tree to see if any of the monitors in self->pLockedObjects
     * appear.  We grab a global lock before traversing or updating the
     * history list.
     *
     * If we find a match for any of our held locks, we know that the lock
     * has previously been acquired *after* acqObj, and we throw an error.
     *
     * The easiest way to do this is to create a link from "mrl" to "acqObj"
     * and do a recursive traversal, marking nodes as we cross them.  If
     * we cross one a second time, we have a cycle and can throw an error.
     * (We do the flag-clearing traversal before adding the new link, so
     * that we're guaranteed to terminate.)
     *
     * If "acqObj" is a thin lock, it has no history, and we can create a
     * link to it without additional checks.  [ We now guarantee that it's
     * always fat. ]
     */
    bool failed = false;
    dvmLockMutex(&gDvm.deadlockHistoryLock);
    linkParentToChild(mrl->obj, acqObj);
    if (!traverseTree(self, acqObj)) {
        LOGW("%s\n", kEndBanner);
        failed = true;

        /* remove the entry so we're still okay when in "warning" mode */
        unlinkParentFromChild(mrl->obj, acqObj);
    }
    dvmUnlockMutex(&gDvm.deadlockHistoryLock);

    if (failed) {
        switch (gDvm.deadlockPredictMode) {
        case kDPErr:
            dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
            break;
        case kDPAbort:
            LOGE("Aborting due to potential deadlock\n");
            dvmAbort();
            break;
        default:
            /* warn only */
            break;
        }
    }
}

/*
 * We're removing "child" from existence.  We want to pull all of
 * child's children into "parent", filtering out duplicates.  This is
 * called during the GC.
 *
 * This does not modify "child", which might have multiple parents.
 */
static void mergeChildren(Object* parent, const Object* child)
{
    Monitor* mon;
    int i;

    assert(IS_LOCK_FAT(&child->lock));
    mon = child->lock.mon;
    ExpandingObjectList* pList = &mon->historyChildren;

    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* grandChild = expandBufGetEntry(pList, i);

        if (!objectInChildList(parent, grandChild)) {
            LOGVV("+++ migrating %p link to %p\n", grandChild, parent);
            linkParentToChild(parent, grandChild);
        } else {
            LOGVV("+++ parent %p already links to %p\n", parent, grandChild);
        }
    }
}

/*
 * An object with a fat lock is being collected during a GC pass.  We
 * want to remove it from any lock history trees that it is a part of.
 *
 * This may require updating the history trees in several monitors.  The
 * monitor semantics guarantee that no other thread will be accessing
 * the history trees at the same time.
 */
static void removeCollectedObject(Object* obj)
{
    Monitor* mon;

    LOGVV("+++ collecting %p\n", obj);

#if 0
    /*
     * We're currently running through the entire set of known monitors.
     * This can be somewhat slow.  We may want to keep lists of parents
     * in each child to speed up GC.
     */
    mon = gDvm.monitorList;
    while (mon != NULL) {
        Object* parent = mon->obj;
        if (parent != NULL) {       /* value nulled for deleted entries */
            if (objectInChildList(parent, obj)) {
                LOGVV("removing child %p from parent %p\n", obj, parent);
                unlinkParentFromChild(parent, obj);
                mergeChildren(parent, obj);
            }
        }
        mon = mon->next;
    }
#endif

    /*
     * For every parent of this object:
     *  - merge all of our children into the parent's child list (creates
     *    a two-way link between parent and child)
     *  - remove ourselves from the parent's child list
     */
    ExpandingObjectList* pList;
    int i;

    assert(IS_LOCK_FAT(&obj->lock));
    mon = obj->lock.mon;
    pList = &mon->historyParents;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* parent = expandBufGetEntry(pList, i);
        Monitor* parentMon = parent->lock.mon;

        if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
            LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
        }
        assert(!expandObjHas(&parentMon->historyChildren, obj));

        mergeChildren(parent, obj);
    }

    /*
     * For every child of this object:
     *  - remove ourselves from the child's parent list
     */
    pList = &mon->historyChildren;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* child = expandBufGetEntry(pList, i);
        Monitor* childMon = child->lock.mon;

        if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
            LOGW("WARNING: parent %p not found in child %p\n", obj, child);
        }
        assert(!expandObjHas(&childMon->historyParents, obj));
    }
}

#endif /*WITH_DEADLOCK_PREDICTION*/