/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16/*
17 * Fundamental synchronization mechanisms.
18 *
19 * The top part of the file has operations on "monitor" structs; the
20 * next part has the native calls on objects.
21 *
22 * The current implementation uses "thin locking" to avoid allocating
23 * an Object's full Monitor struct until absolutely necessary (i.e.,
24 * during contention or a call to wait()).
25 *
26 * TODO: make improvements to thin locking
27 * We may be able to improve performance and reduce memory requirements by:
28 * - reverting to a thin lock once the Monitor is no longer necessary
29 * - using a pool of monitor objects, with some sort of recycling scheme
30 *
31 * TODO: recycle native-level monitors when objects are garbage collected.
32 *
33 * NOTE: if we broadcast a notify, and somebody sneaks in a Thread.interrupt
34 * before the notify finishes (i.e. before all threads sleeping on the
35 * condition variable have awoken), we could end up with a nonzero value for
36 * "notifying" after everybody is gone because one of the notified threads
37 * will actually exit via the "interrupted" path. This can be detected as
38 * (notifying + interrupting > waiting), i.e. the number of threads that need
39 * to be woken is greater than the number waiting. The fix is to test and
40 * adjust "notifying" at the start of the wait() call.
41 * -> This is probably not a problem if we notify less than the full set
42 * before the interrupt comes in. If we have four waiters, two pending
43 * notifies, and an interrupt hits, we will interrupt one thread and notify
44 * two others. Doesn't matter if the interrupted thread would have been
45 * one of the notified. Count is only screwed up if we have two waiters,
46 * in which case it's safe to fix it at the start of the next wait().
47 */
48#include "Dalvik.h"
49
50#include <stdlib.h>
51#include <unistd.h>
52#include <pthread.h>
53#include <time.h>
54#include <sys/time.h>
55#include <errno.h>
56
57#define LOG_THIN LOGV
58
59#ifdef WITH_DEADLOCK_PREDICTION /* fwd */
60static const char* kStartBanner =
61 "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
62static const char* kEndBanner =
63 "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";
64
65/*
66 * Unsorted, expanding list of objects.
67 *
68 * This is very similar to PointerSet (which came into existence after this),
69 * but these are unsorted, uniqueness is not enforced by the "add" function,
70 * and the base object isn't allocated on the heap.
71 */
72typedef struct ExpandingObjectList {
73 u2 alloc;
74 u2 count;
75 Object** list;
76} ExpandingObjectList;
77
78/* fwd */
79static void updateDeadlockPrediction(Thread* self, Object* obj);
80static void removeCollectedObject(Object* obj);
81static void expandObjClear(ExpandingObjectList* pList);
82#endif
83
84/*
85 * Every Object has a monitor associated with it, but not every Object is
86 * actually locked. Even the ones that are locked do not need a
87 * full-fledged monitor until a) there is actual contention or b) wait()
88 * is called on the Object.
89 *
90 * For Dalvik, we have implemented a scheme similar to the one described
91 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
92 * (ACM 1998). Things are even easier for us, though, because we have
93 * a full 32 bits to work with.
94 *
95 * The two states that an Object's lock may have are referred to as
96 * "thin" and "fat". The lock may transition between the two states
97 * for various reasons.
98 *
99 * The lock value itself is stored in Object.lock, which is a union of
100 * the form:
101 *
102 * typedef union Lock {
103 * u4 thin;
104 * Monitor* mon;
105 * } Lock;
106 *
107 * It is possible to tell the current state of the lock from the actual
108 * value, so we do not need to store any additional state. When the
109 * lock is "thin", it has the form:
110 *
 *    [31 ---- 16] [15 ---- 1] [0]
 *     lock count   thread id   1
113 *
114 * When it is "fat", the field is simply a (Monitor *). Since the pointer
115 * will always be 4-byte-aligned, bits 1 and 0 will always be zero when
116 * the field holds a pointer. Hence, we can tell the current fat-vs-thin
117 * state by checking the least-significant bit.
118 *
119 * For an in-depth description of the mechanics of thin-vs-fat locking,
120 * read the paper referred to above.
121 *
122 * To reduce the amount of work when attempting a compare and exchange,
123 * Thread.threadId is guaranteed to have bit 0 set, and all new Objects
124 * have their lock fields initialized to the value 0x1, or
125 * DVM_LOCK_INITIAL_THIN_VALUE, via DVM_OBJECT_INIT().
126 */
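/*
 * For illustration only -- a minimal sketch (hypothetical helper names,
 * not part of this file) of how the lock word described above can be
 * interpreted:
 *
 *   static inline bool sketchIsThin(u4 thin)    { return (thin & 1) != 0; }
 *   static inline u4   sketchThinOwner(u4 thin) { return thin & 0xffff; }
 *   static inline u4   sketchThinCount(u4 thin) { return thin >> 16; }
 *
 * For example, 0x00000001 (DVM_LOCK_INITIAL_THIN_VALUE) reads as "thin,
 * unowned", while 0x00020015 reads as "thin, owned by the thread whose id
 * is 0x15, re-entered twice beyond the initial acquire".  A value with
 * bit 0 clear is not a thin lock at all; it is the Monitor pointer.
 */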
127
128/*
129 * Monitors provide:
130 * - mutually exclusive access to resources
131 * - a way for multiple threads to wait for notification
132 *
133 * In effect, they fill the role of both mutexes and condition variables.
134 *
135 * Only one thread can own the monitor at any time. There may be several
136 * threads waiting on it (the wait call unlocks it). One or more waiting
137 * threads may be getting interrupted or notified at any given time.
138 */
139struct Monitor {
140 Thread* owner; /* which thread currently owns the lock? */
141 int lockCount; /* owner's recursive lock depth */
142 Object* obj; /* what object are we part of [debug only] */
143
144 int waiting; /* total #of threads waiting on this */
145 int notifying; /* #of threads being notified */
146 int interrupting; /* #of threads being interrupted */
147
148 pthread_mutex_t lock;
149 pthread_cond_t cond;
150
151 Monitor* next;
152
153#ifdef WITH_DEADLOCK_PREDICTION
154 /*
155 * Objects that have been locked immediately after this one in the
156 * past. We use an expanding flat array, allocated on first use, to
157 * minimize allocations. Deletions from the list, expected to be
158 * infrequent, are crunched down.
159 */
160 ExpandingObjectList historyChildren;
161
162 /*
163 * We also track parents. This isn't strictly necessary, but it makes
164 * the cleanup at GC time significantly faster.
165 */
166 ExpandingObjectList historyParents;
167
168 /* used during cycle detection */
169 bool historyMark;
170
171 /* stack trace, established the first time we locked the object */
172 int historyStackDepth;
173 int* historyRawStackTrace;
174#endif
175};
176
177
178/*
179 * Create and initialize a monitor.
180 */
181Monitor* dvmCreateMonitor(Object* obj)
182{
183 Monitor* mon;
184
185 mon = (Monitor*) calloc(1, sizeof(Monitor));
186 if (mon == NULL) {
187 LOGE("Unable to allocate monitor\n");
188 dvmAbort();
189 }
190 mon->obj = obj;
191 dvmInitMutex(&mon->lock);
192 pthread_cond_init(&mon->cond, NULL);
193
194 /* replace the head of the list with the new monitor */
195 do {
196 mon->next = gDvm.monitorList;
197 } while (!ATOMIC_CMP_SWAP((int32_t*)(void*)&gDvm.monitorList,
198 (int32_t)mon->next, (int32_t)mon));
199
200 return mon;
201}
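/*
 * The retry loop above is the usual lock-free "push onto the head of a
 * singly linked list" pattern: snapshot the current head, point the new
 * node at it, then publish the new node with a compare-and-swap that only
 * succeeds if the head is still the value we read.  For illustration only,
 * the same idea with an assumed generic CAS primitive (not the VM's
 * ATOMIC_CMP_SWAP macro):
 *
 *   do {
 *       node->next = *headp;             // snapshot the current head
 *   } while (!compareAndSwapPtr(headp, node->next, node));
 *                                        // retry if someone else pushed first
 */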
202
203/*
204 * Release a Monitor.
205 */
206static void releaseMonitor(Monitor* mon)
207{
208 // TODO
209}
210
211/*
212 * Free the monitor list. Only used when shutting the VM down.
213 */
214void dvmFreeMonitorList(void)
215{
216 Monitor* mon;
217 Monitor* nextMon;
218
219 mon = gDvm.monitorList;
220 while (mon != NULL) {
221 nextMon = mon->next;
222
223#ifdef WITH_DEADLOCK_PREDICTION
224 expandObjClear(&mon->historyChildren);
225 expandObjClear(&mon->historyParents);
226 free(mon->historyRawStackTrace);
227#endif
228 free(mon);
229 mon = nextMon;
230 }
231}
232
233/*
234 * Log some info about our monitors.
235 */
236void dvmDumpMonitorInfo(const char* msg)
237{
238#if QUIET_ZYGOTE_MONITOR
239 if (gDvm.zygote) {
240 return;
241 }
242#endif
243
244 int totalCount;
245 int liveCount;
246
247 totalCount = liveCount = 0;
248 Monitor* mon = gDvm.monitorList;
249 while (mon != NULL) {
250 totalCount++;
251 if (mon->obj != NULL)
252 liveCount++;
253 mon = mon->next;
254 }
255
256 LOGD("%s: monitor list has %d entries (%d live)\n",
257 msg, totalCount, liveCount);
258}
259
260/*
261 * Get the object that a monitor is part of.
262 */
263Object* dvmGetMonitorObject(Monitor* mon)
264{
265 if (mon == NULL)
266 return NULL;
267 else
268 return mon->obj;
269}
270
271/*
272 * Checks whether the given thread holds the given
 * object's lock.
274 */
275bool dvmHoldsLock(Thread* thread, Object* obj)
276{
277 if (thread == NULL || obj == NULL) {
278 return false;
279 }
280
281 /* Since we're reading the lock value multiple times,
282 * latch it so that it doesn't change out from under
283 * us if we get preempted.
284 */
285 Lock lock = obj->lock;
286 if (IS_LOCK_FAT(&lock)) {
287 return thread == lock.mon->owner;
288 } else {
289 return thread->threadId == (lock.thin & 0xffff);
290 }
291}
292
293/*
294 * Free the monitor associated with an object and make the object's lock
295 * thin again. This is called during garbage collection.
296 */
297void dvmFreeObjectMonitor_internal(Lock *lock)
298{
299 Monitor *mon;
300
301 /* The macro that wraps this function checks IS_LOCK_FAT() first.
302 */
303 assert(IS_LOCK_FAT(lock));
304
305#ifdef WITH_DEADLOCK_PREDICTION
306 if (gDvm.deadlockPredictMode != kDPOff)
307 removeCollectedObject(lock->mon->obj);
308#endif
309
310 mon = lock->mon;
311 lock->thin = DVM_LOCK_INITIAL_THIN_VALUE;
312
313 /* This lock is associated with an object
314 * that's being swept. The only possible way
315 * anyone could be holding this lock would be
316 * if some JNI code locked but didn't unlock
317 * the object, in which case we've got some bad
318 * native code somewhere.
319 */
320 assert(pthread_mutex_trylock(&mon->lock) == 0);
321 pthread_mutex_destroy(&mon->lock);
322 pthread_cond_destroy(&mon->cond);
323#if 1
324//TODO: unlink from the monitor list (would require a lock)
325// (might not -- the GC suspension may be enough)
326 {
327 Monitor *next;
328 next = mon->next;
329#ifdef WITH_DEADLOCK_PREDICTION
330 expandObjClear(&mon->historyChildren);
331 expandObjClear(&mon->historyParents);
332 free(mon->historyRawStackTrace);
333#endif
334 memset(mon, 0, sizeof (*mon));
335 mon->next = next;
336 }
337//free(mon);
338#endif
339}
340
341
342/*
343 * Lock a monitor.
344 */
345static void lockMonitor(Thread* self, Monitor* mon)
346{
347 int cc;
348
349 if (mon->owner == self) {
350 mon->lockCount++;
351 } else {
352 ThreadStatus oldStatus;
353
354 if (pthread_mutex_trylock(&mon->lock) != 0) {
355 /* mutex is locked, switch to wait status and sleep on it */
356 oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
357 cc = pthread_mutex_lock(&mon->lock);
358 assert(cc == 0);
359 dvmChangeStatus(self, oldStatus);
360 }
361
362 mon->owner = self;
363 assert(mon->lockCount == 0);
364
365 /*
366 * "waiting", "notifying", and "interrupting" could all be nonzero
367 * if we're locking an object on which other threads are waiting.
368 * Nothing worth assert()ing about here.
369 */
370 }
371}
372
373/*
374 * Try to lock a monitor.
375 *
376 * Returns "true" on success.
377 */
378static bool tryLockMonitor(Thread* self, Monitor* mon)
379{
380 int cc;
381
382 if (mon->owner == self) {
383 mon->lockCount++;
384 return true;
385 } else {
386 cc = pthread_mutex_trylock(&mon->lock);
387 if (cc == 0) {
388 mon->owner = self;
389 assert(mon->lockCount == 0);
390 return true;
391 } else {
392 return false;
393 }
394 }
395}
396
397
398/*
399 * Unlock a monitor.
400 *
401 * Returns true if the unlock succeeded.
402 * If the unlock failed, an exception will be pending.
403 */
404static bool unlockMonitor(Thread* self, Monitor* mon)
405{
406 assert(mon != NULL); // can this happen?
407
408 if (mon->owner == self) {
409 /*
410 * We own the monitor, so nobody else can be in here.
411 */
412 if (mon->lockCount == 0) {
413 int cc;
414 mon->owner = NULL;
415 cc = pthread_mutex_unlock(&mon->lock);
416 assert(cc == 0);
417 } else {
418 mon->lockCount--;
419 }
420 } else {
421 /*
422 * We don't own this, so we're not allowed to unlock it.
423 * The JNI spec says that we should throw IllegalMonitorStateException
424 * in this case.
425 */
426 if (mon->owner == NULL) {
427 //LOGW("Unlock fat %p: not owned\n", mon->obj);
428 } else {
429 //LOGW("Unlock fat %p: id %d vs %d\n",
430 // mon->obj, mon->owner->threadId, self->threadId);
431 }
432 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
433 "unlock of unowned monitor");
434 return false;
435 }
436 return true;
437}
438
439/*
440 * Wait on a monitor until timeout, interrupt, or notification. Used for
441 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
442 *
443 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
445 * - blocked in wait(), wait(long), or wait(long, int) methods of Object
446 * - blocked in join(), join(long), or join(long, int) methods of Thread
447 * - blocked in sleep(long), or sleep(long, int) methods of Thread
448 * Otherwise, we set the "interrupted" flag.
449 *
450 * Checks to make sure that "nsec" is in the range 0-999999
451 * (i.e. fractions of a millisecond) and throws the appropriate
452 * exception if it isn't.
453 *
454 * The spec allows "spurious wakeups", and recommends that all code using
455 * Object.wait() do so in a loop. This appears to derive from concerns
456 * about pthread_cond_wait() on multiprocessor systems. Some commentary
457 * on the web casts doubt on whether these can/should occur.
458 *
459 * Since we're allowed to wake up "early", we clamp extremely long durations
460 * to return at the end of the 32-bit time epoch.
461 */
462static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
463 bool interruptShouldThrow)
464{
465 struct timespec ts;
466 bool wasInterrupted = false;
467 bool timed;
468 int cc;
469
470 /* Make sure that the lock is fat and that we hold it. */
471 if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
472 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
473 "object not locked by thread before wait()");
474 return;
475 }
476
477 /*
478 * Enforce the timeout range.
479 */
480 if (msec < 0 || nsec < 0 || nsec > 999999) {
481 dvmThrowException("Ljava/lang/IllegalArgumentException;",
482 "timeout arguments out of range");
483 return;
484 }
485
486 /*
487 * Compute absolute wakeup time, if necessary.
488 */
489 if (msec == 0 && nsec == 0) {
490 timed = false;
491 } else {
492 s8 endSec;
493
494#ifdef HAVE_TIMEDWAIT_MONOTONIC
495 struct timespec now;
496 clock_gettime(CLOCK_MONOTONIC, &now);
497 endSec = now.tv_sec + msec / 1000;
498 if (endSec >= 0x7fffffff) {
499 LOGV("NOTE: end time exceeds epoch\n");
500 endSec = 0x7ffffffe;
501 }
502 ts.tv_sec = endSec;
503 ts.tv_nsec = (now.tv_nsec + (msec % 1000) * 1000 * 1000) + nsec;
504#else
505 struct timeval now;
506 gettimeofday(&now, NULL);
507 endSec = now.tv_sec + msec / 1000;
508 if (endSec >= 0x7fffffff) {
509 LOGV("NOTE: end time exceeds epoch\n");
510 endSec = 0x7ffffffe;
511 }
512 ts.tv_sec = endSec;
513 ts.tv_nsec = (now.tv_usec + (msec % 1000) * 1000) * 1000 + nsec;
514#endif
515
516 /* catch rollover */
517 if (ts.tv_nsec >= 1000000000L) {
518 ts.tv_sec++;
519 ts.tv_nsec -= 1000000000L;
520 }
521 timed = true;
522 }
523
524 /*
525 * Make sure "notifying" wasn't screwed up by earlier activity. If this
526 * is wrong we could end up waking up too many people. (This is a rare
527 * situation, but we need to handle it correctly.)
528 */
529 if (mon->notifying + mon->interrupting > mon->waiting) {
530 LOGI("threadid=%d: bogus mon %d+%d>%d; adjusting\n",
531 self->threadId, mon->notifying, mon->interrupting,
532 mon->waiting);
533
534 assert(mon->waiting >= mon->interrupting);
535 mon->notifying = mon->waiting - mon->interrupting;
536 }
537
538 /*
539 * Add ourselves to the set of threads waiting on this monitor, and
540 * release our hold. We need to let it go even if we're a few levels
541 * deep in a recursive lock, and we need to restore that later.
542 *
543 * The order of operations here isn't significant, because we still
544 * hold the pthread mutex.
545 */
546 int prevLockCount;
547
548 prevLockCount = mon->lockCount;
549 mon->lockCount = 0;
550 mon->waiting++;
551 mon->owner = NULL;
552
553 /*
554 * Update thread status. If the GC wakes up, it'll ignore us, knowing
555 * that we won't touch any references in this state, and we'll check
556 * our suspend mode before we transition out.
557 */
558 if (timed)
559 dvmChangeStatus(self, THREAD_TIMED_WAIT);
560 else
561 dvmChangeStatus(self, THREAD_WAIT);
562
563 /*
564 * Tell the thread which monitor we're waiting on. This is necessary
565 * so that Thread.interrupt() can wake us up. Thread.interrupt needs
566 * to gain ownership of the monitor mutex before it can signal us, so
567 * we're still not worried about race conditions.
568 */
569 self->waitMonitor = mon;
570
571 /*
572 * Handle the case where the thread was interrupted before we called
573 * wait().
574 */
575 if (self->interrupted) {
576 wasInterrupted = true;
577 goto done;
578 }
579
580 LOGVV("threadid=%d: waiting on %p\n", self->threadId, mon);
581
582 while (true) {
583 if (!timed) {
584 cc = pthread_cond_wait(&mon->cond, &mon->lock);
585 assert(cc == 0);
586 } else {
587#ifdef HAVE_TIMEDWAIT_MONOTONIC
588 cc = pthread_cond_timedwait_monotonic(&mon->cond, &mon->lock, &ts);
589#else
590 cc = pthread_cond_timedwait(&mon->cond, &mon->lock, &ts);
591#endif
592 if (cc == ETIMEDOUT) {
593 LOGVV("threadid=%d wakeup: timeout\n", self->threadId);
594 break;
595 }
596 }
597
598 /*
599 * We woke up because of an interrupt (which does a broadcast) or
600 * a notification (which might be a signal or a broadcast). Figure
601 * out what we need to do.
602 */
603 if (self->interruptingWait) {
604 /*
605 * The other thread successfully gained the monitor lock, and
606 * has confirmed that we were waiting on it. If this is an
607 * interruptible wait, we bail out immediately. If not, we
608 * continue on.
609 */
610 self->interruptingWait = false;
611 mon->interrupting--;
612 assert(self->interrupted);
613 if (interruptShouldThrow) {
614 wasInterrupted = true;
615 LOGD("threadid=%d wakeup: interrupted\n", self->threadId);
616 break;
617 } else {
618 LOGD("threadid=%d wakeup: not interruptible\n", self->threadId);
619 }
620 }
621 if (mon->notifying) {
622 /*
623 * One or more threads are being notified. Remove ourselves
624 * from the set.
625 */
626 mon->notifying--;
627 LOGVV("threadid=%d wakeup: notified\n", self->threadId);
628 break;
629 } else {
630 /*
631 * Looks like we were woken unnecessarily, probably as a
632 * result of another thread being interrupted. Go back to
633 * sleep.
634 */
635 LOGVV("threadid=%d wakeup: going back to sleep\n", self->threadId);
636 }
637 }
638
639done:
640 //if (wasInterrupted) {
641 // LOGW("threadid=%d: throwing InterruptedException:\n", self->threadId);
642 // dvmDumpThread(self, false);
643 //}
644
645 /*
646 * Put everything back. Again, we hold the pthread mutex, so the order
647 * here isn't significant.
648 */
649 self->waitMonitor = NULL;
650 mon->owner = self;
651 mon->waiting--;
652 mon->lockCount = prevLockCount;
653
654 /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
655 dvmChangeStatus(self, THREAD_RUNNING);
656
657 if (wasInterrupted) {
658 /*
659 * We were interrupted while waiting, or somebody interrupted an
660 * un-interruptable thread earlier and we're bailing out immediately.
661 *
662 * The doc sayeth: "The interrupted status of the current thread is
663 * cleared when this exception is thrown."
664 */
665 self->interrupted = false;
666 if (interruptShouldThrow)
667 dvmThrowException("Ljava/lang/InterruptedException;", NULL);
668 }
669}
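/*
 * As the comment above waitMonitor() notes, the spec allows "spurious
 * wakeups", so condition-variable waits are expected to be wrapped in a
 * loop that re-checks the predicate.  For illustration only, the usual
 * pthread idiom (assumed variable names, not VM code):
 *
 *   pthread_mutex_lock(&mutex);
 *   while (!conditionIsTrue)               // re-check after every wakeup
 *       pthread_cond_wait(&cond, &mutex);  // atomically releases the mutex
 *   // ... use the protected state ...
 *   pthread_mutex_unlock(&mutex);
 *
 * waitMonitor() itself has the same shape: it loops until it observes a
 * timeout, a notification it can consume, or an interrupt aimed at this
 * thread.
 */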
670
671/*
672 * Notify one thread waiting on this monitor.
673 */
674static void notifyMonitor(Thread* self, Monitor* mon)
675{
676 /* Make sure that the lock is fat and that we hold it. */
677 if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
678 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
679 "object not locked by thread before notify()");
680 return;
681 }
682
683 /*
684 * Check to see if anybody is there to notify. We subtract off
685 * threads that are being interrupted and anything that has
686 * potentially already been notified.
687 */
688 if (mon->notifying + mon->interrupting < mon->waiting) {
689 /* wake up one thread */
690 int cc;
691
692 LOGVV("threadid=%d: signaling on %p\n", self->threadId, mon);
693
694 mon->notifying++;
695 cc = pthread_cond_signal(&mon->cond);
696 assert(cc == 0);
697 } else {
698 LOGVV("threadid=%d: nobody to signal on %p\n", self->threadId, mon);
699 }
700}
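/*
 * A small worked example of the bookkeeping above (illustrative numbers):
 * with waiting = 3, notifying = 1, interrupting = 1, the test 1 + 1 < 3
 * passes, so one more waiter can usefully be signaled and "notifying"
 * becomes 2.  A further notify() then sees 2 + 1 == 3 and does nothing,
 * because every waiter is already spoken for -- either by a pending
 * notification or by a pending interrupt.
 */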
701
702/*
703 * Notify all threads waiting on this monitor.
704 *
705 * We keep a count of how many threads we notified, so that our various
706 * counts remain accurate.
707 */
708static void notifyAllMonitor(Thread* self, Monitor* mon)
709{
710 /* Make sure that the lock is fat and that we hold it. */
711 if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
712 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
713 "object not locked by thread before notifyAll()");
714 return;
715 }
716
717 mon->notifying = mon->waiting - mon->interrupting;
718 if (mon->notifying > 0) {
719 int cc;
720
721 LOGVV("threadid=%d: broadcasting to %d threads on %p\n",
722 self->threadId, mon->notifying, mon);
723
724 cc = pthread_cond_broadcast(&mon->cond);
725 assert(cc == 0);
726 } else {
727 LOGVV("threadid=%d: nobody to broadcast to on %p\n", self->threadId,mon);
728 }
729}
730
731#if THIN_LOCKING
732/*
733 * Thin locking support
734 */
735
736/*
737 * Implements monitorenter for "synchronized" stuff.
738 *
739 * This does not fail or throw an exception (unless deadlock prediction
740 * is enabled and set to "err" mode).
741 */
742void dvmLockObject(Thread* self, Object *obj)
743{
744 volatile u4 *thinp = &obj->lock.thin;
745 u4 threadId = self->threadId;
746
747 /* First, try to grab the lock as if it's thin;
748 * this is the common case and will usually succeed.
749 */
750 if (!ATOMIC_CMP_SWAP((int32_t *)thinp,
751 (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
752 (int32_t)threadId)) {
753 /* The lock is either a thin lock held by someone (possibly 'self'),
754 * or a fat lock.
755 */
756 if ((*thinp & 0xffff) == threadId) {
757 /* 'self' is already holding the thin lock; we can just
758 * bump the count. Atomic operations are not necessary
759 * because only the thread holding the lock is allowed
760 * to modify the Lock field.
761 */
762 *thinp += 1<<16;
763 } else {
764 /* If this is a thin lock we need to spin on it, if it's fat
765 * we need to acquire the monitor.
766 */
767 if ((*thinp & 1) != 0) {
768 ThreadStatus oldStatus;
769 static const unsigned long maxSleepDelay = 1 * 1024 * 1024;
770 unsigned long sleepDelay;
771
772 LOG_THIN("(%d) spin on lock 0x%08x: 0x%08x (0x%08x) 0x%08x\n",
773 threadId, (uint)&obj->lock,
774 DVM_LOCK_INITIAL_THIN_VALUE, *thinp, threadId);
775
776 /* The lock is still thin, but some other thread is
777 * holding it. Let the VM know that we're about
778 * to wait on another thread.
779 */
780 oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
781
782 /* Spin until the other thread lets go.
783 */
784 sleepDelay = 0;
785 do {
786 /* In addition to looking for an unlock,
787 * we need to watch out for some other thread
788 * fattening the lock behind our back.
789 */
790 while (*thinp != DVM_LOCK_INITIAL_THIN_VALUE) {
791 if ((*thinp & 1) == 0) {
792 /* The lock has been fattened already.
793 */
794 LOG_THIN("(%d) lock 0x%08x surprise-fattened\n",
795 threadId, (uint)&obj->lock);
796 dvmChangeStatus(self, oldStatus);
797 goto fat_lock;
798 }
799
800 if (sleepDelay == 0) {
801 sched_yield();
802 sleepDelay = 1 * 1000;
803 } else {
804 usleep(sleepDelay);
805 if (sleepDelay < maxSleepDelay / 2) {
806 sleepDelay *= 2;
807 }
808 }
809 }
810 } while (!ATOMIC_CMP_SWAP((int32_t *)thinp,
811 (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
812 (int32_t)threadId));
813 LOG_THIN("(%d) spin on lock done 0x%08x: "
814 "0x%08x (0x%08x) 0x%08x\n",
815 threadId, (uint)&obj->lock,
816 DVM_LOCK_INITIAL_THIN_VALUE, *thinp, threadId);
817
818 /* We've got the thin lock; let the VM know that we're
819 * done waiting.
820 */
821 dvmChangeStatus(self, oldStatus);
822
823 /* Fatten the lock. Note this relinquishes ownership.
824 * We could also create the monitor in an "owned" state
825 * to avoid "re-locking" it in fat_lock.
826 */
827 obj->lock.mon = dvmCreateMonitor(obj);
828 LOG_THIN("(%d) lock 0x%08x fattened\n",
829 threadId, (uint)&obj->lock);
830
831 /* Fall through to acquire the newly fat lock.
832 */
833 }
834
835 /* The lock is already fat, which means
836 * that obj->lock.mon is a regular (Monitor *).
837 */
838 fat_lock:
839 assert(obj->lock.mon != NULL);
840 lockMonitor(self, obj->lock.mon);
841 }
842 }
843 // else, the lock was acquired with the ATOMIC_CMP_SWAP().
844
845#ifdef WITH_DEADLOCK_PREDICTION
846 /*
847 * See if we were allowed to grab the lock at this time. We do it
848 * *after* acquiring the lock, rather than before, so that we can
849 * freely update the Monitor struct. This seems counter-intuitive,
850 * but our goal is deadlock *prediction* not deadlock *prevention*.
851 * (If we actually deadlock, the situation is easy to diagnose from
852 * a thread dump, so there's no point making a special effort to do
853 * the checks before the lock is held.)
854 *
855 * This needs to happen before we add the object to the thread's
856 * monitor list, so we can tell the difference between first-lock and
857 * re-lock.
858 *
859 * It's also important that we do this while in THREAD_RUNNING, so
860 * that we don't interfere with cleanup operations in the GC.
861 */
862 if (gDvm.deadlockPredictMode != kDPOff) {
863 if (self->status != THREAD_RUNNING) {
864 LOGE("Bad thread status (%d) in DP\n", self->status);
865 dvmDumpThread(self, false);
866 dvmAbort();
867 }
868 assert(!dvmCheckException(self));
869 updateDeadlockPrediction(self, obj);
870 if (dvmCheckException(self)) {
871 /*
872 * If we're throwing an exception here, we need to free the
873 * lock. We add the object to the thread's monitor list so the
874 * "unlock" code can remove it.
875 */
876 dvmAddToMonitorList(self, obj, false);
877 dvmUnlockObject(self, obj);
878 LOGV("--- unlocked, pending is '%s'\n",
879 dvmGetException(self)->clazz->descriptor);
880 }
881 }
882
883 /*
884 * Add the locked object, and the current stack trace, to the list
885 * held by the Thread object. If deadlock prediction isn't on,
886 * don't capture the stack trace.
887 */
888 dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
889#elif defined(WITH_MONITOR_TRACKING)
890 /*
891 * Add the locked object to the list held by the Thread object.
892 */
893 dvmAddToMonitorList(self, obj, false);
894#endif
895}
896
897/*
898 * Implements monitorexit for "synchronized" stuff.
899 *
900 * On failure, throws an exception and returns "false".
901 */
902bool dvmUnlockObject(Thread* self, Object *obj)
903{
904 volatile u4 *thinp = &obj->lock.thin;
905 u4 threadId = self->threadId;
906
907 /* Check the common case, where 'self' has locked 'obj' once, first.
908 */
909 if (*thinp == threadId) {
910 /* Unlock 'obj' by clearing our threadId from 'thin'.
911 * The lock protects the lock field itself, so it's
912 * safe to update non-atomically.
913 */
914 *thinp = DVM_LOCK_INITIAL_THIN_VALUE;
915 } else if ((*thinp & 1) != 0) {
916 /* If the object is locked, it had better be locked by us.
917 */
918 if ((*thinp & 0xffff) != threadId) {
919 /* The JNI spec says that we should throw an exception
920 * in this case.
921 */
922 //LOGW("Unlock thin %p: id %d vs %d\n",
 //  obj, (*thinp & 0xffff), threadId);
924 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
925 "unlock of unowned monitor");
926 return false;
927 }
928
929 /* It's a thin lock, but 'self' has locked 'obj'
930 * more than once. Decrement the count.
931 */
932 *thinp -= 1<<16;
933 } else {
934 /* It's a fat lock.
935 */
936 assert(obj->lock.mon != NULL);
937 if (!unlockMonitor(self, obj->lock.mon)) {
938 /* exception has been raised */
939 return false;
940 }
941 }
942
943#ifdef WITH_MONITOR_TRACKING
944 /*
945 * Remove the object from the Thread's list.
946 */
947 dvmRemoveFromMonitorList(self, obj);
948#endif
949
950 return true;
951}
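/*
 * A worked example of the thin-lock word across the two functions above
 * (illustrative thread id; recall that thread ids always have bit 0 set):
 *
 *   initial value       0x00000001   (DVM_LOCK_INITIAL_THIN_VALUE, unowned)
 *   dvmLockObject()     0x00000015   (CAS succeeded; owned by thread 0x15)
 *   dvmLockObject()     0x00010015   (recursive; count lives in bits 31..16)
 *   dvmUnlockObject()   0x00000015   (count decremented)
 *   dvmUnlockObject()   0x00000001   (back to unowned)
 *
 * If the lock had been fattened anywhere along the way, bit 0 would be
 * clear and the word would instead be treated as a (Monitor *).
 */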
952
953/*
954 * Object.wait(). Also called for class init.
955 */
956void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
957 bool interruptShouldThrow)
958{
959 Monitor* mon = obj->lock.mon;
960 u4 thin = obj->lock.thin;
961
962 /* If the lock is still thin, we need to fatten it.
963 */
964 if ((thin & 1) != 0) {
965 /* Make sure that 'self' holds the lock.
966 */
967 if ((thin & 0xffff) != self->threadId) {
968 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
969 "object not locked by thread before wait()");
970 return;
971 }
972
973 /* This thread holds the lock. We need to fatten the lock
974 * so 'self' can block on it. Don't update the object lock
975 * field yet, because 'self' needs to acquire the lock before
976 * any other thread gets a chance.
977 */
978 mon = dvmCreateMonitor(obj);
979
980 /* 'self' has actually locked the object one or more times;
981 * make sure that the monitor reflects this.
982 */
983 lockMonitor(self, mon);
984 mon->lockCount = thin >> 16;
985 LOG_THIN("(%d) lock 0x%08x fattened by wait() to count %d\n",
986 self->threadId, (uint)&obj->lock, mon->lockCount);
987
988 /* Make the monitor public now that it's in the right state.
989 */
990 obj->lock.mon = mon;
991 }
992
993 waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
994}
995
996/*
997 * Object.notify().
998 */
999void dvmObjectNotify(Thread* self, Object *obj)
1000{
1001 Monitor* mon = obj->lock.mon;
1002 u4 thin = obj->lock.thin;
1003
1004 /* If the lock is still thin, there aren't any waiters;
1005 * waiting on an object forces lock fattening.
1006 */
1007 if ((thin & 1) != 0) {
1008 /* Make sure that 'self' holds the lock.
1009 */
1010 if ((thin & 0xffff) != self->threadId) {
1011 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1012 "object not locked by thread before notify()");
1013 return;
1014 }
1015
1016 /* no-op; there are no waiters to notify.
1017 */
1018 } else {
1019 /* It's a fat lock.
1020 */
1021 notifyMonitor(self, mon);
1022 }
1023}
1024
1025/*
1026 * Object.notifyAll().
1027 */
1028void dvmObjectNotifyAll(Thread* self, Object *obj)
1029{
1030 u4 thin = obj->lock.thin;
1031
1032 /* If the lock is still thin, there aren't any waiters;
1033 * waiting on an object forces lock fattening.
1034 */
1035 if ((thin & 1) != 0) {
1036 /* Make sure that 'self' holds the lock.
1037 */
1038 if ((thin & 0xffff) != self->threadId) {
1039 dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
1040 "object not locked by thread before notifyAll()");
1041 return;
1042 }
1043
1044 /* no-op; there are no waiters to notify.
1045 */
1046 } else {
1047 Monitor* mon = obj->lock.mon;
1048
1049 /* It's a fat lock.
1050 */
1051 notifyAllMonitor(self, mon);
1052 }
1053}
1054
1055#else // not THIN_LOCKING
1056
1057/*
1058 * Implements monitorenter for "synchronized" stuff.
1059 *
1060 * This does not fail or throw an exception.
1061 */
1062void dvmLockObject(Thread* self, Object* obj)
1063{
1064 Monitor* mon = obj->lock.mon;
1065
1066 if (mon == NULL) {
1067 mon = dvmCreateMonitor(obj);
1068 if (!ATOMIC_CMP_SWAP((int32_t *)&obj->lock.mon,
1069 (int32_t)NULL, (int32_t)mon)) {
1070 /* somebody else beat us to it */
1071 releaseMonitor(mon);
1072 mon = obj->lock.mon;
1073 }
1074 }
1075
1076 lockMonitor(self, mon);
1077}
1078
1079/*
1080 * Implements monitorexit for "synchronized" stuff.
1081 */
1082bool dvmUnlockObject(Thread* self, Object* obj)
1083{
1084 Monitor* mon = obj->lock.mon;
1085
1086 return unlockMonitor(self, mon);
1087}
1088
1089
1090/*
1091 * Object.wait().
1092 */
1093void dvmObjectWait(Thread* self, Object* obj, u8 msec, u4 nsec)
1094{
1095 Monitor* mon = obj->lock.mon;
1096
1097 waitMonitor(self, mon, msec, nsec);
1098}
1099
1100/*
1101 * Object.notify().
1102 */
1103void dvmObjectNotify(Thread* self, Object* obj)
1104{
1105 Monitor* mon = obj->lock.mon;
1106
1107 notifyMonitor(self, mon);
1108}
1109
1110/*
1111 * Object.notifyAll().
1112 */
1113void dvmObjectNotifyAll(Thread* self, Object* obj)
1114{
1115 Monitor* mon = obj->lock.mon;
1116
1117 notifyAllMonitor(self, mon);
1118}
1119
1120#endif // not THIN_LOCKING
1121
1122
1123/*
1124 * This implements java.lang.Thread.sleep(long msec, int nsec).
1125 *
1126 * The sleep is interruptible by other threads, which means we can't just
1127 * plop into an OS sleep call. (We probably could if we wanted to send
1128 * signals around and rely on EINTR, but that's inefficient and relies
1129 * on native code respecting our signal mask.)
1130 *
1131 * We have to do all of this stuff for Object.wait() as well, so it's
1132 * easiest to just sleep on a private Monitor.
1133 *
1134 * It appears that we want sleep(0,0) to go through the motions of sleeping
1135 * for a very short duration, rather than just returning.
1136 */
1137void dvmThreadSleep(u8 msec, u4 nsec)
1138{
1139 Thread* self = dvmThreadSelf();
1140 Monitor* mon = gDvm.threadSleepMon;
1141
1142 /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
1143 if (msec == 0 && nsec == 0)
1144 nsec++;
1145
1146 lockMonitor(self, mon);
1147 waitMonitor(self, mon, msec, nsec, true);
1148 unlockMonitor(self, mon);
1149}
1150
1151/*
1152 * Implement java.lang.Thread.interrupt().
1153 *
1154 * We need to increment the monitor's "interrupting" count, and set the
1155 * interrupted status for the thread in question. Doing so requires
1156 * gaining the monitor's lock, which may not happen in a timely fashion.
1157 * We are left with a decision between failing to interrupt the thread
1158 * and stalling the interrupting thread.
1159 *
1160 * We must take some care to ensure that we don't try to interrupt the same
1161 * thread on the same mutex twice. Doing so would leave us with an
1162 * incorrect value for Monitor.interrupting.
1163 */
1164void dvmThreadInterrupt(Thread* thread)
1165{
1166 static const int kMaxRetries = 4;
1167 Monitor* mon;
1168 Thread* self;
1169 int retry;
1170
1171 /*
1172 * Raise the "interrupted" flag. This will cause it to bail early out
1173 * of the next wait() attempt, if it's not currently waiting on
1174 * something.
1175 */
1176 thread->interrupted = true;
1177 MEM_BARRIER();
1178
1179 /*
1180 * Is the thread waiting?
1181 *
1182 * Note that fat vs. thin doesn't matter here; waitMonitor
1183 * is only set when a thread actually waits on a monitor,
1184 * which implies that the monitor has already been fattened.
1185 */
1186 mon = thread->waitMonitor;
1187 if (mon == NULL)
1188 return;
1189
1190 /*
1191 * Try to acquire the monitor, if we don't already own it.
1192 * We need to hold the same mutex as the thread in order
1193 * to signal the condition it's waiting on.
1194 *
1195 * TODO: we may be able to get rid of the explicit lock by coordinating
1196 * this more closely with waitMonitor.
1197 */
1198 self = dvmThreadSelf();
1199 retry = 0;
1200 do {
1201 if (tryLockMonitor(self, mon))
1202 goto gotit;
1203
1204 /* wait a bit */
1205 sched_yield();
1206
1207 /*
1208 * If they've moved on to a different monitor, the "interrupted"
1209 * flag we set earlier will have taken care of things, so we don't
1210 * want to continue.
1211 */
1212 if (thread->waitMonitor != mon)
1213 return;
1214 } while (retry++ < kMaxRetries);
1215
1216 LOGW("threadid=%d: unable to interrupt threadid=%d\n",
1217 self->threadId, thread->threadId);
1218 return;
1219
1220gotit:
1221 /*
1222 * We've got the monitor lock, which means nobody can be added or
1223 * removed from the wait list. This also means that the Thread's
1224 * waitMonitor/interruptingWait fields can't be modified by anyone
1225 * else.
1226 *
1227 * If things look good, raise flags and wake the threads sleeping
1228 * on the monitor's condition variable.
1229 */
1230 if (thread->waitMonitor == mon && // still on same monitor?
1231 thread->interrupted && // interrupt still pending?
1232 !thread->interruptingWait) // nobody else is interrupting too?
1233 {
1234 int cc;
1235
1236 LOGVV("threadid=%d: interrupting threadid=%d waiting on %p\n",
1237 self->threadId, thread->threadId, mon);
1238
1239 thread->interruptingWait = true; // prevent re-interrupt...
1240 mon->interrupting++; // ...so we only do this once
1241 cc = pthread_cond_broadcast(&mon->cond);
1242 assert(cc == 0);
1243 }
1244
1245 unlockMonitor(self, mon);
1246}
1247
1248
1249#ifdef WITH_DEADLOCK_PREDICTION
1250/*
1251 * ===========================================================================
1252 * Deadlock prediction
1253 * ===========================================================================
1254 */
1255/*
1256The idea is to predict the possibility of deadlock by recording the order
1257in which monitors are acquired. If we see an attempt to acquire a lock
1258out of order, we can identify the locks and offending code.
1259
1260To make this work, we need to keep track of the locks held by each thread,
1261and create history trees for each lock. When a thread tries to acquire
1262a new lock, we walk through the "history children" of the lock, looking
1263for a match with locks the thread already holds. If we find a match,
1264it means the thread has made a request that could result in a deadlock.
1265
1266To support recursive locks, we always allow re-locking a currently-held
1267lock, and maintain a recursion depth count.
1268
1269An ASCII-art example, where letters represent Objects:
1270
1271 A
1272 /|\
1273 / | \
1274 B | D
1275 \ |
1276 \|
1277 C
1278
1279The above is the tree we'd have after handling Object synchronization
1280sequences "ABC", "AC", "AD". A has three children, {B, C, D}. C is also
1281a child of B. (The lines represent pointers between parent and child.
1282Every node can have multiple parents and multiple children.)
1283
1284If we hold AC, and want to lock B, we recursively search through B's
1285children to see if A or C appears. It does, so we reject the attempt.
1286(A straightforward way to implement it: add a link from C to B, then
1287determine whether the graph starting at B contains a cycle.)
1288
1289If we hold AC and want to lock D, we would succeed, creating a new link
1290from C to D.
1291
1292The lock history and a stack trace is attached to the Object's Monitor
1293struct, which means we need to fatten every Object we lock (thin locking
1294is effectively disabled). If we don't need the stack trace we can
1295avoid fattening the leaf nodes, only fattening objects that need to hold
1296history trees.
1297
1298Updates to Monitor structs are only allowed for the thread that holds
1299the Monitor, so we actually do most of our deadlock prediction work after
1300the lock has been acquired.
1301
1302When an object with a monitor is GCed, we need to remove it from the
1303history trees. There are two basic approaches:
 (1) Run through the entire set of known monitors, search all child
1305 lists for the object in question. This is rather slow, resulting
1306 in GC passes that take upwards of 10 seconds to complete.
1307 (2) Maintain "parent" pointers in each node. Remove the entries as
1308 required. This requires additional storage and maintenance for
1309 every operation, but is significantly faster at GC time.
1310For each GCed object, we merge all of the object's children into each of
1311the object's parents.
1312*/
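/*
 * For illustration only -- a minimal, self-contained sketch of the cycle
 * check described above (the VM's real version is traverseTree(), below):
 * after tentatively linking the most-recently-held lock to the newly
 * requested one, walk the graph depth-first with a "visiting" mark and
 * report a cycle if the walk ever re-enters a marked node.
 *
 *   struct SketchNode {
 *       struct SketchNode** children;
 *       int childCount;
 *       bool mark;
 *   };
 *
 *   static bool sketchHasCycle(struct SketchNode* node)
 *   {
 *       int i;
 *       if (node->mark)
 *           return true;                     // re-entered a node: cycle
 *       node->mark = true;
 *       for (i = 0; i < node->childCount; i++) {
 *           if (sketchHasCycle(node->children[i])) {
 *               node->mark = false;
 *               return true;
 *           }
 *       }
 *       node->mark = false;                  // clear the mark on the way out
 *       return false;
 *   }
 */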
1313
1314#if !defined(WITH_MONITOR_TRACKING)
1315# error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
1316#endif
1317
1318/*
1319 * Clear out the contents of an ExpandingObjectList, freeing any
1320 * dynamic allocations.
1321 */
1322static void expandObjClear(ExpandingObjectList* pList)
1323{
1324 if (pList->list != NULL) {
1325 free(pList->list);
1326 pList->list = NULL;
1327 }
1328 pList->alloc = pList->count = 0;
1329}
1330
1331/*
1332 * Get the number of objects currently stored in the list.
1333 */
1334static inline int expandBufGetCount(const ExpandingObjectList* pList)
1335{
1336 return pList->count;
1337}
1338
1339/*
1340 * Get the Nth entry from the list.
1341 */
1342static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
1343 int i)
1344{
1345 return pList->list[i];
1346}
1347
1348/*
1349 * Add a new entry to the list.
1350 *
1351 * We don't check for or try to enforce uniqueness. It's expected that
1352 * the higher-level code does this for us.
1353 */
1354static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
1355{
1356 if (pList->count == pList->alloc) {
1357 /* time to expand */
1358 Object** newList;
1359
1360 if (pList->alloc == 0)
1361 pList->alloc = 4;
1362 else
1363 pList->alloc *= 2;
1364 LOGVV("expanding %p to %d\n", pList, pList->alloc);
1365 newList = realloc(pList->list, pList->alloc * sizeof(Object*));
1366 if (newList == NULL) {
1367 LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
1368 dvmAbort();
1369 }
1370 pList->list = newList;
1371 }
1372
1373 pList->list[pList->count++] = obj;
1374}
1375
1376/*
1377 * Returns "true" if the element was successfully removed.
1378 */
1379static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
1380{
1381 int i;
1382
1383 for (i = pList->count-1; i >= 0; i--) {
1384 if (pList->list[i] == obj)
1385 break;
1386 }
1387 if (i < 0)
1388 return false;
1389
1390 if (i != pList->count-1) {
1391 /*
1392 * The order of elements is not important, so we just copy the
1393 * last entry into the new slot.
1394 */
1395 //memmove(&pList->list[i], &pList->list[i+1],
1396 // (pList->count-1 - i) * sizeof(pList->list[0]));
1397 pList->list[i] = pList->list[pList->count-1];
1398 }
1399
1400 pList->count--;
1401 pList->list[pList->count] = (Object*) 0xdecadead;
1402 return true;
1403}
1404
1405/*
1406 * Returns "true" if "obj" appears in the list.
1407 */
1408static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
1409{
1410 int i;
1411
1412 for (i = 0; i < pList->count; i++) {
1413 if (pList->list[i] == obj)
1414 return true;
1415 }
1416 return false;
1417}
1418
1419/*
1420 * Print the list contents to stdout. For debugging.
1421 */
1422static void expandObjDump(const ExpandingObjectList* pList)
1423{
1424 int i;
1425 for (i = 0; i < pList->count; i++)
1426 printf(" %p", pList->list[i]);
1427}
1428
1429/*
1430 * Check for duplicate entries. Returns the index of the first instance
1431 * of the duplicated value, or -1 if no duplicates were found.
1432 */
1433static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
1434{
1435 int i, j;
1436 for (i = 0; i < pList->count-1; i++) {
1437 for (j = i + 1; j < pList->count; j++) {
1438 if (pList->list[i] == pList->list[j]) {
1439 return i;
1440 }
1441 }
1442 }
1443
1444 return -1;
1445}
1446
1447
1448/*
1449 * Determine whether "child" appears in the list of objects associated
1450 * with the Monitor in "parent". If "parent" is a thin lock, we return
1451 * false immediately.
1452 */
1453static bool objectInChildList(const Object* parent, Object* child)
1454{
1455 Lock lock = parent->lock;
1456 if (!IS_LOCK_FAT(&lock)) {
1457 //LOGI("on thin\n");
1458 return false;
1459 }
1460
1461 return expandObjHas(&lock.mon->historyChildren, child);
1462}
1463
1464/*
1465 * Print the child list.
1466 */
1467static void dumpKids(Object* parent)
1468{
1469 Monitor* mon = parent->lock.mon;
1470
1471 printf("Children of %p:", parent);
1472 expandObjDump(&mon->historyChildren);
1473 printf("\n");
1474}
1475
1476/*
1477 * Add "child" to the list of children in "parent", and add "parent" to
1478 * the list of parents in "child".
1479 */
1480static void linkParentToChild(Object* parent, Object* child)
1481{
1482 //assert(parent->lock.mon->owner == dvmThreadSelf()); // !owned for merge
1483 assert(IS_LOCK_FAT(&parent->lock));
1484 assert(IS_LOCK_FAT(&child->lock));
1485 assert(parent != child);
1486 Monitor* mon;
1487
1488 mon = parent->lock.mon;
1489 assert(!expandObjHas(&mon->historyChildren, child));
1490 expandObjAddEntry(&mon->historyChildren, child);
1491
1492 mon = child->lock.mon;
1493 assert(!expandObjHas(&mon->historyParents, parent));
1494 expandObjAddEntry(&mon->historyParents, parent);
1495}
1496
1497
1498/*
1499 * Remove "child" from the list of children in "parent".
1500 */
1501static void unlinkParentFromChild(Object* parent, Object* child)
1502{
1503 //assert(parent->lock.mon->owner == dvmThreadSelf()); // !owned for GC
1504 assert(IS_LOCK_FAT(&parent->lock));
1505 assert(IS_LOCK_FAT(&child->lock));
1506 assert(parent != child);
1507 Monitor* mon;
1508
1509 mon = parent->lock.mon;
1510 if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
1511 LOGW("WARNING: child %p not found in parent %p\n", child, parent);
1512 }
1513 assert(!expandObjHas(&mon->historyChildren, child));
1514 assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);
1515
1516 mon = child->lock.mon;
1517 if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
1518 LOGW("WARNING: parent %p not found in child %p\n", parent, child);
1519 }
1520 assert(!expandObjHas(&mon->historyParents, parent));
1521 assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
1522}
1523
1524
1525/*
1526 * Log the monitors held by the current thread. This is done as part of
1527 * flagging an error.
1528 */
1529static void logHeldMonitors(Thread* self)
1530{
1531 char* name = NULL;
1532
1533 name = dvmGetThreadName(self);
1534 LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
1535 self->threadId, name);
1536 LOGW("(most-recently-acquired on top):\n");
1537 free(name);
1538
1539 LockedObjectData* lod = self->pLockedObjects;
1540 while (lod != NULL) {
1541 LOGW("--- object %p[%d] (%s)\n",
1542 lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
1543 dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);
1544
1545 lod = lod->next;
1546 }
1547}
1548
1549/*
1550 * Recursively traverse the object hierarchy starting at "obj". We mark
1551 * ourselves on entry and clear the mark on exit. If we ever encounter
1552 * a marked object, we have a cycle.
1553 *
1554 * Returns "true" if all is well, "false" if we found a cycle.
1555 */
1556static bool traverseTree(Thread* self, const Object* obj)
1557{
1558 assert(IS_LOCK_FAT(&obj->lock));
1559 Monitor* mon = obj->lock.mon;
1560
1561 /*
1562 * Have we been here before?
1563 */
1564 if (mon->historyMark) {
1565 int* rawStackTrace;
1566 int stackDepth;
1567
1568 LOGW("%s\n", kStartBanner);
1569 LOGW("Illegal lock attempt:\n");
1570 LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
1571
1572 rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
1573 dvmLogRawStackTrace(rawStackTrace, stackDepth);
1574 free(rawStackTrace);
1575
1576 LOGW(" ");
1577 logHeldMonitors(self);
1578
1579 LOGW(" ");
1580 LOGW("Earlier, the following lock order (from last to first) was\n");
1581 LOGW("established -- stack trace is from first successful lock):\n");
1582 return false;
1583 }
1584 mon->historyMark = true;
1585
1586 /*
1587 * Examine the children. We do NOT hold these locks, so they might
1588 * very well transition from thin to fat or change ownership while
1589 * we work.
1590 *
1591 * NOTE: we rely on the fact that they cannot revert from fat to thin
1592 * while we work. This is currently a safe assumption.
1593 *
1594 * We can safely ignore thin-locked children, because by definition
1595 * they have no history and are leaf nodes. In the current
1596 * implementation we always fatten the locks to provide a place to
1597 * hang the stack trace.
1598 */
1599 ExpandingObjectList* pList = &mon->historyChildren;
1600 int i;
1601 for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
1602 const Object* child = expandBufGetEntry(pList, i);
1603 Lock lock = child->lock;
1604 if (!IS_LOCK_FAT(&lock))
1605 continue;
1606 if (!traverseTree(self, child)) {
1607 LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
1608 dvmLogRawStackTrace(mon->historyRawStackTrace,
1609 mon->historyStackDepth);
1610 mon->historyMark = false;
1611 return false;
1612 }
1613 }
1614
1615 mon->historyMark = false;
1616
1617 return true;
1618}
1619
1620/*
1621 * Update the deadlock prediction tree, based on the current thread
1622 * acquiring "acqObj". This must be called before the object is added to
1623 * the thread's list of held monitors.
1624 *
1625 * If the thread already holds the lock (recursion), or this is a known
1626 * lock configuration, we return without doing anything. Otherwise, we add
1627 * a link from the most-recently-acquired lock in this thread to "acqObj"
1628 * after ensuring that the parent lock is "fat".
1629 *
1630 * This MUST NOT be called while a GC is in progress in another thread,
1631 * because we assume exclusive access to history trees in owned monitors.
1632 */
1633static void updateDeadlockPrediction(Thread* self, Object* acqObj)
1634{
1635 LockedObjectData* lod;
1636 LockedObjectData* mrl;
1637
1638 /*
1639 * Quick check for recursive access.
1640 */
1641 lod = dvmFindInMonitorList(self, acqObj);
1642 if (lod != NULL) {
1643 LOGV("+++ DP: recursive %p\n", acqObj);
1644 return;
1645 }
1646
1647 /*
1648 * Make the newly-acquired object's monitor "fat". In some ways this
1649 * isn't strictly necessary, but we need the GC to tell us when
1650 * "interesting" objects go away, and right now the only way to make
1651 * an object look interesting is to give it a monitor.
1652 *
1653 * This also gives us a place to hang a stack trace.
1654 *
1655 * Our thread holds the lock, so we're allowed to rewrite the lock
1656 * without worrying that something will change out from under us.
1657 */
1658 if (!IS_LOCK_FAT(&acqObj->lock)) {
1659 LOGVV("fattening lockee %p (recur=%d)\n",
1660 acqObj, acqObj->lock.thin >> 16);
1661 Monitor* newMon = dvmCreateMonitor(acqObj);
1662 lockMonitor(self, newMon); // can't stall, don't need VMWAIT
1663 newMon->lockCount += acqObj->lock.thin >> 16;
1664 acqObj->lock.mon = newMon;
1665 }
1666
1667 /* if we don't have a stack trace for this monitor, establish one */
1668 if (acqObj->lock.mon->historyRawStackTrace == NULL) {
1669 Monitor* mon = acqObj->lock.mon;
1670 mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
1671 &mon->historyStackDepth);
1672 }
1673
1674 /*
1675 * We need to examine and perhaps modify the most-recently-locked
1676 * monitor. We own that, so there's no risk of another thread
1677 * stepping on us.
1678 *
1679 * Retrieve the most-recently-locked entry from our thread.
1680 */
1681 mrl = self->pLockedObjects;
1682 if (mrl == NULL)
1683 return; /* no other locks held */
1684
1685 /*
1686 * Do a quick check to see if "acqObj" is a direct descendant. We can do
1687 * this without holding the global lock because of our assertion that
1688 * a GC is not running in parallel -- nobody except the GC can
1689 * modify a history list in a Monitor they don't own, and we own "mrl".
 * (There might be concurrent *reads*, but no concurrent *writes*.)
1691 *
1692 * If we find it, this is a known good configuration, and we're done.
1693 */
1694 if (objectInChildList(mrl->obj, acqObj))
1695 return;
1696
1697 /*
1698 * "mrl" is going to need to have a history tree. If it's currently
1699 * a thin lock, we make it fat now. The thin lock might have a
1700 * nonzero recursive lock count, which we need to carry over.
1701 *
1702 * Our thread holds the lock, so we're allowed to rewrite the lock
1703 * without worrying that something will change out from under us.
1704 */
1705 if (!IS_LOCK_FAT(&mrl->obj->lock)) {
1706 LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
1707 mrl->obj, acqObj, mrl->obj->lock.thin >> 16);
1708 Monitor* newMon = dvmCreateMonitor(mrl->obj);
1709 lockMonitor(self, newMon); // can't stall, don't need VMWAIT
1710 newMon->lockCount += mrl->obj->lock.thin >> 16;
1711 mrl->obj->lock.mon = newMon;
1712 }
1713
1714 /*
1715 * We haven't seen this configuration before. We need to scan down
1716 * acqObj's tree to see if any of the monitors in self->pLockedObjects
1717 * appear. We grab a global lock before traversing or updating the
1718 * history list.
1719 *
1720 * If we find a match for any of our held locks, we know that the lock
1721 * has previously been acquired *after* acqObj, and we throw an error.
1722 *
1723 * The easiest way to do this is to create a link from "mrl" to "acqObj"
1724 * and do a recursive traversal, marking nodes as we cross them. If
1725 * we cross one a second time, we have a cycle and can throw an error.
1726 * (We do the flag-clearing traversal before adding the new link, so
1727 * that we're guaranteed to terminate.)
1728 *
1729 * If "acqObj" is a thin lock, it has no history, and we can create a
1730 * link to it without additional checks. [ We now guarantee that it's
1731 * always fat. ]
1732 */
1733 bool failed = false;
1734 dvmLockMutex(&gDvm.deadlockHistoryLock);
1735 linkParentToChild(mrl->obj, acqObj);
1736 if (!traverseTree(self, acqObj)) {
1737 LOGW("%s\n", kEndBanner);
1738 failed = true;
1739
1740 /* remove the entry so we're still okay when in "warning" mode */
1741 unlinkParentFromChild(mrl->obj, acqObj);
1742 }
1743 dvmUnlockMutex(&gDvm.deadlockHistoryLock);
1744
1745 if (failed) {
1746 switch (gDvm.deadlockPredictMode) {
1747 case kDPErr:
1748 dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
1749 break;
1750 case kDPAbort:
1751 LOGE("Aborting due to potential deadlock\n");
1752 dvmAbort();
1753 break;
1754 default:
1755 /* warn only */
1756 break;
1757 }
1758 }
1759}
1760
1761/*
1762 * We're removing "child" from existence. We want to pull all of
1763 * child's children into "parent", filtering out duplicates. This is
1764 * called during the GC.
1765 *
1766 * This does not modify "child", which might have multiple parents.
1767 */
1768static void mergeChildren(Object* parent, const Object* child)
1769{
1770 Monitor* mon;
1771 int i;
1772
1773 assert(IS_LOCK_FAT(&child->lock));
1774 mon = child->lock.mon;
1775 ExpandingObjectList* pList = &mon->historyChildren;
1776
1777 for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
1778 Object* grandChild = expandBufGetEntry(pList, i);
1779
1780 if (!objectInChildList(parent, grandChild)) {
1781 LOGVV("+++ migrating %p link to %p\n", grandChild, parent);
1782 linkParentToChild(parent, grandChild);
1783 } else {
1784 LOGVV("+++ parent %p already links to %p\n", parent, grandChild);
1785 }
1786 }
1787}
1788
1789/*
1790 * An object with a fat lock is being collected during a GC pass. We
1791 * want to remove it from any lock history trees that it is a part of.
1792 *
1793 * This may require updating the history trees in several monitors. The
1794 * monitor semantics guarantee that no other thread will be accessing
1795 * the history trees at the same time.
1796 */
1797static void removeCollectedObject(Object* obj)
1798{
1799 Monitor* mon;
1800
1801 LOGVV("+++ collecting %p\n", obj);
1802
1803#if 0
1804 /*
1805 * We're currently running through the entire set of known monitors.
1806 * This can be somewhat slow. We may want to keep lists of parents
1807 * in each child to speed up GC.
1808 */
1809 mon = gDvm.monitorList;
1810 while (mon != NULL) {
1811 Object* parent = mon->obj;
1812 if (parent != NULL) { /* value nulled for deleted entries */
1813 if (objectInChildList(parent, obj)) {
1814 LOGVV("removing child %p from parent %p\n", obj, parent);
1815 unlinkParentFromChild(parent, obj);
1816 mergeChildren(parent, obj);
1817 }
1818 }
1819 mon = mon->next;
1820 }
1821#endif
1822
1823 /*
1824 * For every parent of this object:
1825 * - merge all of our children into the parent's child list (creates
1826 * a two-way link between parent and child)
1827 * - remove ourselves from the parent's child list
1828 */
1829 ExpandingObjectList* pList;
1830 int i;
1831
1832 assert(IS_LOCK_FAT(&obj->lock));
1833 mon = obj->lock.mon;
1834 pList = &mon->historyParents;
1835 for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
1836 Object* parent = expandBufGetEntry(pList, i);
1837 Monitor* parentMon = parent->lock.mon;
1838
1839 if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
1840 LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
1841 }
1842 assert(!expandObjHas(&parentMon->historyChildren, obj));
1843
1844 mergeChildren(parent, obj);
1845 }
1846
1847 /*
1848 * For every child of this object:
1849 * - remove ourselves from the child's parent list
1850 */
1851 pList = &mon->historyChildren;
1852 for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
1853 Object* child = expandBufGetEntry(pList, i);
1854 Monitor* childMon = child->lock.mon;
1855
1856 if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
1857 LOGW("WARNING: parent %p not found in child %p\n", obj, child);
1858 }
1859 assert(!expandObjHas(&childMon->historyParents, obj));
1860 }
1861}
1862
1863#endif /*WITH_DEADLOCK_PREDICTION*/
1864