/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Fundamental synchronization mechanisms.
 *
 * The top part of the file has operations on "monitor" structs; the
 * next part has the native calls on objects.
 *
 * The current implementation uses "thin locking" to avoid allocating
 * an Object's full Monitor struct until absolutely necessary (i.e.,
 * during contention or a call to wait()).
 *
 * TODO: make improvements to thin locking
 * We may be able to improve performance and reduce memory requirements by:
 *  - reverting to a thin lock once the Monitor is no longer necessary
 *  - using a pool of monitor objects, with some sort of recycling scheme
 *
 * TODO: recycle native-level monitors when objects are garbage collected.
 *
 * NOTE: if we broadcast a notify, and somebody sneaks in a Thread.interrupt
 * before the notify finishes (i.e. before all threads sleeping on the
 * condition variable have awoken), we could end up with a nonzero value for
 * "notifying" after everybody is gone, because one of the notified threads
 * will actually exit via the "interrupted" path. This can be detected as
 * (notifying + interrupting > waiting), i.e. the number of threads that need
 * to be woken is greater than the number waiting. The fix is to test and
 * adjust "notifying" at the start of the wait() call.
 * -> This is probably not a problem if we notify less than the full set
 * before the interrupt comes in. If we have four waiters, two pending
 * notifies, and an interrupt hits, we will interrupt one thread and notify
 * two others. It doesn't matter if the interrupted thread would have been
 * one of the notified ones. The count is only screwed up if we have two
 * waiters, in which case it's safe to fix it at the start of the next
 * wait().
 */
#include "Dalvik.h"

#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <time.h>
#include <sys/time.h>
#include <errno.h>

#define LOG_THIN    LOGV

#ifdef WITH_DEADLOCK_PREDICTION    /* fwd */
static const char* kStartBanner =
    "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
static const char* kEndBanner =
    "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";

/*
 * Unsorted, expanding list of objects.
 *
 * This is very similar to PointerSet (which came into existence after this),
 * but these are unsorted, uniqueness is not enforced by the "add" function,
 * and the base object isn't allocated on the heap.
 */
typedef struct ExpandingObjectList {
    u2          alloc;
    u2          count;
    Object**    list;
} ExpandingObjectList;

/* fwd */
static void updateDeadlockPrediction(Thread* self, Object* obj);
static void removeCollectedObject(Object* obj);
static void expandObjClear(ExpandingObjectList* pList);
#endif

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked. Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Dalvik, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998). Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states that an Object's lock may have are referred to as
 * "thin" and "fat". The lock may transition between the two states
 * for various reasons.
 *
 * The lock value itself is stored in Object.lock, which is a union of
 * the form:
 *
 *     typedef union Lock {
 *         u4          thin;
 *         Monitor*    mon;
 *     } Lock;
 *
 * It is possible to tell the current state of the lock from the actual
 * value, so we do not need to store any additional state. When the
 * lock is "thin", it has the form:
 *
 *     [31 ---- 16] [15 ---- 1] [0]
 *      lock count   thread id   1
 *
 * When it is "fat", the field is simply a (Monitor *). Since the pointer
 * will always be 4-byte-aligned, bits 1 and 0 will always be zero when
 * the field holds a pointer. Hence, we can tell the current fat-vs-thin
 * state by checking the least-significant bit.
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 *
 * To reduce the amount of work when attempting a compare and exchange,
 * Thread.threadId is guaranteed to have bit 0 set, and all new Objects
 * have their lock fields initialized to the value 0x1, or
 * DVM_LOCK_INITIAL_THIN_VALUE, via DVM_OBJECT_INIT().
 */
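/*
 * Illustrative sketch (editor's addition, not part of the VM): decoding
 * a thin lock word per the layout above. The helper names are
 * hypothetical; the real code simply performs these bit operations
 * inline (e.g. "*thinp & 0xffff" and "*thinp >> 16").
 */
static inline bool sketchLockIsThin(u4 thinWord)
{
    return (thinWord & 1) != 0;     /* bit 0 set => thin, clear => Monitor* */
}
static inline u4 sketchThinOwnerId(u4 thinWord)
{
    return thinWord & 0xffff;       /* thread id, which has bit 0 set */
}
static inline u4 sketchThinRecursionCount(u4 thinWord)
{
    return thinWord >> 16;          /* #of re-locks beyond the first */
}
/* e.g. 0x00020007 = thread id 7 holds the lock three times (two re-locks) */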

/*
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time. There may be several
 * threads waiting on it (the wait call unlocks it). One or more waiting
 * threads may be getting interrupted or notified at any given time.
 */
struct Monitor {
    Thread*     owner;          /* which thread currently owns the lock? */
    int         lockCount;      /* owner's recursive lock depth */
    Object*     obj;            /* what object are we part of [debug only] */

    int         waiting;        /* total #of threads waiting on this */
    int         notifying;      /* #of threads being notified */
    int         interrupting;   /* #of threads being interrupted */

    pthread_mutex_t lock;
    pthread_cond_t  cond;

    Monitor*    next;

#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * Objects that have been locked immediately after this one in the
     * past. We use an expanding flat array, allocated on first use, to
     * minimize allocations. Deletions from the list, expected to be
     * infrequent, are crunched down.
     */
    ExpandingObjectList historyChildren;

    /*
     * We also track parents. This isn't strictly necessary, but it makes
     * the cleanup at GC time significantly faster.
     */
    ExpandingObjectList historyParents;

    /* used during cycle detection */
    bool        historyMark;

    /* stack trace, established the first time we locked the object */
    int         historyStackDepth;
    int*        historyRawStackTrace;
#endif
};
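/*
 * Worked example of the wait-related counters (editor's note): with four
 * threads in wait(), "waiting" is 4. notifyAll() sets "notifying" to
 * 4 - "interrupting"; an interrupt then bumps "interrupting" by one, and
 * each waking thread decrements whichever counter covers its wakeup path.
 * waitMonitor() re-establishes the invariant
 * (notifying + interrupting <= waiting) at entry, per the NOTE at the top
 * of this file.
 */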


/*
 * Create and initialize a monitor.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        LOGE("Unable to allocate monitor\n");
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);
    pthread_cond_init(&mon->cond, NULL);

    /* replace the head of the list with the new monitor */
    do {
        mon->next = gDvm.monitorList;
    } while (!ATOMIC_CMP_SWAP((int32_t*)(void*)&gDvm.monitorList,
                              (int32_t)mon->next, (int32_t)mon));

    return mon;
}
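/*
 * The loop above is a lock-free "push onto a singly-linked list": read
 * the current head into mon->next, then compare-and-swap the head from
 * that value to mon, retrying if another thread pushed first. A minimal
 * sketch of the same pattern in portable C11 (editor's addition; Dalvik
 * predates C11 and uses its own ATOMIC_CMP_SWAP macro instead):
 *
 *     #include <stdatomic.h>
 *
 *     typedef struct Node { struct Node* next; } Node;
 *     static _Atomic(Node*) listHead;
 *
 *     static void pushNode(Node* n)
 *     {
 *         n->next = atomic_load(&listHead);
 *         while (!atomic_compare_exchange_weak(&listHead, &n->next, n))
 *             ;   // on failure, n->next was reloaded with the new head
 *     }
 */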

/*
 * Release a Monitor.
 */
static void releaseMonitor(Monitor* mon)
{
    // TODO
}

/*
 * Free the monitor list. Only used when shutting the VM down.
 */
void dvmFreeMonitorList(void)
{
    Monitor* mon;
    Monitor* nextMon;

    mon = gDvm.monitorList;
    while (mon != NULL) {
        nextMon = mon->next;

#ifdef WITH_DEADLOCK_PREDICTION
        expandObjClear(&mon->historyChildren);
        expandObjClear(&mon->historyParents);
        free(mon->historyRawStackTrace);
#endif
        free(mon);
        mon = nextMon;
    }
}

/*
 * Log some info about our monitors.
 */
void dvmDumpMonitorInfo(const char* msg)
{
#if QUIET_ZYGOTE_MONITOR
    if (gDvm.zygote) {
        return;
    }
#endif

    int totalCount;
    int liveCount;

    totalCount = liveCount = 0;
    Monitor* mon = gDvm.monitorList;
    while (mon != NULL) {
        totalCount++;
        if (mon->obj != NULL)
            liveCount++;
        mon = mon->next;
    }

    LOGD("%s: monitor list has %d entries (%d live)\n",
        msg, totalCount, liveCount);
}

/*
 * Get the object that a monitor is part of.
 */
Object* dvmGetMonitorObject(Monitor* mon)
{
    if (mon == NULL)
        return NULL;
    else
        return mon->obj;
}

/*
 * Checks whether the given thread holds the given object's lock.
 */
bool dvmHoldsLock(Thread* thread, Object* obj)
{
    if (thread == NULL || obj == NULL) {
        return false;
    }

    /* Since we're reading the lock value multiple times,
     * latch it so that it doesn't change out from under
     * us if we get preempted.
     */
    Lock lock = obj->lock;
    if (IS_LOCK_FAT(&lock)) {
        return thread == lock.mon->owner;
    } else {
        return thread->threadId == (lock.thin & 0xffff);
    }
}
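/*
 * Typical use (editor's note): native code that requires its caller to
 * hold an object's lock can assert it, e.g.
 *
 *     assert(dvmHoldsLock(dvmThreadSelf(), obj));
 *
 * which works whether the lock is currently thin or fat.
 */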

/*
 * Free the monitor associated with an object and make the object's lock
 * thin again. This is called during garbage collection.
 */
void dvmFreeObjectMonitor_internal(Lock *lock)
{
    Monitor *mon;

    /* The macro that wraps this function checks IS_LOCK_FAT() first.
     */
    assert(IS_LOCK_FAT(lock));

#ifdef WITH_DEADLOCK_PREDICTION
    if (gDvm.deadlockPredictMode != kDPOff)
        removeCollectedObject(lock->mon->obj);
#endif

    mon = lock->mon;
    lock->thin = DVM_LOCK_INITIAL_THIN_VALUE;

    /* This lock is associated with an object
     * that's being swept. The only possible way
     * anyone could be holding this lock would be
     * if some JNI code locked but didn't unlock
     * the object, in which case we've got some bad
     * native code somewhere.
     */
    assert(pthread_mutex_trylock(&mon->lock) == 0);
    pthread_mutex_destroy(&mon->lock);
    pthread_cond_destroy(&mon->cond);
#if 1
//TODO: unlink from the monitor list (would require a lock)
//  (might not -- the GC suspension may be enough)
    {
        Monitor *next;
        next = mon->next;
#ifdef WITH_DEADLOCK_PREDICTION
        expandObjClear(&mon->historyChildren);
        expandObjClear(&mon->historyParents);
        free(mon->historyRawStackTrace);
#endif
        memset(mon, 0, sizeof (*mon));
        mon->next = next;
    }
//free(mon);
#endif
}


/*
 * Lock a monitor.
 */
static void lockMonitor(Thread* self, Monitor* mon)
{
    int cc;

    if (mon->owner == self) {
        mon->lockCount++;
    } else {
        ThreadStatus oldStatus;

        if (pthread_mutex_trylock(&mon->lock) != 0) {
            /* mutex is locked, switch to wait status and sleep on it */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            cc = pthread_mutex_lock(&mon->lock);
            assert(cc == 0);
            dvmChangeStatus(self, oldStatus);
        }

        mon->owner = self;
        assert(mon->lockCount == 0);

        /*
         * "waiting", "notifying", and "interrupting" could all be nonzero
         * if we're locking an object on which other threads are waiting.
         * Nothing worth assert()ing about here.
         */
    }
}

/*
 * Try to lock a monitor.
 *
 * Returns "true" on success.
 */
static bool tryLockMonitor(Thread* self, Monitor* mon)
{
    int cc;

    if (mon->owner == self) {
        mon->lockCount++;
        return true;
    } else {
        cc = pthread_mutex_trylock(&mon->lock);
        if (cc == 0) {
            mon->owner = self;
            assert(mon->lockCount == 0);
            return true;
        } else {
            return false;
        }
    }
}


/*
 * Unlock a monitor.
 *
 * Returns true if the unlock succeeded.
 * If the unlock failed, an exception will be pending.
 */
static bool unlockMonitor(Thread* self, Monitor* mon)
{
    assert(mon != NULL);        // can this happen?

    if (mon->owner == self) {
        /*
         * We own the monitor, so nobody else can be in here.
         */
        if (mon->lockCount == 0) {
            int cc;
            mon->owner = NULL;
            cc = pthread_mutex_unlock(&mon->lock);
            assert(cc == 0);
        } else {
            mon->lockCount--;
        }
    } else {
        /*
         * We don't own this, so we're not allowed to unlock it.
         * The JNI spec says that we should throw IllegalMonitorStateException
         * in this case.
         */
        if (mon->owner == NULL) {
            //LOGW("Unlock fat %p: not owned\n", mon->obj);
        } else {
            //LOGW("Unlock fat %p: id %d vs %d\n",
            //    mon->obj, mon->owner->threadId, self->threadId);
        }
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "unlock of unowned monitor");
        return false;
    }
    return true;
}

/*
 * Wait on a monitor until timeout, interrupt, or notification. Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "nsec" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop. This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems. Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int cc;

    /* Make sure that the lock is fat and that we hold it. */
    if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowException("Ljava/lang/IllegalArgumentException;",
            "timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        s8 endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);
        endSec = now.tv_sec + msec / 1000;
        if (endSec >= 0x7fffffff) {
            LOGV("NOTE: end time exceeds epoch\n");
            endSec = 0x7ffffffe;
        }
        ts.tv_sec = endSec;
        ts.tv_nsec = (now.tv_nsec + (msec % 1000) * 1000 * 1000) + nsec;
#else
        struct timeval now;
        gettimeofday(&now, NULL);
        endSec = now.tv_sec + msec / 1000;
        if (endSec >= 0x7fffffff) {
            LOGV("NOTE: end time exceeds epoch\n");
            endSec = 0x7ffffffe;
        }
        ts.tv_sec = endSec;
        ts.tv_nsec = (now.tv_usec + (msec % 1000) * 1000) * 1000 + nsec;
#endif

        /* catch rollover */
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }
        timed = true;
    }

    /*
     * Make sure "notifying" wasn't screwed up by earlier activity. If this
     * is wrong we could end up waking up too many people. (This is a rare
     * situation, but we need to handle it correctly.)
     */
    if (mon->notifying + mon->interrupting > mon->waiting) {
        LOGD("threadid=%d: bogus mon %d+%d>%d; adjusting\n",
            self->threadId, mon->notifying, mon->interrupting,
            mon->waiting);

        assert(mon->waiting >= mon->interrupting);
        mon->notifying = mon->waiting - mon->interrupting;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold. We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * The order of operations here isn't significant, because we still
     * hold the pthread mutex.
     */
    int prevLockCount;

    prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->waiting++;
    mon->owner = NULL;

    /*
     * Update thread status. If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    /*
     * Tell the thread which monitor we're waiting on. This is necessary
     * so that Thread.interrupt() can wake us up. Thread.interrupt needs
     * to gain ownership of the monitor mutex before it can signal us, so
     * we're still not worried about race conditions.
     */
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().
     */
    if (self->interrupted) {
        wasInterrupted = true;
        goto done;
    }

    LOGVV("threadid=%d: waiting on %p\n", self->threadId, mon);

    while (true) {
        if (!timed) {
            cc = pthread_cond_wait(&mon->cond, &mon->lock);
            assert(cc == 0);
        } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&mon->cond, &mon->lock, &ts);
#else
            cc = pthread_cond_timedwait(&mon->cond, &mon->lock, &ts);
#endif
            if (cc == ETIMEDOUT) {
                LOGVV("threadid=%d wakeup: timeout\n", self->threadId);
                break;
            }
        }

        /*
         * We woke up because of an interrupt (which does a broadcast) or
         * a notification (which might be a signal or a broadcast). Figure
         * out what we need to do.
         */
        if (self->interruptingWait) {
            /*
             * The other thread successfully gained the monitor lock, and
             * has confirmed that we were waiting on it. If this is an
             * interruptible wait, we bail out immediately. If not, we
             * continue on.
             */
            self->interruptingWait = false;
            mon->interrupting--;
            assert(self->interrupted);
            if (interruptShouldThrow) {
                wasInterrupted = true;
                LOGD("threadid=%d wakeup: interrupted\n", self->threadId);
                break;
            } else {
                LOGD("threadid=%d wakeup: not interruptible\n", self->threadId);
            }
        }
        if (mon->notifying) {
            /*
             * One or more threads are being notified. Remove ourselves
             * from the set.
             */
            mon->notifying--;
            LOGVV("threadid=%d wakeup: notified\n", self->threadId);
            break;
        } else {
            /*
             * Looks like we were woken unnecessarily, probably as a
             * result of another thread being interrupted. Go back to
             * sleep.
             */
            LOGVV("threadid=%d wakeup: going back to sleep\n", self->threadId);
        }
    }

done:
    //if (wasInterrupted) {
    //    LOGW("threadid=%d: throwing InterruptedException:\n", self->threadId);
    //    dvmDumpThread(self, false);
    //}

    /*
     * Put everything back. Again, we hold the pthread mutex, so the order
     * here isn't significant.
     */
    self->waitMonitor = NULL;
    mon->owner = self;
    mon->waiting--;
    mon->lockCount = prevLockCount;

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptable thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow)
            dvmThrowException("Ljava/lang/InterruptedException;", NULL);
    }
}
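/*
 * The "wait in a loop" guidance mentioned above applies to any use of a
 * condition variable. A minimal sketch of the guarded-wait pattern in
 * plain pthreads (editor's addition; the names are hypothetical):
 *
 *     pthread_mutex_lock(&mutex);
 *     while (!conditionHolds)            // re-check: wakeups may be spurious
 *         pthread_cond_wait(&cond, &mutex);
 *     // ... use the protected state ...
 *     pthread_mutex_unlock(&mutex);
 *
 * waitMonitor() itself loops for the same reason: the broadcast done by
 * Thread.interrupt() can wake us even when no notify is pending for us.
 */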

/*
 * Notify one thread waiting on this monitor.
 */
static void notifyMonitor(Thread* self, Monitor* mon)
{
    /* Make sure that the lock is fat and that we hold it. */
    if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notify()");
        return;
    }

    /*
     * Check to see if anybody is there to notify. We subtract off
     * threads that are being interrupted and anything that has
     * potentially already been notified.
     */
    if (mon->notifying + mon->interrupting < mon->waiting) {
        /* wake up one thread */
        int cc;

        LOGVV("threadid=%d: signaling on %p\n", self->threadId, mon);

        mon->notifying++;
        cc = pthread_cond_signal(&mon->cond);
        assert(cc == 0);
    } else {
        LOGVV("threadid=%d: nobody to signal on %p\n", self->threadId, mon);
    }
}

/*
 * Notify all threads waiting on this monitor.
 *
 * We keep a count of how many threads we notified, so that our various
 * counts remain accurate.
 */
static void notifyAllMonitor(Thread* self, Monitor* mon)
{
    /* Make sure that the lock is fat and that we hold it. */
    if (mon == NULL || ((u4)mon & 1) != 0 || mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notifyAll()");
        return;
    }

    mon->notifying = mon->waiting - mon->interrupting;
    if (mon->notifying > 0) {
        int cc;

        LOGVV("threadid=%d: broadcasting to %d threads on %p\n",
            self->threadId, mon->notifying, mon);

        cc = pthread_cond_broadcast(&mon->cond);
        assert(cc == 0);
    } else {
        LOGVV("threadid=%d: nobody to broadcast to on %p\n",
            self->threadId, mon);
    }
}

#if THIN_LOCKING
/*
 * Thin locking support
 */

/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp = &obj->lock.thin;
    u4 threadId = self->threadId;

    /* First, try to grab the lock as if it's thin;
     * this is the common case and will usually succeed.
     */
    if (!ATOMIC_CMP_SWAP((int32_t *)thinp,
                         (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
                         (int32_t)threadId)) {
        /* The lock is either a thin lock held by someone (possibly 'self'),
         * or a fat lock.
         */
        if ((*thinp & 0xffff) == threadId) {
            /* 'self' is already holding the thin lock; we can just
             * bump the count. Atomic operations are not necessary
             * because only the thread holding the lock is allowed
             * to modify the Lock field.
             */
            *thinp += 1<<16;
        } else {
            /* If this is a thin lock we need to spin on it, if it's fat
             * we need to acquire the monitor.
             */
            if ((*thinp & 1) != 0) {
                ThreadStatus oldStatus;
                static const unsigned long maxSleepDelay = 1 * 1024 * 1024;
                unsigned long sleepDelay;

                LOG_THIN("(%d) spin on lock 0x%08x: 0x%08x (0x%08x) 0x%08x\n",
                         threadId, (uint)&obj->lock,
                         DVM_LOCK_INITIAL_THIN_VALUE, *thinp, threadId);

                /* The lock is still thin, but some other thread is
                 * holding it. Let the VM know that we're about
                 * to wait on another thread.
                 */
                oldStatus = dvmChangeStatus(self, THREAD_MONITOR);

                /* Spin until the other thread lets go.
                 */
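                /* Backoff schedule (editor's note): yield once, then
                 * sleep 1 ms, doubling on each failed pass (2, 4, ...,
                 * 512 ms) and settling at roughly one second per retry.
                 */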
                sleepDelay = 0;
                do {
                    /* In addition to looking for an unlock,
                     * we need to watch out for some other thread
                     * fattening the lock behind our back.
                     */
                    while (*thinp != DVM_LOCK_INITIAL_THIN_VALUE) {
                        if ((*thinp & 1) == 0) {
                            /* The lock has been fattened already.
                             */
                            LOG_THIN("(%d) lock 0x%08x surprise-fattened\n",
                                     threadId, (uint)&obj->lock);
                            dvmChangeStatus(self, oldStatus);
                            goto fat_lock;
                        }

                        if (sleepDelay == 0) {
                            sched_yield();
                            sleepDelay = 1 * 1000;
                        } else {
                            usleep(sleepDelay);
                            if (sleepDelay < maxSleepDelay / 2) {
                                sleepDelay *= 2;
                            }
                        }
                    }
                } while (!ATOMIC_CMP_SWAP((int32_t *)thinp,
                                          (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
                                          (int32_t)threadId));
                LOG_THIN("(%d) spin on lock done 0x%08x: "
                         "0x%08x (0x%08x) 0x%08x\n",
                         threadId, (uint)&obj->lock,
                         DVM_LOCK_INITIAL_THIN_VALUE, *thinp, threadId);

                /* We've got the thin lock; let the VM know that we're
                 * done waiting.
                 */
                dvmChangeStatus(self, oldStatus);

                /* Fatten the lock. Note this relinquishes ownership.
                 * We could also create the monitor in an "owned" state
                 * to avoid "re-locking" it in fat_lock.
                 */
                obj->lock.mon = dvmCreateMonitor(obj);
                LOG_THIN("(%d) lock 0x%08x fattened\n",
                         threadId, (uint)&obj->lock);

                /* Fall through to acquire the newly fat lock.
                 */
            }

            /* The lock is already fat, which means
             * that obj->lock.mon is a regular (Monitor *).
             */
        fat_lock:
            assert(obj->lock.mon != NULL);
            lockMonitor(self, obj->lock.mon);
        }
    }
    // else, the lock was acquired with the ATOMIC_CMP_SWAP().

#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * See if we were allowed to grab the lock at this time. We do it
     * *after* acquiring the lock, rather than before, so that we can
     * freely update the Monitor struct. This seems counter-intuitive,
     * but our goal is deadlock *prediction* not deadlock *prevention*.
     * (If we actually deadlock, the situation is easy to diagnose from
     * a thread dump, so there's no point making a special effort to do
     * the checks before the lock is held.)
     *
     * This needs to happen before we add the object to the thread's
     * monitor list, so we can tell the difference between first-lock and
     * re-lock.
     *
     * It's also important that we do this while in THREAD_RUNNING, so
     * that we don't interfere with cleanup operations in the GC.
     */
    if (gDvm.deadlockPredictMode != kDPOff) {
        if (self->status != THREAD_RUNNING) {
            LOGE("Bad thread status (%d) in DP\n", self->status);
            dvmDumpThread(self, false);
            dvmAbort();
        }
        assert(!dvmCheckException(self));
        updateDeadlockPrediction(self, obj);
        if (dvmCheckException(self)) {
            /*
             * If we're throwing an exception here, we need to free the
             * lock. We add the object to the thread's monitor list so the
             * "unlock" code can remove it.
             */
            dvmAddToMonitorList(self, obj, false);
            dvmUnlockObject(self, obj);
            LOGV("--- unlocked, pending is '%s'\n",
                dvmGetException(self)->clazz->descriptor);
        }
    }

    /*
     * Add the locked object, and the current stack trace, to the list
     * held by the Thread object. If deadlock prediction isn't on,
     * don't capture the stack trace.
     */
    dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
#elif defined(WITH_MONITOR_TRACKING)
    /*
     * Add the locked object to the list held by the Thread object.
     */
    dvmAddToMonitorList(self, obj, false);
#endif
}

/*
 * Implements monitorexit for "synchronized" stuff.
 *
 * On failure, throws an exception and returns "false".
 */
bool dvmUnlockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp = &obj->lock.thin;
    u4 threadId = self->threadId;

    /* Check the common case, where 'self' has locked 'obj' once, first.
     */
    if (*thinp == threadId) {
        /* Unlock 'obj' by clearing our threadId from 'thin'.
         * The lock protects the lock field itself, so it's
         * safe to update non-atomically.
         */
        *thinp = DVM_LOCK_INITIAL_THIN_VALUE;
    } else if ((*thinp & 1) != 0) {
        /* If the object is locked, it had better be locked by us.
         */
        if ((*thinp & 0xffff) != threadId) {
            /* The JNI spec says that we should throw an exception
             * in this case.
             */
            //LOGW("Unlock thin %p: id %d vs %d\n",
            //    obj, (*thinp & 0xffff), threadId);
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "unlock of unowned monitor");
            return false;
        }

        /* It's a thin lock, but 'self' has locked 'obj'
         * more than once. Decrement the count.
         */
        *thinp -= 1<<16;
    } else {
        /* It's a fat lock.
         */
        assert(obj->lock.mon != NULL);
        if (!unlockMonitor(self, obj->lock.mon)) {
            /* exception has been raised */
            return false;
        }
    }

#ifdef WITH_MONITOR_TRACKING
    /*
     * Remove the object from the Thread's list.
     */
    dvmRemoveFromMonitorList(self, obj);
#endif

    return true;
}
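/*
 * Worked example of the thin paths above (editor's note): if thread 7
 * holds 'obj' once, the lock word is 0x00000007 and unlocking resets it
 * to DVM_LOCK_INITIAL_THIN_VALUE (0x1). If it holds the lock three times,
 * the word is 0x00020007, and the first two unlocks each subtract 1<<16
 * before the final unlock resets the word.
 */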

/*
 * Object.wait(). Also called for class init.
 */
void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    Monitor* mon = obj->lock.mon;
    u4 thin = obj->lock.thin;

    /* If the lock is still thin, we need to fatten it.
     */
    if ((thin & 1) != 0) {
        /* Make sure that 'self' holds the lock.
         */
        if ((thin & 0xffff) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before wait()");
            return;
        }

        /* This thread holds the lock. We need to fatten the lock
         * so 'self' can block on it. Don't update the object lock
         * field yet, because 'self' needs to acquire the lock before
         * any other thread gets a chance.
         */
        mon = dvmCreateMonitor(obj);

        /* 'self' has actually locked the object one or more times;
         * make sure that the monitor reflects this.
         */
        lockMonitor(self, mon);
        mon->lockCount = thin >> 16;
        LOG_THIN("(%d) lock 0x%08x fattened by wait() to count %d\n",
                 self->threadId, (uint)&obj->lock, mon->lockCount);

        /* Make the monitor public now that it's in the right state.
         */
        obj->lock.mon = mon;
    }

    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
}
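/*
 * Example of the carry-over above (editor's note): if thread 7 has locked
 * 'obj' twice, the thin word is 0x00010007; fattening by wait() yields a
 * monitor with owner == self and lockCount == 1, and waitMonitor() saves
 * and restores that count around the wait itself.
 */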

/*
 * Object.notify().
 */
void dvmObjectNotify(Thread* self, Object *obj)
{
    Monitor* mon = obj->lock.mon;
    u4 thin = obj->lock.thin;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if ((thin & 1) != 0) {
        /* Make sure that 'self' holds the lock.
         */
        if ((thin & 0xffff) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notify()");
            return;
        }

        /* no-op; there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyMonitor(self, mon);
    }
}

/*
 * Object.notifyAll().
 */
void dvmObjectNotifyAll(Thread* self, Object *obj)
{
    u4 thin = obj->lock.thin;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if ((thin & 1) != 0) {
        /* Make sure that 'self' holds the lock.
         */
        if ((thin & 0xffff) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notifyAll()");
            return;
        }

        /* no-op; there are no waiters to notify.
         */
    } else {
        Monitor* mon = obj->lock.mon;

        /* It's a fat lock.
         */
        notifyAllMonitor(self, mon);
    }
}

#else  // not THIN_LOCKING

/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception.
 */
void dvmLockObject(Thread* self, Object* obj)
{
    Monitor* mon = obj->lock.mon;

    if (mon == NULL) {
        mon = dvmCreateMonitor(obj);
        if (!ATOMIC_CMP_SWAP((int32_t *)&obj->lock.mon,
                             (int32_t)NULL, (int32_t)mon)) {
            /* somebody else beat us to it */
            releaseMonitor(mon);
            mon = obj->lock.mon;
        }
    }

    lockMonitor(self, mon);
}

/*
 * Implements monitorexit for "synchronized" stuff.
 */
bool dvmUnlockObject(Thread* self, Object* obj)
{
    Monitor* mon = obj->lock.mon;

    return unlockMonitor(self, mon);
}


/*
 * Object.wait().
 */
void dvmObjectWait(Thread* self, Object* obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    Monitor* mon = obj->lock.mon;

    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
}

/*
 * Object.notify().
 */
void dvmObjectNotify(Thread* self, Object* obj)
{
    Monitor* mon = obj->lock.mon;

    notifyMonitor(self, mon);
}

/*
 * Object.notifyAll().
 */
void dvmObjectNotifyAll(Thread* self, Object* obj)
{
    Monitor* mon = obj->lock.mon;

    notifyAllMonitor(self, mon);
}

#endif  // not THIN_LOCKING


/*
 * This implements java.lang.Thread.sleep(long msec, int nsec).
 *
 * The sleep is interruptible by other threads, which means we can't just
 * plop into an OS sleep call. (We probably could if we wanted to send
 * signals around and rely on EINTR, but that's inefficient and relies
 * on native code respecting our signal mask.)
 *
 * We have to do all of this stuff for Object.wait() as well, so it's
 * easiest to just sleep on a private Monitor.
 *
 * It appears that we want sleep(0,0) to go through the motions of sleeping
 * for a very short duration, rather than just returning.
 */
void dvmThreadSleep(u8 msec, u4 nsec)
{
    Thread* self = dvmThreadSelf();
    Monitor* mon = gDvm.threadSleepMon;

    /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
    if (msec == 0 && nsec == 0)
        nsec++;

    lockMonitor(self, mon);
    waitMonitor(self, mon, msec, nsec, true);
    unlockMonitor(self, mon);
}

/*
 * Implement java.lang.Thread.interrupt().
 *
 * We need to increment the monitor's "interrupting" count, and set the
 * interrupted status for the thread in question. Doing so requires
 * gaining the monitor's lock, which may not happen in a timely fashion.
 * We are left with a decision between failing to interrupt the thread
 * and stalling the interrupting thread.
 *
 * We must take some care to ensure that we don't try to interrupt the same
 * thread on the same mutex twice. Doing so would leave us with an
 * incorrect value for Monitor.interrupting.
 */
void dvmThreadInterrupt(volatile Thread* thread)
{
    Monitor* mon;

    /*
     * Raise the "interrupted" flag. This will cause the thread to bail
     * out early from its next wait() attempt, if it's not currently
     * waiting on something.
     */
    thread->interrupted = true;
    MEM_BARRIER();

    /*
     * Is the thread waiting?
     *
     * Note that fat vs. thin doesn't matter here; waitMonitor
     * is only set when a thread actually waits on a monitor,
     * which implies that the monitor has already been fattened.
     */
    mon = thread->waitMonitor;
    if (mon == NULL)
        return;

    /*
     * Try to acquire the monitor, if we don't already own it. We need
     * to hold the same mutex as the thread in order to signal the
     * condition it's waiting on. When the thread goes to sleep it will
     * release the monitor's mutex, allowing us to signal it.
     *
     * TODO: we may be able to get rid of the explicit lock by coordinating
     * this more closely with waitMonitor.
     */
    Thread* self = dvmThreadSelf();
    if (!tryLockMonitor(self, mon)) {
        /*
         * Failed to get the monitor the thread is waiting on; most likely
         * the other thread is in the middle of doing something.
         */
        const int kSpinSleepTime = 500*1000;        /* 0.5s */
        u8 startWhen = dvmGetRelativeTimeUsec();
        int sleepIter = 0;

        while (dvmIterativeSleep(sleepIter++, kSpinSleepTime, startWhen)) {
            /*
             * Still time left on the clock, try to grab it again.
             */
            if (tryLockMonitor(self, mon))
                goto gotit;

            /*
             * If the target thread is no longer waiting on the same monitor,
             * the "interrupted" flag we set earlier will have caused the
             * interrupt when the thread woke up, so we can stop now.
             */
            if (thread->waitMonitor != mon)
                return;
        }

        /*
         * We have to give up or risk deadlock.
         */
        LOGW("threadid=%d: unable to interrupt threadid=%d\n",
            self->threadId, thread->threadId);
        return;
    }

gotit:
    /*
     * We've got the monitor lock, which means nobody can be added or
     * removed from the wait list. This also means that the Thread's
     * waitMonitor/interruptingWait fields can't be modified by anyone
     * else.
     *
     * If things look good, raise flags and wake the threads sleeping
     * on the monitor's condition variable.
     */
    if (thread->waitMonitor == mon &&       // still on same monitor?
        thread->interrupted &&              // interrupt still pending?
        !thread->interruptingWait)          // nobody else is interrupting too?
    {
        int cc;

        LOGVV("threadid=%d: interrupting threadid=%d waiting on %p\n",
            self->threadId, thread->threadId, mon);

        thread->interruptingWait = true;    // prevent re-interrupt...
        mon->interrupting++;                // ...so we only do this once
        cc = pthread_cond_broadcast(&mon->cond);
        assert(cc == 0);
    }

    unlockMonitor(self, mon);
}


#ifdef WITH_DEADLOCK_PREDICTION
/*
 * ===========================================================================
 *      Deadlock prediction
 * ===========================================================================
 */
/*
The idea is to predict the possibility of deadlock by recording the order
in which monitors are acquired. If we see an attempt to acquire a lock
out of order, we can identify the locks and offending code.

To make this work, we need to keep track of the locks held by each thread,
and create history trees for each lock. When a thread tries to acquire
a new lock, we walk through the "history children" of the lock, looking
for a match with locks the thread already holds. If we find a match,
it means the thread has made a request that could result in a deadlock.

To support recursive locks, we always allow re-locking a currently-held
lock, and maintain a recursion depth count.

An ASCII-art example, where letters represent Objects:

        A
       /|\
      / | \
     B  |  D
      \ |
       \|
        C

The above is the tree we'd have after handling Object synchronization
sequences "ABC", "AC", "AD". A has three children, {B, C, D}. C is also
a child of B. (The lines represent pointers between parent and child.
Every node can have multiple parents and multiple children.)

If we hold AC, and want to lock B, we recursively search through B's
children to see if A or C appears. It does, so we reject the attempt.
(A straightforward way to implement it: add a link from C to B, then
determine whether the graph starting at B contains a cycle.)

If we hold AC and want to lock D, we would succeed, creating a new link
from C to D.

The lock history and a stack trace are attached to the Object's Monitor
struct, which means we need to fatten every Object we lock (thin locking
is effectively disabled). If we don't need the stack trace we can
avoid fattening the leaf nodes, only fattening objects that need to hold
history trees.

Updates to Monitor structs are only allowed for the thread that holds
the Monitor, so we actually do most of our deadlock prediction work after
the lock has been acquired.

When an object with a monitor is GCed, we need to remove it from the
history trees. There are two basic approaches:
 (1) Go through the entire set of known monitors, searching all child
     lists for the object in question. This is rather slow, resulting
     in GC passes that take upwards of 10 seconds to complete.
 (2) Maintain "parent" pointers in each node. Remove the entries as
     required. This requires additional storage and maintenance for
     every operation, but is significantly faster at GC time.
For each GCed object, we merge all of the object's children into each of
the object's parents.
*/
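/*
 * To make the example concrete (editor's addition): the tree in the
 * ASCII art above would be built by these calls as the three lock
 * sequences are first observed (objects A-D are hypothetical):
 *
 *     linkParentToChild(A, B);    // from sequence "ABC"
 *     linkParentToChild(B, C);
 *     linkParentToChild(A, C);    // from sequence "AC"
 *     linkParentToChild(A, D);    // from sequence "AD"
 *
 * A later attempt to lock B while holding C adds a C->B link, and
 * traverseTree() then finds the cycle B -> C -> B and flags the attempt.
 */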

#if !defined(WITH_MONITOR_TRACKING)
# error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
#endif

/*
 * Clear out the contents of an ExpandingObjectList, freeing any
 * dynamic allocations.
 */
static void expandObjClear(ExpandingObjectList* pList)
{
    if (pList->list != NULL) {
        free(pList->list);
        pList->list = NULL;
    }
    pList->alloc = pList->count = 0;
}

/*
 * Get the number of objects currently stored in the list.
 */
static inline int expandBufGetCount(const ExpandingObjectList* pList)
{
    return pList->count;
}

/*
 * Get the Nth entry from the list.
 */
static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
    int i)
{
    return pList->list[i];
}

/*
 * Add a new entry to the list.
 *
 * We don't check for or try to enforce uniqueness. It's expected that
 * the higher-level code does this for us.
 */
static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
{
    if (pList->count == pList->alloc) {
        /* time to expand */
        Object** newList;

        if (pList->alloc == 0)
            pList->alloc = 4;
        else
            pList->alloc *= 2;
        LOGVV("expanding %p to %d\n", pList, pList->alloc);
        newList = realloc(pList->list, pList->alloc * sizeof(Object*));
        if (newList == NULL) {
            LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
            dvmAbort();
        }
        pList->list = newList;
    }

    pList->list[pList->count++] = obj;
}
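/*
 * Usage sketch (editor's note): the backing array grows by doubling from
 * four entries, so N additions cost O(N) amortized:
 *
 *     ExpandingObjectList list;
 *     memset(&list, 0, sizeof(list));     // alloc = count = 0
 *     expandObjAddEntry(&list, obj1);     // allocates room for 4 entries
 *     expandObjAddEntry(&list, obj2);
 *     assert(expandObjHas(&list, obj1));
 *     expandObjRemoveEntry(&list, obj1);  // last entry backfills the hole
 *     expandObjClear(&list);              // frees the backing array
 */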

/*
 * Returns "true" if the element was successfully removed.
 */
static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
{
    int i;

    for (i = pList->count-1; i >= 0; i--) {
        if (pList->list[i] == obj)
            break;
    }
    if (i < 0)
        return false;

    if (i != pList->count-1) {
        /*
         * The order of elements is not important, so we just copy the
         * last entry into the new slot.
         */
        //memmove(&pList->list[i], &pList->list[i+1],
        //    (pList->count-1 - i) * sizeof(pList->list[0]));
        pList->list[i] = pList->list[pList->count-1];
    }

    pList->count--;
    pList->list[pList->count] = (Object*) 0xdecadead;
    return true;
}

/*
 * Returns "true" if "obj" appears in the list.
 */
static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
{
    int i;

    for (i = 0; i < pList->count; i++) {
        if (pList->list[i] == obj)
            return true;
    }
    return false;
}

/*
 * Print the list contents to stdout. For debugging.
 */
static void expandObjDump(const ExpandingObjectList* pList)
{
    int i;
    for (i = 0; i < pList->count; i++)
        printf(" %p", pList->list[i]);
}

/*
 * Check for duplicate entries. Returns the index of the first instance
 * of the duplicated value, or -1 if no duplicates were found.
 */
static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
{
    int i, j;
    for (i = 0; i < pList->count-1; i++) {
        for (j = i + 1; j < pList->count; j++) {
            if (pList->list[i] == pList->list[j]) {
                return i;
            }
        }
    }

    return -1;
}


/*
 * Determine whether "child" appears in the list of objects associated
 * with the Monitor in "parent". If "parent" is a thin lock, we return
 * false immediately.
 */
static bool objectInChildList(const Object* parent, Object* child)
{
    Lock lock = parent->lock;
    if (!IS_LOCK_FAT(&lock)) {
        //LOGI("on thin\n");
        return false;
    }

    return expandObjHas(&lock.mon->historyChildren, child);
}

/*
 * Print the child list.
 */
static void dumpKids(Object* parent)
{
    Monitor* mon = parent->lock.mon;

    printf("Children of %p:", parent);
    expandObjDump(&mon->historyChildren);
    printf("\n");
}

/*
 * Add "child" to the list of children in "parent", and add "parent" to
 * the list of parents in "child".
 */
static void linkParentToChild(Object* parent, Object* child)
{
    //assert(parent->lock.mon->owner == dvmThreadSelf());   // !owned for merge
    assert(IS_LOCK_FAT(&parent->lock));
    assert(IS_LOCK_FAT(&child->lock));
    assert(parent != child);
    Monitor* mon;

    mon = parent->lock.mon;
    assert(!expandObjHas(&mon->historyChildren, child));
    expandObjAddEntry(&mon->historyChildren, child);

    mon = child->lock.mon;
    assert(!expandObjHas(&mon->historyParents, parent));
    expandObjAddEntry(&mon->historyParents, parent);
}


/*
 * Remove "child" from the list of children in "parent".
 */
static void unlinkParentFromChild(Object* parent, Object* child)
{
    //assert(parent->lock.mon->owner == dvmThreadSelf());   // !owned for GC
    assert(IS_LOCK_FAT(&parent->lock));
    assert(IS_LOCK_FAT(&child->lock));
    assert(parent != child);
    Monitor* mon;

    mon = parent->lock.mon;
    if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
        LOGW("WARNING: child %p not found in parent %p\n", child, parent);
    }
    assert(!expandObjHas(&mon->historyChildren, child));
    assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);

    mon = child->lock.mon;
    if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
        LOGW("WARNING: parent %p not found in child %p\n", parent, child);
    }
    assert(!expandObjHas(&mon->historyParents, parent));
    assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
}


/*
 * Log the monitors held by the current thread. This is done as part of
 * flagging an error.
 */
static void logHeldMonitors(Thread* self)
{
    char* name = NULL;

    name = dvmGetThreadName(self);
    LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
        self->threadId, name);
    LOGW("(most-recently-acquired on top):\n");
    free(name);

    LockedObjectData* lod = self->pLockedObjects;
    while (lod != NULL) {
        LOGW("--- object %p[%d] (%s)\n",
            lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
        dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);

        lod = lod->next;
    }
}

/*
 * Recursively traverse the object hierarchy starting at "obj". We mark
 * ourselves on entry and clear the mark on exit. If we ever encounter
 * a marked object, we have a cycle.
 *
 * Returns "true" if all is well, "false" if we found a cycle.
 */
static bool traverseTree(Thread* self, const Object* obj)
{
    assert(IS_LOCK_FAT(&obj->lock));
    Monitor* mon = obj->lock.mon;

    /*
     * Have we been here before?
     */
    if (mon->historyMark) {
        int* rawStackTrace;
        int stackDepth;

        LOGW("%s\n", kStartBanner);
        LOGW("Illegal lock attempt:\n");
        LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);

        rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
        dvmLogRawStackTrace(rawStackTrace, stackDepth);
        free(rawStackTrace);

        LOGW(" ");
        logHeldMonitors(self);

        LOGW(" ");
        LOGW("Earlier, the following lock order (from last to first) was\n");
        LOGW("established -- stack trace is from first successful lock:\n");
        return false;
    }
    mon->historyMark = true;

    /*
     * Examine the children. We do NOT hold these locks, so they might
     * very well transition from thin to fat or change ownership while
     * we work.
     *
     * NOTE: we rely on the fact that they cannot revert from fat to thin
     * while we work. This is currently a safe assumption.
     *
     * We can safely ignore thin-locked children, because by definition
     * they have no history and are leaf nodes. In the current
     * implementation we always fatten the locks to provide a place to
     * hang the stack trace.
     */
    ExpandingObjectList* pList = &mon->historyChildren;
    int i;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        const Object* child = expandBufGetEntry(pList, i);
        Lock lock = child->lock;
        if (!IS_LOCK_FAT(&lock))
            continue;
        if (!traverseTree(self, child)) {
            LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
            dvmLogRawStackTrace(mon->historyRawStackTrace,
                mon->historyStackDepth);
            mon->historyMark = false;
            return false;
        }
    }

    mon->historyMark = false;

    return true;
}

/*
 * Update the deadlock prediction tree, based on the current thread
 * acquiring "acqObj". This must be called before the object is added to
 * the thread's list of held monitors.
 *
 * If the thread already holds the lock (recursion), or this is a known
 * lock configuration, we return without doing anything. Otherwise, we add
 * a link from the most-recently-acquired lock in this thread to "acqObj"
 * after ensuring that the parent lock is "fat".
 *
 * This MUST NOT be called while a GC is in progress in another thread,
 * because we assume exclusive access to history trees in owned monitors.
 */
static void updateDeadlockPrediction(Thread* self, Object* acqObj)
{
    LockedObjectData* lod;
    LockedObjectData* mrl;

    /*
     * Quick check for recursive access.
     */
    lod = dvmFindInMonitorList(self, acqObj);
    if (lod != NULL) {
        LOGV("+++ DP: recursive %p\n", acqObj);
        return;
    }

    /*
     * Make the newly-acquired object's monitor "fat". In some ways this
     * isn't strictly necessary, but we need the GC to tell us when
     * "interesting" objects go away, and right now the only way to make
     * an object look interesting is to give it a monitor.
     *
     * This also gives us a place to hang a stack trace.
     *
     * Our thread holds the lock, so we're allowed to rewrite the lock
     * without worrying that something will change out from under us.
     */
    if (!IS_LOCK_FAT(&acqObj->lock)) {
        LOGVV("fattening lockee %p (recur=%d)\n",
            acqObj, acqObj->lock.thin >> 16);
        Monitor* newMon = dvmCreateMonitor(acqObj);
        lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
        newMon->lockCount += acqObj->lock.thin >> 16;
        acqObj->lock.mon = newMon;
    }

    /* if we don't have a stack trace for this monitor, establish one */
    if (acqObj->lock.mon->historyRawStackTrace == NULL) {
        Monitor* mon = acqObj->lock.mon;
        mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
            &mon->historyStackDepth);
    }

    /*
     * We need to examine and perhaps modify the most-recently-locked
     * monitor. We own that, so there's no risk of another thread
     * stepping on us.
     *
     * Retrieve the most-recently-locked entry from our thread.
     */
    mrl = self->pLockedObjects;
    if (mrl == NULL)
        return;         /* no other locks held */

    /*
     * Do a quick check to see if "acqObj" is a direct descendant. We can do
     * this without holding the global lock because of our assertion that
     * a GC is not running in parallel -- nobody except the GC can
     * modify a history list in a Monitor they don't own, and we own "mrl".
     * (There might be concurrent *reads*, but no concurrent *writes*.)
     *
     * If we find it, this is a known good configuration, and we're done.
     */
    if (objectInChildList(mrl->obj, acqObj))
        return;

    /*
     * "mrl" is going to need to have a history tree. If it's currently
     * a thin lock, we make it fat now. The thin lock might have a
     * nonzero recursive lock count, which we need to carry over.
     *
     * Our thread holds the lock, so we're allowed to rewrite the lock
     * without worrying that something will change out from under us.
     */
    if (!IS_LOCK_FAT(&mrl->obj->lock)) {
        LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
            mrl->obj, acqObj, mrl->obj->lock.thin >> 16);
        Monitor* newMon = dvmCreateMonitor(mrl->obj);
        lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
        newMon->lockCount += mrl->obj->lock.thin >> 16;
        mrl->obj->lock.mon = newMon;
    }

    /*
     * We haven't seen this configuration before. We need to scan down
     * acqObj's tree to see if any of the monitors in self->pLockedObjects
     * appear. We grab a global lock before traversing or updating the
     * history list.
     *
     * If we find a match for any of our held locks, we know that the lock
     * has previously been acquired *after* acqObj, and we throw an error.
     *
     * The easiest way to do this is to create a link from "mrl" to "acqObj"
     * and do a recursive traversal, marking nodes as we cross them. If
     * we cross one a second time, we have a cycle and can throw an error.
     * (We do the flag-clearing traversal before adding the new link, so
     * that we're guaranteed to terminate.)
     *
     * If "acqObj" is a thin lock, it has no history, and we can create a
     * link to it without additional checks. [ We now guarantee that it's
     * always fat. ]
     */
    bool failed = false;
    dvmLockMutex(&gDvm.deadlockHistoryLock);
    linkParentToChild(mrl->obj, acqObj);
    if (!traverseTree(self, acqObj)) {
        LOGW("%s\n", kEndBanner);
        failed = true;

        /* remove the entry so we're still okay when in "warning" mode */
        unlinkParentFromChild(mrl->obj, acqObj);
    }
    dvmUnlockMutex(&gDvm.deadlockHistoryLock);

    if (failed) {
        switch (gDvm.deadlockPredictMode) {
        case kDPErr:
            dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
            break;
        case kDPAbort:
            LOGE("Aborting due to potential deadlock\n");
            dvmAbort();
            break;
        default:
            /* warn only */
            break;
        }
    }
}

/*
 * We're removing "child" from existence. We want to pull all of
 * child's children into "parent", filtering out duplicates. This is
 * called during the GC.
 *
 * This does not modify "child", which might have multiple parents.
 */
static void mergeChildren(Object* parent, const Object* child)
{
    Monitor* mon;
    int i;

    assert(IS_LOCK_FAT(&child->lock));
    mon = child->lock.mon;
    ExpandingObjectList* pList = &mon->historyChildren;

    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* grandChild = expandBufGetEntry(pList, i);

        if (!objectInChildList(parent, grandChild)) {
            LOGVV("+++ migrating %p link to %p\n", grandChild, parent);
            linkParentToChild(parent, grandChild);
        } else {
            LOGVV("+++ parent %p already links to %p\n", parent, grandChild);
        }
    }
}

/*
 * An object with a fat lock is being collected during a GC pass. We
 * want to remove it from any lock history trees that it is a part of.
 *
 * This may require updating the history trees in several monitors. The
 * monitor semantics guarantee that no other thread will be accessing
 * the history trees at the same time.
 */
static void removeCollectedObject(Object* obj)
{
    Monitor* mon;

    LOGVV("+++ collecting %p\n", obj);

#if 0
    /*
     * We're currently running through the entire set of known monitors.
     * This can be somewhat slow. We may want to keep lists of parents
     * in each child to speed up GC.
     */
    mon = gDvm.monitorList;
    while (mon != NULL) {
        Object* parent = mon->obj;
        if (parent != NULL) {       /* value nulled for deleted entries */
            if (objectInChildList(parent, obj)) {
                LOGVV("removing child %p from parent %p\n", obj, parent);
                unlinkParentFromChild(parent, obj);
                mergeChildren(parent, obj);
            }
        }
        mon = mon->next;
    }
#endif

    /*
     * For every parent of this object:
     *  - merge all of our children into the parent's child list (creates
     *    a two-way link between parent and child)
     *  - remove ourselves from the parent's child list
     */
    ExpandingObjectList* pList;
    int i;

    assert(IS_LOCK_FAT(&obj->lock));
    mon = obj->lock.mon;
    pList = &mon->historyParents;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* parent = expandBufGetEntry(pList, i);
        Monitor* parentMon = parent->lock.mon;

        if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
            LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
        }
        assert(!expandObjHas(&parentMon->historyChildren, obj));

        mergeChildren(parent, obj);
    }

    /*
     * For every child of this object:
     *  - remove ourselves from the child's parent list
     */
    pList = &mon->historyChildren;
    for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
        Object* child = expandBufGetEntry(pList, i);
        Monitor* childMon = child->lock.mon;

        if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
            LOGW("WARNING: parent %p not found in child %p\n", obj, child);
        }
        assert(!expandObjHas(&childMon->historyParents, obj));
    }
}

#endif /*WITH_DEADLOCK_PREDICTION*/