/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor.h"

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include "mutex.h"
#include "object.h"
#include "thread.h"
#include "thread_list.h"

namespace art {

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked. Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Android, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998). Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat". A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation. Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock. The LSB of the
 * lock encodes its state. When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer    hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 *
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time. There may be several
 * threads waiting on it (the wait call unlocks it). One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */


/*
 * Monitor accessor. Extracts a monitor structure pointer from a fat
 * lock. Performs no error checking.
 */
#define LW_MONITOR(x) \
    ((Monitor*)((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))

/*
 * Lock recursion count field. Contains a count of the number of times
 * a lock has been recursively acquired.
 */
#define LW_LOCK_COUNT_MASK 0x1fff
#define LW_LOCK_COUNT_SHIFT 19
#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
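
// Quick illustration (not part of the upstream implementation) of how a lock
// word is decoded with these helpers. LW_SHAPE, LW_LOCK_OWNER, and the
// hash-state constants are assumed to come from monitor.h, where the rest of
// this file gets them.
//
//   uint32_t lw = *obj->GetRawLockWordAddress();
//   if (LW_SHAPE(lw) == LW_SHAPE_THIN) {
//     uint32_t owner = LW_LOCK_OWNER(lw);  // thin lock id of the holder, 0 if unlocked
//     uint32_t count = LW_LOCK_COUNT(lw);  // recursive acquisitions beyond the first
//   } else {
//     Monitor* mon = LW_MONITOR(lw);       // the inflated monitor for this object
//   }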

bool Monitor::is_verbose_ = false;

void Monitor::SetVerbose(bool is_verbose) {
  is_verbose_ = is_verbose;
}

Monitor::Monitor(Object* obj)
    : owner_(NULL),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      lock_("a monitor lock"),
      next_(NULL),
      owner_filename_(NULL),
      owner_line_number_(0) {
}

Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);

#ifndef NDEBUG
  /* This lock is associated with an object
   * that's being swept. The only possible way
   * anyone could be holding this lock would be
   * if some JNI code locked but didn't unlock
   * the object, in which case we've got some bad
   * native code somewhere.
   */
  DCHECK(lock_.TryLock());
  lock_.Unlock();
#endif
}

/*
 * Links a thread into a monitor's wait set. The monitor lock must be
 * held by the caller of this routine.
 */
void Monitor::AppendToWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
  if (wait_set_ == NULL) {
    wait_set_ = thread;
    return;
  }

  // push_back.
  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    t = t->wait_next_;
  }
  t->wait_next_ = thread;
}

/*
 * Unlinks a thread from a monitor's wait set. The monitor lock must
 * be held by the caller of this routine.
 */
void Monitor::RemoveFromWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  if (wait_set_ == NULL) {
    return;
  }
  if (wait_set_ == thread) {
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    return;
  }

  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    if (t->wait_next_ == thread) {
      t->wait_next_ = thread->wait_next_;
      thread->wait_next_ = NULL;
      return;
    }
    t = t->wait_next_;
  }
}

// Global list of all monitors. Used for cleanup.
static Monitor* gMonitorList = NULL;

void Monitor::FreeMonitorList() {
  Monitor* m = gMonitorList;
  while (m != NULL) {
    Monitor* next = m->next_;
    delete m;
    m = next;
  }
}

/*
 * Frees monitor objects belonging to unmarked objects.
 */
static void SweepMonitorList(Monitor** mon, bool (isUnmarkedObject)(void*)) {
  UNIMPLEMENTED(FATAL);
#if 0
  Monitor handle;
  Monitor* curr;

  DCHECK(mon != NULL);
  DCHECK(isUnmarkedObject != NULL);
  Monitor* prev = &handle;
  prev->next = curr = *mon;
  while (curr != NULL) {
    Object* obj = curr->obj;
    if ((*isUnmarkedObject)(obj) != 0) {
      prev->next = curr->next;
      delete curr;
      curr = prev->next;
    } else {
      prev = curr;
      curr = curr->next;
    }
  }
  *mon = handle.next;
#endif
}

void Monitor::SweepMonitorList(bool (isUnmarkedObject)(void*)) {
  ::art::SweepMonitorList(&gMonitorList, isUnmarkedObject);
}

/*
static char *logWriteInt(char *dst, int value) {
  *dst++ = EVENT_TYPE_INT;
  set4LE((uint8_t *)dst, value);
  return dst + 4;
}

static char *logWriteString(char *dst, const char *value, size_t len) {
  *dst++ = EVENT_TYPE_STRING;
  len = len < 32 ? len : 32;
  set4LE((uint8_t *)dst, len);
  dst += 4;
  memcpy(dst, value, len);
  return dst + len;
}

#define EVENT_LOG_TAG_dvm_lock_sample 20003

static void logContentionEvent(Thread *self, uint32_t waitMs, uint32_t samplePercent,
                               const char *ownerFileName, uint32_t ownerLineNumber)
{
  const StackSaveArea *saveArea;
  const Method *meth;
  uint32_t relativePc;
  char eventBuffer[174];
  const char *fileName;
  char procName[33];
  char *cp;
  size_t len;
  int fd;

  saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
  meth = saveArea->method;
  cp = eventBuffer;

  // Emit the event list length, 1 byte.
  *cp++ = 9;

  // Emit the process name, <= 37 bytes.
  fd = open("/proc/self/cmdline", O_RDONLY);
  memset(procName, 0, sizeof(procName));
  read(fd, procName, sizeof(procName) - 1);
  close(fd);
  len = strlen(procName);
  cp = logWriteString(cp, procName, len);

  // Emit the sensitive thread ("main thread") status, 5 bytes.
  bool isSensitive = false;
  if (gDvm.isSensitiveThreadHook != NULL) {
    isSensitive = gDvm.isSensitiveThreadHook();
  }
  cp = logWriteInt(cp, isSensitive);

  // Emit self thread name string, <= 37 bytes.
  std::string selfName = dvmGetThreadName(self);
  cp = logWriteString(cp, selfName.c_str(), selfName.size());

  // Emit the wait time, 5 bytes.
  cp = logWriteInt(cp, waitMs);

  // Emit the source code file name, <= 37 bytes.
  fileName = dvmGetMethodSourceFile(meth);
  if (fileName == NULL) fileName = "";
  cp = logWriteString(cp, fileName, strlen(fileName));

  // Emit the source code line number, 5 bytes.
  relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
  cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));

  // Emit the lock owner source code file name, <= 37 bytes.
  if (ownerFileName == NULL) {
    ownerFileName = "";
  } else if (strcmp(fileName, ownerFileName) == 0) {
    // Common case, so save on log space.
    ownerFileName = "-";
  }
  cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));

  // Emit the lock owner source code line number, 5 bytes.
  cp = logWriteInt(cp, ownerLineNumber);

  // Emit the sample percentage, 5 bytes.
  cp = logWriteInt(cp, samplePercent);

  assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
  android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
                     EVENT_TYPE_LIST,
                     eventBuffer,
                     (size_t)(cp - eventBuffer));
}
*/

void Monitor::Lock(Thread* self) {
//  uint32_t waitThreshold, samplePercent;
//  uint64_t waitStart, waitEnd, waitMs;

  if (owner_ == self) {
    lock_count_++;
    return;
  }
  if (!lock_.TryLock()) {
    {
      ScopedThreadStateChange tsc(self, Thread::kBlocked);
//      waitThreshold = gDvm.lockProfThreshold;
//      if (waitThreshold) {
//        waitStart = dvmGetRelativeTimeUsec();
//      }
//      const char* currentOwnerFileName = mon->ownerFileName;
//      uint32_t currentOwnerLineNumber = mon->ownerLineNumber;

      lock_.Lock();
//      if (waitThreshold) {
//        waitEnd = dvmGetRelativeTimeUsec();
//      }
    }
//    if (waitThreshold) {
//      waitMs = (waitEnd - waitStart) / 1000;
//      if (waitMs >= waitThreshold) {
//        samplePercent = 100;
//      } else {
//        samplePercent = 100 * waitMs / waitThreshold;
//      }
//      if (samplePercent != 0 && ((uint32_t)rand() % 100 < samplePercent)) {
//        logContentionEvent(self, waitMs, samplePercent, currentOwnerFileName, currentOwnerLineNumber);
//      }
//    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
//  if (gDvm.lockProfThreshold > 0) {
//    const StackSaveArea *saveArea;
//    const Method *meth;
//    mon->ownerLineNumber = 0;
//    if (self->interpSave.curFrame == NULL) {
//      mon->ownerFileName = "no_frame";
//    } else if ((saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame)) == NULL) {
//      mon->ownerFileName = "no_save_area";
//    } else if ((meth = saveArea->method) == NULL) {
//      mon->ownerFileName = "no_method";
//    } else {
//      uint32_t relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
//      mon->ownerFileName = (char*) dvmGetMethodSourceFile(meth);
//      if (mon->ownerFileName == NULL) {
//        mon->ownerFileName = "no_method_file";
//      } else {
//        mon->ownerLineNumber = dvmLineNumFromPC(meth, relativePc);
//      }
//    }
//  }
}

void ThrowIllegalMonitorStateException(const char* msg) {
  Thread::Current()->ThrowNewException("Ljava/lang/IllegalMonitorStateException;", msg);
}

bool Monitor::Unlock(Thread* self) {
  DCHECK(self != NULL);
  if (owner_ == self) {
    // We own the monitor, so nobody else can be in here.
    if (lock_count_ == 0) {
      owner_ = NULL;
      owner_filename_ = "unlocked";
      owner_line_number_ = 0;
      lock_.Unlock();
    } else {
      --lock_count_;
    }
  } else {
    // We don't own this, so we're not allowed to unlock it.
    // The JNI spec says that we should throw IllegalMonitorStateException
    // in this case.
    ThrowIllegalMonitorStateException("unlock of unowned monitor");
    return false;
  }
  return true;
}
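
// Note on recursion (descriptive comment, not from the original source): if
// the owning thread calls Lock() three times in a row, lock_count_ ends up at
// 2; only the final balancing Unlock(), issued once lock_count_ has dropped
// back to 0, actually releases lock_ and clears owner_.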

/*
 * Converts the given relative waiting time into an absolute time.
 */
void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec* ts) {
  int64_t endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
  clock_gettime(CLOCK_MONOTONIC, ts);
#else
  {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_sec = tv.tv_sec;
    ts->tv_nsec = tv.tv_usec * 1000;
  }
#endif
  endSec = ts->tv_sec + ms / 1000;
  if (endSec >= 0x7fffffff) {
    LOG(INFO) << "Note: end time exceeds epoch";
    endSec = 0x7ffffffe;
  }
  ts->tv_sec = endSec;
  ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;

  // Catch rollover.
  if (ts->tv_nsec >= 1000000000L) {
    ts->tv_sec++;
    ts->tv_nsec -= 1000000000L;
  }
}
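
// Worked example (illustrative, not from the original source): with a current
// time of {tv_sec = 100, tv_nsec = 900000000}, ToAbsoluteTime(1500, 250000, &ts)
// computes
//   endSec     = 100 + 1500 / 1000                  = 101
//   ts.tv_nsec = 900000000 + 500 * 1000000 + 250000 = 1400250000
// which rolls over to a final result of {tv_sec = 102, tv_nsec = 400250000}.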

int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t ms, int32_t ns) {
  struct timespec ts;
  ToAbsoluteTime(ms, ns, &ts);
#if defined(HAVE_TIMEDWAIT_MONOTONIC)
  int rc = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
#else
  int rc = pthread_cond_timedwait(cond, mutex, &ts);
#endif
  DCHECK(rc == 0 || rc == ETIMEDOUT);
  return rc;
}
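
// Usage sketch (illustrative only; the condition, mutex, and predicate below
// are made up for the example): waiting on a raw pthread condition for a
// bounded time with the helper above.
//
//   pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
//   pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
//
//   pthread_mutex_lock(&mu);
//   while (!predicate) {  // guard against spurious wakeups
//     if (dvmRelativeCondWait(&cv, &mu, 250, 0) == ETIMEDOUT) {
//       break;  // gave up after ~250ms
//     }
//   }
//   pthread_mutex_unlock(&mu);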

/*
 * Wait on a monitor until timeout, interrupt, or notification. Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "ns" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop. This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems. Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before wait()");
    return;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  // Compute absolute wakeup time, if necessary.
  struct timespec ts;
  bool timed = false;
  if (ms != 0 || ns != 0) {
    ToAbsoluteTime(ms, ns, &ts);
    timed = true;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold. We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set before clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor. Aside from that, the order of member updates is
   * not significant as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prevLockCount = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const char* savedFileName = owner_filename_;
  owner_filename_ = NULL;
  uint32_t savedLineNumber = owner_line_number_;
  owner_line_number_ = 0;

  /*
   * Update thread status. If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  if (timed) {
    self->SetState(Thread::kTimedWaiting);
  } else {
    self->SetState(Thread::kWaiting);
  }

  self->wait_mutex_->Lock();

  /*
   * Set wait_monitor_ to the monitor object we will be waiting on.
   * When wait_monitor_ is non-NULL a notifying or interrupting thread
   * must signal the thread's wait_cond_ to wake it up.
   */
  DCHECK(self->wait_monitor_ == NULL);
  self->wait_monitor_ = this;

  /*
   * Handle the case where the thread was interrupted before we called
   * wait().
   */
  bool wasInterrupted = false;
  if (self->interrupted_) {
    wasInterrupted = true;
    self->wait_monitor_ = NULL;
    self->wait_mutex_->Unlock();
    goto done;
  }

  /*
   * Release the monitor lock and wait for a notification or
   * a timeout to occur.
   */
  lock_.Unlock();

  if (!timed) {
    self->wait_cond_->Wait(*self->wait_mutex_);
  } else {
    self->wait_cond_->TimedWait(*self->wait_mutex_, ts);
  }
  if (self->interrupted_) {
    wasInterrupted = true;
  }

  self->interrupted_ = false;
  self->wait_monitor_ = NULL;
  self->wait_mutex_->Unlock();

  // Reacquire the monitor lock.
  Lock(self);

done:
  /*
   * We remove our thread from the wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not significant as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prevLockCount;
  owner_filename_ = savedFileName;
  owner_line_number_ = savedLineNumber;
  RemoveFromWaitSet(self);

  /* set self->status back to Thread::kRunnable, and self-suspend if needed */
  self->SetState(Thread::kRunnable);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->interrupted_ = false;
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
    }
  }
}

void Monitor::Notify(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before notify()");
    return;
  }
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting.
    MutexLock mu(*thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal();
      return;
    }
  }
}

void Monitor::NotifyAll(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
    return;
  }
  // Signal all threads in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    thread->Notify();
  }
}

/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
void Monitor::Inflate(Thread* self, Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->thin_lock_id_));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(obj);
  if (is_verbose_) {
    LOG(INFO) << "monitor: created monitor " << m << " for object " << obj;
  }
  // Replace the head of the list with the new monitor.
  do {
    m->next_ = gMonitorList;
  } while (android_atomic_release_cas((int32_t)m->next_, (int32_t)m, (int32_t*)(void*)&gMonitorList) != 0);
  m->Lock(self);
  // Propagate the lock state.
  uint32_t thin = *obj->GetRawLockWordAddress();
  m->lock_count_ = LW_LOCK_COUNT(thin);
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
  // Publish the updated lock word.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
}
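
// Illustrative before/after view of Inflate() (descriptive comment, not from
// the original source), with made-up field values:
//
//   before (thin): [lock count = 2][thread id = 5][hash state][shape = 0]
//   after  (fat):  [Monitor* m (low bits clear)  ][hash state][shape = 1]
//
// The recursion count moves into m->lock_count_, the hash state bits are
// carried over unchanged, and the set shape bit makes later acquisitions take
// the fat-lock path via LW_MONITOR().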

void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  struct timespec tm;
  long sleepDelayNs;
  long minSleepDelayNs = 1000000;     /* 1 millisecond */
  long maxSleepDelayNs = 1000000000;  /* 1 second */
  uint32_t thin, newThin, threadId;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  threadId = self->thin_lock_id_;
retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock. The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock. Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached. Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      /*
       * The lock is unowned. Install the thread id of the
       * calling thread into the owner field. This is the
       * common case. In performance critical code the JIT
       * will have tried this before calling out to the VM.
       */
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      if (is_verbose_) {
        LOG(INFO) << StringPrintf("monitor: (%d) spin on lock %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
      }
      // The lock is owned by another thread. Notify the VM that we are about to wait.
      self->monitor_enter_object_ = obj;
      Thread::State oldStatus = self->SetState(Thread::kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
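      // Descriptive note (not from the original source): the loop below backs
      // off gradually. The first retry only calls sched_yield(); later retries
      // sleep 1ms, 2ms, 4ms, and so on, and once the delay reaches half of the
      // one-second cap it wraps back to 1ms rather than settling into a fixed
      // long poll.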
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeeded. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap back to the minimum to avoid
              // polling only once a second for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the VM know we are no longer
          // waiting and try again.
          if (is_verbose_) {
            LOG(INFO) << "monitor: (" << threadId << ") lock " << (void*) thinp << " surprise-fattened";
          }
          self->monitor_enter_object_ = NULL;
          self->SetState(oldStatus);
          goto retry;
        }
      }
      if (is_verbose_) {
        LOG(INFO) << StringPrintf("monitor: (%d) spin on lock done %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
      }
      // We have acquired the thin lock. Let the VM know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->SetState(oldStatus);
      // Fatten the lock.
      Inflate(self, obj);
      if (is_verbose_) {
        LOG(INFO) << StringPrintf("monitor: (%d) lock %p fattened", threadId, thinp);
      }
    }
  } else {
    // The lock is a fat lock.
    if (is_verbose_) {
      LOG(INFO) << StringPrintf("monitor: (%d) locking fat lock %p (%p) %p on a %s", threadId, thinp, LW_MONITOR(*thinp), (void*)*thinp, PrettyTypeOf(obj).c_str());
    }
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}

bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  //DCHECK_EQ(self->GetState(), Thread::kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin. We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->thin_lock_id_) {
      /*
       * We are the lock owner. It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case. Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired. Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock. The JVM spec requires that we
       * throw an exception in this case.
       */
      ThrowIllegalMonitorStateException("unlock of unowned monitor");
      return false;
    }
  } else {
    /*
     * The lock is fat. We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self)) {
      // An exception has been raised. Do not fall through.
      return false;
    }
  }
  return true;
}

/*
 * Object.wait(). Also called for class init.
 */
void Monitor::Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
      ThrowIllegalMonitorStateException("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock. We need to fatten the lock
     * so 'self' can block on it. Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    if (is_verbose_) {
      LOG(INFO) << StringPrintf("monitor: (%d) lock %p fattened by wait()", self->thin_lock_id_, thinp);
    }
  }
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
}

void Monitor::Notify(Thread* self, Object* obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
      ThrowIllegalMonitorStateException("object not locked by thread before notify()");
      return;
    }
    // no-op; there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->Notify(self);
  }
}

void Monitor::NotifyAll(Thread* self, Object* obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
      ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
      return;
    }
    // no-op; there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->NotifyAll(self);
  }
}

uint32_t Monitor::GetLockOwner(uint32_t raw_lock_word) {
  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
    return LW_LOCK_OWNER(raw_lock_word);
  } else {
    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
    return owner ? owner->GetThinLockId() : 0;
  }
}

void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
  Thread::State state = thread->GetState();

  Object* object = NULL;
  uint32_t lock_owner = ThreadList::kInvalidId;
  if (state == Thread::kWaiting || state == Thread::kTimedWaiting) {
    os << " - waiting on ";
    Monitor* monitor = thread->wait_monitor_;
    if (monitor != NULL) {
      object = monitor->obj_;
    }
    lock_owner = Thread::LockOwnerFromThreadLock(object);
  } else if (state == Thread::kBlocked) {
    os << " - waiting to lock ";
    object = thread->monitor_enter_object_;
    if (object != NULL) {
      lock_owner = object->GetLockOwner();
    }
  } else {
    // We're not waiting on anything.
    return;
  }
  os << "<" << object << ">";

  // - waiting on <0x613f83d8> (a java.lang.ThreadLock) held by thread 5
  // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
  os << " (a " << PrettyTypeOf(object) << ")";

  if (lock_owner != ThreadList::kInvalidId) {
    os << " held by thread " << lock_owner;
  }

  os << "\n";
}

} // namespace art
Elliott Hughes5f791332011-09-15 17:45:30 -0700948} // namespace art