/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor.h"

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include "mutex.h"
#include "object.h"
#include "stl_util.h"
#include "thread.h"
#include "thread_list.h"

namespace art {

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked. Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Android, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998). Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat". A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation. Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock. The LSB of the
 * lock encodes its state. When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 *
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time. There may be several
 * threads waiting on it (the wait call unlocks it). One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */


/*
 * Monitor accessor. Extracts a monitor structure pointer from a fat
 * lock. Performs no error checking.
 */
#define LW_MONITOR(x) \
    ((Monitor*)((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))

/*
 * Lock recursion count field. Contains a count of the number of times
 * a lock has been recursively acquired.
 */
#define LW_LOCK_COUNT_MASK 0x1fff
#define LW_LOCK_COUNT_SHIFT 19
#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)

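/*
 * Worked example (illustrative only): a thin lock word with hash state 01,
 * owner thread id 2, and one recursive re-acquisition is
 *
 *     (1 << 19) | (2 << 3) | (0x1 << 1) | 0  ==  0x00080012
 *
 * and, assuming the LW_SHAPE/LW_LOCK_OWNER helpers from monitor.h follow the
 * layout described above:
 *
 *     LW_SHAPE(0x00080012)      == LW_SHAPE_THIN  // bit 0 is clear
 *     LW_LOCK_OWNER(0x00080012) == 2              // bits 18..3
 *     LW_LOCK_COUNT(0x00080012) == 1              // bits 31..19
 */
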
bool Monitor::is_verbose_ = false;

bool Monitor::IsVerbose() {
  return is_verbose_;
}

void Monitor::SetVerbose(bool is_verbose) {
  is_verbose_ = is_verbose;
}

Monitor::Monitor(Object* obj)
    : owner_(NULL),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      lock_("a monitor lock"),
      owner_filename_(NULL),
      owner_line_number_(0) {
}

Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);

#ifndef NDEBUG
  /* This lock is associated with an object
   * that's being swept. The only possible way
   * anyone could be holding this lock would be
   * if some JNI code locked but didn't unlock
   * the object, in which case we've got some bad
   * native code somewhere.
   */
  DCHECK(lock_.TryLock());
  lock_.Unlock();
#endif
}

/*
 * Links a thread into a monitor's wait set. The monitor lock must be
 * held by the caller of this routine.
 */
void Monitor::AppendToWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
  if (wait_set_ == NULL) {
    wait_set_ = thread;
    return;
  }

  // push_back.
  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    t = t->wait_next_;
  }
  t->wait_next_ = thread;
}

/*
 * Unlinks a thread from a monitor's wait set. The monitor lock must
 * be held by the caller of this routine.
 */
void Monitor::RemoveFromWaitSet(Thread *thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  if (wait_set_ == NULL) {
    return;
  }
  if (wait_set_ == thread) {
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    return;
  }

  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    if (t->wait_next_ == thread) {
      t->wait_next_ = thread->wait_next_;
      thread->wait_next_ = NULL;
      return;
    }
    t = t->wait_next_;
  }
}

Object* Monitor::GetObject() {
  return obj_;
}

/*
static char *logWriteInt(char *dst, int value) {
    *dst++ = EVENT_TYPE_INT;
    set4LE((uint8_t *)dst, value);
    return dst + 4;
}

static char *logWriteString(char *dst, const char *value, size_t len) {
    *dst++ = EVENT_TYPE_STRING;
    len = len < 32 ? len : 32;
    set4LE((uint8_t *)dst, len);
    dst += 4;
    memcpy(dst, value, len);
    return dst + len;
}

#define EVENT_LOG_TAG_dvm_lock_sample 20003

static void logContentionEvent(Thread *self, uint32_t waitMs, uint32_t samplePercent,
                               const char *ownerFileName, uint32_t ownerLineNumber)
{
    const StackSaveArea *saveArea;
    const Method *meth;
    uint32_t relativePc;
    char eventBuffer[174];
    const char *fileName;
    char procName[33];
    char *cp;
    size_t len;
    int fd;

    saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
    meth = saveArea->method;
    cp = eventBuffer;

    // Emit the event list length, 1 byte.
    *cp++ = 9;

    // Emit the process name, <= 37 bytes.
    fd = open("/proc/self/cmdline", O_RDONLY);
    memset(procName, 0, sizeof(procName));
    read(fd, procName, sizeof(procName) - 1);
    close(fd);
    len = strlen(procName);
    cp = logWriteString(cp, procName, len);

    // Emit the sensitive thread ("main thread") status, 5 bytes.
    bool isSensitive = false;
    if (gDvm.isSensitiveThreadHook != NULL) {
        isSensitive = gDvm.isSensitiveThreadHook();
    }
    cp = logWriteInt(cp, isSensitive);

    // Emit self thread name string, <= 37 bytes.
    std::string selfName = dvmGetThreadName(self);
    cp = logWriteString(cp, selfName.c_str(), selfName.size());

    // Emit the wait time, 5 bytes.
    cp = logWriteInt(cp, waitMs);

    // Emit the source code file name, <= 37 bytes.
    fileName = dvmGetMethodSourceFile(meth);
    if (fileName == NULL) fileName = "";
    cp = logWriteString(cp, fileName, strlen(fileName));

    // Emit the source code line number, 5 bytes.
    relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
    cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));

    // Emit the lock owner source code file name, <= 37 bytes.
    if (ownerFileName == NULL) {
        ownerFileName = "";
    } else if (strcmp(fileName, ownerFileName) == 0) {
        // Common case, so save on log space.
        ownerFileName = "-";
    }
    cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));

    // Emit the source code line number, 5 bytes.
    cp = logWriteInt(cp, ownerLineNumber);

    // Emit the sample percentage, 5 bytes.
    cp = logWriteInt(cp, samplePercent);

    assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
    android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
                       EVENT_TYPE_LIST,
                       eventBuffer,
                       (size_t)(cp - eventBuffer));
}
*/

void Monitor::Lock(Thread* self) {
//  uint32_t waitThreshold, samplePercent;
//  uint64_t waitStart, waitEnd, waitMs;

  if (owner_ == self) {
    lock_count_++;
    return;
  }
  if (!lock_.TryLock()) {
    {
      ScopedThreadStateChange tsc(self, Thread::kBlocked);
//      waitThreshold = gDvm.lockProfThreshold;
//      if (waitThreshold) {
//          waitStart = dvmGetRelativeTimeUsec();
//      }
//      const char* currentOwnerFileName = mon->ownerFileName;
//      uint32_t currentOwnerLineNumber = mon->ownerLineNumber;

      lock_.Lock();
//      if (waitThreshold) {
//          waitEnd = dvmGetRelativeTimeUsec();
//      }
    }
//    if (waitThreshold) {
//        waitMs = (waitEnd - waitStart) / 1000;
//        if (waitMs >= waitThreshold) {
//            samplePercent = 100;
//        } else {
//            samplePercent = 100 * waitMs / waitThreshold;
//        }
//        if (samplePercent != 0 && ((uint32_t)rand() % 100 < samplePercent)) {
//            logContentionEvent(self, waitMs, samplePercent, currentOwnerFileName, currentOwnerLineNumber);
//        }
//    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
//  if (gDvm.lockProfThreshold > 0) {
//      const StackSaveArea *saveArea;
//      const Method *meth;
//      mon->ownerLineNumber = 0;
//      if (self->interpSave.curFrame == NULL) {
//          mon->ownerFileName = "no_frame";
//      } else if ((saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame)) == NULL) {
//          mon->ownerFileName = "no_save_area";
//      } else if ((meth = saveArea->method) == NULL) {
//          mon->ownerFileName = "no_method";
//      } else {
//          uint32_t relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
//          mon->ownerFileName = (char*) dvmGetMethodSourceFile(meth);
//          if (mon->ownerFileName == NULL) {
//              mon->ownerFileName = "no_method_file";
//          } else {
//              mon->ownerLineNumber = dvmLineNumFromPC(meth, relativePc);
//          }
//      }
//  }
}

void ThrowIllegalMonitorStateException(const char* msg) {
  Thread::Current()->ThrowNewException("Ljava/lang/IllegalMonitorStateException;", msg);
}

bool Monitor::Unlock(Thread* self) {
  DCHECK(self != NULL);
  if (owner_ == self) {
    // We own the monitor, so nobody else can be in here.
    if (lock_count_ == 0) {
      owner_ = NULL;
      owner_filename_ = "unlocked";
      owner_line_number_ = 0;
      lock_.Unlock();
    } else {
      --lock_count_;
    }
  } else {
    // We don't own this, so we're not allowed to unlock it.
    // The JNI spec says that we should throw IllegalMonitorStateException
    // in this case.
    ThrowIllegalMonitorStateException("unlock of unowned monitor");
    return false;
  }
  return true;
}

/*
 * Converts the given relative waiting time into an absolute time.
 */
void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec *ts) {
  int64_t endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
  clock_gettime(CLOCK_MONOTONIC, ts);
#else
  {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_sec = tv.tv_sec;
    ts->tv_nsec = tv.tv_usec * 1000;
  }
#endif
  endSec = ts->tv_sec + ms / 1000;
  if (endSec >= 0x7fffffff) {
    LOG(INFO) << "Note: end time exceeds epoch";
    endSec = 0x7ffffffe;
  }
  ts->tv_sec = endSec;
  ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;

  // Catch rollover.
  if (ts->tv_nsec >= 1000000000L) {
    ts->tv_sec++;
    ts->tv_nsec -= 1000000000L;
  }
}

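/*
 * Worked example (illustrative only): with ms = 1500, ns = 2 and a current
 * time of tv_sec = 100, tv_nsec = 900000000, the code above produces
 *
 *     endSec  = 100 + 1500 / 1000             = 101
 *     tv_nsec = 900000000 + 500 * 1000000 + 2 = 1400000002
 *
 * which the rollover check then normalizes to tv_sec = 102,
 * tv_nsec = 400000002.
 */
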
int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t ms, int32_t ns) {
  struct timespec ts;
  ToAbsoluteTime(ms, ns, &ts);
#if defined(HAVE_TIMEDWAIT_MONOTONIC)
  int rc = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
#else
  int rc = pthread_cond_timedwait(cond, mutex, &ts);
#endif
  DCHECK(rc == 0 || rc == ETIMEDOUT);
  return rc;
}

/*
 * Wait on a monitor until timeout, interrupt, or notification. Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 * - blocked in wait(), wait(long), or wait(long, int) methods of Object
 * - blocked in join(), join(long), or join(long, int) methods of Thread
 * - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "ns" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop. This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems. Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before wait()");
    return;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  // Compute absolute wakeup time, if necessary.
  struct timespec ts;
  bool timed = false;
  if (ms != 0 || ns != 0) {
    ToAbsoluteTime(ms, ns, &ts);
    timed = true;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold. We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor. Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prevLockCount = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const char* savedFileName = owner_filename_;
  owner_filename_ = NULL;
  uint32_t savedLineNumber = owner_line_number_;
  owner_line_number_ = 0;

  /*
   * Update thread status. If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  if (timed) {
    self->SetState(Thread::kTimedWaiting);
  } else {
    self->SetState(Thread::kWaiting);
  }

  self->wait_mutex_->Lock();

  /*
   * Set wait_monitor_ to the monitor object we will be waiting on.
   * When wait_monitor_ is non-NULL a notifying or interrupting thread
   * must signal the thread's wait_cond_ to wake it up.
   */
  DCHECK(self->wait_monitor_ == NULL);
  self->wait_monitor_ = this;

  /*
   * Handle the case where the thread was interrupted before we called
   * wait().
   */
  bool wasInterrupted = false;
  if (self->interrupted_) {
    wasInterrupted = true;
    self->wait_monitor_ = NULL;
    self->wait_mutex_->Unlock();
    goto done;
  }

  /*
   * Release the monitor lock and wait for a notification or
   * a timeout to occur.
   */
  lock_.Unlock();

  if (!timed) {
    self->wait_cond_->Wait(*self->wait_mutex_);
  } else {
    self->wait_cond_->TimedWait(*self->wait_mutex_, ts);
  }
  if (self->interrupted_) {
    wasInterrupted = true;
  }

  self->interrupted_ = false;
  self->wait_monitor_ = NULL;
  self->wait_mutex_->Unlock();

  // Reacquire the monitor lock.
  Lock(self);

done:
  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prevLockCount;
  owner_filename_ = savedFileName;
  owner_line_number_ = savedLineNumber;
  RemoveFromWaitSet(self);

  /* set self->status back to Thread::kRunnable, and self-suspend if needed */
  self->SetState(Thread::kRunnable);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->interrupted_ = false;
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
    }
  }
}

void Monitor::Notify(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before notify()");
    return;
  }
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting.
    MutexLock mu(*thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal();
      return;
    }
  }
}

void Monitor::NotifyAll(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
    return;
  }
  // Signal all threads in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    thread->Notify();
  }
}

/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
void Monitor::Inflate(Thread* self, Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->GetThinLockId()));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(obj);
  if (is_verbose_) {
    LOG(INFO) << "monitor: thread " << self->GetThinLockId()
              << " created monitor " << m << " for object " << obj;
  }
  Runtime::Current()->GetMonitorList()->Add(m);
  m->Lock(self);
  // Propagate the lock state.
  uint32_t thin = *obj->GetRawLockWordAddress();
  m->lock_count_ = LW_LOCK_COUNT(thin);
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
  // Publish the updated lock word.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
}
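
/*
 * Illustrative sketch of the word transformation performed above, assuming
 * the example thin word 0x00080012 (owner 2, count 1, hash state 01), a
 * hash state mask covering bits 2..1, and a hypothetical Monitor* m at
 * 0x40a01230 (low three bits zero):
 *
 *     m->lock_count_ = LW_LOCK_COUNT(0x00080012)       = 1
 *     published word = 0x40a01230 | (0x1 << 1) | 0x1   = 0x40a01233
 *
 * Only the hash state bits survive in the published word; the owner and the
 * recursion count now live in the Monitor itself.
 */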

void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  struct timespec tm;
  long sleepDelayNs;
  long minSleepDelayNs = 1000000;  /* 1 millisecond */
  long maxSleepDelayNs = 1000000000;  /* 1 second */
  uint32_t thin, newThin;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  uint32_t threadId = self->GetThinLockId();
 retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock. The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock. Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached. Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      /*
       * The lock is unowned. Install the thread id of the
       * calling thread into the owner field. This is the
       * common case. In performance critical code the JIT
       * will have tried this before calling out to the VM.
       */
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      if (is_verbose_) {
        LOG(INFO) << StringPrintf("monitor: thread %d spin on lock %p (a %s) owned by %d",
            threadId, thinp, PrettyTypeOf(obj).c_str(), LW_LOCK_OWNER(thin));
      }
      // The lock is owned by another thread. Notify the VM that we are about to wait.
      self->monitor_enter_object_ = obj;
      Thread::State oldStatus = self->SetState(Thread::kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeeded. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap to avoid once a second polls for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the VM know we are no longer
          // waiting and try again.
          if (is_verbose_) {
            LOG(INFO) << "monitor: thread " << threadId
                      << " found lock " << (void*) thinp << " surprise-fattened by another thread";
          }
          self->monitor_enter_object_ = NULL;
          self->SetState(oldStatus);
          goto retry;
        }
      }
      if (is_verbose_) {
        LOG(INFO) << StringPrintf("monitor: thread %d spin on lock %p done", threadId, thinp);
      }
      // We have acquired the thin lock. Let the VM know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->SetState(oldStatus);
      // Fatten the lock.
      Inflate(self, obj);
      if (is_verbose_) {
        LOG(INFO) << StringPrintf("monitor: thread %d fattened lock %p", threadId, thinp);
      }
    }
  } else {
    // The lock is a fat lock.
    if (is_verbose_) {
      LOG(INFO) << StringPrintf("monitor: thread %d locking fat lock %p (%p) %p on a %s",
          threadId, thinp, LW_MONITOR(*thinp), (void*)*thinp, PrettyTypeOf(obj).c_str());
    }
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}

bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  //DCHECK_EQ(self->GetState(), Thread::kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin. We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->GetThinLockId()) {
      /*
       * We are the lock owner. It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case. Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired. Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock. The JVM spec requires that we
       * throw an exception in this case.
       */
      ThrowIllegalMonitorStateException("unlock of unowned monitor");
      return false;
    }
  } else {
    /*
     * The lock is fat. We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self)) {
      // An exception has been raised. Do not fall through.
      return false;
    }
  }
  return true;
}
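
/*
 * Usage sketch (illustrative only): a Java synchronized block corresponds to
 * a balanced enter/exit pair on the same object, with the exit taken on every
 * path out of the guarded region, e.g.:
 *
 *     Monitor::MonitorEnter(self, obj);  // thin-locks or inflates as needed
 *     ...                                // guarded region
 *     Monitor::MonitorExit(self, obj);   // returns false if an exception was raised
 */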

/*
 * Object.wait(). Also called for class init.
 */
void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateException("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock. We need to fatten the lock
     * so 'self' can block on it. Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    if (is_verbose_) {
      LOG(INFO) << StringPrintf("monitor: thread %d fattened lock %p by wait()", self->GetThinLockId(), thinp);
    }
  }
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
}

void Monitor::Notify(Thread* self, Object *obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateException("object not locked by thread before notify()");
      return;
    }
    // no-op; there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->Notify(self);
  }
}

void Monitor::NotifyAll(Thread* self, Object *obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
      return;
    }
    // no-op; there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->NotifyAll(self);
  }
}

uint32_t Monitor::GetLockOwner(uint32_t raw_lock_word) {
  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
    return LW_LOCK_OWNER(raw_lock_word);
  } else {
    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
    return owner ? owner->GetThinLockId() : 0;
  }
}

void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
  Thread::State state = thread->GetState();

  Object* object = NULL;
  uint32_t lock_owner = ThreadList::kInvalidId;
  if (state == Thread::kWaiting || state == Thread::kTimedWaiting) {
    os << " - waiting on ";
    Monitor* monitor = thread->wait_monitor_;
    if (monitor != NULL) {
      object = monitor->obj_;
    }
    lock_owner = Thread::LockOwnerFromThreadLock(object);
  } else if (state == Thread::kBlocked) {
    os << " - waiting to lock ";
    object = thread->monitor_enter_object_;
    if (object != NULL) {
      lock_owner = object->GetLockOwner();
    }
  } else {
    // We're not waiting on anything.
    return;
  }
  os << "<" << object << ">";

  // - waiting on <0x613f83d8> (a java.lang.ThreadLock) held by thread 5
  // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
  os << " (a " << PrettyTypeOf(object) << ")";

  if (lock_owner != ThreadList::kInvalidId) {
    os << " held by thread " << lock_owner;
  }

  os << "\n";
}

MonitorList::MonitorList() : lock_("MonitorList lock") {
}

MonitorList::~MonitorList() {
  MutexLock mu(lock_);
  STLDeleteElements(&list_);
}

void MonitorList::Add(Monitor* m) {
  MutexLock mu(lock_);
  list_.push_front(m);
}

void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) {
  MutexLock mu(lock_);
  typedef std::list<Monitor*>::iterator It; // TODO: C++0x auto
  It it = list_.begin();
  while (it != list_.end()) {
    Monitor* m = *it;
    if (!is_marked(m->GetObject(), arg)) {
      if (Monitor::IsVerbose()) {
        LOG(INFO) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
      }
      delete m;
      it = list_.erase(it);
    } else {
      ++it;
    }
  }
}

} // namespace art