blob: 7261bc451d291f5642c91a023f2a34f223c6447c [file] [log] [blame]
Elliott Hughes5f791332011-09-15 17:45:30 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Elliott Hughes54e7df12011-09-16 11:47:04 -070017#include "monitor.h"
Elliott Hughes5f791332011-09-15 17:45:30 -070018
19#include <errno.h>
20#include <fcntl.h>
21#include <pthread.h>
22#include <stdlib.h>
23#include <sys/time.h>
24#include <time.h>
25#include <unistd.h>
26
jeffhao33dc7712011-11-09 17:54:24 -080027#include "class_linker.h"
Elliott Hughes5f791332011-09-15 17:45:30 -070028#include "mutex.h"
29#include "object.h"
Ian Rogers6d4d9fc2011-11-30 16:24:48 -080030#include "object_utils.h"
Elliott Hughesc33a32b2011-10-11 18:18:07 -070031#include "stl_util.h"
Elliott Hughes5f791332011-09-15 17:45:30 -070032#include "thread.h"
Elliott Hughes8e4aac52011-09-26 17:03:36 -070033#include "thread_list.h"
Elliott Hughes5f791332011-09-15 17:45:30 -070034
35namespace art {
36
37/*
38 * Every Object has a monitor associated with it, but not every Object is
39 * actually locked. Even the ones that are locked do not need a
40 * full-fledged monitor until a) there is actual contention or b) wait()
41 * is called on the Object.
42 *
43 * For Android, we have implemented a scheme similar to the one described
44 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
45 * (ACM 1998). Things are even easier for us, though, because we have
46 * a full 32 bits to work with.
47 *
48 * The two states of an Object's lock are referred to as "thin" and
49 * "fat". A lock may transition from the "thin" state to the "fat"
50 * state and this transition is referred to as inflation. Once a lock
51 * has been inflated it remains in the "fat" state indefinitely.
52 *
53 * The lock value itself is stored in Object.lock. The LSB of the
54 * lock encodes its state. When cleared, the lock is in the "thin"
55 * state and its bits are formatted as follows:
56 *
57 * [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
58 * lock count thread id hash state 0
59 *
60 * When set, the lock is in the "fat" state and its bits are formatted
61 * as follows:
62 *
63 * [31 ---- 3] [2 ---- 1] [0]
64 * pointer hash state 1
65 *
66 * For an in-depth description of the mechanics of thin-vs-fat locking,
67 * read the paper referred to above.
Elliott Hughes54e7df12011-09-16 11:47:04 -070068 *
Elliott Hughes5f791332011-09-15 17:45:30 -070069 * Monitors provide:
70 * - mutually exclusive access to resources
71 * - a way for multiple threads to wait for notification
72 *
73 * In effect, they fill the role of both mutexes and condition variables.
74 *
75 * Only one thread can own the monitor at any time. There may be several
76 * threads waiting on it (the wait call unlocks it). One or more waiting
77 * threads may be getting interrupted or notified at any given time.
78 *
79 * TODO: the various members of monitor are not SMP-safe.
80 */
Elliott Hughes54e7df12011-09-16 11:47:04 -070081
82
/*
 * Monitor accessor. Extracts a monitor structure pointer from a fat
 * lock. Performs no error checking.
 */
// Clears the hash-state bits and the shape bit (LSB), leaving the
// aligned Monitor* that was stored in the fat lock word.
#define LW_MONITOR(x) \
  ((Monitor*)((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))

/*
 * Lock recursion count field. Contains a count of the number of times
 * a lock has been recursively acquired.
 */
// The count occupies the top bits of a thin lock word (see the layout
// diagram at the top of this file: bits [31..19]).
#define LW_LOCK_COUNT_MASK 0x1fff
#define LW_LOCK_COUNT_SHIFT 19
#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
97
// Runtime-installed callback used to decide whether the current thread is
// "sensitive" for lock-contention purposes; NULL until Monitor::Init runs.
bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
// Contention-time threshold (set by Monitor::Init) above which lock waits
// are always logged; 0 disables lock profiling entirely.
uint32_t Monitor::lock_profiling_threshold_ = 0;
Elliott Hughes32d6e1e2011-10-11 14:47:44 -0700100
Elliott Hughesfc861622011-10-17 17:57:47 -0700101bool Monitor::IsSensitiveThread() {
102 if (is_sensitive_thread_hook_ != NULL) {
103 return (*is_sensitive_thread_hook_)();
104 }
105 return false;
106}
107
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -0800108void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)()) {
Elliott Hughesfc861622011-10-17 17:57:47 -0700109 lock_profiling_threshold_ = lock_profiling_threshold;
110 is_sensitive_thread_hook_ = is_sensitive_thread_hook;
Elliott Hughes32d6e1e2011-10-11 14:47:44 -0700111}
112
// Constructs an (unowned, unlocked) fat monitor for 'obj'. The caller is
// expected to Lock() it and publish it into the object's lock word (see
// Inflate).
Monitor::Monitor(Object* obj)
    : owner_(NULL),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      lock_("a monitor lock"),
      locking_method_(NULL),
      locking_pc_(0) {
}
122
Monitor::~Monitor() {
  // A monitor is only ever destroyed after it has been published into its
  // object's lock word, so the object must still reference a fat lock.
  DCHECK(obj_ != NULL);
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);
}
127
128/*
129 * Links a thread into a monitor's wait set. The monitor lock must be
130 * held by the caller of this routine.
131 */
132void Monitor::AppendToWaitSet(Thread* thread) {
133 DCHECK(owner_ == Thread::Current());
134 DCHECK(thread != NULL);
Elliott Hughesdc33ad52011-09-16 19:46:51 -0700135 DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
Elliott Hughes5f791332011-09-15 17:45:30 -0700136 if (wait_set_ == NULL) {
137 wait_set_ = thread;
138 return;
139 }
140
141 // push_back.
142 Thread* t = wait_set_;
143 while (t->wait_next_ != NULL) {
144 t = t->wait_next_;
145 }
146 t->wait_next_ = thread;
147}
148
149/*
150 * Unlinks a thread from a monitor's wait set. The monitor lock must
151 * be held by the caller of this routine.
152 */
153void Monitor::RemoveFromWaitSet(Thread *thread) {
154 DCHECK(owner_ == Thread::Current());
155 DCHECK(thread != NULL);
156 if (wait_set_ == NULL) {
157 return;
158 }
159 if (wait_set_ == thread) {
160 wait_set_ = thread->wait_next_;
161 thread->wait_next_ = NULL;
162 return;
163 }
164
165 Thread* t = wait_set_;
166 while (t->wait_next_ != NULL) {
167 if (t->wait_next_ == thread) {
168 t->wait_next_ = thread->wait_next_;
169 thread->wait_next_ = NULL;
170 return;
171 }
172 t = t->wait_next_;
173 }
174}
175
// Returns the object this monitor guards (non-owning pointer).
Object* Monitor::GetObject() {
  return obj_;
}
179
// Acquires the fat monitor for 'self', blocking if another thread holds it.
// Recursive acquisition only bumps lock_count_. When lock profiling is
// enabled (lock_profiling_threshold_ != 0), contended acquisitions are
// timed and a sampled contention event may be logged.
void Monitor::Lock(Thread* self) {
  if (owner_ == self) {
    // Recursive lock by the current owner: just count it.
    lock_count_++;
    return;
  }

  // Only written/read under 'wait_threshold != 0'; units are microseconds.
  uint64_t waitStart, waitEnd;
  if (!lock_.TryLock()) {
    uint32_t wait_threshold = lock_profiling_threshold_;
    const Method* current_locking_method = NULL;
    uintptr_t current_locking_pc = 0;
    {
      // Report ourselves blocked while we wait for the contended mutex.
      ScopedThreadStateChange tsc(self, Thread::kBlocked);
      if (wait_threshold != 0) {
        waitStart = NanoTime() / 1000;
      }
      // Snapshot where the current holder acquired the lock, for the
      // contention log. NOTE(review): read without holding lock_, so this
      // is best-effort and may race with the holder releasing.
      current_locking_method = locking_method_;
      current_locking_pc = locking_pc_;

      lock_.Lock();
      if (wait_threshold != 0) {
        waitEnd = NanoTime() / 1000;
      }
    }

    if (wait_threshold != 0) {
      uint64_t wait_ms = (waitEnd - waitStart) / 1000;
      // Sample probability scales linearly with how close the wait came
      // to the threshold; at or above the threshold we always log.
      uint32_t sample_percent;
      if (wait_ms >= wait_threshold) {
        sample_percent = 100;
      } else {
        sample_percent = 100 * wait_ms / wait_threshold;
      }
      if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
        const char* current_locking_filename;
        uint32_t current_locking_line_number;
        TranslateLocation(current_locking_method, current_locking_pc,
                          current_locking_filename, current_locking_line_number);
        LogContentionEvent(self, wait_ms, sample_percent, current_locking_filename, current_locking_line_number);
      }
    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
  if (lock_profiling_threshold_ != 0) {
    locking_method_ = self->GetCurrentMethod(&locking_pc_);
  }
}
231
// Throws java.lang.IllegalMonitorStateException on the current thread with
// a printf-style formatted message.
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
    __attribute__((format(printf, 1, 2)));

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  Thread::Current()->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
  va_end(args);
}
241
Elliott Hughesd4237412012-02-21 11:24:45 -0800242static std::string ThreadToString(Thread* thread) {
243 if (thread == NULL) {
244 return "NULL";
245 }
246 std::ostringstream oss;
247 // TODO: alternatively, we could just return the thread's name.
248 oss << *thread;
249 return oss.str();
250}
251
Elliott Hughesffb465f2012-03-01 18:46:05 -0800252void Monitor::FailedUnlock(Object* o, Thread* expected_owner, Thread* found_owner,
253 Monitor* monitor) {
254 Thread* current_owner = NULL;
255 std::string current_owner_string;
256 std::string expected_owner_string;
257 std::string found_owner_string;
258 {
259 // TODO: isn't this too late to prevent threads from disappearing?
260 // Acquire thread list lock so threads won't disappear from under us.
261 ScopedThreadListLock thread_list_lock;
262 // Re-read owner now that we hold lock.
263 current_owner = (monitor != NULL) ? monitor->owner_ : NULL;
264 // Get short descriptions of the threads involved.
265 current_owner_string = ThreadToString(current_owner);
266 expected_owner_string = ThreadToString(expected_owner);
267 found_owner_string = ThreadToString(found_owner);
268 }
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800269 if (current_owner == NULL) {
270 if (found_owner == NULL) {
271 ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
272 " on thread '%s'",
Elliott Hughesffb465f2012-03-01 18:46:05 -0800273 PrettyTypeOf(o).c_str(),
274 expected_owner_string.c_str());
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800275 } else {
276 // Race: the original read found an owner but now there is none
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800277 ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
278 " (where now the monitor appears unowned) on thread '%s'",
Elliott Hughesffb465f2012-03-01 18:46:05 -0800279 found_owner_string.c_str(),
280 PrettyTypeOf(o).c_str(),
281 expected_owner_string.c_str());
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800282 }
283 } else {
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800284 if (found_owner == NULL) {
285 // Race: originally there was no owner, there is now
286 ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
287 " (originally believed to be unowned) on thread '%s'",
Elliott Hughesffb465f2012-03-01 18:46:05 -0800288 current_owner_string.c_str(),
289 PrettyTypeOf(o).c_str(),
290 expected_owner_string.c_str());
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800291 } else {
292 if (found_owner != current_owner) {
293 // Race: originally found and current owner have changed
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800294 ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
295 " owned by '%s') on object of type '%s' on thread '%s'",
Elliott Hughesffb465f2012-03-01 18:46:05 -0800296 found_owner_string.c_str(),
297 current_owner_string.c_str(),
298 PrettyTypeOf(o).c_str(),
299 expected_owner_string.c_str());
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800300 } else {
301 ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
302 " on thread '%s",
Elliott Hughesffb465f2012-03-01 18:46:05 -0800303 current_owner_string.c_str(),
304 PrettyTypeOf(o).c_str(),
305 expected_owner_string.c_str());
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800306 }
307 }
308 }
Elliott Hughes5f791332011-09-15 17:45:30 -0700309}
310
311bool Monitor::Unlock(Thread* self) {
312 DCHECK(self != NULL);
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800313 Thread* owner = owner_;
314 if (owner == self) {
Elliott Hughes5f791332011-09-15 17:45:30 -0700315 // We own the monitor, so nobody else can be in here.
316 if (lock_count_ == 0) {
317 owner_ = NULL;
jeffhao33dc7712011-11-09 17:54:24 -0800318 locking_method_ = NULL;
319 locking_pc_ = 0;
Elliott Hughes5f791332011-09-15 17:45:30 -0700320 lock_.Unlock();
321 } else {
322 --lock_count_;
323 }
324 } else {
325 // We don't own this, so we're not allowed to unlock it.
326 // The JNI spec says that we should throw IllegalMonitorStateException
327 // in this case.
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800328 FailedUnlock(obj_, self, owner, this);
Elliott Hughes5f791332011-09-15 17:45:30 -0700329 return false;
330 }
331 return true;
332}
333
334/*
335 * Converts the given relative waiting time into an absolute time.
336 */
Elliott Hughesb8d2eeb2012-02-29 16:44:41 -0800337static void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec *ts) {
Elliott Hughes5f791332011-09-15 17:45:30 -0700338 int64_t endSec;
339
340#ifdef HAVE_TIMEDWAIT_MONOTONIC
341 clock_gettime(CLOCK_MONOTONIC, ts);
342#else
343 {
344 struct timeval tv;
345 gettimeofday(&tv, NULL);
346 ts->tv_sec = tv.tv_sec;
347 ts->tv_nsec = tv.tv_usec * 1000;
348 }
349#endif
350 endSec = ts->tv_sec + ms / 1000;
351 if (endSec >= 0x7fffffff) {
352 LOG(INFO) << "Note: end time exceeds epoch";
353 endSec = 0x7ffffffe;
354 }
355 ts->tv_sec = endSec;
356 ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;
357
358 // Catch rollover.
359 if (ts->tv_nsec >= 1000000000L) {
360 ts->tv_sec++;
361 ts->tv_nsec -= 1000000000L;
362 }
363}
364
/*
 * Wait on a monitor until timeout, interrupt, or notification. Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following are true:
 * - blocked in wait(), wait(long), or wait(long, int) methods of Object
 * - blocked in join(), join(long), or join(long, int) methods of Thread
 * - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "ns" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop. This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems. Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
    return;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  // Compute absolute wakeup time, if necessary.
  struct timespec ts;
  bool timed = false;
  if (ms != 0 || ns != 0) {
    ToAbsoluteTime(ms, ns, &ts);
    timed = true;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold. We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor. Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  // Save the full recursion state; it is restored at 'done'.
  int prevLockCount = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const Method* savedMethod = locking_method_;
  locking_method_ = NULL;
  uintptr_t savedPc = locking_pc_;
  locking_pc_ = 0;

  /*
   * Update thread status. If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  if (timed) {
    self->SetState(Thread::kTimedWaiting);
  } else {
    self->SetState(Thread::kWaiting);
  }

  self->wait_mutex_->Lock();

  /*
   * Set wait_monitor_ to the monitor object we will be waiting on.
   * When wait_monitor_ is non-NULL a notifying or interrupting thread
   * must signal the thread's wait_cond_ to wake it up.
   */
  DCHECK(self->wait_monitor_ == NULL);
  self->wait_monitor_ = this;

  /*
   * Handle the case where the thread was interrupted before we called
   * wait().
   */
  bool wasInterrupted = false;
  if (self->interrupted_) {
    wasInterrupted = true;
    self->wait_monitor_ = NULL;
    self->wait_mutex_->Unlock();
    goto done;
  }

  /*
   * Release the monitor lock and wait for a notification or
   * a timeout to occur.
   */
  lock_.Unlock();

  if (!timed) {
    self->wait_cond_->Wait(*self->wait_mutex_);
  } else {
    self->wait_cond_->TimedWait(*self->wait_mutex_, ts);
  }
  if (self->interrupted_) {
    wasInterrupted = true;
  }

  self->interrupted_ = false;
  self->wait_monitor_ = NULL;
  self->wait_mutex_->Unlock();

  // Reacquire the monitor lock.
  Lock(self);

done:
  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prevLockCount;
  locking_method_ = savedMethod;
  locking_pc_ = savedPc;
  RemoveFromWaitSet(self);

  /* set self->status back to Thread::kRunnable, and self-suspend if needed */
  self->SetState(Thread::kRunnable);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->interrupted_ = false;
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
    }
  }
}
516
// Wakes the first thread in the wait set that is still actually waiting.
// Threads popped from the set that already stopped waiting (wait_monitor_
// cleared) are skipped. The caller must own the monitor.
void Monitor::Notify(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
    return;
  }
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting.
    // Note: the MutexLock is scoped to this loop iteration, so the
    // per-thread wait mutex is released before examining the next waiter.
    MutexLock mu(*thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal();
      return;
    }
  }
}
539
540void Monitor::NotifyAll(Thread* self) {
541 DCHECK(self != NULL);
542
543 // Make sure that we hold the lock.
544 if (owner_ != self) {
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800545 ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
Elliott Hughes5f791332011-09-15 17:45:30 -0700546 return;
547 }
548 // Signal all threads in the wait set.
549 while (wait_set_ != NULL) {
550 Thread* thread = wait_set_;
551 wait_set_ = thread->wait_next_;
552 thread->wait_next_ = NULL;
553 thread->Notify();
554 }
555}
556
/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
void Monitor::Inflate(Thread* self, Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->GetThinLockId()));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(obj);
  VLOG(monitor) << "monitor: thread " << self->GetThinLockId()
                << " created monitor " << m << " for object " << obj;
  // Register with the runtime's monitor list (ownership transfers there).
  Runtime::Current()->GetMonitorList()->Add(m);
  m->Lock(self);
  // Propagate the lock state.
  uint32_t thin = *obj->GetRawLockWordAddress();
  m->lock_count_ = LW_LOCK_COUNT(thin);
  // Keep only the hash-state bits, then store the monitor pointer and
  // the fat-shape bit in their place.
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
  // Publish the updated lock word (release so the monitor's state is
  // visible before other threads can observe the fat lock).
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
}
581
// Implements monitor-enter on 'obj' for 'self'. Thin locks are acquired by
// CAS (recursion bumps the count field); on contention we spin with
// exponential backoff until the lock is free or fattened, and fatten it
// ourselves after a contended acquire. Fat locks delegate to Monitor::Lock.
void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  struct timespec tm;
  long sleepDelayNs;
  long minSleepDelayNs = 1000000; /* 1 millisecond */
  long maxSleepDelayNs = 1000000000; /* 1 second */
  uint32_t thin, newThin;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  uint32_t threadId = self->GetThinLockId();
retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock. The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock. Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached. Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      /*
       * The lock is unowned. Install the thread id of the
       * calling thread into the owner field. This is the
       * common case. In performance critical code the JIT
       * will have tried this before calling out to the VM.
       */
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p (a %s) owned by %d",
          threadId, thinp, PrettyTypeOf(obj).c_str(), LW_LOCK_OWNER(thin));
      // The lock is owned by another thread. Notify the VM that we are about to wait.
      self->monitor_enter_object_ = obj;
      Thread::State oldStatus = self->SetState(Thread::kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeed. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap to avoid once a second polls for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the VM know we are no longer
          // waiting and try again.
          VLOG(monitor) << "monitor: thread " << threadId
              << " found lock " << (void*) thinp << " surprise-fattened by another thread";
          self->monitor_enter_object_ = NULL;
          self->SetState(oldStatus);
          goto retry;
        }
      }
      VLOG(monitor) << StringPrintf("monitor: thread %d spin on lock %p done", threadId, thinp);
      // We have acquired the thin lock. Let the VM know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->SetState(oldStatus);
      // Fatten the lock.
      Inflate(self, obj);
      VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p", threadId, thinp);
    }
  } else {
    // The lock is a fat lock.
    VLOG(monitor) << StringPrintf("monitor: thread %d locking fat lock %p (%p) %p on a %s",
        threadId, thinp, LW_MONITOR(*thinp), (void*)*thinp, PrettyTypeOf(obj).c_str());
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}
690
// Implements monitor-exit on 'obj' for 'self'. Returns false (with an
// IllegalMonitorStateException pending) if 'self' does not own the lock.
bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  //DCHECK_EQ(self->GetState(), Thread::kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin. We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->GetThinLockId()) {
      /*
       * We are the lock owner. It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case. Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired. Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock. The JVM spec requires that we
       * throw an exception in this case.
       */
      FailedUnlock(obj, self, NULL, NULL);
      return false;
    }
  } else {
    /*
     * The lock is fat. We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self)) {
      // An exception has been raised. Do not fall through.
      return false;
    }
  }
  return true;
}
749
/*
 * Object.wait(). Also called for class init.
 */
// Static entry point: fattens a thin lock first (wait requires a full
// monitor), then delegates to the member Wait().
void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock. We need to fatten the lock
     * so 'self' can block on it. Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    VLOG(monitor) << StringPrintf("monitor: thread %d fattened lock %p by wait()", self->GetThinLockId(), thinp);
  }
  // Re-read the (now fat) lock word and wait on its monitor.
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
}
775
776void Monitor::Notify(Thread* self, Object *obj) {
777 uint32_t thin = *obj->GetRawLockWordAddress();
778
779 // If the lock is still thin, there aren't any waiters;
780 // waiting on an object forces lock fattening.
781 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
782 // Make sure that 'self' holds the lock.
Elliott Hughesf8e01272011-10-17 11:29:05 -0700783 if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800784 ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
Elliott Hughes5f791332011-09-15 17:45:30 -0700785 return;
786 }
787 // no-op; there are no waiters to notify.
788 } else {
789 // It's a fat lock.
790 LW_MONITOR(thin)->Notify(self);
791 }
792}
793
794void Monitor::NotifyAll(Thread* self, Object *obj) {
795 uint32_t thin = *obj->GetRawLockWordAddress();
796
797 // If the lock is still thin, there aren't any waiters;
798 // waiting on an object forces lock fattening.
799 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
800 // Make sure that 'self' holds the lock.
Elliott Hughesf8e01272011-10-17 11:29:05 -0700801 if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800802 ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
Elliott Hughes5f791332011-09-15 17:45:30 -0700803 return;
804 }
805 // no-op; there are no waiters to notify.
806 } else {
807 // It's a fat lock.
808 LW_MONITOR(thin)->NotifyAll(self);
809 }
810}
811
Brian Carlstrom24a3c2e2011-10-17 18:07:52 -0700812uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) {
Elliott Hughes5f791332011-09-15 17:45:30 -0700813 if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
814 return LW_LOCK_OWNER(raw_lock_word);
815 } else {
816 Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
817 return owner ? owner->GetThinLockId() : 0;
818 }
819}
820
Elliott Hughes8e4aac52011-09-26 17:03:36 -0700821void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
822 Thread::State state = thread->GetState();
823
824 Object* object = NULL;
825 uint32_t lock_owner = ThreadList::kInvalidId;
826 if (state == Thread::kWaiting || state == Thread::kTimedWaiting) {
827 os << " - waiting on ";
828 Monitor* monitor = thread->wait_monitor_;
829 if (monitor != NULL) {
830 object = monitor->obj_;
831 }
832 lock_owner = Thread::LockOwnerFromThreadLock(object);
833 } else if (state == Thread::kBlocked) {
834 os << " - waiting to lock ";
835 object = thread->monitor_enter_object_;
836 if (object != NULL) {
Brian Carlstrom24a3c2e2011-10-17 18:07:52 -0700837 lock_owner = object->GetThinLockId();
Elliott Hughes8e4aac52011-09-26 17:03:36 -0700838 }
839 } else {
840 // We're not waiting on anything.
841 return;
842 }
843 os << "<" << object << ">";
844
845 // - waiting on <0x613f83d8> (a java.lang.ThreadLock) held by thread 5
846 // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
847 os << " (a " << PrettyTypeOf(object) << ")";
848
849 if (lock_owner != ThreadList::kInvalidId) {
850 os << " held by thread " << lock_owner;
851 }
852
853 os << "\n";
854}
855
jeffhao33dc7712011-11-09 17:54:24 -0800856void Monitor::TranslateLocation(const Method* method, uint32_t pc,
857 const char*& source_file, uint32_t& line_number) const {
858 // If method is null, location is unknown
859 if (method == NULL) {
Elliott Hughes12c51e32012-01-17 20:25:05 -0800860 source_file = "";
jeffhao33dc7712011-11-09 17:54:24 -0800861 line_number = 0;
862 return;
863 }
Ian Rogers6d4d9fc2011-11-30 16:24:48 -0800864 MethodHelper mh(method);
865 source_file = mh.GetDeclaringClassSourceFile();
Elliott Hughes12c51e32012-01-17 20:25:05 -0800866 if (source_file == NULL) {
867 source_file = "";
868 }
Ian Rogers6d4d9fc2011-11-30 16:24:48 -0800869 line_number = mh.GetLineNumFromNativePC(pc);
jeffhao33dc7712011-11-09 17:54:24 -0800870}
871
Elliott Hughesc33a32b2011-10-11 18:18:07 -0700872MonitorList::MonitorList() : lock_("MonitorList lock") {
873}
874
// Frees every remaining monitor. The lock is taken to synchronize with any
// concurrent Add/Sweep callers during shutdown.
MonitorList::~MonitorList() {
  MutexLock mu(lock_);
  STLDeleteElements(&list_);
}
879
880void MonitorList::Add(Monitor* m) {
881 MutexLock mu(lock_);
882 list_.push_front(m);
883}
884
885void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) {
886 MutexLock mu(lock_);
887 typedef std::list<Monitor*>::iterator It; // TODO: C++0x auto
888 It it = list_.begin();
889 while (it != list_.end()) {
890 Monitor* m = *it;
891 if (!is_marked(m->GetObject(), arg)) {
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -0800892 VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
Elliott Hughesc33a32b2011-10-11 18:18:07 -0700893 delete m;
894 it = list_.erase(it);
895 } else {
896 ++it;
897 }
898 }
899}
900
Elliott Hughes5f791332011-09-15 17:45:30 -0700901} // namespace art