/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor.h"

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include "mutex.h"
#include "object.h"
#include "stl_util.h"
#include "thread.h"
#include "thread_list.h"

namespace art {

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked. Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Android, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998). Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat". A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation. Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock. The LSB of the
 * lock encodes its state. When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 *
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time. There may be several
 * threads waiting on it (the wait call unlocks it). One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */
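
// Editor's sketch (illustrative, not part of the original file): roughly how
// the static entry points defined later in this file line up with the Java
// monitor operations they implement. The wrapper name here is an assumption;
// the real callers live in the interpreter and JNI layers. Guarded out so it
// never affects the build.
#if 0
static void ExampleSynchronizedBlock(Thread* self, Object* obj) {
  Monitor::MonitorEnter(self, obj);      // synchronized (obj) {
  Monitor::Wait(self, obj, 0, 0, true);  //   obj.wait();      (ms=0, ns=0 => no timeout)
  Monitor::NotifyAll(self, obj);         //   obj.notifyAll();
  Monitor::MonitorExit(self, obj);       // }
}
#endif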


/*
 * Monitor accessor. Extracts a monitor structure pointer from a fat
 * lock. Performs no error checking.
 */
#define LW_MONITOR(x) \
    ((Monitor*)((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))

/*
 * Lock recursion count field. Contains a count of the number of times
 * a lock has been recursively acquired.
 */
#define LW_LOCK_COUNT_MASK 0x1fff
#define LW_LOCK_COUNT_SHIFT 19
#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
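
// Worked example (editor's sketch, not in the original source): decoding a
// thin lock word with the macros above, assuming the LW_SHAPE/LW_LOCK_OWNER
// helpers and shift constants from monitor.h match the layout described in
// the comment at the top of this file.
#if 0
static void ExampleThinLockWordDecode() {
  // Thread id 5 holding the lock with two recursive re-acquisitions.
  uint32_t thin = (2u << LW_LOCK_COUNT_SHIFT) |   // lock count in bits [31..19]
                  (5u << LW_LOCK_OWNER_SHIFT);    // thread id in bits [18..3]
  CHECK_EQ(LW_SHAPE(thin), LW_SHAPE_THIN);        // LSB clear => still "thin"
  CHECK_EQ(LW_LOCK_OWNER(thin), 5u);
  CHECK_EQ(LW_LOCK_COUNT(thin), 2u);
}
#endif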

bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
bool Monitor::is_verbose_ = false;
uint32_t Monitor::lock_profiling_threshold_ = 0;

bool Monitor::IsVerbose() {
  return is_verbose_;
}

bool Monitor::IsSensitiveThread() {
  if (is_sensitive_thread_hook_ != NULL) {
    return (*is_sensitive_thread_hook_)();
  }
  return false;
}

void Monitor::Init(bool is_verbose, uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)()) {
  is_verbose_ = is_verbose;
  lock_profiling_threshold_ = lock_profiling_threshold;
  is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}

Monitor::Monitor(Object* obj)
    : owner_(NULL),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      lock_("a monitor lock"),
      owner_filename_(NULL),
      owner_line_number_(0) {
}

Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);

#ifndef NDEBUG
  /* This lock is associated with an object
   * that's being swept. The only possible way
   * anyone could be holding this lock would be
   * if some JNI code locked but didn't unlock
   * the object, in which case we've got some bad
   * native code somewhere.
   */
  DCHECK(lock_.TryLock());
  lock_.Unlock();
#endif
}

/*
 * Links a thread into a monitor's wait set. The monitor lock must be
 * held by the caller of this routine.
 */
void Monitor::AppendToWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
  if (wait_set_ == NULL) {
    wait_set_ = thread;
    return;
  }

  // push_back.
  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    t = t->wait_next_;
  }
  t->wait_next_ = thread;
}

/*
 * Unlinks a thread from a monitor's wait set. The monitor lock must
 * be held by the caller of this routine.
 */
void Monitor::RemoveFromWaitSet(Thread *thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  if (wait_set_ == NULL) {
    return;
  }
  if (wait_set_ == thread) {
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    return;
  }

  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    if (t->wait_next_ == thread) {
      t->wait_next_ = thread->wait_next_;
      thread->wait_next_ = NULL;
      return;
    }
    t = t->wait_next_;
  }
}

Object* Monitor::GetObject() {
  return obj_;
}

void Monitor::Lock(Thread* self) {
  if (owner_ == self) {
    lock_count_++;
    return;
  }

  uint64_t waitStart, waitEnd;
  if (!lock_.TryLock()) {
    uint32_t wait_threshold = lock_profiling_threshold_;
    const char* current_owner_filename = NULL;
    uint32_t current_owner_line_number = -1;
    {
      ScopedThreadStateChange tsc(self, Thread::kBlocked);
      if (wait_threshold != 0) {
        waitStart = NanoTime() / 1000;
      }
      current_owner_filename = owner_filename_;
      current_owner_line_number = owner_line_number_;

      lock_.Lock();
      if (wait_threshold != 0) {
        waitEnd = NanoTime() / 1000;
      }
    }

    if (wait_threshold != 0) {
      uint64_t wait_ms = (waitEnd - waitStart) / 1000;
      uint32_t sample_percent;
      if (wait_ms >= wait_threshold) {
        sample_percent = 100;
      } else {
        sample_percent = 100 * wait_ms / wait_threshold;
      }
      if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
        LogContentionEvent(self, wait_ms, sample_percent, current_owner_filename, current_owner_line_number);
      }
    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
  if (lock_profiling_threshold_ != 0) {
    self->GetCurrentLocation(owner_filename_, owner_line_number_);
  }
}

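// Worked example (editor's note): with lock_profiling_threshold_ set to 500
// (ms) and a contended acquisition that blocked for 125 ms, sample_percent is
// 100 * 125 / 500 == 25, so LogContentionEvent() fires for roughly a quarter
// of such acquisitions; any wait at or above the threshold is always logged.
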
void ThrowIllegalMonitorStateException(const char* msg) {
  Thread::Current()->ThrowNewException("Ljava/lang/IllegalMonitorStateException;", msg);
}

bool Monitor::Unlock(Thread* self) {
  DCHECK(self != NULL);
  if (owner_ == self) {
    // We own the monitor, so nobody else can be in here.
    if (lock_count_ == 0) {
      owner_ = NULL;
      owner_filename_ = "unlocked";
      owner_line_number_ = 0;
      lock_.Unlock();
    } else {
      --lock_count_;
    }
  } else {
    // We don't own this, so we're not allowed to unlock it.
    // The JNI spec says that we should throw IllegalMonitorStateException
    // in this case.
    ThrowIllegalMonitorStateException("unlock of unowned monitor");
    return false;
  }
  return true;
}

/*
 * Converts the given relative waiting time into an absolute time.
 */
void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec *ts) {
  int64_t endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
  clock_gettime(CLOCK_MONOTONIC, ts);
#else
  {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_sec = tv.tv_sec;
    ts->tv_nsec = tv.tv_usec * 1000;
  }
#endif
  endSec = ts->tv_sec + ms / 1000;
  if (endSec >= 0x7fffffff) {
    LOG(INFO) << "Note: end time exceeds epoch";
    endSec = 0x7ffffffe;
  }
  ts->tv_sec = endSec;
  ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;

  // Catch rollover.
  if (ts->tv_nsec >= 1000000000L) {
    ts->tv_sec++;
    ts->tv_nsec -= 1000000000L;
  }
}

int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t ms, int32_t ns) {
  struct timespec ts;
  ToAbsoluteTime(ms, ns, &ts);
#if defined(HAVE_TIMEDWAIT_MONOTONIC)
  int rc = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
#else
  int rc = pthread_cond_timedwait(cond, mutex, &ts);
#endif
  DCHECK(rc == 0 || rc == ETIMEDOUT);
  return rc;
}
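
// Usage sketch (editor's illustration, not part of the original file): a
// caller that wants to block on its own condition variable for at most
// 1.5 seconds would split the timeout into the ms/ns pair expected here.
#if 0
static void ExampleRelativeWait(pthread_cond_t* cond, pthread_mutex_t* mutex) {
  // Returns 0 if signalled before the deadline, ETIMEDOUT otherwise.
  int rc = dvmRelativeCondWait(cond, mutex, 1500, 0);
  if (rc == ETIMEDOUT) {
    LOG(INFO) << "timed out waiting for a signal";
  }
}
#endif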

/*
 * Wait on a monitor until timeout, interrupt, or notification. Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "ns" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop. This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems. Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before wait()");
    return;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  // Compute absolute wakeup time, if necessary.
  struct timespec ts;
  bool timed = false;
  if (ms != 0 || ns != 0) {
    ToAbsoluteTime(ms, ns, &ts);
    timed = true;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold. We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor. Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prevLockCount = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const char* savedFileName = owner_filename_;
  owner_filename_ = NULL;
  uint32_t savedLineNumber = owner_line_number_;
  owner_line_number_ = 0;

  /*
   * Update thread status. If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  if (timed) {
    self->SetState(Thread::kTimedWaiting);
  } else {
    self->SetState(Thread::kWaiting);
  }

  self->wait_mutex_->Lock();

  /*
   * Set wait_monitor_ to the monitor object we will be waiting on.
   * When wait_monitor_ is non-NULL a notifying or interrupting thread
   * must signal the thread's wait_cond_ to wake it up.
   */
  DCHECK(self->wait_monitor_ == NULL);
  self->wait_monitor_ = this;

  /*
   * Handle the case where the thread was interrupted before we called
   * wait().
   */
  bool wasInterrupted = false;
  if (self->interrupted_) {
    wasInterrupted = true;
    self->wait_monitor_ = NULL;
    self->wait_mutex_->Unlock();
    goto done;
  }

  /*
   * Release the monitor lock and wait for a notification or
   * a timeout to occur.
   */
  lock_.Unlock();

  if (!timed) {
    self->wait_cond_->Wait(*self->wait_mutex_);
  } else {
    self->wait_cond_->TimedWait(*self->wait_mutex_, ts);
  }
  if (self->interrupted_) {
    wasInterrupted = true;
  }

  self->interrupted_ = false;
  self->wait_monitor_ = NULL;
  self->wait_mutex_->Unlock();

  // Reacquire the monitor lock.
  Lock(self);

done:
  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prevLockCount;
  owner_filename_ = savedFileName;
  owner_line_number_ = savedLineNumber;
  RemoveFromWaitSet(self);

  /* set self->status back to Thread::kRunnable, and self-suspend if needed */
  self->SetState(Thread::kRunnable);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->interrupted_ = false;
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
    }
  }
}

void Monitor::Notify(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before notify()");
    return;
  }
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting.
    MutexLock mu(*thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal();
      return;
    }
  }
}

void Monitor::NotifyAll(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
    return;
  }
  // Signal all threads in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    thread->Notify();
  }
}

/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
void Monitor::Inflate(Thread* self, Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->GetThinLockId()));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(obj);
  if (is_verbose_) {
    LOG(INFO) << "monitor: thread " << self->GetThinLockId()
              << " created monitor " << m << " for object " << obj;
  }
  Runtime::Current()->GetMonitorList()->Add(m);
  m->Lock(self);
  // Propagate the lock state.
  uint32_t thin = *obj->GetRawLockWordAddress();
  m->lock_count_ = LW_LOCK_COUNT(thin);
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
  // Publish the updated lock word.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
}

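// Worked example (editor's sketch, not part of the original file): what
// Inflate() does to the lock word for a thread that already holds the thin
// lock with one recursive re-acquisition. Member access here is illustrative
// only; Inflate() and lock_count_ are private to Monitor.
#if 0
static void ExampleInflation(Thread* self, Object* obj) {
  uint32_t before = *obj->GetRawLockWordAddress();
  CHECK_EQ(LW_SHAPE(before), LW_SHAPE_THIN);
  CHECK_EQ(LW_LOCK_COUNT(before), 1u);

  Monitor::Inflate(self, obj);

  uint32_t after = *obj->GetRawLockWordAddress();
  CHECK_EQ(LW_SHAPE(after), LW_SHAPE_FAT);      // LSB now set; hash state bits preserved
  CHECK_EQ(LW_MONITOR(after)->lock_count_, 1);  // recursion count moved into the Monitor
}
#endif
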
void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  struct timespec tm;
  long sleepDelayNs;
  long minSleepDelayNs = 1000000;  /* 1 millisecond */
  long maxSleepDelayNs = 1000000000;  /* 1 second */
  uint32_t thin, newThin;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  uint32_t threadId = self->GetThinLockId();
retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock. The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock. Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached. Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      /*
       * The lock is unowned. Install the thread id of the
       * calling thread into the owner field. This is the
       * common case. In performance critical code the JIT
       * will have tried this before calling out to the VM.
       */
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      if (is_verbose_) {
        LOG(INFO) << StringPrintf("monitor: thread %d spin on lock %p (a %s) owned by %d",
            threadId, thinp, PrettyTypeOf(obj).c_str(), LW_LOCK_OWNER(thin));
      }
      // The lock is owned by another thread. Notify the VM that we are about to wait.
      self->monitor_enter_object_ = obj;
      Thread::State oldStatus = self->SetState(Thread::kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeeded. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap back to the minimum delay so
              // we don't get stuck polling only once a second for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the VM know we are no longer
          // waiting and try again.
          if (is_verbose_) {
            LOG(INFO) << "monitor: thread " << threadId
                      << " found lock " << (void*) thinp << " surprise-fattened by another thread";
          }
          self->monitor_enter_object_ = NULL;
          self->SetState(oldStatus);
          goto retry;
        }
      }
      if (is_verbose_) {
        LOG(INFO) << StringPrintf("monitor: thread %d spin on lock %p done", threadId, thinp);
      }
      // We have acquired the thin lock. Let the VM know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->SetState(oldStatus);
      // Fatten the lock.
      Inflate(self, obj);
      if (is_verbose_) {
        LOG(INFO) << StringPrintf("monitor: thread %d fattened lock %p", threadId, thinp);
      }
    }
  } else {
    // The lock is a fat lock.
    if (is_verbose_) {
      LOG(INFO) << StringPrintf("monitor: thread %d locking fat lock %p (%p) %p on a %s",
          threadId, thinp, LW_MONITOR(*thinp), (void*)*thinp, PrettyTypeOf(obj).c_str());
    }
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}

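// Backoff illustration (editor's note): a thread spinning on a contended thin
// lock first calls sched_yield(), then sleeps 1 ms, 2 ms, 4 ms, ... doubling
// up to a 512 ms sleep, after which the delay wraps back to 1 ms so a
// long-held lock is not polled only once per second for eternity.
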
bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  //DCHECK_EQ(self->GetState(), Thread::kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin. We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->GetThinLockId()) {
      /*
       * We are the lock owner. It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case. Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired. Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock. The JVM spec requires that we
       * throw an exception in this case.
       */
      ThrowIllegalMonitorStateException("unlock of unowned monitor");
      return false;
    }
  } else {
    /*
     * The lock is fat. We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self)) {
      // An exception has been raised. Do not fall through.
      return false;
    }
  }
  return true;
}

/*
 * Object.wait(). Also called for class init.
 */
void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateException("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock. We need to fatten the lock
     * so 'self' can block on it. Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    if (is_verbose_) {
      LOG(INFO) << StringPrintf("monitor: thread %d fattened lock %p by wait()", self->GetThinLockId(), thinp);
    }
  }
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
}

void Monitor::Notify(Thread* self, Object *obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateException("object not locked by thread before notify()");
      return;
    }
    // no-op; there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->Notify(self);
  }
}

void Monitor::NotifyAll(Thread* self, Object *obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->GetThinLockId()) {
      ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
      return;
    }
    // no-op; there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->NotifyAll(self);
  }
}

uint32_t Monitor::GetThinLockId(uint32_t raw_lock_word) {
  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
    return LW_LOCK_OWNER(raw_lock_word);
  } else {
    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
    return owner ? owner->GetThinLockId() : 0;
  }
}

void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
  Thread::State state = thread->GetState();

  Object* object = NULL;
  uint32_t lock_owner = ThreadList::kInvalidId;
  if (state == Thread::kWaiting || state == Thread::kTimedWaiting) {
    os << "  - waiting on ";
    Monitor* monitor = thread->wait_monitor_;
    if (monitor != NULL) {
      object = monitor->obj_;
    }
    lock_owner = Thread::LockOwnerFromThreadLock(object);
  } else if (state == Thread::kBlocked) {
    os << "  - waiting to lock ";
    object = thread->monitor_enter_object_;
    if (object != NULL) {
      lock_owner = object->GetThinLockId();
    }
  } else {
    // We're not waiting on anything.
    return;
  }
  os << "<" << object << ">";

  // - waiting on <0x613f83d8> (a java.lang.ThreadLock) held by thread 5
  // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
  os << " (a " << PrettyTypeOf(object) << ")";

  if (lock_owner != ThreadList::kInvalidId) {
    os << " held by thread " << lock_owner;
  }

  os << "\n";
}

MonitorList::MonitorList() : lock_("MonitorList lock") {
}

MonitorList::~MonitorList() {
  MutexLock mu(lock_);
  STLDeleteElements(&list_);
}

void MonitorList::Add(Monitor* m) {
  MutexLock mu(lock_);
  list_.push_front(m);
}

void MonitorList::SweepMonitorList(Heap::IsMarkedTester is_marked, void* arg) {
  MutexLock mu(lock_);
  typedef std::list<Monitor*>::iterator It; // TODO: C++0x auto
  It it = list_.begin();
  while (it != list_.end()) {
    Monitor* m = *it;
    if (!is_marked(m->GetObject(), arg)) {
      if (Monitor::IsVerbose()) {
        LOG(INFO) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
      }
      delete m;
      it = list_.erase(it);
    } else {
      ++it;
    }
  }
}
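
// Usage sketch (editor's illustration; the exact Heap::IsMarkedTester
// signature and the collector query below are assumptions based on how
// is_marked is invoked above).
#if 0
static bool ExampleIsMarked(const Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  return mark_sweep->IsMarked(obj);  // hypothetical reachability query
}

static void ExampleSweep(MarkSweep* mark_sweep) {
  // Called by the GC after marking: monitors whose objects are no longer
  // reachable are deleted and unlinked from the global list.
  Runtime::Current()->GetMonitorList()->SweepMonitorList(ExampleIsMarked, mark_sweep);
}
#endif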

}  // namespace art