blob: 4e71e456b45285dded688450337fc08c669bfa92 [file] [log] [blame]
Elliott Hughes5f791332011-09-15 17:45:30 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Elliott Hughes54e7df12011-09-16 11:47:04 -070017#include "monitor.h"
Elliott Hughes5f791332011-09-15 17:45:30 -070018
19#include <errno.h>
20#include <fcntl.h>
21#include <pthread.h>
22#include <stdlib.h>
23#include <sys/time.h>
24#include <time.h>
25#include <unistd.h>
26
27#include "mutex.h"
28#include "object.h"
29#include "thread.h"
Elliott Hughes8e4aac52011-09-26 17:03:36 -070030#include "thread_list.h"
Elliott Hughes5f791332011-09-15 17:45:30 -070031
32namespace art {
33
34/*
35 * Every Object has a monitor associated with it, but not every Object is
36 * actually locked. Even the ones that are locked do not need a
37 * full-fledged monitor until a) there is actual contention or b) wait()
38 * is called on the Object.
39 *
40 * For Android, we have implemented a scheme similar to the one described
41 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
42 * (ACM 1998). Things are even easier for us, though, because we have
43 * a full 32 bits to work with.
44 *
45 * The two states of an Object's lock are referred to as "thin" and
46 * "fat". A lock may transition from the "thin" state to the "fat"
47 * state and this transition is referred to as inflation. Once a lock
48 * has been inflated it remains in the "fat" state indefinitely.
49 *
50 * The lock value itself is stored in Object.lock. The LSB of the
51 * lock encodes its state. When cleared, the lock is in the "thin"
52 * state and its bits are formatted as follows:
53 *
54 * [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
55 * lock count thread id hash state 0
56 *
57 * When set, the lock is in the "fat" state and its bits are formatted
58 * as follows:
59 *
60 * [31 ---- 3] [2 ---- 1] [0]
61 * pointer hash state 1
62 *
63 * For an in-depth description of the mechanics of thin-vs-fat locking,
64 * read the paper referred to above.
Elliott Hughes54e7df12011-09-16 11:47:04 -070065 *
Elliott Hughes5f791332011-09-15 17:45:30 -070066 * Monitors provide:
67 * - mutually exclusive access to resources
68 * - a way for multiple threads to wait for notification
69 *
70 * In effect, they fill the role of both mutexes and condition variables.
71 *
72 * Only one thread can own the monitor at any time. There may be several
73 * threads waiting on it (the wait call unlocks it). One or more waiting
74 * threads may be getting interrupted or notified at any given time.
75 *
76 * TODO: the various members of monitor are not SMP-safe.
77 */
Elliott Hughes54e7df12011-09-16 11:47:04 -070078
79
/*
 * Monitor accessor. Extracts a monitor structure pointer from a fat
 * lock. Performs no error checking: callers must already know the
 * lock word has the "fat" shape (LSB set).
 */
#define LW_MONITOR(x) \
    ((Monitor*)((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))

/*
 * Lock recursion count field. Contains a count of the number of times
 * a lock has been recursively acquired. Occupies bits [31..19] of a
 * thin lock word (see the lock-word layout diagram above).
 */
#define LW_LOCK_COUNT_MASK 0x1fff
#define LW_LOCK_COUNT_SHIFT 19
#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
/*
 * Creates a fat monitor for 'obj'. The monitor starts out unowned and
 * with an empty wait set; the caller (see Inflate) is expected to
 * Lock() it and copy over any recursion count from the thin lock word.
 */
Monitor::Monitor(Object* obj)
    : owner_(NULL),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      lock_("a monitor lock"),
      next_(NULL),
      owner_filename_(NULL),
      owner_line_number_(0) {
}
105
106Monitor::~Monitor() {
107 DCHECK(obj_ != NULL);
108 DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);
109
110#ifndef NDEBUG
111 /* This lock is associated with an object
112 * that's being swept. The only possible way
113 * anyone could be holding this lock would be
114 * if some JNI code locked but didn't unlock
115 * the object, in which case we've got some bad
116 * native code somewhere.
117 */
118 DCHECK(lock_.TryLock());
119 lock_.Unlock();
120#endif
121}
122
123/*
124 * Links a thread into a monitor's wait set. The monitor lock must be
125 * held by the caller of this routine.
126 */
127void Monitor::AppendToWaitSet(Thread* thread) {
128 DCHECK(owner_ == Thread::Current());
129 DCHECK(thread != NULL);
Elliott Hughesdc33ad52011-09-16 19:46:51 -0700130 DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
Elliott Hughes5f791332011-09-15 17:45:30 -0700131 if (wait_set_ == NULL) {
132 wait_set_ = thread;
133 return;
134 }
135
136 // push_back.
137 Thread* t = wait_set_;
138 while (t->wait_next_ != NULL) {
139 t = t->wait_next_;
140 }
141 t->wait_next_ = thread;
142}
143
144/*
145 * Unlinks a thread from a monitor's wait set. The monitor lock must
146 * be held by the caller of this routine.
147 */
148void Monitor::RemoveFromWaitSet(Thread *thread) {
149 DCHECK(owner_ == Thread::Current());
150 DCHECK(thread != NULL);
151 if (wait_set_ == NULL) {
152 return;
153 }
154 if (wait_set_ == thread) {
155 wait_set_ = thread->wait_next_;
156 thread->wait_next_ = NULL;
157 return;
158 }
159
160 Thread* t = wait_set_;
161 while (t->wait_next_ != NULL) {
162 if (t->wait_next_ == thread) {
163 t->wait_next_ = thread->wait_next_;
164 thread->wait_next_ = NULL;
165 return;
166 }
167 t = t->wait_next_;
168 }
169}
170
171// Global list of all monitors. Used for cleanup.
172static Monitor* gMonitorList = NULL;
173
174void Monitor::FreeMonitorList() {
175 Monitor* m = gMonitorList;
176 while (m != NULL) {
177 Monitor* next = m->next_;
178 delete m;
179 m = next;
180 }
181}
182
/*
 * Frees monitor objects belonging to unmarked objects.
 *
 * Not yet ported from Dalvik: currently aborts via UNIMPLEMENTED.
 * The #if 0 block below is the original Dalvik implementation, kept
 * for reference (note it uses the old field names obj/next rather
 * than obj_/next_).
 */
static void SweepMonitorList(Monitor** mon, bool (isUnmarkedObject)(void*)) {
  UNIMPLEMENTED(FATAL);
#if 0
  Monitor handle;
  Monitor *curr;

  DCHECK(mon != NULL);
  DCHECK(isUnmarkedObject != NULL);
  Monitor* prev = &handle;
  prev->next = curr = *mon;
  while (curr != NULL) {
    Object* obj = curr->obj;
    if ((*isUnmarkedObject)(obj) != 0) {
      prev->next = curr->next;
      delete curr;
      curr = prev->next;
    } else {
      prev = curr;
      curr = curr->next;
    }
  }
  *mon = handle.next;
#endif
}
210
// Sweeps the global monitor list, freeing monitors whose associated
// objects the GC reports as unmarked (see file-local SweepMonitorList).
void Monitor::SweepMonitorList(bool (isUnmarkedObject)(void*)) {
  ::art::SweepMonitorList(&gMonitorList, isUnmarkedObject);
}
214
215/*
216static char *logWriteInt(char *dst, int value) {
217 *dst++ = EVENT_TYPE_INT;
218 set4LE((uint8_t *)dst, value);
219 return dst + 4;
220}
221
222static char *logWriteString(char *dst, const char *value, size_t len) {
223 *dst++ = EVENT_TYPE_STRING;
224 len = len < 32 ? len : 32;
225 set4LE((uint8_t *)dst, len);
226 dst += 4;
227 memcpy(dst, value, len);
228 return dst + len;
229}
230
231#define EVENT_LOG_TAG_dvm_lock_sample 20003
232
233static void logContentionEvent(Thread *self, uint32_t waitMs, uint32_t samplePercent,
234 const char *ownerFileName, uint32_t ownerLineNumber)
235{
236 const StackSaveArea *saveArea;
237 const Method *meth;
238 uint32_t relativePc;
239 char eventBuffer[174];
240 const char *fileName;
241 char procName[33];
242 char *cp;
243 size_t len;
244 int fd;
245
246 saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
247 meth = saveArea->method;
248 cp = eventBuffer;
249
250 // Emit the event list length, 1 byte.
251 *cp++ = 9;
252
253 // Emit the process name, <= 37 bytes.
254 fd = open("/proc/self/cmdline", O_RDONLY);
255 memset(procName, 0, sizeof(procName));
256 read(fd, procName, sizeof(procName) - 1);
257 close(fd);
258 len = strlen(procName);
259 cp = logWriteString(cp, procName, len);
260
261 // Emit the sensitive thread ("main thread") status, 5 bytes.
262 bool isSensitive = false;
263 if (gDvm.isSensitiveThreadHook != NULL) {
264 isSensitive = gDvm.isSensitiveThreadHook();
265 }
266 cp = logWriteInt(cp, isSensitive);
267
268 // Emit self thread name string, <= 37 bytes.
269 std::string selfName = dvmGetThreadName(self);
270 cp = logWriteString(cp, selfName.c_str(), selfName.size());
271
272 // Emit the wait time, 5 bytes.
273 cp = logWriteInt(cp, waitMs);
274
275 // Emit the source code file name, <= 37 bytes.
276 fileName = dvmGetMethodSourceFile(meth);
277 if (fileName == NULL) fileName = "";
278 cp = logWriteString(cp, fileName, strlen(fileName));
279
280 // Emit the source code line number, 5 bytes.
281 relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
282 cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));
283
284 // Emit the lock owner source code file name, <= 37 bytes.
285 if (ownerFileName == NULL) {
286 ownerFileName = "";
287 } else if (strcmp(fileName, ownerFileName) == 0) {
288 // Common case, so save on log space.
289 ownerFileName = "-";
290 }
291 cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));
292
293 // Emit the source code line number, 5 bytes.
294 cp = logWriteInt(cp, ownerLineNumber);
295
296 // Emit the sample percentage, 5 bytes.
297 cp = logWriteInt(cp, samplePercent);
298
299 assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
300 android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
301 EVENT_TYPE_LIST,
302 eventBuffer,
303 (size_t)(cp - eventBuffer));
304}
305*/
306
/*
 * Acquires this (fat) monitor for 'self', blocking if another thread
 * holds it. Recursive acquisition by the current owner just bumps
 * lock_count_. The commented-out code is Dalvik's lock-contention
 * sample logging, which has not yet been ported to art.
 */
void Monitor::Lock(Thread* self) {
// uint32_t waitThreshold, samplePercent;
// uint64_t waitStart, waitEnd, waitMs;

  if (owner_ == self) {
    // Recursive lock: we already hold the mutex.
    lock_count_++;
    return;
  }
  if (!lock_.TryLock()) {
    {
      // Contended: report ourselves as blocked while waiting for the owner.
      ScopedThreadStateChange tsc(self, Thread::kBlocked);
// waitThreshold = gDvm.lockProfThreshold;
// if (waitThreshold) {
//   waitStart = dvmGetRelativeTimeUsec();
// }
// const char* currentOwnerFileName = mon->ownerFileName;
// uint32_t currentOwnerLineNumber = mon->ownerLineNumber;

      lock_.Lock();
// if (waitThreshold) {
//   waitEnd = dvmGetRelativeTimeUsec();
// }
    }
// if (waitThreshold) {
//   waitMs = (waitEnd - waitStart) / 1000;
//   if (waitMs >= waitThreshold) {
//     samplePercent = 100;
//   } else {
//     samplePercent = 100 * waitMs / waitThreshold;
//   }
//   if (samplePercent != 0 && ((uint32_t)rand() % 100 < samplePercent)) {
//     logContentionEvent(self, waitMs, samplePercent, currentOwnerFileName, currentOwnerLineNumber);
//   }
// }
  }
  // We now hold lock_, so these fields cannot be seen mid-update by a
  // competing locker.
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
// if (gDvm.lockProfThreshold > 0) {
//   const StackSaveArea *saveArea;
//   const Method *meth;
//   mon->ownerLineNumber = 0;
//   if (self->interpSave.curFrame == NULL) {
//     mon->ownerFileName = "no_frame";
//   } else if ((saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame)) == NULL) {
//     mon->ownerFileName = "no_save_area";
//   } else if ((meth = saveArea->method) == NULL) {
//     mon->ownerFileName = "no_method";
//   } else {
//     uint32_t relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
//     mon->ownerFileName = (char*) dvmGetMethodSourceFile(meth);
//     if (mon->ownerFileName == NULL) {
//       mon->ownerFileName = "no_method_file";
//     } else {
//       mon->ownerLineNumber = dvmLineNumFromPC(meth, relativePc);
//     }
//   }
// }
}
368
369void ThrowIllegalMonitorStateException(const char* msg) {
370 Thread::Current()->ThrowNewException("Ljava/lang/IllegalMonitorStateException;", "%s", msg);
371}
372
373bool Monitor::Unlock(Thread* self) {
374 DCHECK(self != NULL);
375 if (owner_ == self) {
376 // We own the monitor, so nobody else can be in here.
377 if (lock_count_ == 0) {
378 owner_ = NULL;
379 owner_filename_ = "unlocked";
380 owner_line_number_ = 0;
381 lock_.Unlock();
382 } else {
383 --lock_count_;
384 }
385 } else {
386 // We don't own this, so we're not allowed to unlock it.
387 // The JNI spec says that we should throw IllegalMonitorStateException
388 // in this case.
389 ThrowIllegalMonitorStateException("unlock of unowned monitor");
390 return false;
391 }
392 return true;
393}
394
395/*
396 * Converts the given relative waiting time into an absolute time.
397 */
398void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec *ts) {
399 int64_t endSec;
400
401#ifdef HAVE_TIMEDWAIT_MONOTONIC
402 clock_gettime(CLOCK_MONOTONIC, ts);
403#else
404 {
405 struct timeval tv;
406 gettimeofday(&tv, NULL);
407 ts->tv_sec = tv.tv_sec;
408 ts->tv_nsec = tv.tv_usec * 1000;
409 }
410#endif
411 endSec = ts->tv_sec + ms / 1000;
412 if (endSec >= 0x7fffffff) {
413 LOG(INFO) << "Note: end time exceeds epoch";
414 endSec = 0x7ffffffe;
415 }
416 ts->tv_sec = endSec;
417 ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;
418
419 // Catch rollover.
420 if (ts->tv_nsec >= 1000000000L) {
421 ts->tv_sec++;
422 ts->tv_nsec -= 1000000000L;
423 }
424}
425
/*
 * Waits on 'cond' for at most ms milliseconds plus ns nanoseconds,
 * with 'mutex' held. Uses the monotonic-clock variant where the
 * platform provides it, so wall-clock adjustments don't skew the
 * timeout. Returns 0 if signaled or ETIMEDOUT on timeout (the only
 * outcomes the DCHECK admits).
 */
int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t ms, int32_t ns) {
  struct timespec ts;
  ToAbsoluteTime(ms, ns, &ts);
#if defined(HAVE_TIMEDWAIT_MONOTONIC)
  int rc = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
#else
  int rc = pthread_cond_timedwait(cond, mutex, &ts);
#endif
  DCHECK(rc == 0 || rc == ETIMEDOUT);
  return rc;
}
437
/*
 * Wait on a monitor until timeout, interrupt, or notification. Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following are true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "ns" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop. This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems. Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before wait()");
    return;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewException("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  // Compute absolute wakeup time, if necessary.
  struct timespec ts;
  bool timed = false;
  if (ms != 0 || ns != 0) {
    ToAbsoluteTime(ms, ns, &ts);
    timed = true;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold. We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor. Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prevLockCount = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const char* savedFileName = owner_filename_;
  owner_filename_ = NULL;
  uint32_t savedLineNumber = owner_line_number_;
  owner_line_number_ = 0;

  /*
   * Update thread status. If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  if (timed) {
    self->SetState(Thread::kTimedWaiting);
  } else {
    self->SetState(Thread::kWaiting);
  }

  self->wait_mutex_->Lock();

  /*
   * Set wait_monitor_ to the monitor object we will be waiting on.
   * When wait_monitor_ is non-NULL a notifying or interrupting thread
   * must signal the thread's wait_cond_ to wake it up.
   */
  DCHECK(self->wait_monitor_ == NULL);
  self->wait_monitor_ = this;

  /*
   * Handle the case where the thread was interrupted before we called
   * wait().
   */
  bool wasInterrupted = false;
  if (self->interrupted_) {
    wasInterrupted = true;
    self->wait_monitor_ = NULL;
    self->wait_mutex_->Unlock();
    goto done;
  }

  /*
   * Release the monitor lock and wait for a notification or
   * a timeout to occur.
   */
  lock_.Unlock();

  if (!timed) {
    self->wait_cond_->Wait(*self->wait_mutex_);
  } else {
    self->wait_cond_->TimedWait(*self->wait_mutex_, ts);
  }
  if (self->interrupted_) {
    wasInterrupted = true;
  }

  self->interrupted_ = false;
  self->wait_monitor_ = NULL;
  self->wait_mutex_->Unlock();

  // Reacquire the monitor lock.
  Lock(self);

done:
  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prevLockCount;
  owner_filename_ = savedFileName;
  owner_line_number_ = savedLineNumber;
  RemoveFromWaitSet(self);

  /* set self->status back to Thread::kRunnable, and self-suspend if needed */
  self->SetState(Thread::kRunnable);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->interrupted_ = false;
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", "%s", "");
    }
  }
}
589
590void Monitor::Notify(Thread* self) {
591 DCHECK(self != NULL);
592
593 // Make sure that we hold the lock.
594 if (owner_ != self) {
595 ThrowIllegalMonitorStateException("object not locked by thread before notify()");
596 return;
597 }
598 // Signal the first waiting thread in the wait set.
599 while (wait_set_ != NULL) {
600 Thread* thread = wait_set_;
601 wait_set_ = thread->wait_next_;
602 thread->wait_next_ = NULL;
603
604 // Check to see if the thread is still waiting.
Elliott Hughes85d15452011-09-16 17:33:01 -0700605 MutexLock mu(*thread->wait_mutex_);
Elliott Hughes5f791332011-09-15 17:45:30 -0700606 if (thread->wait_monitor_ != NULL) {
Elliott Hughes85d15452011-09-16 17:33:01 -0700607 thread->wait_cond_->Signal();
Elliott Hughes5f791332011-09-15 17:45:30 -0700608 return;
609 }
610 }
611}
612
613void Monitor::NotifyAll(Thread* self) {
614 DCHECK(self != NULL);
615
616 // Make sure that we hold the lock.
617 if (owner_ != self) {
618 ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
619 return;
620 }
621 // Signal all threads in the wait set.
622 while (wait_set_ != NULL) {
623 Thread* thread = wait_set_;
624 wait_set_ = thread->wait_next_;
625 thread->wait_next_ = NULL;
626 thread->Notify();
627 }
628}
629
/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
void Monitor::Inflate(Thread* self, Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->thin_lock_id_));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(obj);
  LOG(INFO) << "created monitor " << m << " for object " << obj;
  // Replace the head of the list with the new monitor. The CAS retries
  // until no other thread has pushed onto the list concurrently.
  do {
    m->next_ = gMonitorList;
  } while (android_atomic_release_cas((int32_t)m->next_, (int32_t)m, (int32_t*)(void*)&gMonitorList) != 0);
  m->Lock(self);
  // Propagate the lock state: carry the recursion count over to the
  // monitor, keep only the hash-state bits of the word, and OR in the
  // monitor pointer with the fat-shape flag.
  uint32_t thin = *obj->GetRawLockWordAddress();
  m->lock_count_ = LW_LOCK_COUNT(thin);
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
  // Publish the updated lock word with release semantics so the
  // monitor's initialization is visible before the new word.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
}
656
/*
 * Implements monitor-enter for 'self' on 'obj'.
 *
 * Fast paths operate on the thin lock word: recursive reacquisition
 * bumps the count (inflating on overflow), and an unowned lock is
 * claimed with a single CAS. On contention we spin with exponential
 * backoff (yield, then 1ms..1s sleeps) until the lock is released or
 * another thread inflates it, then fatten the lock ourselves.
 */
void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  struct timespec tm;
  long sleepDelayNs;
  long minSleepDelayNs = 1000000; /* 1 millisecond */
  long maxSleepDelayNs = 1000000000; /* 1 second */
  uint32_t thin, newThin, threadId;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  threadId = self->thin_lock_id_;
retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock. The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock. Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached. Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      /*
       * The lock is unowned. Install the thread id of the
       * calling thread into the owner field. This is the
       * common case. In performance critical code the JIT
       * will have tried this before calling out to the VM.
       */
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      LOG(INFO) << StringPrintf("(%d) spin on lock %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
      // The lock is owned by another thread. Notify the VM that we are about to wait.
      self->monitor_enter_object_ = obj;
      Thread::State oldStatus = self->SetState(Thread::kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeed. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap to avoid once a second polls for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the VM know we are no longer
          // waiting and try again.
          LOG(INFO) << "(" << threadId << ") lock " << (void*) thinp << " surprise-fattened";
          self->monitor_enter_object_ = NULL;
          self->SetState(oldStatus);
          goto retry;
        }
      }
      LOG(INFO) << StringPrintf("(%d) spin on lock done %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
      // We have acquired the thin lock. Let the VM know that we are no longer waiting.
      self->monitor_enter_object_ = NULL;
      self->SetState(oldStatus);
      // Fatten the lock.
      Inflate(self, obj);
      LOG(INFO) << StringPrintf("(%d) lock %p fattened", threadId, thinp);
    }
  } else {
    // The lock is a fat lock.
    LOG(INFO) << StringPrintf("(%d) locking fat lock %p (%p) %p on a %s", threadId, thinp, LW_MONITOR(*thinp), (void*)*thinp, PrettyTypeOf(obj).c_str());
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}
762
/*
 * Implements monitor-exit for 'self' on 'obj'. Returns false (with an
 * IllegalMonitorStateException pending) if the calling thread does not
 * own the lock; true on success.
 */
bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  //DCHECK_EQ(self->GetState(), Thread::kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin. We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->thin_lock_id_) {
      /*
       * We are the lock owner. It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case. Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired. Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock. The JVM spec requires that we
       * throw an exception in this case.
       */
      ThrowIllegalMonitorStateException("unlock of unowned monitor");
      return false;
    }
  } else {
    /*
     * The lock is fat. We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self)) {
      // An exception has been raised. Do not fall through.
      return false;
    }
  }
  return true;
}
821
822/*
823 * Object.wait(). Also called for class init.
824 */
825void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
826 volatile int32_t* thinp = obj->GetRawLockWordAddress();
827
828 // If the lock is still thin, we need to fatten it.
829 uint32_t thin = *thinp;
830 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
831 // Make sure that 'self' holds the lock.
832 if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
833 ThrowIllegalMonitorStateException("object not locked by thread before wait()");
834 return;
835 }
836
837 /* This thread holds the lock. We need to fatten the lock
838 * so 'self' can block on it. Don't update the object lock
839 * field yet, because 'self' needs to acquire the lock before
840 * any other thread gets a chance.
841 */
842 Inflate(self, obj);
843 LOG(INFO) << StringPrintf("(%d) lock %p fattened by wait()", self->thin_lock_id_, thinp);
844 }
845 LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
846}
847
848void Monitor::Notify(Thread* self, Object *obj) {
849 uint32_t thin = *obj->GetRawLockWordAddress();
850
851 // If the lock is still thin, there aren't any waiters;
852 // waiting on an object forces lock fattening.
853 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
854 // Make sure that 'self' holds the lock.
855 if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
856 ThrowIllegalMonitorStateException("object not locked by thread before notify()");
857 return;
858 }
859 // no-op; there are no waiters to notify.
860 } else {
861 // It's a fat lock.
862 LW_MONITOR(thin)->Notify(self);
863 }
864}
865
866void Monitor::NotifyAll(Thread* self, Object *obj) {
867 uint32_t thin = *obj->GetRawLockWordAddress();
868
869 // If the lock is still thin, there aren't any waiters;
870 // waiting on an object forces lock fattening.
871 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
872 // Make sure that 'self' holds the lock.
873 if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
874 ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
875 return;
876 }
877 // no-op; there are no waiters to notify.
878 } else {
879 // It's a fat lock.
880 LW_MONITOR(thin)->NotifyAll(self);
881 }
882}
883
884uint32_t Monitor::GetLockOwner(uint32_t raw_lock_word) {
885 if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
886 return LW_LOCK_OWNER(raw_lock_word);
887 } else {
888 Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
889 return owner ? owner->GetThinLockId() : 0;
890 }
891}
892
Elliott Hughes8e4aac52011-09-26 17:03:36 -0700893void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
894 Thread::State state = thread->GetState();
895
896 Object* object = NULL;
897 uint32_t lock_owner = ThreadList::kInvalidId;
898 if (state == Thread::kWaiting || state == Thread::kTimedWaiting) {
899 os << " - waiting on ";
900 Monitor* monitor = thread->wait_monitor_;
901 if (monitor != NULL) {
902 object = monitor->obj_;
903 }
904 lock_owner = Thread::LockOwnerFromThreadLock(object);
905 } else if (state == Thread::kBlocked) {
906 os << " - waiting to lock ";
907 object = thread->monitor_enter_object_;
908 if (object != NULL) {
909 lock_owner = object->GetLockOwner();
910 }
911 } else {
912 // We're not waiting on anything.
913 return;
914 }
915 os << "<" << object << ">";
916
917 // - waiting on <0x613f83d8> (a java.lang.ThreadLock) held by thread 5
918 // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
919 os << " (a " << PrettyTypeOf(object) << ")";
920
921 if (lock_owner != ThreadList::kInvalidId) {
922 os << " held by thread " << lock_owner;
923 }
924
925 os << "\n";
926}
927
Elliott Hughes5f791332011-09-15 17:45:30 -0700928} // namespace art