/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor.h"

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include "mutex.h"
#include "object.h"
#include "thread.h"

namespace art {

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked. Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Android, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998). Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
45 * "fat". A lock may transition from the "thin" state to the "fat"
46 * state and this transition is referred to as inflation. Once a lock
47 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock. The LSB of the
 * lock encodes its state. When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 *
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time. There may be several
 * threads waiting on it (the wait call unlocks it). One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */


/*
 * Monitor accessor. Extracts a monitor structure pointer from a fat
 * lock. Performs no error checking.
 */
#define LW_MONITOR(x) \
    ((Monitor*)((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))

/*
 * Lock recursion count field. Contains a count of the number of times
 * a lock has been recursively acquired.
 */
#define LW_LOCK_COUNT_MASK 0x1fff
#define LW_LOCK_COUNT_SHIFT 19
#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
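
/*
 * Illustrative only: how a thin lock word decomposes under the macros above.
 * A sketch, kept out of the build (mirroring the #if 0 convention used
 * below); it assumes the LW_SHAPE, LW_LOCK_OWNER and LW_HASH_STATE_*
 * macros declared alongside these in monitor.h.
 */
#if 0
static void DescribeThinLockWord(uint32_t lw) {
  DCHECK_EQ(LW_SHAPE(lw), LW_SHAPE_THIN);  // LSB clear: the lock is "thin".
  uint32_t owner = LW_LOCK_OWNER(lw);      // Bits [18..3]: owning thread id, 0 if unowned.
  uint32_t count = LW_LOCK_COUNT(lw);      // Bits [31..19]: recursive acquisition count.
  uint32_t hash_state = (lw >> LW_HASH_STATE_SHIFT) & LW_HASH_STATE_MASK;  // Bits [2..1].
  LOG(INFO) << "thin lock: owner=" << owner << " count=" << count << " hash_state=" << hash_state;
}
#endif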

Monitor::Monitor(Object* obj)
    : owner_(NULL),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      lock_("a monitor lock"),
      next_(NULL),
      owner_filename_(NULL),
      owner_line_number_(0) {
}

Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);

#ifndef NDEBUG
  /* This lock is associated with an object
   * that's being swept. The only possible way
   * anyone could be holding this lock would be
   * if some JNI code locked but didn't unlock
   * the object, in which case we've got some bad
   * native code somewhere.
   */
  DCHECK(lock_.TryLock());
  lock_.Unlock();
#endif
}

/*
 * Links a thread into a monitor's wait set. The monitor lock must be
 * held by the caller of this routine.
 */
void Monitor::AppendToWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  DCHECK(thread->wait_next_ == NULL) << thread->wait_next_;
  if (wait_set_ == NULL) {
    wait_set_ = thread;
    return;
  }

  // push_back.
  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    t = t->wait_next_;
  }
  t->wait_next_ = thread;
}

/*
 * Unlinks a thread from a monitor's wait set. The monitor lock must
 * be held by the caller of this routine.
 */
void Monitor::RemoveFromWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != NULL);
  if (wait_set_ == NULL) {
    return;
  }
  if (wait_set_ == thread) {
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    return;
  }

  Thread* t = wait_set_;
  while (t->wait_next_ != NULL) {
    if (t->wait_next_ == thread) {
      t->wait_next_ = thread->wait_next_;
      thread->wait_next_ = NULL;
      return;
    }
    t = t->wait_next_;
  }
}

// Global list of all monitors. Used for cleanup.
static Monitor* gMonitorList = NULL;

void Monitor::FreeMonitorList() {
  Monitor* m = gMonitorList;
  while (m != NULL) {
    Monitor* next = m->next_;
    delete m;
    m = next;
  }
}

/*
 * Frees monitor objects belonging to unmarked objects.
 */
static void SweepMonitorList(Monitor** mon, bool (isUnmarkedObject)(void*)) {
  UNIMPLEMENTED(FATAL);
#if 0
  Monitor handle;
  Monitor* curr;

  assert(mon != NULL);
  assert(isUnmarkedObject != NULL);
  Monitor* prev = &handle;
  prev->next = curr = *mon;
  while (curr != NULL) {
    Object* obj = curr->obj;
    if ((*isUnmarkedObject)(obj) != 0) {
      prev->next = curr->next;
      delete curr;
      curr = prev->next;
    } else {
      prev = curr;
      curr = curr->next;
    }
  }
  *mon = handle.next;
#endif
}

void Monitor::SweepMonitorList(bool (isUnmarkedObject)(void*)) {
  ::art::SweepMonitorList(&gMonitorList, isUnmarkedObject);
}

/*
static char *logWriteInt(char *dst, int value) {
    *dst++ = EVENT_TYPE_INT;
    set4LE((uint8_t *)dst, value);
    return dst + 4;
}

static char *logWriteString(char *dst, const char *value, size_t len) {
    *dst++ = EVENT_TYPE_STRING;
    len = len < 32 ? len : 32;
    set4LE((uint8_t *)dst, len);
    dst += 4;
    memcpy(dst, value, len);
    return dst + len;
}

#define EVENT_LOG_TAG_dvm_lock_sample 20003

static void logContentionEvent(Thread *self, uint32_t waitMs, uint32_t samplePercent,
                               const char *ownerFileName, uint32_t ownerLineNumber)
{
    const StackSaveArea *saveArea;
    const Method *meth;
    uint32_t relativePc;
    char eventBuffer[174];
    const char *fileName;
    char procName[33];
    char *cp;
    size_t len;
    int fd;

    saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
    meth = saveArea->method;
    cp = eventBuffer;

    // Emit the event list length, 1 byte.
    *cp++ = 9;

    // Emit the process name, <= 37 bytes.
    fd = open("/proc/self/cmdline", O_RDONLY);
    memset(procName, 0, sizeof(procName));
    read(fd, procName, sizeof(procName) - 1);
    close(fd);
    len = strlen(procName);
    cp = logWriteString(cp, procName, len);

    // Emit the sensitive thread ("main thread") status, 5 bytes.
    bool isSensitive = false;
    if (gDvm.isSensitiveThreadHook != NULL) {
        isSensitive = gDvm.isSensitiveThreadHook();
    }
    cp = logWriteInt(cp, isSensitive);

    // Emit self thread name string, <= 37 bytes.
    std::string selfName = dvmGetThreadName(self);
    cp = logWriteString(cp, selfName.c_str(), selfName.size());

    // Emit the wait time, 5 bytes.
    cp = logWriteInt(cp, waitMs);

    // Emit the source code file name, <= 37 bytes.
    fileName = dvmGetMethodSourceFile(meth);
    if (fileName == NULL) fileName = "";
    cp = logWriteString(cp, fileName, strlen(fileName));

    // Emit the source code line number, 5 bytes.
    relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
    cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));

    // Emit the lock owner source code file name, <= 37 bytes.
    if (ownerFileName == NULL) {
        ownerFileName = "";
    } else if (strcmp(fileName, ownerFileName) == 0) {
        // Common case, so save on log space.
        ownerFileName = "-";
    }
    cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));

    // Emit the lock owner source code line number, 5 bytes.
    cp = logWriteInt(cp, ownerLineNumber);

    // Emit the sample percentage, 5 bytes.
    cp = logWriteInt(cp, samplePercent);

    assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
    android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
                       EVENT_TYPE_LIST,
                       eventBuffer,
                       (size_t)(cp - eventBuffer));
}
*/

void Monitor::Lock(Thread* self) {
//  uint32_t waitThreshold, samplePercent;
//  uint64_t waitStart, waitEnd, waitMs;

  if (owner_ == self) {
    lock_count_++;
    return;
  }
  if (!lock_.TryLock()) {
    {
      ScopedThreadStateChange tsc(self, Thread::kBlocked);
//      waitThreshold = gDvm.lockProfThreshold;
//      if (waitThreshold) {
//        waitStart = dvmGetRelativeTimeUsec();
//      }
//      const char* currentOwnerFileName = mon->ownerFileName;
//      uint32_t currentOwnerLineNumber = mon->ownerLineNumber;

      lock_.Lock();
//      if (waitThreshold) {
//        waitEnd = dvmGetRelativeTimeUsec();
//      }
    }
//    if (waitThreshold) {
//      waitMs = (waitEnd - waitStart) / 1000;
//      if (waitMs >= waitThreshold) {
//        samplePercent = 100;
//      } else {
//        samplePercent = 100 * waitMs / waitThreshold;
//      }
//      if (samplePercent != 0 && ((uint32_t)rand() % 100 < samplePercent)) {
//        logContentionEvent(self, waitMs, samplePercent, currentOwnerFileName, currentOwnerLineNumber);
//      }
//    }
  }
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
//  if (gDvm.lockProfThreshold > 0) {
//    const StackSaveArea *saveArea;
//    const Method *meth;
//    mon->ownerLineNumber = 0;
//    if (self->interpSave.curFrame == NULL) {
//      mon->ownerFileName = "no_frame";
//    } else if ((saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame)) == NULL) {
//      mon->ownerFileName = "no_save_area";
//    } else if ((meth = saveArea->method) == NULL) {
//      mon->ownerFileName = "no_method";
//    } else {
//      uint32_t relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
//      mon->ownerFileName = (char*) dvmGetMethodSourceFile(meth);
//      if (mon->ownerFileName == NULL) {
//        mon->ownerFileName = "no_method_file";
//      } else {
//        mon->ownerLineNumber = dvmLineNumFromPC(meth, relativePc);
//      }
//    }
//  }
}

void ThrowIllegalMonitorStateException(const char* msg) {
  Thread::Current()->ThrowNewException("Ljava/lang/IllegalMonitorStateException;", "%s", msg);
}

bool Monitor::Unlock(Thread* self) {
  DCHECK(self != NULL);
  if (owner_ == self) {
    // We own the monitor, so nobody else can be in here.
    if (lock_count_ == 0) {
      owner_ = NULL;
      owner_filename_ = "unlocked";
      owner_line_number_ = 0;
      lock_.Unlock();
    } else {
      --lock_count_;
    }
  } else {
    // We don't own this, so we're not allowed to unlock it.
    // The JNI spec says that we should throw IllegalMonitorStateException
    // in this case.
    ThrowIllegalMonitorStateException("unlock of unowned monitor");
    return false;
  }
  return true;
}

/*
 * Converts the given relative waiting time into an absolute time.
 */
void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec* ts) {
  int64_t endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
  clock_gettime(CLOCK_MONOTONIC, ts);
#else
  {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_sec = tv.tv_sec;
    ts->tv_nsec = tv.tv_usec * 1000;
  }
#endif
  endSec = ts->tv_sec + ms / 1000;
  if (endSec >= 0x7fffffff) {
    LOG(INFO) << "Note: end time exceeds epoch";
    endSec = 0x7ffffffe;
  }
  ts->tv_sec = endSec;
  ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;

  // Catch rollover.
  if (ts->tv_nsec >= 1000000000L) {
    ts->tv_sec++;
    ts->tv_nsec -= 1000000000L;
  }
}

int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t ms, int32_t ns) {
  struct timespec ts;
  ToAbsoluteTime(ms, ns, &ts);
#if defined(HAVE_TIMEDWAIT_MONOTONIC)
  int rc = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
#else
  int rc = pthread_cond_timedwait(cond, mutex, &ts);
#endif
  DCHECK(rc == 0 || rc == ETIMEDOUT);
  return rc;
}
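
/*
 * Illustrative use of dvmRelativeCondWait (a sketch only, not called from
 * this file; "cond", "mutex" and "predicate" are assumed caller-managed):
 *
 *   pthread_mutex_lock(&mutex);
 *   while (!predicate) {
 *     if (dvmRelativeCondWait(&cond, &mutex, 500, 0) == ETIMEDOUT) {
 *       break;  // Roughly 500ms elapsed without a signal.
 *     }
 *   }
 *   pthread_mutex_unlock(&mutex);
 */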

/*
 * Wait on a monitor until timeout, interrupt, or notification. Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "ns" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop. This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems. Some commentary
 * on the web casts doubt on whether these can/should occur.
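 *
 * An illustrative caller-side wait loop (Java-level pseudo-code, not part
 * of this file; "obj" and "condition" are assumed):
 *
 *   synchronized (obj) {
 *       while (!condition) {
 *           obj.wait();
 *       }
 *   }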
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before wait()");
    return;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewException("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  // Compute absolute wakeup time, if necessary.
  struct timespec ts;
  bool timed = false;
  if (ms != 0 || ns != 0) {
    ToAbsoluteTime(ms, ns, &ts);
    timed = true;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold. We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor. Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prevLockCount = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const char* savedFileName = owner_filename_;
  owner_filename_ = NULL;
  uint32_t savedLineNumber = owner_line_number_;
  owner_line_number_ = 0;

  /*
   * Update thread status. If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  if (timed) {
    self->SetState(Thread::kTimedWaiting);
  } else {
    self->SetState(Thread::kWaiting);
  }

  self->wait_mutex_->Lock();

  /*
   * Set wait_monitor_ to the monitor object we will be waiting on.
   * When wait_monitor_ is non-NULL a notifying or interrupting thread
   * must signal the thread's wait_cond_ to wake it up.
   */
  DCHECK(self->wait_monitor_ == NULL);
  self->wait_monitor_ = this;

  /*
   * Handle the case where the thread was interrupted before we called
   * wait().
   */
  bool wasInterrupted = false;
  if (self->interrupted_) {
    wasInterrupted = true;
    self->wait_monitor_ = NULL;
    self->wait_mutex_->Unlock();
    goto done;
  }

  /*
   * Release the monitor lock and wait for a notification or
   * a timeout to occur.
   */
  lock_.Unlock();

  if (!timed) {
    self->wait_cond_->Wait(*self->wait_mutex_);
  } else {
    self->wait_cond_->TimedWait(*self->wait_mutex_, ts);
  }
  if (self->interrupted_) {
    wasInterrupted = true;
  }

  self->interrupted_ = false;
  self->wait_monitor_ = NULL;
  self->wait_mutex_->Unlock();

  // Reacquire the monitor lock.
  Lock(self);

done:
  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prevLockCount;
  owner_filename_ = savedFileName;
  owner_line_number_ = savedLineNumber;
  RemoveFromWaitSet(self);

  /* set self->status back to Thread::kRunnable, and self-suspend if needed */
  self->SetState(Thread::kRunnable);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->interrupted_ = false;
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", "%s", "");
    }
  }
}

void Monitor::Notify(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before notify()");
    return;
  }
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;

    // Check to see if the thread is still waiting.
    MutexLock mu(*thread->wait_mutex_);
    if (thread->wait_monitor_ != NULL) {
      thread->wait_cond_->Signal();
      return;
    }
  }
}

void Monitor::NotifyAll(Thread* self) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
    return;
  }
  // Signal all threads in the wait set.
  while (wait_set_ != NULL) {
    Thread* thread = wait_set_;
    wait_set_ = thread->wait_next_;
    thread->wait_next_ = NULL;
    thread->Notify();
  }
}

/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state. The calling thread must own the lock.
 */
void Monitor::Inflate(Thread* self, Object* obj) {
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->thin_lock_id_));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(obj);
  // Replace the head of the list with the new monitor.
  do {
    m->next_ = gMonitorList;
  } while (android_atomic_release_cas((int32_t)m->next_, (int32_t)m, (int32_t*)(void*)&gMonitorList) != 0);
  m->Lock(self);
  // Propagate the lock state.
  uint32_t thin = *obj->GetRawLockWordAddress();
  m->lock_count_ = LW_LOCK_COUNT(thin);
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
  // Publish the updated lock word.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
}

void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  struct timespec tm;
  long sleepDelayNs;
  long minSleepDelayNs = 1000000;    /* 1 millisecond */
  long maxSleepDelayNs = 1000000000; /* 1 second */
  uint32_t thin, newThin, threadId;

  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  threadId = self->thin_lock_id_;
retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock. The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock. Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached. Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      /*
       * The lock is unowned. Install the thread id of the
       * calling thread into the owner field. This is the
       * common case. In performance critical code the JIT
       * will have tried this before calling out to the VM.
       */
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      LOG(INFO) << StringPrintf("(%d) spin on lock %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
      // The lock is owned by another thread. Notify the VM that we are about to wait.
      Thread::State oldStatus = self->SetState(Thread::kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeeded. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap back to the minimum so we don't keep polling once a second for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the VM know we are no longer
          // waiting and try again.
          LOG(INFO) << "(" << threadId << ") lock " << (void*) thinp << " surprise-fattened";
          self->SetState(oldStatus);
          goto retry;
        }
      }
      LOG(INFO) << StringPrintf("(%d) spin on lock done %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
      // We have acquired the thin lock. Let the VM know that we are no longer waiting.
      self->SetState(oldStatus);
      // Fatten the lock.
      Inflate(self, obj);
      LOG(INFO) << StringPrintf("(%d) lock %p fattened", threadId, thinp);
    }
  } else {
    // The lock is a fat lock.
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}

bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  DCHECK_EQ(self->GetState(), Thread::kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin. We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->thin_lock_id_) {
      /*
       * We are the lock owner. It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case. Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired. Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock. The JVM spec requires that we
       * throw an exception in this case.
       */
      ThrowIllegalMonitorStateException("unlock of unowned monitor");
      return false;
    }
  } else {
    /*
     * The lock is fat. We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self)) {
      // An exception has been raised. Do not fall through.
      return false;
    }
  }
  return true;
}

/*
 * Object.wait(). Also called for class init.
 */
void Monitor::Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
      ThrowIllegalMonitorStateException("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock. We need to fatten the lock
     * so 'self' can block on it. Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    LOG(INFO) << StringPrintf("(%d) lock %p fattened by wait()", self->thin_lock_id_, thinp);
  }
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
}

void Monitor::Notify(Thread* self, Object* obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
      ThrowIllegalMonitorStateException("object not locked by thread before notify()");
      return;
    }
    // no-op; there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->Notify(self);
  }
}

void Monitor::NotifyAll(Thread* self, Object* obj) {
  uint32_t thin = *obj->GetRawLockWordAddress();

  // If the lock is still thin, there aren't any waiters;
  // waiting on an object forces lock fattening.
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
      ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
      return;
    }
    // no-op; there are no waiters to notify.
  } else {
    // It's a fat lock.
    LW_MONITOR(thin)->NotifyAll(self);
  }
}

uint32_t Monitor::GetLockOwner(uint32_t raw_lock_word) {
  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
    return LW_LOCK_OWNER(raw_lock_word);
  } else {
    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
    return owner ? owner->GetThinLockId() : 0;
  }
}

}  // namespace art