/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17#include "sync.h"
18
19#include <errno.h>
20#include <fcntl.h>
21#include <pthread.h>
22#include <stdlib.h>
23#include <sys/time.h>
24#include <time.h>
25#include <unistd.h>
26
27#include "mutex.h"
28#include "object.h"
29#include "thread.h"
30
31namespace art {
32
33/*
34 * Every Object has a monitor associated with it, but not every Object is
35 * actually locked. Even the ones that are locked do not need a
36 * full-fledged monitor until a) there is actual contention or b) wait()
37 * is called on the Object.
38 *
39 * For Android, we have implemented a scheme similar to the one described
40 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
41 * (ACM 1998). Things are even easier for us, though, because we have
42 * a full 32 bits to work with.
43 *
44 * The two states of an Object's lock are referred to as "thin" and
45 * "fat". A lock may transition from the "thin" state to the "fat"
46 * state and this transition is referred to as inflation. Once a lock
47 * has been inflated it remains in the "fat" state indefinitely.
48 *
49 * The lock value itself is stored in Object.lock. The LSB of the
50 * lock encodes its state. When cleared, the lock is in the "thin"
51 * state and its bits are formatted as follows:
52 *
53 * [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
54 * lock count thread id hash state 0
55 *
56 * When set, the lock is in the "fat" state and its bits are formatted
57 * as follows:
58 *
59 * [31 ---- 3] [2 ---- 1] [0]
60 * pointer hash state 1
61 *
62 * For an in-depth description of the mechanics of thin-vs-fat locking,
63 * read the paper referred to above.
64 */
65
66/*
67 * Monitors provide:
68 * - mutually exclusive access to resources
69 * - a way for multiple threads to wait for notification
70 *
71 * In effect, they fill the role of both mutexes and condition variables.
72 *
73 * Only one thread can own the monitor at any time. There may be several
74 * threads waiting on it (the wait call unlocks it). One or more waiting
75 * threads may be getting interrupted or notified at any given time.
76 *
77 * TODO: the various members of monitor are not SMP-safe.
78 */
// Constructs an unowned fat monitor for 'obj'. The monitor starts with no
// owner, a zero recursion count, and an empty wait set; the caller is
// responsible for linking it into the global monitor list and acquiring it
// (see Inflate()).
Monitor::Monitor(Object* obj)
    : owner_(NULL),
      lock_count_(0),
      obj_(obj),
      wait_set_(NULL),
      lock_("a monitor lock"),
      next_(NULL),
      owner_filename_(NULL),
      owner_line_number_(0) {
}
89
// Destroys a monitor. Only fat monitors are ever constructed, so the object's
// lock word must still be in the fat state. In debug builds we also verify
// that nobody holds the internal mutex; note that the TryLock has a side
// effect (it acquires the mutex), which is safe only because the whole check
// is compiled out together under NDEBUG.
Monitor::~Monitor() {
  DCHECK(obj_ != NULL);
  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);

#ifndef NDEBUG
  /* This lock is associated with an object
   * that's being swept. The only possible way
   * anyone could be holding this lock would be
   * if some JNI code locked but didn't unlock
   * the object, in which case we've got some bad
   * native code somewhere.
   */
  DCHECK(lock_.TryLock());
  lock_.Unlock();
#endif
}
106
107/*
108 * Links a thread into a monitor's wait set. The monitor lock must be
109 * held by the caller of this routine.
110 */
111void Monitor::AppendToWaitSet(Thread* thread) {
112 DCHECK(owner_ == Thread::Current());
113 DCHECK(thread != NULL);
114 DCHECK(thread->wait_next_ == NULL);
115 if (wait_set_ == NULL) {
116 wait_set_ = thread;
117 return;
118 }
119
120 // push_back.
121 Thread* t = wait_set_;
122 while (t->wait_next_ != NULL) {
123 t = t->wait_next_;
124 }
125 t->wait_next_ = thread;
126}
127
128/*
129 * Unlinks a thread from a monitor's wait set. The monitor lock must
130 * be held by the caller of this routine.
131 */
132void Monitor::RemoveFromWaitSet(Thread *thread) {
133 DCHECK(owner_ == Thread::Current());
134 DCHECK(thread != NULL);
135 if (wait_set_ == NULL) {
136 return;
137 }
138 if (wait_set_ == thread) {
139 wait_set_ = thread->wait_next_;
140 thread->wait_next_ = NULL;
141 return;
142 }
143
144 Thread* t = wait_set_;
145 while (t->wait_next_ != NULL) {
146 if (t->wait_next_ == thread) {
147 t->wait_next_ = thread->wait_next_;
148 thread->wait_next_ = NULL;
149 return;
150 }
151 t = t->wait_next_;
152 }
153}
154
155// Global list of all monitors. Used for cleanup.
156static Monitor* gMonitorList = NULL;
157
158void Monitor::FreeMonitorList() {
159 Monitor* m = gMonitorList;
160 while (m != NULL) {
161 Monitor* next = m->next_;
162 delete m;
163 m = next;
164 }
165}
166
167/*
168 * Frees monitor objects belonging to unmarked objects.
169 */
/*
 * Frees monitor objects belonging to unmarked objects.
 *
 * Not yet ported to ART: currently aborts via UNIMPLEMENTED(FATAL).
 * The #if 0 block below is the original Dalvik implementation, kept as a
 * reference for the eventual port (it unlinks and deletes each monitor
 * whose object the GC reports as unmarked).
 */
static void SweepMonitorList(Monitor** mon, bool (isUnmarkedObject)(void*)) {
  UNIMPLEMENTED(FATAL);
#if 0
  Monitor handle;
  Monitor *curr;

  assert(mon != NULL);
  assert(isUnmarkedObject != NULL);
  Monitor* prev = &handle;
  prev->next = curr = *mon;
  while (curr != NULL) {
    Object* obj = curr->obj;
    if ((*isUnmarkedObject)(obj) != 0) {
      prev->next = curr->next;
      delete curr;
      curr = prev->next;
    } else {
      prev = curr;
      curr = curr->next;
    }
  }
  *mon = handle.next;
#endif
}
194
// GC entry point: sweeps the global monitor list, freeing monitors whose
// objects are unmarked. Delegates to the file-local SweepMonitorList above,
// which is currently unimplemented and aborts.
void Monitor::SweepMonitorList(bool (isUnmarkedObject)(void*)) {
  ::art::SweepMonitorList(&gMonitorList, isUnmarkedObject);
}
198
199/*
200static char *logWriteInt(char *dst, int value) {
201 *dst++ = EVENT_TYPE_INT;
202 set4LE((uint8_t *)dst, value);
203 return dst + 4;
204}
205
206static char *logWriteString(char *dst, const char *value, size_t len) {
207 *dst++ = EVENT_TYPE_STRING;
208 len = len < 32 ? len : 32;
209 set4LE((uint8_t *)dst, len);
210 dst += 4;
211 memcpy(dst, value, len);
212 return dst + len;
213}
214
215#define EVENT_LOG_TAG_dvm_lock_sample 20003
216
217static void logContentionEvent(Thread *self, uint32_t waitMs, uint32_t samplePercent,
218 const char *ownerFileName, uint32_t ownerLineNumber)
219{
220 const StackSaveArea *saveArea;
221 const Method *meth;
222 uint32_t relativePc;
223 char eventBuffer[174];
224 const char *fileName;
225 char procName[33];
226 char *cp;
227 size_t len;
228 int fd;
229
230 saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
231 meth = saveArea->method;
232 cp = eventBuffer;
233
234 // Emit the event list length, 1 byte.
235 *cp++ = 9;
236
237 // Emit the process name, <= 37 bytes.
238 fd = open("/proc/self/cmdline", O_RDONLY);
239 memset(procName, 0, sizeof(procName));
240 read(fd, procName, sizeof(procName) - 1);
241 close(fd);
242 len = strlen(procName);
243 cp = logWriteString(cp, procName, len);
244
245 // Emit the sensitive thread ("main thread") status, 5 bytes.
246 bool isSensitive = false;
247 if (gDvm.isSensitiveThreadHook != NULL) {
248 isSensitive = gDvm.isSensitiveThreadHook();
249 }
250 cp = logWriteInt(cp, isSensitive);
251
252 // Emit self thread name string, <= 37 bytes.
253 std::string selfName = dvmGetThreadName(self);
254 cp = logWriteString(cp, selfName.c_str(), selfName.size());
255
256 // Emit the wait time, 5 bytes.
257 cp = logWriteInt(cp, waitMs);
258
259 // Emit the source code file name, <= 37 bytes.
260 fileName = dvmGetMethodSourceFile(meth);
261 if (fileName == NULL) fileName = "";
262 cp = logWriteString(cp, fileName, strlen(fileName));
263
264 // Emit the source code line number, 5 bytes.
265 relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
266 cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));
267
268 // Emit the lock owner source code file name, <= 37 bytes.
269 if (ownerFileName == NULL) {
270 ownerFileName = "";
271 } else if (strcmp(fileName, ownerFileName) == 0) {
272 // Common case, so save on log space.
273 ownerFileName = "-";
274 }
275 cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));
276
277 // Emit the source code line number, 5 bytes.
278 cp = logWriteInt(cp, ownerLineNumber);
279
280 // Emit the sample percentage, 5 bytes.
281 cp = logWriteInt(cp, samplePercent);
282
283 assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
284 android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
285 EVENT_TYPE_LIST,
286 eventBuffer,
287 (size_t)(cp - eventBuffer));
288}
289*/
290
/*
 * Acquires this (fat) monitor on behalf of 'self', blocking if another
 * thread holds it. Recursive acquisition by the current owner just bumps
 * lock_count_. The commented-out code is Dalvik's lock-contention sample
 * logging, not yet ported to ART.
 */
void Monitor::Lock(Thread* self) {
//  uint32_t waitThreshold, samplePercent;
//  uint64_t waitStart, waitEnd, waitMs;

  // Recursive acquisition: we already own the mutex, just count it.
  if (owner_ == self) {
    lock_count_++;
    return;
  }
  if (!lock_.TryLock()) {
    {
      // Contended: publish the blocked state while we sleep on the mutex,
      // so the GC/debugger know we won't touch references here.
      ScopedThreadStateChange tsc(self, Thread::kBlocked);
//      waitThreshold = gDvm.lockProfThreshold;
//      if (waitThreshold) {
//        waitStart = dvmGetRelativeTimeUsec();
//      }
//      const char* currentOwnerFileName = mon->ownerFileName;
//      uint32_t currentOwnerLineNumber = mon->ownerLineNumber;

      lock_.Lock();
//      if (waitThreshold) {
//        waitEnd = dvmGetRelativeTimeUsec();
//      }
    }
//    if (waitThreshold) {
//      waitMs = (waitEnd - waitStart) / 1000;
//      if (waitMs >= waitThreshold) {
//        samplePercent = 100;
//      } else {
//        samplePercent = 100 * waitMs / waitThreshold;
//      }
//      if (samplePercent != 0 && ((uint32_t)rand() % 100 < samplePercent)) {
//        logContentionEvent(self, waitMs, samplePercent, currentOwnerFileName, currentOwnerLineNumber);
//      }
//    }
  }
  // We now hold the mutex and are the first-level owner.
  owner_ = self;
  DCHECK_EQ(lock_count_, 0);

  // When debugging, save the current monitor holder for future
  // acquisition failures to use in sampled logging.
//  if (gDvm.lockProfThreshold > 0) {
//    const StackSaveArea *saveArea;
//    const Method *meth;
//    mon->ownerLineNumber = 0;
//    if (self->interpSave.curFrame == NULL) {
//      mon->ownerFileName = "no_frame";
//    } else if ((saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame)) == NULL) {
//      mon->ownerFileName = "no_save_area";
//    } else if ((meth = saveArea->method) == NULL) {
//      mon->ownerFileName = "no_method";
//    } else {
//      uint32_t relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
//      mon->ownerFileName = (char*) dvmGetMethodSourceFile(meth);
//      if (mon->ownerFileName == NULL) {
//        mon->ownerFileName = "no_method_file";
//      } else {
//        mon->ownerLineNumber = dvmLineNumFromPC(meth, relativePc);
//      }
//    }
//  }
}
352
353void ThrowIllegalMonitorStateException(const char* msg) {
354 Thread::Current()->ThrowNewException("Ljava/lang/IllegalMonitorStateException;", "%s", msg);
355}
356
357bool Monitor::Unlock(Thread* self) {
358 DCHECK(self != NULL);
359 if (owner_ == self) {
360 // We own the monitor, so nobody else can be in here.
361 if (lock_count_ == 0) {
362 owner_ = NULL;
363 owner_filename_ = "unlocked";
364 owner_line_number_ = 0;
365 lock_.Unlock();
366 } else {
367 --lock_count_;
368 }
369 } else {
370 // We don't own this, so we're not allowed to unlock it.
371 // The JNI spec says that we should throw IllegalMonitorStateException
372 // in this case.
373 ThrowIllegalMonitorStateException("unlock of unowned monitor");
374 return false;
375 }
376 return true;
377}
378
379/*
380 * Converts the given relative waiting time into an absolute time.
381 */
382void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec *ts) {
383 int64_t endSec;
384
385#ifdef HAVE_TIMEDWAIT_MONOTONIC
386 clock_gettime(CLOCK_MONOTONIC, ts);
387#else
388 {
389 struct timeval tv;
390 gettimeofday(&tv, NULL);
391 ts->tv_sec = tv.tv_sec;
392 ts->tv_nsec = tv.tv_usec * 1000;
393 }
394#endif
395 endSec = ts->tv_sec + ms / 1000;
396 if (endSec >= 0x7fffffff) {
397 LOG(INFO) << "Note: end time exceeds epoch";
398 endSec = 0x7ffffffe;
399 }
400 ts->tv_sec = endSec;
401 ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;
402
403 // Catch rollover.
404 if (ts->tv_nsec >= 1000000000L) {
405 ts->tv_sec++;
406 ts->tv_nsec -= 1000000000L;
407 }
408}
409
410int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t ms, int32_t ns) {
411 struct timespec ts;
412 ToAbsoluteTime(ms, ns, &ts);
413#if defined(HAVE_TIMEDWAIT_MONOTONIC)
414 int rc = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
415#else
416 int rc = pthread_cond_timedwait(cond, mutex, &ts);
417#endif
418 DCHECK(rc == 0 || rc == ETIMEDOUT);
419 return rc;
420}
421
422/*
423 * Wait on a monitor until timeout, interrupt, or notification. Used for
424 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
425 *
426 * If another thread calls Thread.interrupt(), we throw InterruptedException
427 * and return immediately if one of the following are true:
428 * - blocked in wait(), wait(long), or wait(long, int) methods of Object
429 * - blocked in join(), join(long), or join(long, int) methods of Thread
430 * - blocked in sleep(long), or sleep(long, int) methods of Thread
431 * Otherwise, we set the "interrupted" flag.
432 *
433 * Checks to make sure that "ns" is in the range 0-999999
434 * (i.e. fractions of a millisecond) and throws the appropriate
435 * exception if it isn't.
436 *
437 * The spec allows "spurious wakeups", and recommends that all code using
438 * Object.wait() do so in a loop. This appears to derive from concerns
439 * about pthread_cond_wait() on multiprocessor systems. Some commentary
440 * on the web casts doubt on whether these can/should occur.
441 *
442 * Since we're allowed to wake up "early", we clamp extremely long durations
443 * to return at the end of the 32-bit time epoch.
444 */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  DCHECK(self != NULL);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateException("object not locked by thread before wait()");
    return;
  }

  // Enforce the timeout range: ns is fractions of a millisecond.
  if (ms < 0 || ns < 0 || ns > 999999) {
    Thread::Current()->ThrowNewException("Ljava/lang/IllegalArgumentException;",
        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
    return;
  }

  // Compute absolute wakeup time, if necessary. ms == ns == 0 means
  // "wait forever" (untimed).
  struct timespec ts;
  bool timed = false;
  if (ms != 0 || ns != 0) {
    ToAbsoluteTime(ms, ns, &ts);
    timed = true;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold. We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor. Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  int prevLockCount = lock_count_;
  lock_count_ = 0;
  owner_ = NULL;
  const char* savedFileName = owner_filename_;
  owner_filename_ = NULL;
  uint32_t savedLineNumber = owner_line_number_;
  owner_line_number_ = 0;

  /*
   * Update thread status. If the GC wakes up, it'll ignore us, knowing
   * that we won't touch any references in this state, and we'll check
   * our suspend mode before we transition out.
   */
  if (timed) {
    self->SetState(Thread::kTimedWaiting);
  } else {
    self->SetState(Thread::kWaiting);
  }

  self->wait_mutex_.Lock();

  /*
   * Set wait_monitor_ to the monitor object we will be waiting on.
   * When wait_monitor_ is non-NULL a notifying or interrupting thread
   * must signal the thread's wait_cond_ to wake it up.
   */
  DCHECK(self->wait_monitor_ == NULL);
  self->wait_monitor_ = this;

  /*
   * Handle the case where the thread was interrupted before we called
   * wait(). In that path we still hold the monitor mutex, so we skip
   * straight to the restore code below.
   */
  bool wasInterrupted = false;
  if (self->interrupted_) {
    wasInterrupted = true;
    self->wait_monitor_ = NULL;
    self->wait_mutex_.Unlock();
    goto done;
  }

  /*
   * Release the monitor lock and wait for a notification or
   * a timeout to occur. Spurious wakeups are tolerated by callers,
   * who are expected to loop around wait().
   */
  lock_.Unlock();

  if (!timed) {
    self->wait_cond_.Wait(self->wait_mutex_);
  } else {
    self->wait_cond_.TimedWait(self->wait_mutex_, ts);
  }
  if (self->interrupted_) {
    wasInterrupted = true;
  }

  self->interrupted_ = false;
  self->wait_monitor_ = NULL;
  self->wait_mutex_.Unlock();

  // Reacquire the monitor lock (may block if another thread grabbed it).
  Lock(self);

done:
  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prevLockCount;
  owner_filename_ = savedFileName;
  owner_line_number_ = savedLineNumber;
  RemoveFromWaitSet(self);

  /* set self->status back to Thread::kRunnable, and self-suspend if needed */
  self->SetState(Thread::kRunnable);

  if (wasInterrupted) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->interrupted_ = false;
    if (interruptShouldThrow) {
      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", "%s", "");
    }
  }
}
573
574void Monitor::Notify(Thread* self) {
575 DCHECK(self != NULL);
576
577 // Make sure that we hold the lock.
578 if (owner_ != self) {
579 ThrowIllegalMonitorStateException("object not locked by thread before notify()");
580 return;
581 }
582 // Signal the first waiting thread in the wait set.
583 while (wait_set_ != NULL) {
584 Thread* thread = wait_set_;
585 wait_set_ = thread->wait_next_;
586 thread->wait_next_ = NULL;
587
588 // Check to see if the thread is still waiting.
589 MutexLock mu(thread->wait_mutex_);
590 if (thread->wait_monitor_ != NULL) {
591 thread->wait_cond_.Signal();
592 return;
593 }
594 }
595}
596
597void Monitor::NotifyAll(Thread* self) {
598 DCHECK(self != NULL);
599
600 // Make sure that we hold the lock.
601 if (owner_ != self) {
602 ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
603 return;
604 }
605 // Signal all threads in the wait set.
606 while (wait_set_ != NULL) {
607 Thread* thread = wait_set_;
608 wait_set_ = thread->wait_next_;
609 thread->wait_next_ = NULL;
610 thread->Notify();
611 }
612}
613
614/*
615 * Changes the shape of a monitor from thin to fat, preserving the
616 * internal lock state. The calling thread must own the lock.
617 */
void Monitor::Inflate(Thread* self, Object* obj) {
  // Preconditions: the word is still thin and 'self' already owns it.
  DCHECK(self != NULL);
  DCHECK(obj != NULL);
  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->thin_lock_id_));

  // Allocate and acquire a new monitor.
  Monitor* m = new Monitor(obj);
  // Replace the head of the list with the new monitor. CAS loop because
  // other threads may be inflating other objects' locks concurrently.
  do {
    m->next_ = gMonitorList;
  } while (android_atomic_release_cas((int32_t)m->next_, (int32_t)m, (int32_t*)(void*)&gMonitorList) != 0);
  m->Lock(self);
  // Propagate the lock state: carry the thin recursion count over, then
  // build a fat lock word that keeps only the hash-state bits and points
  // at the monitor.
  uint32_t thin = *obj->GetRawLockWordAddress();
  m->lock_count_ = LW_LOCK_COUNT(thin);
  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
  // Publish the updated lock word with release semantics so other threads
  // see a fully initialized monitor.
  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
}
639
/*
 * Implements monitor-enter for 'self' on 'obj'. Thin locks are acquired
 * with a CAS; a contended thin lock is spun on with exponential backoff
 * and then inflated to a fat monitor. Recursion-count overflow also
 * triggers inflation. Fat locks delegate to Monitor::Lock().
 */
void Monitor::MonitorEnter(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();
  struct timespec tm;
  long sleepDelayNs;
  long minSleepDelayNs = 1000000;  /* 1 millisecond */
  long maxSleepDelayNs = 1000000000;  /* 1 second */
  uint32_t thin, newThin, threadId;

  assert(self != NULL);
  assert(obj != NULL);
  threadId = self->thin_lock_id_;
retry:
  thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is a thin lock. The owner field is used to
     * determine the acquire method, ordered by cost.
     */
    if (LW_LOCK_OWNER(thin) == threadId) {
      /*
       * The calling thread owns the lock. Increment the
       * value of the recursion count field.
       */
      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
        /*
         * The reacquisition limit has been reached. Inflate
         * the lock so the next acquire will not overflow the
         * recursion count field.
         */
        Inflate(self, obj);
      }
    } else if (LW_LOCK_OWNER(thin) == 0) {
      /*
       * The lock is unowned. Install the thread id of the
       * calling thread into the owner field. This is the
       * common case. In performance critical code the JIT
       * will have tried this before calling out to the VM.
       */
      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
        // The acquire failed. Try again.
        goto retry;
      }
    } else {
      LOG(INFO) << StringPrintf("(%d) spin on lock %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
      // The lock is owned by another thread. Notify the VM that we are about to wait.
      Thread::State oldStatus = self->SetState(Thread::kBlocked);
      // Spin until the thin lock is released or inflated.
      sleepDelayNs = 0;
      for (;;) {
        thin = *thinp;
        // Check the shape of the lock word. Another thread
        // may have inflated the lock while we were waiting.
        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
          if (LW_LOCK_OWNER(thin) == 0) {
            // The lock has been released. Install the thread id of the
            // calling thread into the owner field.
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
              // The acquire succeed. Break out of the loop and proceed to inflate the lock.
              break;
            }
          } else {
            // The lock has not been released. Yield so the owning thread can run.
            if (sleepDelayNs == 0) {
              // First failed attempt: just yield the CPU once.
              sched_yield();
              sleepDelayNs = minSleepDelayNs;
            } else {
              // Subsequent attempts: sleep with exponential backoff.
              tm.tv_sec = 0;
              tm.tv_nsec = sleepDelayNs;
              nanosleep(&tm, NULL);
              // Prepare the next delay value. Wrap to avoid once a second polls for eternity.
              if (sleepDelayNs < maxSleepDelayNs / 2) {
                sleepDelayNs *= 2;
              } else {
                sleepDelayNs = minSleepDelayNs;
              }
            }
          }
        } else {
          // The thin lock was inflated by another thread. Let the VM know we are no longer
          // waiting and try again.
          LOG(INFO) << "(" << threadId << ") lock " << (void*) thinp << " surprise-fattened";
          self->SetState(oldStatus);
          goto retry;
        }
      }
      LOG(INFO) << StringPrintf("(%d) spin on lock done %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
      // We have acquired the thin lock. Let the VM know that we are no longer waiting.
      self->SetState(oldStatus);
      // Fatten the lock. A lock that has seen contention is inflated so
      // future contenders can block instead of spinning.
      Inflate(self, obj);
      LOG(INFO) << StringPrintf("(%d) lock %p fattened", threadId, thinp);
    }
  } else {
    // The lock is a fat lock.
    DCHECK(LW_MONITOR(*thinp) != NULL);
    LW_MONITOR(*thinp)->Lock(self);
  }
}
741
/*
 * Implements monitor-exit for 'self' on 'obj'. Returns true on success;
 * returns false (with IllegalMonitorStateException raised) if 'self' does
 * not own the lock.
 */
bool Monitor::MonitorExit(Thread* self, Object* obj) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  DCHECK(self != NULL);
  DCHECK_EQ(self->GetState(), Thread::kRunnable);
  DCHECK(obj != NULL);

  /*
   * Cache the lock word as its value can change while we are
   * examining its state.
   */
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    /*
     * The lock is thin. We must ensure that the lock is owned
     * by the given thread before unlocking it.
     */
    if (LW_LOCK_OWNER(thin) == self->thin_lock_id_) {
      /*
       * We are the lock owner. It is safe to update the lock
       * without CAS as lock ownership guards the lock itself.
       */
      if (LW_LOCK_COUNT(thin) == 0) {
        /*
         * The lock was not recursively acquired, the common
         * case. Unlock by clearing all bits except for the
         * hash state.
         */
        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
        // Release-store so prior writes are visible to the next acquirer.
        android_atomic_release_store(thin, thinp);
      } else {
        /*
         * The object was recursively acquired. Decrement the
         * lock recursion count field.
         */
        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
      }
    } else {
      /*
       * We do not own the lock. The JVM spec requires that we
       * throw an exception in this case.
       */
      ThrowIllegalMonitorStateException("unlock of unowned monitor");
      return false;
    }
  } else {
    /*
     * The lock is fat. We must check to see if Unlock has
     * raised any exceptions before continuing.
     */
    DCHECK(LW_MONITOR(*thinp) != NULL);
    if (!LW_MONITOR(*thinp)->Unlock(self)) {
      // An exception has been raised. Do not fall through.
      return false;
    }
  }
  return true;
}
800
801/*
802 * Object.wait(). Also called for class init.
803 */
void Monitor::Wait(Thread* self, Object *obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
  volatile int32_t* thinp = obj->GetRawLockWordAddress();

  // If the lock is still thin, we need to fatten it: waiting requires a
  // full monitor with a wait set and condition variable.
  uint32_t thin = *thinp;
  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
    // Make sure that 'self' holds the lock.
    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
      ThrowIllegalMonitorStateException("object not locked by thread before wait()");
      return;
    }

    /* This thread holds the lock. We need to fatten the lock
     * so 'self' can block on it. Don't update the object lock
     * field yet, because 'self' needs to acquire the lock before
     * any other thread gets a chance.
     */
    Inflate(self, obj);
    LOG(INFO) << StringPrintf("(%d) lock %p fattened by wait()", self->thin_lock_id_, thinp);
  }
  // Re-read *thinp here: Inflate() above may have just replaced the thin
  // word with a pointer to the new fat monitor.
  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
}
826
827void Monitor::Notify(Thread* self, Object *obj) {
828 uint32_t thin = *obj->GetRawLockWordAddress();
829
830 // If the lock is still thin, there aren't any waiters;
831 // waiting on an object forces lock fattening.
832 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
833 // Make sure that 'self' holds the lock.
834 if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
835 ThrowIllegalMonitorStateException("object not locked by thread before notify()");
836 return;
837 }
838 // no-op; there are no waiters to notify.
839 } else {
840 // It's a fat lock.
841 LW_MONITOR(thin)->Notify(self);
842 }
843}
844
845void Monitor::NotifyAll(Thread* self, Object *obj) {
846 uint32_t thin = *obj->GetRawLockWordAddress();
847
848 // If the lock is still thin, there aren't any waiters;
849 // waiting on an object forces lock fattening.
850 if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
851 // Make sure that 'self' holds the lock.
852 if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
853 ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
854 return;
855 }
856 // no-op; there are no waiters to notify.
857 } else {
858 // It's a fat lock.
859 LW_MONITOR(thin)->NotifyAll(self);
860 }
861}
862
863uint32_t Monitor::GetLockOwner(uint32_t raw_lock_word) {
864 if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
865 return LW_LOCK_OWNER(raw_lock_word);
866 } else {
867 Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
868 return owner ? owner->GetThinLockId() : 0;
869 }
870}
871
872} // namespace art