blob: 99e071e7abca96dceb19e70c79d2b3b507640e74 [file] [log] [blame]
Elliott Hughes5f791332011-09-15 17:45:30 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_MONITOR_H_
18#define ART_RUNTIME_MONITOR_H_
Elliott Hughes5f791332011-09-15 17:45:30 -070019
20#include <pthread.h>
21#include <stdint.h>
Brian Carlstromc57ad202015-03-03 21:21:29 -080022#include <stdlib.h>
Elliott Hughes5f791332011-09-15 17:45:30 -070023
Hans Boehm65c18a22020-01-03 23:37:13 +000024#include <atomic>
Elliott Hughes8e4aac52011-09-26 17:03:36 -070025#include <iosfwd>
Elliott Hughesc33a32b2011-10-11 18:18:07 -070026#include <list>
Elliott Hughesf327e072013-01-09 16:01:26 -080027#include <vector>
Elliott Hughes8e4aac52011-09-26 17:03:36 -070028
Mathieu Chartierbad02672014-08-25 13:08:22 -070029#include "base/allocator.h"
David Sehrc431b9d2018-03-02 12:01:51 -080030#include "base/atomic.h"
Elliott Hughes76b61672012-12-12 17:47:30 -080031#include "base/mutex.h"
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -070032#include "gc_root.h"
Hiroshi Yamauchie15ea082015-02-09 17:11:42 -080033#include "lock_word.h"
Vladimir Markof52d92f2019-03-29 12:33:02 +000034#include "obj_ptr.h"
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -070035#include "read_barrier_option.h"
Alex Light77fee872017-09-05 14:51:49 -070036#include "runtime_callbacks.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080037#include "thread_state.h"
Elliott Hughes5f791332011-09-15 17:45:30 -070038
39namespace art {
40
Mathieu Chartiere401d142015-04-22 13:56:20 -070041class ArtMethod;
Andreas Gampe5d08fcc2017-06-05 17:56:46 -070042class IsMarkedVisitor;
Ian Rogersb0fa5dc2014-04-28 16:47:08 -070043class LockWord;
Mathieu Chartiereb8167a2014-05-07 15:43:14 -070044template<class T> class Handle;
Ian Rogersb0fa5dc2014-04-28 16:47:08 -070045class StackVisitor;
Mathieu Chartiere401d142015-04-22 13:56:20 -070046class Thread;
Ian Rogersb0fa5dc2014-04-28 16:47:08 -070047typedef uint32_t MonitorId;
Mathieu Chartierc645f1d2014-03-06 18:11:53 -080048
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080049namespace mirror {
Igor Murashkin2ffb7032017-11-08 13:35:21 -080050class Object;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080051} // namespace mirror
Ian Rogersef7d42f2014-01-06 12:55:46 -080052
// Why a thread is trying to acquire a monitor: either a plain
// MonitorEnter-style lock, or re-acquisition after Object.wait().
enum class LockReason {
  kForWait,
  kForLock,
};
57
Elliott Hughes5f791332011-09-15 17:45:30 -070058class Monitor {
59 public:
Ian Rogersd9c4fc92013-10-01 19:45:43 -070060 // The default number of spins that are done before thread suspension is used to forcibly inflate
61 // a lock word. See Runtime::max_spins_before_thin_lock_inflation_.
62 constexpr static size_t kDefaultMaxSpinsBeforeThinLockInflation = 50;
63
wangguiboe1484e92021-04-24 11:27:06 +080064 static constexpr int kDefaultMonitorTimeoutMs = 500;
65
66 static constexpr int kMonitorTimeoutMinMs = 200;
67
68 static constexpr int kMonitorTimeoutMaxMs = 1000; // 1 second
69
Elliott Hughes5f791332011-09-15 17:45:30 -070070 ~Monitor();
71
Andreas Gamped0210e52017-06-23 13:38:09 -070072 static void Init(uint32_t lock_profiling_threshold, uint32_t stack_dump_lock_profiling_threshold);
Elliott Hughes32d6e1e2011-10-11 14:47:44 -070073
Ian Rogersd9c4fc92013-10-01 19:45:43 -070074 // Return the thread id of the lock owner or 0 when there is no owner.
Vladimir Markof52d92f2019-03-29 12:33:02 +000075 static uint32_t GetLockOwnerThreadId(ObjPtr<mirror::Object> obj)
Ian Rogersd9c4fc92013-10-01 19:45:43 -070076 NO_THREAD_SAFETY_ANALYSIS; // TODO: Reading lock owner without holding lock is racy.
Elliott Hughes5f791332011-09-15 17:45:30 -070077
Mathieu Chartier90443472015-07-16 20:32:27 -070078 // NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
Vladimir Markof52d92f2019-03-29 12:33:02 +000079 static ObjPtr<mirror::Object> MonitorEnter(Thread* thread,
80 ObjPtr<mirror::Object> obj,
81 bool trylock)
82 EXCLUSIVE_LOCK_FUNCTION(obj.Ptr())
Mathieu Chartier2d096c92015-10-12 16:18:20 -070083 NO_THREAD_SAFETY_ANALYSIS
84 REQUIRES(!Roles::uninterruptible_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -070085 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughes5f791332011-09-15 17:45:30 -070086
Mathieu Chartier90443472015-07-16 20:32:27 -070087 // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
Vladimir Markof52d92f2019-03-29 12:33:02 +000088 static bool MonitorExit(Thread* thread, ObjPtr<mirror::Object> obj)
Mathieu Chartier2d096c92015-10-12 16:18:20 -070089 NO_THREAD_SAFETY_ANALYSIS
90 REQUIRES(!Roles::uninterruptible_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -070091 REQUIRES_SHARED(Locks::mutator_lock_)
Vladimir Markof52d92f2019-03-29 12:33:02 +000092 UNLOCK_FUNCTION(obj.Ptr());
Mathieu Chartier90443472015-07-16 20:32:27 -070093
Vladimir Markof52d92f2019-03-29 12:33:02 +000094 static void Notify(Thread* self, ObjPtr<mirror::Object> obj)
95 REQUIRES_SHARED(Locks::mutator_lock_) {
Ian Rogers13c479e2013-10-11 07:59:01 -070096 DoNotify(self, obj, false);
Ian Rogersd9c4fc92013-10-01 19:45:43 -070097 }
Vladimir Markof52d92f2019-03-29 12:33:02 +000098 static void NotifyAll(Thread* self, ObjPtr<mirror::Object> obj)
99 REQUIRES_SHARED(Locks::mutator_lock_) {
Ian Rogers13c479e2013-10-11 07:59:01 -0700100 DoNotify(self, obj, true);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700101 }
Ian Rogers0ef3bd22014-08-15 13:39:34 -0700102
103 // Object.wait(). Also called for class init.
Mathieu Chartier90443472015-07-16 20:32:27 -0700104 // NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
Vladimir Markof52d92f2019-03-29 12:33:02 +0000105 static void Wait(Thread* self,
106 ObjPtr<mirror::Object> obj,
107 int64_t ms,
108 int32_t ns,
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800109 bool interruptShouldThrow, ThreadState why)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700110 REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
Elliott Hughes5f791332011-09-15 17:45:30 -0700111
Andreas Gampef3ebcce2017-12-11 20:40:23 -0800112 static ThreadState FetchState(const Thread* thread,
Vladimir Markof52d92f2019-03-29 12:33:02 +0000113 /* out */ ObjPtr<mirror::Object>* monitor_object,
Andreas Gampef3ebcce2017-12-11 20:40:23 -0800114 /* out */ uint32_t* lock_owner_tid)
115 REQUIRES(!Locks::thread_suspend_count_lock_)
116 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughes4993bbc2013-01-10 15:41:25 -0800117
Elliott Hughesf9501702013-01-11 11:22:27 -0800118 // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
Vladimir Markof52d92f2019-03-29 12:33:02 +0000119 static ObjPtr<mirror::Object> GetContendedMonitor(Thread* thread)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700120 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesf9501702013-01-11 11:22:27 -0800121
Elliott Hughes4993bbc2013-01-10 15:41:25 -0800122 // Calls 'callback' once for each lock held in the single stack frame represented by
123 // the current state of 'stack_visitor'.
Andreas Gampe956a5222014-08-16 13:41:10 -0700124 // The abort_on_failure flag allows to not die when the state of the runtime is unorderly. This
125 // is necessary when we have already aborted but want to dump the stack as much as we can.
Vladimir Markof52d92f2019-03-29 12:33:02 +0000126 static void VisitLocks(StackVisitor* stack_visitor,
127 void (*callback)(ObjPtr<mirror::Object>, void*),
128 void* callback_context,
129 bool abort_on_failure = true)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700130 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughes8e4aac52011-09-26 17:03:36 -0700131
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700132 static bool IsValidLockWord(LockWord lock_word);
Ian Rogers7dfb28c2013-08-22 08:18:36 -0700133
Hiroshi Yamauchi4cba0d92014-05-21 21:10:23 -0700134 template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
Vladimir Markof52d92f2019-03-29 12:33:02 +0000135 ObjPtr<mirror::Object> GetObject() REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700136
Vladimir Markof52d92f2019-03-29 12:33:02 +0000137 void SetObject(ObjPtr<mirror::Object> object);
Elliott Hughesc33a32b2011-10-11 18:18:07 -0700138
Hans Boehm65c18a22020-01-03 23:37:13 +0000139 // Provides no memory ordering guarantees.
140 Thread* GetOwner() const {
141 return owner_.load(std::memory_order_relaxed);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700142 }
143
Mathieu Chartier4e6a31e2013-10-31 10:35:05 -0700144 int32_t GetHashCode();
Mathieu Chartierad2541a2013-10-25 10:05:23 -0700145
Hans Boehm65c18a22020-01-03 23:37:13 +0000146 // Is the monitor currently locked? Debug only, provides no memory ordering guarantees.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700147 bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);
Mathieu Chartierad2541a2013-10-25 10:05:23 -0700148
Mathieu Chartier4e6a31e2013-10-31 10:35:05 -0700149 bool HasHashCode() const {
Orion Hodson88591fe2018-03-06 13:35:43 +0000150 return hash_code_.load(std::memory_order_relaxed) != 0;
Mathieu Chartier4e6a31e2013-10-31 10:35:05 -0700151 }
152
Ian Rogersef7d42f2014-01-06 12:55:46 -0800153 MonitorId GetMonitorId() const {
154 return monitor_id_;
155 }
156
Ian Rogers43c69cc2014-08-15 11:09:28 -0700157 // Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
Mathieu Chartier0cd81352014-05-22 16:48:55 -0700158 static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700159 uint32_t hash_code) REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartierad2541a2013-10-25 10:05:23 -0700160
Mathieu Chartier90443472015-07-16 20:32:27 -0700161 // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
162 // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
163 // NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_.
Vladimir Markof52d92f2019-03-29 12:33:02 +0000164 static bool Deflate(Thread* self, ObjPtr<mirror::Object> obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700165 REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
Mathieu Chartier590fee92013-09-13 13:46:47 -0700166
Hiroshi Yamauchie15ea082015-02-09 17:11:42 -0800167#ifndef __LP64__
168 void* operator new(size_t size) {
169 // Align Monitor* as per the monitor ID field size in the lock word.
Brian Carlstromc57ad202015-03-03 21:21:29 -0800170 void* result;
171 int error = posix_memalign(&result, LockWord::kMonitorIdAlignment, size);
172 CHECK_EQ(error, 0) << strerror(error);
173 return result;
Hiroshi Yamauchie15ea082015-02-09 17:11:42 -0800174 }
Christopher Ferris8a354052015-04-24 17:23:53 -0700175
176 void operator delete(void* ptr) {
177 free(ptr);
178 }
Hiroshi Yamauchie15ea082015-02-09 17:11:42 -0800179#endif
180
Elliott Hughes5f791332011-09-15 17:45:30 -0700181 private:
Vladimir Markof52d92f2019-03-29 12:33:02 +0000182 Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700183 REQUIRES_SHARED(Locks::mutator_lock_);
Vladimir Markof52d92f2019-03-29 12:33:02 +0000184 Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code, MonitorId id)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700185 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughes5f791332011-09-15 17:45:30 -0700186
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700187 // Install the monitor into its object, may fail if another thread installs a different monitor
Hans Boehm65c18a22020-01-03 23:37:13 +0000188 // first. Monitor remains in the same logical state as before, i.e. held the same # of times.
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700189 bool Install(Thread* self)
Mathieu Chartier90443472015-07-16 20:32:27 -0700190 REQUIRES(!monitor_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700191 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700192
Ian Rogers0ef3bd22014-08-15 13:39:34 -0700193 // Links a thread into a monitor's wait set. The monitor lock must be held by the caller of this
194 // routine.
Mathieu Chartier90443472015-07-16 20:32:27 -0700195 void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_);
Ian Rogers0ef3bd22014-08-15 13:39:34 -0700196
197 // Unlinks a thread from a monitor's wait set. The monitor lock must be held by the caller of
198 // this routine.
Mathieu Chartier90443472015-07-16 20:32:27 -0700199 void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);
Elliott Hughes5f791332011-09-15 17:45:30 -0700200
Hans Boehm65c18a22020-01-03 23:37:13 +0000201 // Release the monitor lock and signal a waiting thread that has been notified and now needs the
202 // lock. Assumes the monitor lock is held exactly once, and the owner_ field has been reset to
203 // null. Caller may be suspended (Wait) or runnable (MonitorExit).
204 void SignalWaiterAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);
Charles Mungerc665d632018-11-06 16:20:13 +0000205
Ian Rogers0ef3bd22014-08-15 13:39:34 -0700206 // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
207 // calling thread must own the lock or the owner must be suspended. There's a race with other
208 // threads inflating the lock, installing hash codes and spurious failures. The caller should
209 // re-read the lock word following the call.
Vladimir Markof52d92f2019-03-29 12:33:02 +0000210 static void Inflate(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700211 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier90443472015-07-16 20:32:27 -0700212 NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self)
Elliott Hughes5f791332011-09-15 17:45:30 -0700213
Andreas Gampe39b98112017-06-01 16:28:27 -0700214 void LogContentionEvent(Thread* self,
215 uint32_t wait_ms,
216 uint32_t sample_percent,
217 ArtMethod* owner_method,
218 uint32_t owner_dex_pc)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700219 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughesfc861622011-10-17 17:57:47 -0700220
Vladimir Markof52d92f2019-03-29 12:33:02 +0000221 static void FailedUnlock(ObjPtr<mirror::Object> obj,
Mathieu Chartier61b3cd42016-04-18 11:43:29 -0700222 uint32_t expected_owner_thread_id,
223 uint32_t found_owner_thread_id,
Brian Carlstromc57ad202015-03-03 21:21:29 -0800224 Monitor* mon)
Hans Boehm65c18a22020-01-03 23:37:13 +0000225 REQUIRES(!Locks::thread_list_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700226 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers6d0b13e2012-02-07 09:25:29 -0800227
Mathieu Chartier4b0ef1c2016-07-29 16:26:01 -0700228 // Try to lock without blocking, returns true if we acquired the lock.
Hans Boehm65c18a22020-01-03 23:37:13 +0000229 // If spin is true, then we spin for a short period before failing.
230 bool TryLock(Thread* self, bool spin = false)
231 TRY_ACQUIRE(true, monitor_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700232 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier4b0ef1c2016-07-29 16:26:01 -0700233
Alex Light77fee872017-09-05 14:51:49 -0700234 template<LockReason reason = LockReason::kForLock>
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700235 void Lock(Thread* self)
Hans Boehm65c18a22020-01-03 23:37:13 +0000236 ACQUIRE(monitor_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700237 REQUIRES_SHARED(Locks::mutator_lock_);
Alex Light77fee872017-09-05 14:51:49 -0700238
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700239 bool Unlock(Thread* thread)
Hans Boehm65c18a22020-01-03 23:37:13 +0000240 RELEASE(monitor_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700241 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughes5f791332011-09-15 17:45:30 -0700242
Vladimir Markof52d92f2019-03-29 12:33:02 +0000243 static void DoNotify(Thread* self, ObjPtr<mirror::Object> obj, bool notify_all)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700244 REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; // For mon->Notify.
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700245
246 void Notify(Thread* self)
Hans Boehm65c18a22020-01-03 23:37:13 +0000247 REQUIRES(monitor_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700248 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700249
250 void NotifyAll(Thread* self)
Hans Boehm65c18a22020-01-03 23:37:13 +0000251 REQUIRES(monitor_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700252 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700253
Mathieu Chartier0ffdc9c2016-04-19 13:46:03 -0700254 static std::string PrettyContentionInfo(const std::string& owner_name,
255 pid_t owner_tid,
Mathieu Chartier74b3c8f2016-04-15 19:11:45 -0700256 ArtMethod* owners_method,
257 uint32_t owners_dex_pc,
258 size_t num_waiters)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700259 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700260
Ian Rogers0ef3bd22014-08-15 13:39:34 -0700261 // Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and
262 // (somewhat indirectly) Thread.sleep() and Thread.join().
263 //
264 // If another thread calls Thread.interrupt(), we throw InterruptedException and return
265 // immediately if one of the following are true:
266 // - blocked in wait(), wait(long), or wait(long, int) methods of Object
267 // - blocked in join(), join(long), or join(long, int) methods of Thread
268 // - blocked in sleep(long), or sleep(long, int) methods of Thread
269 // Otherwise, we set the "interrupted" flag.
270 //
271 // Checks to make sure that "ns" is in the range 0-999999 (i.e. fractions of a millisecond) and
272 // throws the appropriate exception if it isn't.
273 //
274 // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so in
275 // a loop. This appears to derive from concerns about pthread_cond_wait() on multiprocessor
276 // systems. Some commentary on the web casts doubt on whether these can/should occur.
277 //
278 // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
279 // of the 32-bit time epoch.
Elliott Hughes4cd121e2013-01-07 17:35:41 -0800280 void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
Hans Boehm65c18a22020-01-03 23:37:13 +0000281 REQUIRES(monitor_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700282 REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughes5f791332011-09-15 17:45:30 -0700283
jeffhao33dc7712011-11-09 17:54:24 -0800284 // Translates the provided method and pc into its declaring class' source file and line number.
Mathieu Chartier74b3c8f2016-04-15 19:11:45 -0700285 static void TranslateLocation(ArtMethod* method, uint32_t pc,
286 const char** source_file,
287 int32_t* line_number)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700288 REQUIRES_SHARED(Locks::mutator_lock_);
jeffhao33dc7712011-11-09 17:54:24 -0800289
Hans Boehm65c18a22020-01-03 23:37:13 +0000290 // Provides no memory ordering guarantees.
Mathieu Chartier90443472015-07-16 20:32:27 -0700291 uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700292
Hans Boehm65c18a22020-01-03 23:37:13 +0000293 // Set locking_method_ and locking_dex_pc_ corresponding to owner's current stack.
294 // owner is either self or suspended.
295 void SetLockingMethod(Thread* owner) REQUIRES(monitor_lock_)
296 REQUIRES_SHARED(Locks::mutator_lock_);
297
298 // The same, but without checking for a proxy method. Currently requires owner == self.
299 void SetLockingMethodNoProxy(Thread* owner) REQUIRES(monitor_lock_)
300 REQUIRES_SHARED(Locks::mutator_lock_);
301
Andreas Gampec7ed09b2016-04-25 20:08:55 -0700302 // Support for systrace output of monitor operations.
303 ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
Vladimir Markof52d92f2019-03-29 12:33:02 +0000304 ObjPtr<mirror::Object> obj,
Andreas Gampec7ed09b2016-04-25 20:08:55 -0700305 bool is_wait)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700306 REQUIRES_SHARED(Locks::mutator_lock_);
Andreas Gampec7ed09b2016-04-25 20:08:55 -0700307 static void AtraceMonitorLockImpl(Thread* self,
Vladimir Markof52d92f2019-03-29 12:33:02 +0000308 ObjPtr<mirror::Object> obj,
Andreas Gampec7ed09b2016-04-25 20:08:55 -0700309 bool is_wait)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700310 REQUIRES_SHARED(Locks::mutator_lock_);
Andreas Gampec7ed09b2016-04-25 20:08:55 -0700311 ALWAYS_INLINE static void AtraceMonitorUnlock();
312
Elliott Hughesfc861622011-10-17 17:57:47 -0700313 static uint32_t lock_profiling_threshold_;
Andreas Gamped0210e52017-06-23 13:38:09 -0700314 static uint32_t stack_dump_lock_profiling_threshold_;
Hans Boehm65c18a22020-01-03 23:37:13 +0000315 static bool capture_method_eagerly_;
Elliott Hughes32d6e1e2011-10-11 14:47:44 -0700316
Hans Boehm65c18a22020-01-03 23:37:13 +0000317 // Holding the monitor N times is represented by holding monitor_lock_ N times.
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700318 Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Ian Rogers719d1a32014-03-06 12:13:39 -0800319
Hans Boehm65c18a22020-01-03 23:37:13 +0000320 // Pretend to unlock monitor lock.
321 void FakeUnlockMonitorLock() RELEASE(monitor_lock_) NO_THREAD_SAFETY_ANALYSIS {}
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700322
Hans Boehm65c18a22020-01-03 23:37:13 +0000323 // Number of threads either waiting on the condition or waiting on a contended
324 // monitor acquisition. Prevents deflation.
325 std::atomic<size_t> num_waiters_;
Mathieu Chartier46bc7782013-11-12 17:03:02 -0800326
Hans Boehm65c18a22020-01-03 23:37:13 +0000327 // Which thread currently owns the lock? monitor_lock_ only keeps the tid.
328 // Only set while holding monitor_lock_. Non-locking readers only use it to
329 // compare to self or for debugging.
330 std::atomic<Thread*> owner_;
Elliott Hughes5f791332011-09-15 17:45:30 -0700331
Hans Boehm65c18a22020-01-03 23:37:13 +0000332 // Owner's recursive lock depth. Owner_ non-null, and lock_count_ == 0 ==> held once.
333 unsigned int lock_count_ GUARDED_BY(monitor_lock_);
334
335 // Owner's recursive lock depth is given by monitor_lock_.GetDepth().
Elliott Hughes5f791332011-09-15 17:45:30 -0700336
Hiroshi Yamauchi4cba0d92014-05-21 21:10:23 -0700337 // What object are we part of. This is a weak root. Do not access
338 // this directly, use GetObject() to read it so it will be guarded
339 // by a read barrier.
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700340 GcRoot<mirror::Object> obj_;
Elliott Hughes5f791332011-09-15 17:45:30 -0700341
Brian Carlstrom4514d3c2011-10-21 17:01:31 -0700342 // Threads currently waiting on this monitor.
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700343 Thread* wait_set_ GUARDED_BY(monitor_lock_);
Elliott Hughes5f791332011-09-15 17:45:30 -0700344
Charles Mungerc665d632018-11-06 16:20:13 +0000345 // Threads that were waiting on this monitor, but are now contending on it.
346 Thread* wake_set_ GUARDED_BY(monitor_lock_);
347
Mathieu Chartier4e6a31e2013-10-31 10:35:05 -0700348 // Stored object hash code, generated lazily by GetHashCode.
349 AtomicInteger hash_code_;
Mathieu Chartierad2541a2013-10-25 10:05:23 -0700350
Hans Boehm65c18a22020-01-03 23:37:13 +0000351 // Data structure used to remember the method and dex pc of a recent holder of the
352 // lock. Used for tracing and contention reporting. Setting these is expensive, since it
353 // involves a partial stack walk. We set them only as follows, to minimize the cost:
354 // - If tracing is enabled, they are needed immediately when we first notice contention, so we
355 // set them unconditionally when a monitor is acquired.
356 // - If contention reporting is enabled, we use the lock_owner_request_ field to have the
357 // contending thread request them. The current owner then sets them when releasing the monitor,
358 // making them available when the contending thread acquires the monitor.
Orion Hodsond1fab542020-07-27 15:07:41 +0100359 // - If tracing and contention reporting are enabled, we do both. This usually prevents us from
360 // switching between reporting the end and beginning of critical sections for contention logging
361 // when tracing is enabled. We expect that tracing overhead is normally much higher than for
362 // contention logging, so the added cost should be small. It also minimizes glitches when
363 // enabling and disabling traces.
Hans Boehm65c18a22020-01-03 23:37:13 +0000364 // We're tolerant of missing information. E.g. when tracing is initially turned on, we may
365 // not have the lock holder information if the holder acquired the lock with tracing off.
366 //
367 // We make this data unconditionally atomic; for contention logging all accesses are in fact
368 // protected by the monitor, but for tracing, reads are not. Writes are always
369 // protected by the monitor.
370 //
371 // The fields are always accessed without memory ordering. We store a checksum, and reread if
372 // the checksum doesn't correspond to the values. This results in values that are correct with
373 // very high probability, but not certainty.
374 //
375 // If we need lock_owner information for a certain thread for contenion logging, we store its
376 // tid in lock_owner_request_. To satisfy the request, we store lock_owner_tid_,
377 // lock_owner_method_, and lock_owner_dex_pc_ and the corresponding checksum while holding the
378 // monitor.
379 //
380 // At all times, either lock_owner_ is zero, the checksum is valid, or a thread is actively
381 // in the process of establishing one of those states. Only one thread at a time can be actively
382 // establishing such a state, since writes are protected by the monitor.
383 std::atomic<Thread*> lock_owner_; // *lock_owner_ may no longer exist!
384 std::atomic<ArtMethod*> lock_owner_method_;
385 std::atomic<uint32_t> lock_owner_dex_pc_;
386 std::atomic<uintptr_t> lock_owner_sum_;
387
388 // Request lock owner save method and dex_pc. Written asynchronously.
389 std::atomic<Thread*> lock_owner_request_;
390
391 // Compute method, dex pc, and tid "checksum".
392 uintptr_t LockOwnerInfoChecksum(ArtMethod* m, uint32_t dex_pc, Thread* t);
393
394 // Set owning method, dex pc, and tid. owner_ field is set and points to current thread.
395 void SetLockOwnerInfo(ArtMethod* method, uint32_t dex_pc, Thread* t)
396 REQUIRES(monitor_lock_);
397
398 // Get owning method and dex pc for the given thread, if available.
399 void GetLockOwnerInfo(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc, Thread* t);
400
401 // Do the same, while holding the monitor. There are no concurrent updates.
402 void GetLockOwnerInfoLocked(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc,
403 uint32_t thread_id)
404 REQUIRES(monitor_lock_);
405
406 // We never clear lock_owner method and dex pc. Since it often reflects
407 // ownership when we last detected contention, it may be inconsistent with owner_
408 // and not 100% reliable. For lock contention monitoring, in the absence of tracing,
409 // there is a small risk that the current owner may finish before noticing the request,
410 // or the information will be overwritten by another intervening request and monitor
411 // release, so it's also not 100% reliable. But if we report information at all, it
412 // should generally (modulo accidental checksum matches) pertain to to an acquisition of the
413 // right monitor by the right thread, so it's extremely unlikely to be seriously misleading.
414 // Since we track threads by a pointer to the Thread structure, there is a small chance we may
415 // confuse threads allocated at the same exact address, if a contending thread dies before
416 // we inquire about it.
417
418 // Check for and act on a pending lock_owner_request_
419 void CheckLockOwnerRequest(Thread* self)
420 REQUIRES(monitor_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
Elliott Hughes5f791332011-09-15 17:45:30 -0700421
wangguiboe1484e92021-04-24 11:27:06 +0800422 void MaybeEnableTimeout() REQUIRES(Locks::mutator_lock_);
423
Ian Rogersef7d42f2014-01-06 12:55:46 -0800424 // The denser encoded version of this monitor as stored in the lock word.
425 MonitorId monitor_id_;
426
Andreas Gampe74240812014-04-17 10:35:09 -0700427#ifdef __LP64__
428 // Free list for monitor pool.
429 Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
430#endif
431
Elliott Hughesf327e072013-01-09 16:01:26 -0800432 friend class MonitorInfo;
Brian Carlstrom4514d3c2011-10-21 17:01:31 -0700433 friend class MonitorList;
Andreas Gampe74240812014-04-17 10:35:09 -0700434 friend class MonitorPool;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800435 friend class mirror::Object;
Elliott Hughesc33a32b2011-10-11 18:18:07 -0700436 DISALLOW_COPY_AND_ASSIGN(Monitor);
437};
438
439class MonitorList {
440 public:
441 MonitorList();
442 ~MonitorList();
443
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700444 void Add(Monitor* m) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);
Elliott Hughesc33a32b2011-10-11 18:18:07 -0700445
Mathieu Chartier97509952015-07-13 14:35:43 -0700446 void SweepMonitorList(IsMarkedVisitor* visitor)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700447 REQUIRES(!monitor_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier90443472015-07-16 20:32:27 -0700448 void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
449 void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
Mathieu Chartier90443472015-07-16 20:32:27 -0700450 void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
Mathieu Chartier48ab6872014-06-24 11:21:59 -0700451 // Returns how many monitors were deflated.
Mathieu Chartier90443472015-07-16 20:32:27 -0700452 size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
Hans Boehm6fe97e02016-05-04 18:35:57 -0700453 size_t Size() REQUIRES(!monitor_list_lock_);
Elliott Hughesc33a32b2011-10-11 18:18:07 -0700454
Mathieu Chartierbad02672014-08-25 13:08:22 -0700455 typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;
456
Elliott Hughesc33a32b2011-10-11 18:18:07 -0700457 private:
Ian Rogers5c597c62014-04-17 16:08:07 -0700458 // During sweeping we may free an object and on a separate thread have an object created using
459 // the newly freed memory. That object may then have its lock-word inflated and a monitor created.
460 // If we allow new monitor registration during sweeping this monitor may be incorrectly freed as
461 // the object wasn't marked when sweeping began.
Mathieu Chartierc11d9b82013-09-19 10:01:59 -0700462 bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700463 Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Mathieu Chartierc11d9b82013-09-19 10:01:59 -0700464 ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
Mathieu Chartierbad02672014-08-25 13:08:22 -0700465 Monitors list_ GUARDED_BY(monitor_list_lock_);
Elliott Hughesc33a32b2011-10-11 18:18:07 -0700466
Ian Rogers7dfb28c2013-08-22 08:18:36 -0700467 friend class Monitor;
Elliott Hughesc33a32b2011-10-11 18:18:07 -0700468 DISALLOW_COPY_AND_ASSIGN(MonitorList);
Elliott Hughes5f791332011-09-15 17:45:30 -0700469};
470
Elliott Hughesf327e072013-01-09 16:01:26 -0800471// Collects information about the current state of an object's monitor.
472// This is very unsafe, and must only be called when all threads are suspended.
473// For use only by the JDWP implementation.
474class MonitorInfo {
475 public:
Andreas Gamped9911ee2017-03-27 13:27:24 -0700476 MonitorInfo() : owner_(nullptr), entry_count_(0) {}
Mathieu Chartierf1d666e2015-09-03 16:13:34 -0700477 MonitorInfo(const MonitorInfo&) = default;
478 MonitorInfo& operator=(const MonitorInfo&) = default;
Vladimir Markof52d92f2019-03-29 12:33:02 +0000479 explicit MonitorInfo(ObjPtr<mirror::Object> o) REQUIRES(Locks::mutator_lock_);
Elliott Hughesf327e072013-01-09 16:01:26 -0800480
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700481 Thread* owner_;
482 size_t entry_count_;
483 std::vector<Thread*> waiters_;
Elliott Hughesf327e072013-01-09 16:01:26 -0800484};
485
Elliott Hughes5f791332011-09-15 17:45:30 -0700486} // namespace art
487
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700488#endif // ART_RUNTIME_MONITOR_H_