/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_H_
#define ART_RUNTIME_MONITOR_H_

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#include <atomic>
#include <iosfwd>
#include <list>
#include <vector>

#include "base/allocator.h"
#include "base/atomic.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "lock_word.h"
#include "obj_ptr.h"
#include "read_barrier_option.h"
#include "runtime_callbacks.h"
#include "thread_state.h"

namespace art {

class ArtMethod;
class IsMarkedVisitor;
class LockWord;
template<class T> class Handle;
class StackVisitor;
class Thread;
typedef uint32_t MonitorId;

namespace mirror {
class Object;
}  // namespace mirror

enum class LockReason {
  kForWait,
  kForLock,
};

class Monitor {
 public:
  // The default number of spins that are done before thread suspension is used to forcibly inflate
  // a lock word. See Runtime::max_spins_before_thin_lock_inflation_.
  constexpr static size_t kDefaultMaxSpinsBeforeThinLockInflation = 50;

  ~Monitor();

  static void Init(uint32_t lock_profiling_threshold, uint32_t stack_dump_lock_profiling_threshold);

  // Return the thread id of the lock owner or 0 when there is no owner.
  static uint32_t GetLockOwnerThreadId(ObjPtr<mirror::Object> obj)
      NO_THREAD_SAFETY_ANALYSIS;  // TODO: Reading lock owner without holding lock is racy.

  // NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
  static ObjPtr<mirror::Object> MonitorEnter(Thread* thread,
                                             ObjPtr<mirror::Object> obj,
                                             bool trylock)
      EXCLUSIVE_LOCK_FUNCTION(obj.Ptr())
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
  static bool MonitorExit(Thread* thread, ObjPtr<mirror::Object> obj)
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_)
      UNLOCK_FUNCTION(obj.Ptr());

  static void Notify(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, false);
  }
  static void NotifyAll(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, true);
  }

  // Object.wait(). Also called for class init.
  // NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
  static void Wait(Thread* self,
                   ObjPtr<mirror::Object> obj,
                   int64_t ms,
                   int32_t ns,
                   bool interruptShouldThrow, ThreadState why)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
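
  // A minimal caller-side sketch (illustrative only; nothing here adds to the API), assuming
  // `self` is the current Thread and `obj` is the object being synchronized on. MonitorEnter
  // returns the reference to keep using afterwards, since the object may move while the
  // thread blocks:
  //
  //   ObjPtr<mirror::Object> locked = Monitor::MonitorEnter(self, obj, /*trylock=*/ false);
  //   ...  // Critical section: obj is now locked by self.
  //   Monitor::MonitorExit(self, locked);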

  static ThreadState FetchState(const Thread* thread,
                                /* out */ ObjPtr<mirror::Object>* monitor_object,
                                /* out */ uint32_t* lock_owner_tid)
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
  static ObjPtr<mirror::Object> GetContendedMonitor(Thread* thread)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Calls 'callback' once for each lock held in the single stack frame represented by
  // the current state of 'stack_visitor'.
  // The abort_on_failure flag allows the caller to avoid dying when the runtime state is
  // disorderly. This is necessary when we have already aborted but still want to dump as much
  // of the stack as we can.
  static void VisitLocks(StackVisitor* stack_visitor,
                         void (*callback)(ObjPtr<mirror::Object>, void*),
                         void* callback_context,
                         bool abort_on_failure = true)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static bool IsValidLockWord(LockWord lock_word);

  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> GetObject() REQUIRES_SHARED(Locks::mutator_lock_);

  void SetObject(ObjPtr<mirror::Object> object);

  // Provides no memory ordering guarantees.
  Thread* GetOwner() const {
    return owner_.load(std::memory_order_relaxed);
  }

  int32_t GetHashCode();

  // Is the monitor currently locked? Debug only, provides no memory ordering guarantees.
  bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);

  bool HasHashCode() const {
    return hash_code_.load(std::memory_order_relaxed) != 0;
  }

  MonitorId GetMonitorId() const {
    return monitor_id_;
  }

  // Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
  static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                uint32_t hash_code) REQUIRES_SHARED(Locks::mutator_lock_);

  // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
  // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
  // NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_.
  static bool Deflate(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;

#ifndef __LP64__
  void* operator new(size_t size) {
    // Align Monitor* as per the monitor ID field size in the lock word.
    void* result;
    int error = posix_memalign(&result, LockWord::kMonitorIdAlignment, size);
    CHECK_EQ(error, 0) << strerror(error);
    return result;
  }

  void operator delete(void* ptr) {
    free(ptr);
  }
#endif

 private:
  Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_);
  Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code, MonitorId id)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Install the monitor into its object, may fail if another thread installs a different monitor
  // first. Monitor remains in the same logical state as before, i.e. held the same # of times.
  bool Install(Thread* self)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Links a thread into a monitor's wait set. The monitor lock must be held by the caller of this
  // routine.
  void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Unlinks a thread from a monitor's wait set. The monitor lock must be held by the caller of
  // this routine.
  void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Release the monitor lock and signal a waiting thread that has been notified and now needs the
  // lock. Assumes the monitor lock is held exactly once, and the owner_ field has been reset to
  // null. Caller may be suspended (Wait) or runnable (MonitorExit).
  void SignalWaiterAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);

  // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
  // calling thread must own the lock or the owner must be suspended. There's a race with other
  // threads inflating the lock, installing hash codes and spurious failures. The caller should
  // re-read the lock word following the call.
  static void Inflate(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS;  // For m->Install(self)
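
  // Hedged sketch of the re-read the comment above asks for; it assumes the LockWord accessors
  // from lock_word.h (GetState(), LockWord::kFatLocked), and the real caller logic lives in
  // monitor.cc, so treat this as illustrative only:
  //
  //   Inflate(self, owner, obj, hash_code);
  //   LockWord lw = obj->GetLockWord(/*as_volatile=*/ true);
  //   if (lw.GetState() != LockWord::kFatLocked) {
  //     // Lost a race with another inflation or a hash code install; retry from the top.
  //   }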

  void LogContentionEvent(Thread* self,
                          uint32_t wait_ms,
                          uint32_t sample_percent,
                          ArtMethod* owner_method,
                          uint32_t owner_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void FailedUnlock(ObjPtr<mirror::Object> obj,
                           uint32_t expected_owner_thread_id,
                           uint32_t found_owner_thread_id,
                           Monitor* mon)
      REQUIRES(!Locks::thread_list_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Try to lock without blocking, returns true if we acquired the lock.
  // If spin is true, then we spin for a short period before failing.
  bool TryLock(Thread* self, bool spin = false)
      TRY_ACQUIRE(true, monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<LockReason reason = LockReason::kForLock>
  void Lock(Thread* self)
      ACQUIRE(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool Unlock(Thread* thread)
      RELEASE(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DoNotify(Thread* self, ObjPtr<mirror::Object> obj, bool notify_all)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;  // For mon->Notify.

  void Notify(Thread* self)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void NotifyAll(Thread* self)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static std::string PrettyContentionInfo(const std::string& owner_name,
                                          pid_t owner_tid,
                                          ArtMethod* owners_method,
                                          uint32_t owners_dex_pc,
                                          size_t num_waiters)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and
  // (somewhat indirectly) Thread.sleep() and Thread.join().
  //
  // If another thread calls Thread.interrupt(), we throw InterruptedException and return
  // immediately if one of the following is true:
  // - blocked in wait(), wait(long), or wait(long, int) methods of Object
  // - blocked in join(), join(long), or join(long, int) methods of Thread
  // - blocked in sleep(long), or sleep(long, int) methods of Thread
  // Otherwise, we set the "interrupted" flag.
  //
  // Checks to make sure that "ns" is in the range 0-999999 (i.e. fractions of a millisecond) and
  // throws the appropriate exception if it isn't.
  //
  // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so in
  // a loop. This appears to derive from concerns about pthread_cond_wait() on multiprocessor
  // systems. Some commentary on the web casts doubt on whether these can/should occur.
  //
  // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
  // of the 32-bit time epoch.
  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
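
  // Sketch of the loop recommended above, written against the public static Wait() declared
  // earlier (the predicate `ConditionHolds()` is a hypothetical stand-in for whatever condition
  // the caller waits on); re-checking on every wakeup makes spurious wakeups harmless:
  //
  //   Monitor::MonitorEnter(self, obj, /*trylock=*/ false);
  //   while (!ConditionHolds()) {
  //     Monitor::Wait(self, obj, /*ms=*/ 0, /*ns=*/ 0, /*interruptShouldThrow=*/ true, kWaiting);
  //   }
  //   Monitor::MonitorExit(self, obj);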

  // Translates the provided method and pc into its declaring class' source file and line number.
  static void TranslateLocation(ArtMethod* method, uint32_t pc,
                                const char** source_file,
                                int32_t* line_number)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Provides no memory ordering guarantees.
  uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);

  // Set lock_owner_method_ and lock_owner_dex_pc_ corresponding to owner's current stack.
  // owner is either self or suspended.
  void SetLockingMethod(Thread* owner) REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The same, but without checking for a proxy method. Currently requires owner == self.
  void SetLockingMethodNoProxy(Thread* owner) REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Support for systrace output of monitor operations.
  ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
                                              ObjPtr<mirror::Object> obj,
                                              bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void AtraceMonitorLockImpl(Thread* self,
                                    ObjPtr<mirror::Object> obj,
                                    bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE static void AtraceMonitorUnlock();

  static uint32_t lock_profiling_threshold_;
  static uint32_t stack_dump_lock_profiling_threshold_;
  static bool capture_method_eagerly_;

  // Holding the monitor N times is represented by holding monitor_lock_ N times.
  Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Pretend to unlock monitor lock.
  void FakeUnlockMonitorLock() RELEASE(monitor_lock_) NO_THREAD_SAFETY_ANALYSIS {}

  // Number of threads either waiting on the condition or waiting on a contended
  // monitor acquisition. Prevents deflation.
  std::atomic<size_t> num_waiters_;

  // Which thread currently owns the lock? monitor_lock_ only keeps the tid.
  // Only set while holding monitor_lock_. Non-locking readers only use it to
  // compare to self or for debugging.
  std::atomic<Thread*> owner_;

  // Owner's recursive lock depth. Owner_ non-null, and lock_count_ == 0 ==> held once.
  unsigned int lock_count_ GUARDED_BY(monitor_lock_);

  // Owner's recursive lock depth is given by monitor_lock_.GetDepth().

  // What object are we part of. This is a weak root. Do not access
  // this directly, use GetObject() to read it so it will be guarded
  // by a read barrier.
  GcRoot<mirror::Object> obj_;

  // Threads currently waiting on this monitor.
  Thread* wait_set_ GUARDED_BY(monitor_lock_);

  // Threads that were waiting on this monitor, but are now contending on it.
  Thread* wake_set_ GUARDED_BY(monitor_lock_);

  // Stored object hash code, generated lazily by GetHashCode.
  AtomicInteger hash_code_;

  // Data structure used to remember the method and dex pc of a recent holder of the
  // lock. Used for tracing and contention reporting. Setting these is expensive, since it
  // involves a partial stack walk. We set them only as follows, to minimize the cost:
  // - If tracing is enabled, they are needed immediately when we first notice contention, so we
  //   set them unconditionally when a monitor is acquired.
  // - If contention reporting is enabled, we use the lock_owner_request_ field to have the
  //   contending thread request them. The current owner then sets them when releasing the monitor,
  //   making them available when the contending thread acquires the monitor.
  // - If both are enabled, we blindly do both. This usually prevents us from switching between
  //   reporting the end and beginning of critical sections for contention logging when tracing is
  //   enabled. We expect that tracing overhead is normally much higher than for contention
  //   logging, so the added cost should be small. It also minimizes glitches when enabling and
  //   disabling traces.
  // We're tolerant of missing information. E.g. when tracing is initially turned on, we may
  // not have the lock holder information if the holder acquired the lock with tracing off.
  //
  // We make this data unconditionally atomic; for contention logging all accesses are in fact
  // protected by the monitor, but for tracing, reads are not. Writes are always
  // protected by the monitor.
  //
  // The fields are always accessed without memory ordering. We store a checksum, and reread if
  // the checksum doesn't correspond to the values. This results in values that are correct with
  // very high probability, but not certainty.
  //
  // If we need lock_owner information for a certain thread for contention logging, we store its
  // tid in lock_owner_request_. To satisfy the request, we store lock_owner_,
  // lock_owner_method_, and lock_owner_dex_pc_ and the corresponding checksum while holding the
  // monitor.
  //
  // At all times, either lock_owner_ is zero, the checksum is valid, or a thread is actively
  // in the process of establishing one of those states. Only one thread at a time can be actively
  // establishing such a state, since writes are protected by the monitor.
  std::atomic<Thread*> lock_owner_;  // *lock_owner_ may no longer exist!
  std::atomic<ArtMethod*> lock_owner_method_;
  std::atomic<uint32_t> lock_owner_dex_pc_;
  std::atomic<uintptr_t> lock_owner_sum_;

  // Request lock owner save method and dex_pc. Written asynchronously.
  std::atomic<Thread*> lock_owner_request_;

  // Compute method, dex pc, and tid "checksum".
  uintptr_t LockOwnerInfoChecksum(ArtMethod* m, uint32_t dex_pc, Thread* t);

  // Set owning method, dex pc, and tid. owner_ field is set and points to current thread.
  void SetLockOwnerInfo(ArtMethod* method, uint32_t dex_pc, Thread* t)
      REQUIRES(monitor_lock_);

  // Get owning method and dex pc for the given thread, if available.
  void GetLockOwnerInfo(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc, Thread* t);
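
  // A rough sketch of the lockless read protocol described above (the actual logic lives in
  // monitor.cc, so treat this as illustrative): read the values with relaxed atomics, then
  // accept them only if the stored checksum matches, retrying on a mismatch so torn reads
  // are rejected with high probability.
  //
  //   ArtMethod* method;
  //   uint32_t dex_pc;
  //   do {
  //     method = lock_owner_method_.load(std::memory_order_relaxed);
  //     dex_pc = lock_owner_dex_pc_.load(std::memory_order_relaxed);
  //   } while (lock_owner_sum_.load(std::memory_order_relaxed) !=
  //            LockOwnerInfoChecksum(method, dex_pc, t));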

  // Do the same, while holding the monitor. There are no concurrent updates.
  void GetLockOwnerInfoLocked(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc,
                              uint32_t thread_id)
      REQUIRES(monitor_lock_);

  // We never clear the lock_owner method and dex pc. Since it often reflects
  // ownership when we last detected contention, it may be inconsistent with owner_
  // and not 100% reliable. For lock contention monitoring, in the absence of tracing,
  // there is a small risk that the current owner may finish before noticing the request,
  // or the information will be overwritten by another intervening request and monitor
  // release, so it's also not 100% reliable. But if we report information at all, it
  // should generally (modulo accidental checksum matches) pertain to an acquisition of the
  // right monitor by the right thread, so it's extremely unlikely to be seriously misleading.
  // Since we track threads by a pointer to the Thread structure, there is a small chance we may
  // confuse threads allocated at the same exact address, if a contending thread dies before
  // we inquire about it.

  // Check for and act on a pending lock_owner_request_
  void CheckLockOwnerRequest(Thread* self)
      REQUIRES(monitor_lock_) REQUIRES_SHARED(Locks::mutator_lock_);

  // The denser encoded version of this monitor as stored in the lock word.
  MonitorId monitor_id_;

#ifdef __LP64__
  // Free list for monitor pool.
  Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif

  friend class MonitorInfo;
  friend class MonitorList;
  friend class MonitorPool;
  friend class mirror::Object;
  DISALLOW_COPY_AND_ASSIGN(Monitor);
};

class MonitorList {
 public:
  MonitorList();
  ~MonitorList();

  void Add(Monitor* m) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);

  void SweepMonitorList(IsMarkedVisitor* visitor)
      REQUIRES(!monitor_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
  void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
  void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
  // Returns how many monitors were deflated.
  size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
  size_t Size() REQUIRES(!monitor_list_lock_);

  typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;

 private:
  // During sweeping we may free an object and on a separate thread have an object created using
  // the newly freed memory. That object may then have its lock-word inflated and a monitor created.
  // If we allow new monitor registration during sweeping this monitor may be incorrectly freed as
  // the object wasn't marked when sweeping began.
  bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
  Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
  Monitors list_ GUARDED_BY(monitor_list_lock_);

  friend class Monitor;
  DISALLOW_COPY_AND_ASSIGN(MonitorList);
};

// Collects information about the current state of an object's monitor.
// This is very unsafe, and must only be called when all threads are suspended.
// For use only by the JDWP implementation.
class MonitorInfo {
 public:
  MonitorInfo() : owner_(nullptr), entry_count_(0) {}
  MonitorInfo(const MonitorInfo&) = default;
  MonitorInfo& operator=(const MonitorInfo&) = default;
  explicit MonitorInfo(ObjPtr<mirror::Object> o) REQUIRES(Locks::mutator_lock_);

  Thread* owner_;
  size_t entry_count_;
  std::vector<Thread*> waiters_;
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_H_