blob: 4521a22c6fee49f05cb2caf43fc093e39ce8fbed [file] [log] [blame]
Ian Rogersef7d42f2014-01-06 12:55:46 -08001/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_MONITOR_POOL_H_
18#define ART_RUNTIME_MONITOR_POOL_H_
19
Ian Rogersb0fa5dc2014-04-28 16:47:08 -070020#include "monitor.h"
21
Mathieu Chartierbad02672014-08-25 13:08:22 -070022#include "base/allocator.h"
Ian Rogers44d6ff12014-03-06 23:11:11 -080023#ifdef __LP64__
Ian Rogersef7d42f2014-01-06 12:55:46 -080024#include <stdint.h>
David Sehrc431b9d2018-03-02 12:01:51 -080025#include "base/atomic.h"
Ian Rogers44d6ff12014-03-06 23:11:11 -080026#include "runtime.h"
Andreas Gampe74240812014-04-17 10:35:09 -070027#else
28#include "base/stl_util.h" // STLDeleteElements
Ian Rogers44d6ff12014-03-06 23:11:11 -080029#endif
30
Ian Rogersef7d42f2014-01-06 12:55:46 -080031namespace art {
32
// Abstraction to keep monitors small enough to fit in a lock word (32bits). On 32bit systems the
// monitor id loses the alignment bits of the Monitor*.
//
// On 32-bit builds there is no pool at all: monitors are allocated with plain new/delete and the
// (aligned) Monitor* itself, shifted right, serves as the monitor id. On 64-bit builds a pointer
// no longer fits in the lock word, so monitors live in pooled chunks and the id encodes the
// monitor's position within the pool (see the monitor_chunks_ comment below for the exact layout).
class MonitorPool {
 public:
  // Creates the process-wide pool. Returns nullptr on 32-bit builds, where no pool is needed.
  static MonitorPool* Create() {
#ifndef __LP64__
    return nullptr;
#else
    return new MonitorPool();
#endif
  }

  // Allocates and constructs a Monitor. On 32-bit, a direct heap allocation; the DCHECK enforces
  // the alignment that MonitorIdFromMonitor relies on when shifting the pointer into an id.
  // On 64-bit, delegates to the pool so the resulting monitor has a pool-relative id.
  static Monitor* CreateMonitor(Thread* self,
                                Thread* owner,
                                ObjPtr<mirror::Object> obj,
                                int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_) {
#ifndef __LP64__
    Monitor* mon = new Monitor(self, owner, obj, hash_code);
    DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment);
    return mon;
#else
    return GetMonitorPool()->CreateMonitorInPool(self, owner, obj, hash_code);
#endif
  }

  // Destroys a single monitor, returning its storage to the pool on 64-bit builds.
  static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
    UNUSED(self);
    delete monitor;
#else
    GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
#endif
  }

  // Destroys every monitor in the list (bulk variant of ReleaseMonitor).
  static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
    UNUSED(self);
    STLDeleteElements(monitors);
#else
    GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
#endif
  }

  // Maps a monitor id back to its Monitor*. On 32-bit the id is just the pointer with its
  // alignment bits shifted out; on 64-bit the id indexes into the pool's chunk structure.
  static Monitor* MonitorFromMonitorId(MonitorId mon_id) {
#ifndef __LP64__
    return reinterpret_cast<Monitor*>(mon_id << LockWord::kMonitorIdAlignmentShift);
#else
    return GetMonitorPool()->LookupMonitor(mon_id);
#endif
  }

  // Inverse of MonitorFromMonitorId. On 64-bit the id was computed at creation time and is
  // cached on the Monitor itself.
  static MonitorId MonitorIdFromMonitor(Monitor* mon) {
#ifndef __LP64__
    return reinterpret_cast<MonitorId>(mon) >> LockWord::kMonitorIdAlignmentShift;
#else
    return mon->GetMonitorId();
#endif
  }

  // Computes a monitor's id from scratch. Unlike MonitorIdFromMonitor, the 64-bit path searches
  // the pool's chunks (taking allocated_monitor_ids_lock_) rather than reading a cached value.
  static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
    UNUSED(self);
    return MonitorIdFromMonitor(mon);
#else
    return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
#endif
  }

  // Returns the runtime's singleton pool (nullptr on 32-bit, where no pool exists).
  static MonitorPool* GetMonitorPool() {
#ifndef __LP64__
    return nullptr;
#else
    return Runtime::Current()->GetMonitorPool();
#endif
  }

  ~MonitorPool() {
#ifdef __LP64__
    FreeInternal();
#endif
  }

 private:
#ifdef __LP64__
  // When we create a monitor pool, threads have not been initialized, yet, so ignore thread-safety
  // analysis.
  MonitorPool() NO_THREAD_SAFETY_ANALYSIS;

  // Appends one more chunk of monitor storage, growing the current chunk list (or starting the
  // next, larger one) as needed.
  void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_);

  // Release all chunks and metadata. This is done on shutdown, where threads have been destroyed,
  // so ignore thread-safety analysis.
  void FreeInternal() NO_THREAD_SAFETY_ANALYSIS;

  Monitor* CreateMonitorInPool(Thread* self,
                               Thread* owner,
                               ObjPtr<mirror::Object> obj,
                               int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
  void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);

  // Note: This is safe as we do not ever move chunks. All needed entries in the monitor_chunks_
  // data structure are read-only once we get here. Updates happen-before this call because
  // the lock word was stored with release semantics and we read it with acquire semantics to
  // retrieve the id.
  Monitor* LookupMonitor(MonitorId mon_id) {
    // Decompose the id into (chunk list, chunk within list, byte offset within chunk).
    size_t offset = MonitorIdToOffset(mon_id);
    size_t index = offset / kChunkSize;
    size_t top_index = index / kMaxListSize;
    size_t list_index = index % kMaxListSize;
    size_t offset_in_chunk = offset % kChunkSize;
    uintptr_t base = monitor_chunks_[top_index][list_index];
    return reinterpret_cast<Monitor*>(base + offset_in_chunk);
  }

  // True if mon's address lies within the kChunkSize-byte chunk starting at base_addr.
  static bool IsInChunk(uintptr_t base_addr, Monitor* mon) {
    uintptr_t mon_ptr = reinterpret_cast<uintptr_t>(mon);
    return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize);
  }

  // Linear scan over all allocated chunks (under allocated_monitor_ids_lock_) to find the one
  // containing mon; the id is then the monitor's byte offset from the pool's logical start.
  MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) {
    MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
    for (size_t i = 0; i <= current_chunk_list_index_; ++i) {
      for (size_t j = 0; j < ChunkListCapacity(i); ++j) {
        // Only the first num_chunks_ slots of the current (last) list are populated.
        if (j >= num_chunks_ && i == current_chunk_list_index_) {
          break;
        }
        uintptr_t chunk_addr = monitor_chunks_[i][j];
        if (IsInChunk(chunk_addr, mon)) {
          return OffsetToMonitorId(
              reinterpret_cast<uintptr_t>(mon) - chunk_addr
              + i * (kMaxListSize * kChunkSize) + j * kChunkSize);
        }
      }
    }
    LOG(FATAL) << "Did not find chunk that contains monitor.";
    return 0;
  }

  // Ids are byte offsets scaled down by the 8-byte monitor alignment (hence the shift by 3).
  static constexpr size_t MonitorIdToOffset(MonitorId id) {
    return id << 3;
  }

  static constexpr MonitorId OffsetToMonitorId(size_t offset) {
    return static_cast<MonitorId>(offset >> 3);
  }

  // Capacity of the index-th chunk list; each list doubles the previous one's capacity.
  static constexpr size_t ChunkListCapacity(size_t index) {
    return kInitialChunkStorage << index;
  }

  // TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3).
  static constexpr size_t kMonitorAlignment = 8;
  // Size of a monitor, rounded up to a multiple of alignment.
  // (-kMonitorAlignment on an unsigned value yields the ...11111000 mask that clears the low bits.)
  static constexpr size_t kAlignedMonitorSize = (sizeof(Monitor) + kMonitorAlignment - 1) &
      -kMonitorAlignment;
  // As close to a page as we can get seems a good start.
  static constexpr size_t kChunkCapacity = kPageSize / kAlignedMonitorSize;
  // Chunk size that is referenced in the id. We can collapse this to the actually used storage
  // in a chunk, i.e., kChunkCapacity * kAlignedMonitorSize, but this will mean proper divisions.
  static constexpr size_t kChunkSize = kPageSize;
  static_assert(IsPowerOfTwo(kChunkSize), "kChunkSize must be power of 2");
  // The number of chunks of storage that can be referenced by the initial chunk list.
  // The total number of usable monitor chunks is typically 255 times this number, so it
  // should be large enough that we don't run out. We run out of address bits if it's > 512.
  // Currently we set it a bit smaller, to save half a page per process. We make it tiny in
  // debug builds to catch growth errors. The only value we really expect to tune.
  static constexpr size_t kInitialChunkStorage = kIsDebugBuild ? 1U : 256U;
  static_assert(IsPowerOfTwo(kInitialChunkStorage), "kInitialChunkStorage must be power of 2");
  // The number of lists, each containing pointers to storage chunks.
  static constexpr size_t kMaxChunkLists = 8;  //  Dictated by 3 bit index. Don't increase above 8.
  static_assert(IsPowerOfTwo(kMaxChunkLists), "kMaxChunkLists must be power of 2");
  static constexpr size_t kMaxListSize = kInitialChunkStorage << (kMaxChunkLists - 1);
  // We lose 3 bits in monitor id due to 3 bit monitor_chunks_ index, and gain it back from
  // the 3 bit alignment constraint on monitors:
  static_assert(kMaxListSize * kChunkSize < (1 << LockWord::kMonitorIdSize),
                "Monitor id bits don't fit");
  static_assert(IsPowerOfTwo(kMaxListSize), "kMaxListSize must be power of 2");

  // Array of pointers to lists (again arrays) of pointers to chunks containing monitors.
  // Zeroth entry points to a list (array) of kInitialChunkStorage pointers to chunks.
  // Each subsequent list is twice as large as the preceding one.
  // Monitor Ids are interpreted as follows:
  //     Top 3 bits (of 28): index into monitor_chunks_.
  //     Next 16 bits: index into the chunk list, i.e. monitor_chunks_[i].
  //     Last 9 bits: offset within chunk, expressed as multiple of kMonitorAlignment.
  // If we set kInitialChunkStorage to 512, this would allow us to use roughly 128K chunks of
  // monitors, which is 0.5GB of monitors.  With this maximum setting, the largest chunk list
  // contains 64K entries, and we make full use of the available index space. With a
  // kInitialChunkStorage value of 256, this is proportionately reduced to 0.25GB of monitors.
  // Updates to monitor_chunks_ are guarded by allocated_monitor_ids_lock_ .
  // No field in this entire data structure is ever updated once a monitor id whose lookup
  // requires it has been made visible to another thread.  Thus readers never race with
  // updates, in spite of the fact that they acquire no locks.
  uintptr_t* monitor_chunks_[kMaxChunkLists];  //  uintptr_t is really a Monitor* .
  // Highest currently used index in monitor_chunks_ . Used for newly allocated chunks.
  size_t current_chunk_list_index_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // Number of chunk pointers stored in monitor_chunks_[current_chunk_list_index_] so far.
  size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // After the initial allocation, this is always equal to
  // ChunkListCapacity(current_chunk_list_index_).
  size_t current_chunk_list_capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);

  // Allocator used for the chunk-list backing arrays (tagged for allocation tracking).
  typedef TrackingAllocator<uint8_t, kAllocatorTagMonitorPool> Allocator;
  Allocator allocator_;

  // Start of free list of monitors.
  // Note: these point to the right memory regions, but do *not* denote initialized objects.
  Monitor* first_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif
};
247
248} // namespace art
249
250#endif // ART_RUNTIME_MONITOR_POOL_H_