/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor_pool.h"

#include "base/logging.h"  // For VLOG.
#include "base/mutex-inl.h"
#include "monitor.h"
#include "thread-current-inl.h"

namespace art {

namespace mirror {
class Object;
}  // namespace mirror

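// Overview of the pool layout as used below (constants and helpers such as kChunkSize,
// kChunkCapacity, kAlignedMonitorSize, kMaxChunkLists and ChunkListCapacity() are declared in
// monitor_pool.h):
//
//   monitor_chunks_[list][chunk]  ->  address of a chunk of kChunkSize bytes,
//   one chunk                     ->  kChunkCapacity aligned Monitor slots.
//
// Only the most recently added chunk list can be partially filled (num_chunks_ counts the chunks
// in that list); earlier lists are always full. This is what allows a MonitorId to be converted
// to and from a plain offset into the pool's virtual address space.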
MonitorPool::MonitorPool()
    : current_chunk_list_index_(0), num_chunks_(0), current_chunk_list_capacity_(0),
      first_free_(nullptr) {
  for (size_t i = 0; i < kMaxChunkLists; ++i) {
    monitor_chunks_[i] = nullptr;  // Not absolutely required, but ...
  }
  AllocateChunk();  // Get our first chunk.
}

// Assumes locks are held appropriately when necessary.
// We do not need a lock in the constructor, but we need one when called from CreateMonitorInPool.
void MonitorPool::AllocateChunk() {
  DCHECK(first_free_ == nullptr);

  // Do we need to allocate another chunk list?
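  // A chunk list is never resized in place; when it fills up we move on to a new, larger list.
  // Assuming ChunkListCapacity(i) doubles with the list index i (its definition is in
  // monitor_pool.h), the capacity over all lists up to index i sums to
  //   kInitialChunkStorage * (2^(i+1) - 1) == 2 * ChunkListCapacity(i) - kInitialChunkStorage,
  // which is exactly the value logged by the VLOG below.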
  if (num_chunks_ == current_chunk_list_capacity_) {
    if (current_chunk_list_capacity_ != 0U) {
      ++current_chunk_list_index_;
      CHECK_LT(current_chunk_list_index_, kMaxChunkLists) << "Out of space for inflated monitors";
      VLOG(monitor) << "Expanding to capacity "
          << 2 * ChunkListCapacity(current_chunk_list_index_) - kInitialChunkStorage;
    }  // else we're initializing
    current_chunk_list_capacity_ = ChunkListCapacity(current_chunk_list_index_);
    uintptr_t* new_list = new uintptr_t[current_chunk_list_capacity_]();
    DCHECK(monitor_chunks_[current_chunk_list_index_] == nullptr);
    monitor_chunks_[current_chunk_list_index_] = new_list;
    num_chunks_ = 0;
  }

  // Allocate the chunk.
  void* chunk = allocator_.allocate(kChunkSize);
  // Check we allocated memory.
  CHECK_NE(reinterpret_cast<uintptr_t>(nullptr), reinterpret_cast<uintptr_t>(chunk));
  // Check it is aligned as we need it.
  CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment);

  // Add the chunk.
  monitor_chunks_[current_chunk_list_index_][num_chunks_] = reinterpret_cast<uintptr_t>(chunk);
  num_chunks_++;

  // Set up the free list.
  Monitor* last = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(chunk) +
                                             (kChunkCapacity - 1) * kAlignedMonitorSize);
  last->next_free_ = nullptr;
  // Eagerly compute id.
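  // The id encodes the slot's position in the pool: the monitor in slot s of chunk c in chunk
  // list l lives at pool offset
  //   l * (kMaxListSize * kChunkSize) + c * kChunkSize + s * kAlignedMonitorSize,
  // which the expression below evaluates for the last slot (s == kChunkCapacity - 1) and which
  // the loop afterwards walks backwards through, one kAlignedMonitorSize at a time.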
  last->monitor_id_ = OffsetToMonitorId(current_chunk_list_index_ * (kMaxListSize * kChunkSize)
      + (num_chunks_ - 1) * kChunkSize + (kChunkCapacity - 1) * kAlignedMonitorSize);
  for (size_t i = 0; i < kChunkCapacity - 1; ++i) {
    Monitor* before = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(last) -
                                                 kAlignedMonitorSize);
    before->next_free_ = last;
    // Derive monitor_id from last.
    before->monitor_id_ = OffsetToMonitorId(MonitorIdToOffset(last->monitor_id_) -
                                            kAlignedMonitorSize);

    last = before;
  }
  DCHECK(last == reinterpret_cast<Monitor*>(chunk));
  first_free_ = last;
}

void MonitorPool::FreeInternal() {
  // This is on shutdown with NO_THREAD_SAFETY_ANALYSIS, can't/don't need to lock.
  DCHECK_NE(current_chunk_list_capacity_, 0UL);
  for (size_t i = 0; i <= current_chunk_list_index_; ++i) {
    DCHECK_NE(monitor_chunks_[i], static_cast<uintptr_t*>(nullptr));
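    // Every list before the current one is completely filled; only the current list may contain
    // unused (zero) entries. The i/j test below distinguishes the two cases.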
    for (size_t j = 0; j < ChunkListCapacity(i); ++j) {
      if (i < current_chunk_list_index_ || j < num_chunks_) {
        DCHECK_NE(monitor_chunks_[i][j], 0U);
        allocator_.deallocate(reinterpret_cast<uint8_t*>(monitor_chunks_[i][j]), kChunkSize);
      } else {
        DCHECK_EQ(monitor_chunks_[i][j], 0U);
      }
    }
    delete[] monitor_chunks_[i];
  }
}

Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
                                          int32_t hash_code)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // We are going to allocate, so acquire the writer lock.
  MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);

  // Enough space, or need to resize?
  if (first_free_ == nullptr) {
    VLOG(monitor) << "Allocating a new chunk.";
    AllocateChunk();
  }

  Monitor* mon_uninitialized = first_free_;
  first_free_ = first_free_->next_free_;

  // Pull out the id which was preinitialized.
  MonitorId id = mon_uninitialized->monitor_id_;

  // Initialize it.
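  // Placement new constructs the Monitor in the pooled slot it came from; the preinitialized id
  // is passed through so the monitor keeps the id encoding this slot's position (computed in
  // AllocateChunk), i.e. MonitorIdToOffset(id) still refers to this storage.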
  Monitor* monitor = new(mon_uninitialized) Monitor(self, owner, obj, hash_code, id);

  return monitor;
}

void MonitorPool::ReleaseMonitorToPool(Thread* self, Monitor* monitor) {
  // Might be racy with allocation, so acquire the lock.
  MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);

  // Keep the monitor id. Don't trust that it won't be cleared.
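  // The destructor below may scribble over the storage, but the id belongs to the slot rather
  // than to any particular Monitor instance, so save it here and write it back once the slot has
  // been threaded onto the free list.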
  MonitorId id = monitor->monitor_id_;

  // Call the destructor.
  // TODO: Exception safety?
  monitor->~Monitor();

  // Add to the head of the free list.
  monitor->next_free_ = first_free_;
  first_free_ = monitor;

  // Rewrite monitor id.
  monitor->monitor_id_ = id;
}

void MonitorPool::ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors) {
  for (Monitor* mon : *monitors) {
    ReleaseMonitorToPool(self, mon);
  }
}

}  // namespace art