/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc.h"

#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "gc/space/memory_tool_settings.h"
#include "mem_map.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

#include <map>
#include <list>
#include <sstream>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

static constexpr bool kUsePrefetchDuringAllocRun = false;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
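// A page of zeroed storage viewed as a permanently "full" run. It serves as a sentinel:
// current_runs_ and the per-thread run slots point at it whenever no usable run is available,
// so the fast path can simply call AllocSlot() and observe failure instead of null-checking
// the run pointer.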
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, bool running_on_memory_tool,
                   size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold),
      is_running_on_memory_tool_(running_on_memory_tool) {
  DCHECK_ALIGNED(base, kPageSize);
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
  // Zero the memory explicitly (don't rely on the mem map being zero-initialized).
  if (!kMadviseZeroes) {
    memset(base_, 0, max_capacity);
  }
  CHECK_EQ(madvise(base_, max_capacity, MADV_DONTNEED), 0);
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("an rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
  if (is_running_on_memory_tool_) {
    MEMORY_TOOL_MAKE_DEFINED(base_, capacity_);
  }
}

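// Allocates num_pages contiguous pages using first fit over the address-ordered set of free
// page runs, splitting off any remainder. If no run is large enough, the footprint is grown
// (up to capacity_) and the allocation is retried from the resulting trailing free run.
// Caller must hold lock_.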
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = nullptr;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      it = free_page_runs_.erase(it);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = nullptr;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
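      // Grow by at least 2 MB at a time (but never beyond capacity_); an existing free run
      // at the end of the heap counts toward the requested size.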
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != nullptr);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != nullptr)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
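    // The page map stores one byte per page: the first page of a region gets the type and
    // the rest get the matching "part" type. For example, a three-page large object is
    // encoded as { kPageMapLargeObject, kPageMapLargeObjectPart, kPageMapLargeObjectPart }.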
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
  }
  return nullptr;
}

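// Returns the pages starting at ptr to free_page_runs_, coalescing with adjacent free runs
// on both sides, and returns the number of bytes freed. Caller must hold lock_.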
size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (ShouldCheckZeroMemory()) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
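    // free_page_runs_ is ordered by address, so upper_bound(fpr) yields the first run above
    // fpr; keep merging while each successive run starts exactly where the merged run ends.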
    for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.end(); ) {
      FreePageRun* h = *it;
      DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                  << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                  << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                  << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
      }
      if (fpr->End(this) == h->Begin()) {
        if (kTraceRosAlloc) {
          LOG(INFO) << "Success";
        }
        // Clear magic num since this is no longer the start of a free page run.
        if (kIsDebugBuild) {
          h->magic_num_ = 0;
        }
        it = free_page_runs_.erase(it);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(h)
                    << " from free_page_runs_";
        }
        fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      } else {
        // Not adjacent. Stop.
        if (kTraceRosAlloc) {
          LOG(INFO) << "Fail";
        }
        break;
      }
    }
    // Try to coalesce in the lower address direction.
    for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.begin(); ) {
      --it;

      FreePageRun* l = *it;
      DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                  << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                  << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                  << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
      }
      if (l->End(this) == fpr->Begin()) {
        if (kTraceRosAlloc) {
          LOG(INFO) << "Success";
        }
        it = free_page_runs_.erase(it);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(l)
                    << " from free_page_runs_";
        }
        l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Clear magic num since this is no longer the start of a free page run.
        if (kIsDebugBuild) {
          fpr->magic_num_ = 0;
        }
        fpr = l;
      } else {
        // Not adjacent. Stop.
        if (kTraceRosAlloc) {
          LOG(INFO) << "Fail";
        }
        break;
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

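// Allocations above kLargeSizeThreshold bypass runs entirely and are satisfied directly with
// whole pages tagged kPageMapLargeObject in the page map.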
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  *usable_size = total_bytes;
  *bytes_tl_bulk_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory()) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

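// Allocates the pages for a brand-new run of size bracket idx and initializes its header and
// free list. Optionally (kUsePrefetchDuringAllocRun) warms the cache lines, since a new run
// for a small bracket is likely to become a thread-local run.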
RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    DCHECK(!new_run->IsThreadLocal());
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines, since this run is likely to become thread-local.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
    new_run->InitFreeList();
  }
  return new_run;
}

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

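// Allocates a slot from the shared current run of bracket idx, replacing the run via
// RefillRun() once it fills up. The caller must hold size_bracket_locks_[idx] or otherwise
// exclude concurrent access, as in AllocFromRunThreadUnsafe().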
inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
    *bytes_tl_bulk_allocated = bracket_size;
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

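// Allocation fast path for sizes up to kLargeSizeThreshold. Brackets below
// kNumThreadLocalSizeBrackets are served lock-free from the calling thread's own run; larger
// brackets fall back to the shared current run under the per-bracket lock.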
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated,
                             size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  void* slot_addr;
  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since its free lists are empty.
      if (thread_local_run->MergeThreadLocalFreeListToFreeList(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }
      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      // Account for all the free slots in the new or refreshed thread local run.
      *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size;
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    } else {
      // The slot is already counted. Leave it as is.
      *bytes_tl_bulk_allocated = 0;
    }
    DCHECK(slot_addr != nullptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    if (LIKELY(slot_addr != nullptr)) {
      *bytes_allocated = bracket_size;
      *usable_size = bracket_size;
      *bytes_tl_bulk_allocated = bracket_size;
    }
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

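// Frees a slot back into its run and returns the bracket size. A thread-local run collects
// the freed slot on its thread-local free list; a shared run is freed directly, moving
// between the full/non-full sets and releasing its pages once completely empty.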
size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just add the slot to the thread-local free list and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->AddToThreadLocalFreeList(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeaderAndSlotHeaders();
    {
      MutexLock lock_mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}

template<bool kUseTail>
std::string RosAlloc::Run::FreeListToStr(SlotFreeList<kUseTail>* free_list) {
  std::string free_list_str;
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  for (Slot* slot = free_list->Head(); slot != nullptr; slot = slot->Next()) {
    bool is_last = slot->Next() == nullptr;
    uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) -
        reinterpret_cast<uintptr_t>(FirstSlot());
    DCHECK_EQ(slot_offset % bracket_size, 0U);
    uintptr_t slot_idx = slot_offset / bracket_size;
    if (!is_last) {
      free_list_str.append(StringPrintf("%u-", static_cast<uint32_t>(slot_idx)));
    } else {
      free_list_str.append(StringPrintf("%u", static_cast<uint32_t>(slot_idx)));
    }
  }
  return free_list_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " free_list=" << FreeListToStr(&free_list_)
         << " bulk_free_list=" << FreeListToStr(&bulk_free_list_)
         << " thread_local_list=" << FreeListToStr(&thread_local_free_list_)
         << " }" << std::endl;
  return stream.str();
}

void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(slot, 0, bracket_size);
  free_list_.Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : " << slot
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
}

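// Merges slots that FreeFromRun() parked on this run's thread-local free list back into the
// main free list. Returns true if at least one slot was reclaimed that way;
// *is_all_free_after_out reports whether the run ended up completely free.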
inline bool RosAlloc::Run::MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Merge the thread local free list into the free list and clear the thread local free list.
  const uint8_t idx = size_bracket_idx_;
  const size_t thread_local_free_list_size = thread_local_free_list_.Size();
  const size_t size_before = free_list_.Size();
  free_list_.Merge(&thread_local_free_list_);
  const size_t size_after = free_list_.Size();
  DCHECK_EQ(size_before < size_after, thread_local_free_list_size > 0);
  DCHECK_LE(size_before, size_after);
  *is_all_free_after_out = free_list_.Size() == numOfSlots[idx];
  // Return true if at least one slot was added to the free list.
  return size_before < size_after;
}

inline void RosAlloc::Run::MergeBulkFreeListToFreeList() {
  DCHECK(!IsThreadLocal());
  // Merge the bulk free list into the free list and clear the bulk free list.
  free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::MergeBulkFreeListToThreadLocalFreeList() {
  DCHECK(IsThreadLocal());
  // Merge the bulk free list into the thread local free list and clear the bulk free list.
  thread_local_free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::AddToThreadLocalFreeList(void* ptr) {
  DCHECK(IsThreadLocal());
  AddToFreeListShared(ptr, &thread_local_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToBulkFreeList(void* ptr) {
  return AddToFreeListShared(ptr, &bulk_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToFreeListShared(void* ptr,
                                                 SlotFreeList<true>* free_list,
                                                 const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  memset(slot, 0, bracket_size);
  free_list->Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : " << ptr
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
  return bracket_size;
}

inline void RosAlloc::Run::ZeroHeaderAndSlotHeaders() {
  DCHECK(IsAllFree());
  const uint8_t idx = size_bracket_idx_;
  // Zero the slot header (next pointers).
  for (Slot* slot = free_list_.Head(); slot != nullptr; ) {
    Slot* next_slot = slot->Next();
    slot->Clear();
    slot = next_slot;
  }
  // Zero the header.
  memset(this, 0, headerSizes[idx]);
  // Check that the entire run is all zero.
  if (kIsDebugBuild) {
    const size_t size = numOfPages[idx] * kPageSize;
    const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(this);
    for (size_t i = 0; i < size / sizeof(uintptr_t); ++i) {
      CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
    }
  }
}

inline void RosAlloc::Run::ZeroData() {
  const uint8_t idx = size_bracket_idx_;
  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(FirstSlot());
  memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
}

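// Invokes handler once per slot in address order, passing bracket_size as used_bytes for
// slots that are in use and 0 for slots found on the free lists.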
void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size,
            reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
  // Free slots are on the free list and the allocated/used slots are not. We traverse the free list
  // to find out and record which slots are free in the is_free array.
  std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized
  for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
    size_t slot_idx = SlotIndex(slot);
    DCHECK_LT(slot_idx, num_slots);
    is_free[slot_idx] = true;
  }
  if (IsThreadLocal()) {
    for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
      size_t slot_idx = SlotIndex(slot);
      DCHECK_LT(slot_idx, num_slots);
      is_free[slot_idx] = true;
    }
  }
  for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
    uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
    if (!is_free[slot_idx]) {
      handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
    } else {
      handler(slot_addr, slot_addr + bracket_size, 0, arg);
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;

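// Bulk-frees a batch of pointers on behalf of the GC. Large objects release their pages
// immediately under lock_; run slots are gathered into each run's bulk free list and the
// affected runs are collected into the runs set so they can be merged back under the size
// bracket locks afterwards.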
size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  size_t freed_bytes = 0;
  if ((false)) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      freed_bytes += FreeInternal(self, ptrs[i]);
    }
    return freed_bytes;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in each run's bulk free list without locking the
  // size bracket locks. On host, unordered_set is faster than vector + flag.
Bilyan Borisovbb661c02016-04-04 16:27:32 +01001013#ifdef ART_TARGET_ANDROID
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001014 std::vector<Run*> runs;
1015#else
Ian Rogers700a4022014-05-19 16:49:03 -07001016 std::unordered_set<Run*, hash_run, eq_run> runs;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001017#endif
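  // On device, the to_be_bulk_freed_ flag below dedupes runs pushed into the vector;
  // on host, the unordered_set dedupes directly via insert().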
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001018 for (size_t i = 0; i < num_ptrs; i++) {
1019 void* ptr = ptrs[i];
Ian Rogers5d057052014-03-12 14:32:27 -07001020 DCHECK_LE(base_, ptr);
1021 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001022 size_t pm_idx = RoundDownToPageMapIndex(ptr);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001023 Run* run = nullptr;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001024 if (kReadPageMapEntryWithoutLockInBulkFree) {
1025 // Read the page map entries without locking the lock.
Ian Rogers13735952014-10-08 12:43:28 -07001026 uint8_t page_map_entry = page_map_[pm_idx];
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001027 if (kTraceRosAlloc) {
1028 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1029 << std::dec << pm_idx
1030 << ", page_map_entry=" << static_cast<int>(page_map_entry);
1031 }
1032 if (LIKELY(page_map_entry == kPageMapRun)) {
1033 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001034 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1035 size_t pi = pm_idx;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001036 // Find the beginning of the run.
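        // For example, for a run spanning pages [3, 6] with pm_idx == 5, this scans
        // page 4 and then page 3, where page_map_[3] == kPageMapRun.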
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001037 do {
1038 --pi;
Ian Rogers5d057052014-03-12 14:32:27 -07001039 DCHECK_LT(pi, capacity_ / kPageSize);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001040 } while (page_map_[pi] != kPageMapRun);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001041 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001042 } else if (page_map_entry == kPageMapLargeObject) {
1043 MutexLock mu(self, lock_);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001044 freed_bytes += FreePages(self, ptr, false);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001045 continue;
1046 } else {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001047 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001048 }
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001049 } else {
1050 // Read the page map entries with a lock.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001051 MutexLock mu(self, lock_);
1052 DCHECK_LT(pm_idx, page_map_size_);
Ian Rogers13735952014-10-08 12:43:28 -07001053 uint8_t page_map_entry = page_map_[pm_idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001054 if (kTraceRosAlloc) {
1055 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1056 << std::dec << pm_idx
1057 << ", page_map_entry=" << static_cast<int>(page_map_entry);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001058 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001059 if (LIKELY(page_map_entry == kPageMapRun)) {
1060 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1061 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1062 size_t pi = pm_idx;
1063 // Find the beginning of the run.
1064 do {
1065 --pi;
1066 DCHECK_LT(pi, capacity_ / kPageSize);
1067 } while (page_map_[pi] != kPageMapRun);
1068 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
1069 } else if (page_map_entry == kPageMapLargeObject) {
1070 freed_bytes += FreePages(self, ptr, false);
1071 continue;
1072 } else {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001073 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001074 }
1075 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001076 DCHECK(run != nullptr);
1077 DCHECK_EQ(run->magic_num_, kMagicNum);
1078    // Add the slot to the run's bulk free list.
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001079 freed_bytes += run->AddToBulkFreeList(ptr);
Bilyan Borisovbb661c02016-04-04 16:27:32 +01001080#ifdef ART_TARGET_ANDROID
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001081 if (!run->to_be_bulk_freed_) {
1082 run->to_be_bulk_freed_ = true;
1083 runs.push_back(run);
1084 }
1085#else
1086 runs.insert(run);
1087#endif
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001088 }
1089
1090  // Now, iterate over the affected runs and merge each run's bulk free
1091  // list into its free list (for non-thread-local runs) or union the
1092  // bulk free list into the thread-local free list (for thread-local
1093  // runs.)
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001094 for (Run* run : runs) {
Bilyan Borisovbb661c02016-04-04 16:27:32 +01001095#ifdef ART_TARGET_ANDROID
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001096 DCHECK(run->to_be_bulk_freed_);
1097 run->to_be_bulk_freed_ = false;
1098#endif
1099 size_t idx = run->size_bracket_idx_;
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001100 MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001101 if (run->IsThreadLocal()) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07001102 DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001103 DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
1104 DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001105 run->MergeBulkFreeListToThreadLocalFreeList();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001106 if (kTraceRosAlloc) {
1107 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
1108 << std::hex << reinterpret_cast<intptr_t>(run);
1109 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001110 DCHECK(run->IsThreadLocal());
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001111 // A thread local run will be kept as a thread local even if
1112 // it's become all free.
1113 } else {
1114 bool run_was_full = run->IsFull();
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001115 run->MergeBulkFreeListToFreeList();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001116 if (kTraceRosAlloc) {
1117 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
1118 << reinterpret_cast<intptr_t>(run);
1119 }
1120 // Check if the run should be moved to non_full_runs_ or
1121 // free_page_runs_.
Mathieu Chartier58553c72014-09-16 16:25:55 -07001122 auto* non_full_runs = &non_full_runs_[idx];
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001123 auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001124 if (run->IsAllFree()) {
1125 // It has just become completely free. Free the pages of the
1126 // run.
1127 bool run_was_current = run == current_runs_[idx];
1128 if (run_was_current) {
1129 DCHECK(full_runs->find(run) == full_runs->end());
1130 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1131 // If it was a current run, reuse it.
1132 } else if (run_was_full) {
1133 // If it was full, remove it from the full run set (debug
1134 // only.)
1135 if (kIsDebugBuild) {
Ian Rogers700a4022014-05-19 16:49:03 -07001136 std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001137 DCHECK(pos != full_runs->end());
1138 full_runs->erase(pos);
1139 if (kTraceRosAlloc) {
1140 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1141 << reinterpret_cast<intptr_t>(run)
1142 << " from full_runs_";
1143 }
1144 DCHECK(full_runs->find(run) == full_runs->end());
1145 }
1146 } else {
1147 // If it was in a non full run set, remove it from the set.
1148 DCHECK(full_runs->find(run) == full_runs->end());
1149 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1150 non_full_runs->erase(run);
1151 if (kTraceRosAlloc) {
1152 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1153 << reinterpret_cast<intptr_t>(run)
1154 << " from non_full_runs_";
1155 }
1156 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1157 }
1158 if (!run_was_current) {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001159 run->ZeroHeaderAndSlotHeaders();
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001160 MutexLock lock_mu(self, lock_);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001161 FreePages(self, run, true);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001162 }
1163 } else {
1164        // It is not completely free. If it wasn't the current run and
1165        // wasn't already in the non-full run set (i.e., it was full),
1166        // insert it into the non-full run set.
1167 if (run == current_runs_[idx]) {
1168 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1169 DCHECK(full_runs->find(run) == full_runs->end());
1170 // If it was a current run, keep it.
1171 } else if (run_was_full) {
1172 // If it was full, remove it from the full run set (debug
1173 // only) and insert into the non-full run set.
1174 DCHECK(full_runs->find(run) != full_runs->end());
1175 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1176 if (kIsDebugBuild) {
1177 full_runs->erase(run);
1178 if (kTraceRosAlloc) {
1179 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1180 << reinterpret_cast<intptr_t>(run)
1181 << " from full_runs_";
1182 }
1183 }
1184 non_full_runs->insert(run);
1185 if (kTraceRosAlloc) {
1186 LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
1187 << reinterpret_cast<intptr_t>(run)
1188 << " into non_full_runs_[" << std::dec << idx;
1189 }
1190 } else {
1191          // It was not full, so leave it in the non-full run set.
1192 DCHECK(full_runs->find(run) == full_runs->end());
1193 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1194 }
1195 }
1196 }
1197 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001198 return freed_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001199}
1200
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001201std::string RosAlloc::DumpPageMap() {
1202 std::ostringstream stream;
1203 stream << "RosAlloc PageMap: " << std::endl;
1204 lock_.AssertHeld(Thread::Current());
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001205 size_t end = page_map_size_;
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001206 FreePageRun* curr_fpr = nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001207 size_t curr_fpr_size = 0;
1208 size_t remaining_curr_fpr_size = 0;
1209 size_t num_running_empty_pages = 0;
1210 for (size_t i = 0; i < end; ++i) {
Ian Rogers13735952014-10-08 12:43:28 -07001211 uint8_t pm = page_map_[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001212 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001213 case kPageMapReleased:
1214 // Fall-through.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001215 case kPageMapEmpty: {
1216 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1217 if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
1218 // Encountered a fresh free page run.
1219 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1220 DCHECK(fpr->IsFree());
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001221 DCHECK(curr_fpr == nullptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001222 DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
1223 curr_fpr = fpr;
1224 curr_fpr_size = fpr->ByteSize(this);
1225 DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
1226 remaining_curr_fpr_size = curr_fpr_size - kPageSize;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001227 stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
1228 << " (FPR start) fpr_size=" << curr_fpr_size
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001229 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001230 if (remaining_curr_fpr_size == 0) {
1231 // Reset at the end of the current free page run.
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001232 curr_fpr = nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001233 curr_fpr_size = 0;
1234 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001235 stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001236 DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
1237 } else {
1238 // Still part of the current free page run.
1239 DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001240 DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001241 DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
1242 DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
1243 remaining_curr_fpr_size -= kPageSize;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001244        stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty") << " (FPR part)"
1245 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001246 if (remaining_curr_fpr_size == 0) {
1247 // Reset at the end of the current free page run.
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001248 curr_fpr = nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001249 curr_fpr_size = 0;
1250 }
1251 }
1252 num_running_empty_pages++;
1253 break;
1254 }
1255 case kPageMapLargeObject: {
1256 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1257 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001258 stream << "[" << i << "]=Large (start)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001259 break;
1260 }
1261 case kPageMapLargeObjectPart:
1262 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1263 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001264 stream << "[" << i << "]=Large (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001265 break;
1266 case kPageMapRun: {
1267 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1268 num_running_empty_pages = 0;
1269 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1270 size_t idx = run->size_bracket_idx_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001271 stream << "[" << i << "]=Run (start)"
1272 << " idx=" << idx
1273 << " numOfPages=" << numOfPages[idx]
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001274 << " is_thread_local=" << run->is_thread_local_
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001275 << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
1276 << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001277 break;
1278 }
1279 case kPageMapRunPart:
1280 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1281 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001282 stream << "[" << i << "]=Run (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001283 break;
1284 default:
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001285 stream << "[" << i << "]=Unrecognizable page map type: " << pm;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001286 break;
1287 }
1288 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001289 return stream.str();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001290}
1291
Andreas Gamped7576322014-10-24 22:13:45 -07001292size_t RosAlloc::UsableSize(const void* ptr) {
Ian Rogers5d057052014-03-12 14:32:27 -07001293 DCHECK_LE(base_, ptr);
1294 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001295 size_t pm_idx = RoundDownToPageMapIndex(ptr);
1296 MutexLock mu(Thread::Current(), lock_);
1297 switch (page_map_[pm_idx]) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001298 case kPageMapReleased:
1299 // Fall-through.
1300 case kPageMapEmpty:
1301 LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1302 << std::hex << reinterpret_cast<intptr_t>(ptr);
1303 break;
1304 case kPageMapLargeObject: {
1305 size_t num_pages = 1;
1306 size_t idx = pm_idx + 1;
1307 size_t end = page_map_size_;
1308 while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
1309 num_pages++;
1310 idx++;
1311 }
1312 return num_pages * kPageSize;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001313 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001314 case kPageMapLargeObjectPart:
1315 LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1316 << std::hex << reinterpret_cast<intptr_t>(ptr);
1317 break;
1318 case kPageMapRun:
1319 case kPageMapRunPart: {
1320 // Find the beginning of the run.
1321 while (page_map_[pm_idx] != kPageMapRun) {
1322 pm_idx--;
1323 DCHECK_LT(pm_idx, capacity_ / kPageSize);
1324 }
1325 DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
1326 Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1327 DCHECK_EQ(run->magic_num_, kMagicNum);
1328 size_t idx = run->size_bracket_idx_;
Andreas Gamped7576322014-10-24 22:13:45 -07001329 size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
Ian Rogers13735952014-10-08 12:43:28 -07001330 - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001331 DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
1332 return IndexToBracketSize(idx);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001333 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001334 default: {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001335 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001336 break;
1337 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001338 }
1339 return 0;
1340}
1341
1342bool RosAlloc::Trim() {
1343 MutexLock mu(Thread::Current(), lock_);
1344 FreePageRun* last_free_page_run;
1345 DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
1346 auto it = free_page_runs_.rbegin();
1347 if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
1348 // Remove the last free page run, if any.
1349 DCHECK(last_free_page_run->IsFree());
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001350 DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001351 DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
1352 DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
1353 free_page_runs_.erase(last_free_page_run);
1354 size_t decrement = last_free_page_run->ByteSize(this);
1355 size_t new_footprint = footprint_ - decrement;
1356 DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
1357 size_t new_num_of_pages = new_footprint / kPageSize;
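    // Illustrative arithmetic: trimming a 16-page trailing free run off a 64-page
    // footprint yields new_num_of_pages == 48; the page map tail is zeroed below.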
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001358 DCHECK_GE(page_map_size_, new_num_of_pages);
1359 // Zero out the tail of the page map.
Ian Rogers13735952014-10-08 12:43:28 -07001360 uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
1361 uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001362 DCHECK_LE(madvise_begin, page_map_mem_map_->End());
1363 size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
1364 if (madvise_size > 0) {
1365 DCHECK_ALIGNED(madvise_begin, kPageSize);
1366 DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
Ian Rogersc5f17732014-06-05 20:48:42 -07001367 if (!kMadviseZeroes) {
1368 memset(madvise_begin, 0, madvise_size);
1369 }
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001370 CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
1371 }
1372 if (madvise_begin - zero_begin) {
1373 memset(zero_begin, 0, madvise_begin - zero_begin);
1374 }
1375 page_map_size_ = new_num_of_pages;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001376 free_page_run_size_map_.resize(new_num_of_pages);
1377 DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001378 ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001379 if (kTraceRosAlloc) {
1380 LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
1381 << footprint_ << " to " << new_footprint;
1382 }
1383 DCHECK_LT(new_footprint, footprint_);
1384 DCHECK_LT(new_footprint, capacity_);
1385 footprint_ = new_footprint;
1386 return true;
1387 }
1388 return false;
1389}
1390
1391void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1392 void* arg) {
1393 // Note: no need to use this to release pages as we already do so in FreePages().
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001394 if (handler == nullptr) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001395 return;
1396 }
1397 MutexLock mu(Thread::Current(), lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001398 size_t pm_end = page_map_size_;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001399 size_t i = 0;
1400 while (i < pm_end) {
Ian Rogers13735952014-10-08 12:43:28 -07001401 uint8_t pm = page_map_[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001402 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001403 case kPageMapReleased:
1404 // Fall-through.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001405 case kPageMapEmpty: {
1406 // The start of a free page run.
1407 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1408 DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
1409 size_t fpr_size = fpr->ByteSize(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001410 DCHECK_ALIGNED(fpr_size, kPageSize);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001411 void* start = fpr;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001412 if (kIsDebugBuild) {
1413 // In the debug build, the first page of a free page run
1414 // contains a magic number for debugging. Exclude it.
Ian Rogers13735952014-10-08 12:43:28 -07001415 start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001416 }
Ian Rogers13735952014-10-08 12:43:28 -07001417 void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001418 handler(start, end, 0, arg);
1419 size_t num_pages = fpr_size / kPageSize;
1420 if (kIsDebugBuild) {
1421 for (size_t j = i + 1; j < i + num_pages; ++j) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001422 DCHECK(IsFreePage(j));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001423 }
1424 }
1425 i += fpr_size / kPageSize;
1426 DCHECK_LE(i, pm_end);
1427 break;
1428 }
1429 case kPageMapLargeObject: {
1430 // The start of a large object.
1431 size_t num_pages = 1;
1432 size_t idx = i + 1;
1433 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1434 num_pages++;
1435 idx++;
1436 }
1437 void* start = base_ + i * kPageSize;
1438 void* end = base_ + (i + num_pages) * kPageSize;
1439 size_t used_bytes = num_pages * kPageSize;
1440 handler(start, end, used_bytes, arg);
1441 if (kIsDebugBuild) {
1442 for (size_t j = i + 1; j < i + num_pages; ++j) {
1443 DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
1444 }
1445 }
1446 i += num_pages;
1447 DCHECK_LE(i, pm_end);
1448 break;
1449 }
1450 case kPageMapLargeObjectPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001451 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001452 break;
1453 case kPageMapRun: {
1454 // The start of a run.
1455 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001456 DCHECK_EQ(run->magic_num_, kMagicNum);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001457 // The dedicated full run doesn't contain any real allocations, don't visit the slots in
1458 // there.
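        // (The dedicated full run is backed by static storage outside
        // [base_, base_ + footprint_), so this page map walk never reaches it.)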
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001459 run->InspectAllSlots(handler, arg);
1460 size_t num_pages = numOfPages[run->size_bracket_idx_];
1461 if (kIsDebugBuild) {
1462 for (size_t j = i + 1; j < i + num_pages; ++j) {
1463 DCHECK_EQ(page_map_[j], kPageMapRunPart);
1464 }
1465 }
1466 i += num_pages;
1467 DCHECK_LE(i, pm_end);
1468 break;
1469 }
1470 case kPageMapRunPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001471 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001472 break;
1473 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001474 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001475 break;
1476 }
1477 }
1478}
1479
1480size_t RosAlloc::Footprint() {
1481 MutexLock mu(Thread::Current(), lock_);
1482 return footprint_;
1483}
1484
1485size_t RosAlloc::FootprintLimit() {
1486 MutexLock mu(Thread::Current(), lock_);
1487 return capacity_;
1488}
1489
1490void RosAlloc::SetFootprintLimit(size_t new_capacity) {
1491 MutexLock mu(Thread::Current(), lock_);
1492 DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
1493 // Only growing is supported here. But Trim() is supported.
1494 if (capacity_ < new_capacity) {
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001495 CHECK_LE(new_capacity, max_capacity_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001496 capacity_ = new_capacity;
1497 VLOG(heap) << "new capacity=" << capacity_;
1498 }
1499}
1500
Lei Li57846212015-06-11 17:50:20 +08001501// The function below may be called by the mutator itself just before thread termination.
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001502size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001503 Thread* self = Thread::Current();
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001504 size_t free_bytes = 0U;
Mathieu Chartier0651d412014-04-29 14:37:57 -07001505 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001506 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001507 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001508 CHECK(thread_local_run != nullptr);
1509 // Invalid means already revoked.
1510 DCHECK(thread_local_run->IsThreadLocal());
1511 if (thread_local_run != dedicated_full_run_) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001512 // Note the thread local run may not be full here.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001513 thread->SetRosAllocRun(idx, dedicated_full_run_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001514 DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001515 // Count the number of free slots left.
1516 size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
1517 free_bytes += num_free_slots * bracketSizes[idx];
Lei Li57846212015-06-11 17:50:20 +08001518      // The above bracket index lock guards the thread local free list to avoid a race
1519      // with the GC thread unioning the bulk free list into the thread local free list
1520      // in BulkFree. If the run is still thread local, the GC thread helps update the
1521      // thread local free list in BulkFree, and the latest thread local free list is
1522      // merged into the free list either when this thread local run becomes full or
1523      // when it is revoked here; in either case the free list ends up updated. If the
1524      // run is no longer thread local, the GC thread merges the bulk free list in the
1525      // next BulkFree. Thus no need to merge the bulk free list into the free list again here.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001526 bool dont_care;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001527 thread_local_run->MergeThreadLocalFreeListToFreeList(&dont_care);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001528 thread_local_run->SetIsThreadLocal(false);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001529 DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
1530 DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
Mathieu Chartier0651d412014-04-29 14:37:57 -07001531 RevokeRun(self, idx, thread_local_run);
1532 }
1533 }
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001534 return free_bytes;
Mathieu Chartier0651d412014-04-29 14:37:57 -07001535}
1536
1537void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
1538 size_bracket_locks_[idx]->AssertHeld(self);
1539 DCHECK(run != dedicated_full_run_);
1540 if (run->IsFull()) {
1541 if (kIsDebugBuild) {
1542 full_runs_[idx].insert(run);
1543 DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
1544 if (kTraceRosAlloc) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001545 LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
Mathieu Chartier0651d412014-04-29 14:37:57 -07001546 << reinterpret_cast<intptr_t>(run)
1547 << " into full_runs_[" << std::dec << idx << "]";
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001548 }
1549 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001550 } else if (run->IsAllFree()) {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001551 run->ZeroHeaderAndSlotHeaders();
Mathieu Chartier0651d412014-04-29 14:37:57 -07001552 MutexLock mu(self, lock_);
1553 FreePages(self, run, true);
1554 } else {
1555 non_full_runs_[idx].insert(run);
1556 DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
1557 if (kTraceRosAlloc) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001558 LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
Mathieu Chartier0651d412014-04-29 14:37:57 -07001559 << reinterpret_cast<intptr_t>(run)
1560 << " into non_full_runs_[" << std::dec << idx << "]";
1561 }
1562 }
1563}
1564
1565void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
1566 // Revoke the current runs which share the same idx as thread local runs.
1567 Thread* self = Thread::Current();
1568 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
1569 MutexLock mu(self, *size_bracket_locks_[idx]);
1570 if (current_runs_[idx] != dedicated_full_run_) {
1571 RevokeRun(self, idx, current_runs_[idx]);
1572 current_runs_[idx] = dedicated_full_run_;
1573 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001574 }
1575}
1576
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001577size_t RosAlloc::RevokeAllThreadLocalRuns() {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001578 // This is called when a mutator thread won't allocate such as at
1579 // the Zygote creation time or during the GC pause.
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001580 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
1581 MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001582 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001583 size_t free_bytes = 0U;
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001584 for (Thread* thread : thread_list) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001585 free_bytes += RevokeThreadLocalRuns(thread);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001586 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001587 RevokeThreadUnsafeCurrentRuns();
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001588 return free_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001589}
1590
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001591void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
1592 if (kIsDebugBuild) {
1593 Thread* self = Thread::Current();
1594    // Avoid race conditions on the bulk free lists with BulkFree() (GC).
Mathieu Chartiera1c1c712014-06-23 17:53:09 -07001595 ReaderMutexLock wmu(self, bulk_free_lock_);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001596 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001597 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001598 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001599 DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001600 }
1601 }
1602}
1603
1604void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
1605 if (kIsDebugBuild) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07001606 Thread* self = Thread::Current();
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001607 MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
1608 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001609 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1610 for (Thread* t : thread_list) {
1611 AssertThreadLocalRunsAreRevoked(t);
1612 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001613 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001614 MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001615 CHECK_EQ(current_runs_[idx], dedicated_full_run_);
1616 }
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001617 }
1618}
1619
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001620void RosAlloc::Initialize() {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001621 // bracketSizes.
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001622 static_assert(kNumRegularSizeBrackets == kNumOfSizeBrackets - 2,
1623 "There should be two non-regular brackets");
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001624 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001625 if (i < kNumThreadLocalSizeBrackets) {
1626 bracketSizes[i] = kThreadLocalBracketQuantumSize * (i + 1);
1627 } else if (i < kNumRegularSizeBrackets) {
1628 bracketSizes[i] = kBracketQuantumSize * (i - kNumThreadLocalSizeBrackets + 1) +
1629 (kThreadLocalBracketQuantumSize * kNumThreadLocalSizeBrackets);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001630 } else if (i == kNumOfSizeBrackets - 2) {
1631 bracketSizes[i] = 1 * KB;
1632 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001633 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001634 bracketSizes[i] = 2 * KB;
1635 }
1636 if (kTraceRosAlloc) {
1637 LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
1638 }
1639 }
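  // As an illustrative sketch, assuming the default quantum constants declared in
  // rosalloc.h (8-byte thread-local quantum, 16-byte regular quantum), the brackets
  // come out as 8, 16, ..., 128, then 144, 160, ..., 512, and finally the two
  // non-regular brackets of 1KB and 2KB.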
1640 // numOfPages.
1641 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001642 if (i < kNumThreadLocalSizeBrackets) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001643 numOfPages[i] = 1;
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001644 } else if (i < (kNumThreadLocalSizeBrackets + kNumRegularSizeBrackets) / 2) {
Hiroshi Yamauchifc067bf2016-03-23 14:22:34 -07001645 numOfPages[i] = 1;
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001646 } else if (i < kNumRegularSizeBrackets) {
Hiroshi Yamauchifc067bf2016-03-23 14:22:34 -07001647 numOfPages[i] = 1;
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001648 } else if (i == kNumOfSizeBrackets - 2) {
Hiroshi Yamauchifc067bf2016-03-23 14:22:34 -07001649 numOfPages[i] = 2;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001650 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001651 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchifc067bf2016-03-23 14:22:34 -07001652 numOfPages[i] = 4;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001653 }
1654 if (kTraceRosAlloc) {
1655 LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
1656 }
1657 }
1658 // Compute numOfSlots and slotOffsets.
1659 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1660 size_t bracket_size = bracketSizes[i];
1661 size_t run_size = kPageSize * numOfPages[i];
1662 size_t max_num_of_slots = run_size / bracket_size;
1663 // Compute the actual number of slots by taking the header and
1664 // alignment into account.
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001665 size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint64_t));
1666 DCHECK_EQ(fixed_header_size, 80U);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001667 size_t header_size = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001668 size_t num_of_slots = 0;
1669 // Search for the maximum number of slots that allows enough space
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001670 // for the header.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001671 for (int s = max_num_of_slots; s >= 0; s--) {
1672 size_t tmp_slots_size = bracket_size * s;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001673 size_t tmp_unaligned_header_size = fixed_header_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001674 // Align up the unaligned header size. bracket_size may not be a power of two.
1675 size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
1676 tmp_unaligned_header_size :
1677 tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001678 DCHECK_EQ(tmp_header_size % bracket_size, 0U);
1679 DCHECK_EQ(tmp_header_size % sizeof(uint64_t), 0U);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001680 if (tmp_slots_size + tmp_header_size <= run_size) {
1681 // Found the right number of slots, that is, there was enough
1682        // space for the header.
1683 num_of_slots = s;
1684 header_size = tmp_header_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001685 break;
1686 }
1687 }
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001688 DCHECK_GT(num_of_slots, 0U) << i;
1689 DCHECK_GT(header_size, 0U) << i;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001690 // Add the padding for the alignment remainder.
1691 header_size += run_size % bracket_size;
Ian Rogers5d057052014-03-12 14:32:27 -07001692 DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001693 numOfSlots[i] = num_of_slots;
1694 headerSizes[i] = header_size;
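    // Worked example, assuming kPageSize == 4096 with the 80-byte fixed header above:
    // for the 8-byte bracket, the search settles on 502 slots, since
    // 80 + 502 * 8 == 4096 exactly fills the one-page run with no padding.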
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001695 if (kTraceRosAlloc) {
1696 LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001697 << ", headerSizes[" << i << "]=" << headerSizes[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001698 }
1699 }
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001700 // Set up the dedicated full run so that nobody can successfully allocate from it.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001701 if (kIsDebugBuild) {
1702 dedicated_full_run_->magic_num_ = kMagicNum;
1703 }
1704 // It doesn't matter which size bracket we use since the main goal is to have the allocation
1705 // fail 100% of the time you attempt to allocate into the dedicated full run.
1706 dedicated_full_run_->size_bracket_idx_ = 0;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001707 DCHECK_EQ(dedicated_full_run_->FreeList()->Size(), 0U); // It looks full.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001708 dedicated_full_run_->SetIsThreadLocal(true);
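  // Rationale (inferred from the allocation fast path): with an empty free list and
  // the thread-local flag set, a thread whose local run points here fails its first
  // allocation attempt and falls into the slow path, where it is handed a real run.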
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001709
1710 // The smallest bracket size must be at least as large as the sizeof(Slot).
1711 DCHECK_LE(sizeof(Slot), bracketSizes[0]) << "sizeof(Slot) <= the smallest bracket size";
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001712 // Check the invariants between the max bracket sizes and the number of brackets.
1713 DCHECK_EQ(kMaxThreadLocalBracketSize, bracketSizes[kNumThreadLocalSizeBrackets - 1]);
1714 DCHECK_EQ(kMaxRegularBracketSize, bracketSizes[kNumRegularSizeBrackets - 1]);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001715}
1716
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001717void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1718 size_t used_bytes, void* arg) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001719 if (used_bytes == 0) {
1720 return;
1721 }
1722 size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
1723 *bytes_allocated += used_bytes;
1724}
1725
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001726void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1727 size_t used_bytes, void* arg) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001728 if (used_bytes == 0) {
1729 return;
1730 }
1731 size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
1732 ++(*objects_allocated);
1733}
1734
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001735void RosAlloc::Verify() {
1736 Thread* self = Thread::Current();
1737 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001738      << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001739 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Mathieu Chartiera1c1c712014-06-23 17:53:09 -07001740 ReaderMutexLock wmu(self, bulk_free_lock_);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001741 std::vector<Run*> runs;
1742 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001743 MutexLock lock_mu(self, lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001744 size_t pm_end = page_map_size_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001745 size_t i = 0;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001746 size_t memory_tool_modifier = is_running_on_memory_tool_ ?
1747 2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes : // Redzones before and after.
Andreas Gampefef16ad2015-02-19 16:44:32 -08001748 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001749 while (i < pm_end) {
Ian Rogers13735952014-10-08 12:43:28 -07001750 uint8_t pm = page_map_[i];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001751 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001752 case kPageMapReleased:
1753 // Fall-through.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001754 case kPageMapEmpty: {
1755 // The start of a free page run.
1756 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001757 DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001758 CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
1759 << "An empty page must belong to the free page run set";
1760 size_t fpr_size = fpr->ByteSize(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001761 CHECK_ALIGNED(fpr_size, kPageSize)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001762 << "A free page run size isn't page-aligned : " << fpr_size;
1763 size_t num_pages = fpr_size / kPageSize;
1764 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1765 << "A free page run size must be > 0 : " << fpr_size;
1766 for (size_t j = i + 1; j < i + num_pages; ++j) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001767 CHECK(IsFreePage(j))
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001768 << "A mismatch between the page map table for kPageMapEmpty "
1769 << " at page index " << j
1770 << " and the free page run size : page index range : "
1771 << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
1772 }
1773 i += num_pages;
1774 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1775 << std::endl << DumpPageMap();
1776 break;
1777 }
1778 case kPageMapLargeObject: {
1779 // The start of a large object.
1780 size_t num_pages = 1;
1781 size_t idx = i + 1;
1782 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1783 num_pages++;
1784 idx++;
1785 }
Andreas Gamped7576322014-10-24 22:13:45 -07001786 uint8_t* start = base_ + i * kPageSize;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001787 if (is_running_on_memory_tool_) {
1788 start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
Andreas Gamped7576322014-10-24 22:13:45 -07001789 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001790 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
1791 size_t obj_size = obj->SizeOf();
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001792 CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001793 << "A rosalloc large object size must be > " << kLargeSizeThreshold;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001794 CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize)
1795 << "A rosalloc large object size " << obj_size + memory_tool_modifier
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001796 << " does not match the page map table " << (num_pages * kPageSize)
1797 << std::endl << DumpPageMap();
1798 i += num_pages;
1799 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1800 << std::endl << DumpPageMap();
1801 break;
1802 }
1803 case kPageMapLargeObjectPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001804 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001805 break;
1806 case kPageMapRun: {
1807 // The start of a run.
1808 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001809 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001810 size_t idx = run->size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07001811 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001812 size_t num_pages = numOfPages[idx];
1813 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1814 << "Run size must be > 0 : " << num_pages;
1815 for (size_t j = i + 1; j < i + num_pages; ++j) {
1816 CHECK_EQ(page_map_[j], kPageMapRunPart)
1817 << "A mismatch between the page map table for kPageMapRunPart "
1818 << " at page index " << j
1819 << " and the run size : page index range " << i << " to " << (i + num_pages)
1820 << std::endl << DumpPageMap();
1821 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001822 // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001823 runs.push_back(run);
1824 i += num_pages;
1825 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1826 << std::endl << DumpPageMap();
1827 break;
1828 }
1829 case kPageMapRunPart:
Mathieu Chartier0651d412014-04-29 14:37:57 -07001830 // Fall-through.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001831 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001832 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001833 break;
1834 }
1835 }
1836 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001837 std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
1838 for (Thread* thread : threads) {
1839 for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001840 MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001841 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
1842 CHECK(thread_local_run != nullptr);
1843 CHECK(thread_local_run->IsThreadLocal());
1844 CHECK(thread_local_run == dedicated_full_run_ ||
1845 thread_local_run->size_bracket_idx_ == i);
1846 }
1847 }
1848 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001849 MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001850 Run* current_run = current_runs_[i];
1851 CHECK(current_run != nullptr);
1852 if (current_run != dedicated_full_run_) {
1853 // The dedicated full run is currently marked as thread local.
1854 CHECK(!current_run->IsThreadLocal());
1855 CHECK_EQ(current_run->size_bracket_idx_, i);
1856 }
1857 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001858 // Call Verify() here for the lock order.
1859 for (auto& run : runs) {
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001860 run->Verify(self, this, is_running_on_memory_tool_);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001861 }
1862}
1863
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001864void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) {
Ian Rogers5d057052014-03-12 14:32:27 -07001865 DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001866 const size_t idx = size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07001867 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
Ian Rogers13735952014-10-08 12:43:28 -07001868 uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001869 const size_t num_slots = numOfSlots[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001870 size_t bracket_size = IndexToBracketSize(idx);
1871 CHECK_EQ(slot_base + num_slots * bracket_size,
Ian Rogers13735952014-10-08 12:43:28 -07001872 reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001873 << "Mismatch in the end address of the run " << Dump();
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001874 // Check that the bulk free list is empty. It's only used during BulkFree().
1875  CHECK(IsBulkFreeListEmpty()) << "The bulk free list isn't empty " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001876 // Check the thread local runs, the current runs, and the run sets.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001877 if (IsThreadLocal()) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001878 // If it's a thread local run, then it must be pointed to by an owner thread.
1879 bool owner_found = false;
1880 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1881 for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
1882 Thread* thread = *it;
Mathieu Chartier0651d412014-04-29 14:37:57 -07001883 for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001884 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001885 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001886 if (thread_local_run == this) {
1887 CHECK(!owner_found)
1888 << "A thread local run has more than one owner thread " << Dump();
1889 CHECK_EQ(i, idx)
1890 << "A mismatching size bracket index in a thread local run " << Dump();
1891 owner_found = true;
1892 }
1893 }
1894 }
1895 CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
1896 } else {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001897 // If it's not thread local, check that the thread local free list is empty.
1898 CHECK(IsThreadLocalFreeListEmpty())
1899 << "A non-thread-local run's thread local free list isn't empty "
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001900 << Dump();
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001901 // Check if it's a current run for the size bracket.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001902 bool is_current_run = false;
1903 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1904 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
1905 Run* current_run = rosalloc->current_runs_[i];
1906 if (idx == i) {
1907 if (this == current_run) {
1908 is_current_run = true;
1909 }
1910 } else {
1911 // If the size bucket index does not match, then it must not
1912 // be a current run.
1913 CHECK_NE(this, current_run)
1914 << "A current run points to a run with a wrong size bracket index " << Dump();
1915 }
1916 }
1917    // If it's neither a thread local nor a current run, then it must be
1918 // in a run set.
1919 if (!is_current_run) {
1920 MutexLock mu(self, rosalloc->lock_);
Mathieu Chartier58553c72014-09-16 16:25:55 -07001921 auto& non_full_runs = rosalloc->non_full_runs_[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001922 // If it's all free, it must be a free page run rather than a run.
1923 CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
1924 if (!IsFull()) {
1925        // If it's not full, it must be in the non-full run set.
1926 CHECK(non_full_runs.find(this) != non_full_runs.end())
1927 << "A non-full run isn't in the non-full run set " << Dump();
1928 } else {
1929        // If it's full, it must be in the full run set (debug build only.)
1930 if (kIsDebugBuild) {
Mathieu Chartier58553c72014-09-16 16:25:55 -07001931 auto& full_runs = rosalloc->full_runs_[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001932 CHECK(full_runs.find(this) != full_runs.end())
1933 << " A full run isn't in the full run set " << Dump();
1934 }
1935 }
1936 }
1937 }
1938 // Check each slot.
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001939 size_t memory_tool_modifier = running_on_memory_tool ?
1940 2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :
Andreas Gamped7576322014-10-24 22:13:45 -07001941 0U;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001942 // TODO: reuse InspectAllSlots().
1943 std::unique_ptr<bool[]> is_free(new bool[num_slots]()); // zero initialized
1944 // Mark the free slots and the remaining ones are allocated.
1945 for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
1946 size_t slot_idx = SlotIndex(slot);
1947 DCHECK_LT(slot_idx, num_slots);
1948 is_free[slot_idx] = true;
1949 }
1950 if (IsThreadLocal()) {
1951 for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
1952 size_t slot_idx = SlotIndex(slot);
1953 DCHECK_LT(slot_idx, num_slots);
1954 is_free[slot_idx] = true;
1955 }
1956 }
1957 for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
1958 uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
1959 if (running_on_memory_tool) {
1960 slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
1961 }
1962 if (!is_free[slot_idx]) {
1963 // The slot is allocated
1964 mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
1965 size_t obj_size = obj->SizeOf();
1966 CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
1967 << "A run slot contains a large object " << Dump();
1968 CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
David Sehr709b0702016-10-13 09:12:37 -07001969 << obj->PrettyTypeOf() << " "
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001970 << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
1971 << " A run slot contains an object with wrong size " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001972 }
1973 }
1974}
1975
size_t RosAlloc::ReleasePages() {
  VLOG(heap) << "RosAlloc::ReleasePages()";
  DCHECK(!DoesReleaseAllPages());
  Thread* self = Thread::Current();
  size_t reclaimed_bytes = 0;
  size_t i = 0;
  // Check the page map size which might have changed due to grow/shrink.
  while (i < page_map_size_) {
    // Reading the page map without a lock is racy but the race is benign since it should only
    // result in occasionally not releasing pages which we could release.
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall through.
      case kPageMapEmpty: {
        // This is currently the start of a free page run.
        // Acquire the lock to prevent other threads racing in and modifying the page map.
        MutexLock mu(self, lock_);
        // Check that it's still empty after we acquired the lock since another thread could have
        // raced in and placed an allocation here.
        if (IsFreePage(i)) {
          // Free page runs can start with a released page if we coalesced a released-page free
          // page run with an empty page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          // There is a race condition where FreePage can coalesce fpr with the previous
          // free page run before we acquire lock_. In that case free_page_runs_.find will not find
          // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
          // to the next page.
          if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
            size_t fpr_size = fpr->ByteSize(this);
            DCHECK_ALIGNED(fpr_size, kPageSize);
            uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
            reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
            size_t pages = fpr_size / kPageSize;
            CHECK_GT(pages, 0U) << "Infinite loop probable";
            i += pages;
            DCHECK_LE(i, page_map_size_);
            break;
          }
        }
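        // Either an allocation raced in or fpr was coalesced into a previous
        // run; skip this page like any other in-use page.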
        FALLTHROUGH_INTENDED;
      }
      case kPageMapLargeObject:      // Fall through.
      case kPageMapLargeObjectPart:  // Fall through.
      case kPageMapRun:              // Fall through.
      case kPageMapRunPart:          // Fall through.
        ++i;
        break;  // Skip.
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
    }
  }
  return reclaimed_bytes;
}

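// Release the physical pages backing the page-aligned range [start, end)
// and mark them as released in the page map. Returns the number of bytes
// newly marked released.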
size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
  DCHECK_ALIGNED(start, kPageSize);
  DCHECK_ALIGNED(end, kPageSize);
  DCHECK_LT(start, end);
  if (kIsDebugBuild) {
    // In the debug build, the first page of a free page run
    // contains a magic number for debugging. Exclude it.
    start += kPageSize;

    // Single pages won't be released.
    if (start == end) {
      return 0;
    }
  }
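  // If madvise(MADV_DONTNEED) is not guaranteed to hand back zeroed pages on
  // this target (kMadviseZeroes is false), clear the range by hand so later
  // reuse of these pages still sees zero-filled memory.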
  if (!kMadviseZeroes) {
    // TODO: Do this when we resurrect the page instead.
    memset(start, 0, end - start);
  }
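  // MADV_DONTNEED lets the kernel reclaim the physical pages while the
  // virtual mapping stays intact; on Linux, anonymous pages read after this
  // come back zero-filled.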
  CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
  size_t pm_idx = ToPageMapIndex(start);
  size_t reclaimed_bytes = 0;
  // Calculate reclaimed bytes and update page map.
  const size_t max_idx = pm_idx + (end - start) / kPageSize;
  for (; pm_idx < max_idx; ++pm_idx) {
    DCHECK(IsFreePage(pm_idx));
    if (page_map_[pm_idx] == kPageMapEmpty) {
      // Mark the page as released and update how many bytes we released.
      reclaimed_bytes += kPageSize;
      page_map_[pm_idx] = kPageMapReleased;
    }
  }
  return reclaimed_bytes;
}

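// Append a fragmentation diagnostic to |os| for a failed allocation of
// |failed_alloc_bytes|: a large allocation needs its rounded-up size as
// contiguous free pages, while a small (run) allocation needs enough
// contiguous pages for a fresh run of its size bracket, i.e.
// numOfPages[bracket] * kPageSize bytes.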
void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  Thread* self = Thread::Current();
  size_t largest_continuous_free_pages = 0;
  WriterMutexLock wmu(self, bulk_free_lock_);
  MutexLock mu(self, lock_);
  uint64_t total_free = 0;
  for (FreePageRun* fpr : free_page_runs_) {
    largest_continuous_free_pages = std::max(largest_continuous_free_pages,
                                             fpr->ByteSize(this));
    total_free += fpr->ByteSize(this);
  }
  size_t required_bytes = 0;
  const char* new_buffer_msg = "";
  if (failed_alloc_bytes > kLargeSizeThreshold) {
    // Large allocation.
    required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
  } else {
    // Non-large allocation.
    required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
    new_buffer_msg = " for a new buffer";
  }
  if (required_bytes > largest_continuous_free_pages) {
    os << "; failed due to fragmentation ("
       << "required contiguous free " << required_bytes << " bytes" << new_buffer_msg
       << ", largest contiguous free " << largest_continuous_free_pages << " bytes"
       << ", total free pages " << total_free << " bytes"
       << ", space footprint " << footprint_ << " bytes"
       << ", space max capacity " << max_capacity_ << " bytes"
       << ")" << std::endl;
  }
}

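// Print per-size-bracket run/slot statistics and large-object totals.
// Requires the mutator lock to be exclusively held (e.g. with all threads
// suspended) so the page map and runs cannot change during the walk.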
void RosAlloc::DumpStats(std::ostream& os) {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
  size_t num_large_objects = 0;
  size_t num_pages_large_objects = 0;
  // These arrays are zero initialized.
  std::unique_ptr<size_t[]> num_runs(new size_t[kNumOfSizeBrackets]());
  std::unique_ptr<size_t[]> num_pages_runs(new size_t[kNumOfSizeBrackets]());
  std::unique_ptr<size_t[]> num_slots(new size_t[kNumOfSizeBrackets]());
  std::unique_ptr<size_t[]> num_used_slots(new size_t[kNumOfSizeBrackets]());
  std::unique_ptr<size_t[]> num_metadata_bytes(new size_t[kNumOfSizeBrackets]());
  ReaderMutexLock rmu(self, bulk_free_lock_);
  MutexLock lock_mu(self, lock_);
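  // Single pass over the page map: classify each page and accumulate the
  // per-bracket and large-object counters.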
  for (size_t i = 0; i < page_map_size_; ) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
      case kPageMapEmpty:
        ++i;
        break;
      case kPageMapLargeObject: {
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < page_map_size_ && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        num_large_objects++;
        num_pages_large_objects += num_pages;
        i += num_pages;
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
                   << DumpPageMap();
        break;
      case kPageMapRun: {
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        size_t num_pages = numOfPages[idx];
        num_runs[idx]++;
        num_pages_runs[idx] += num_pages;
        num_slots[idx] += numOfSlots[idx];
        size_t num_free_slots = run->NumberOfFreeSlots();
        num_used_slots[idx] += numOfSlots[idx] - num_free_slots;
        num_metadata_bytes[idx] += headerSizes[idx];
        i += num_pages;
        break;
      }
      case kPageMapRunPart:
        // Fall-through.
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
                   << DumpPageMap();
        break;
    }
  }
  os << "RosAlloc stats:\n";
  for (size_t i = 0; i < kNumOfSizeBrackets; ++i) {
    os << "Bracket " << i << " (" << bracketSizes[i] << "):"
       << " #runs=" << num_runs[i]
       << " #pages=" << num_pages_runs[i]
       << " (" << PrettySize(num_pages_runs[i] * kPageSize) << ")"
       << " #metadata_bytes=" << PrettySize(num_metadata_bytes[i])
       << " #slots=" << num_slots[i] << " (" << PrettySize(num_slots[i] * bracketSizes[i]) << ")"
       << " #used_slots=" << num_used_slots[i]
       << " (" << PrettySize(num_used_slots[i] * bracketSizes[i]) << ")\n";
  }
  os << "Large #allocations=" << num_large_objects
     << " #pages=" << num_pages_large_objects
     << " (" << PrettySize(num_pages_large_objects * kPageSize) << ")\n";
  size_t total_num_pages = 0;
  size_t total_metadata_bytes = 0;
  size_t total_allocated_bytes = 0;
  for (size_t i = 0; i < kNumOfSizeBrackets; ++i) {
    total_num_pages += num_pages_runs[i];
    total_metadata_bytes += num_metadata_bytes[i];
    total_allocated_bytes += num_used_slots[i] * bracketSizes[i];
  }
  total_num_pages += num_pages_large_objects;
  total_allocated_bytes += num_pages_large_objects * kPageSize;
  os << "Total #total_bytes=" << PrettySize(total_num_pages * kPageSize)
     << " #metadata_bytes=" << PrettySize(total_metadata_bytes)
     << " #used_bytes=" << PrettySize(total_allocated_bytes) << "\n";
  os << "\n";
}

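// Illustrative DumpStats() output shape (values hypothetical; the format
// follows the streaming code above):
//
//   RosAlloc stats:
//   Bracket 0 (8): #runs=... #pages=... (...) #metadata_bytes=... #slots=... (...) #used_slots=... (...)
//   ...
//   Large #allocations=... #pages=... (...)
//   Total #total_bytes=... #metadata_bytes=... #used_bytes=...
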
}  // namespace allocator
}  // namespace gc
}  // namespace art