/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc.h"

#include "base/mutex-inl.h"
#include "gc/space/valgrind_settings.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

#include <map>
#include <list>
#include <sstream>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

static constexpr bool kUsePrefetchDuringAllocRun = true;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
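// The dedicated full run is a page-sized, statically-allocated dummy run whose slots all read
// as in-use. current_runs_[] and the per-thread runs point at it by default, so the fast path
// can simply let AllocSlot() fail and fall into the refill path instead of null-checking runs.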
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, bool running_on_valgrind,
                   size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold),
      running_on_valgrind_(running_on_valgrind) {
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("an rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
}

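// Allocates num_pages contiguous pages. The search is first-fit over free_page_runs_, which is
// ordered by address, so the lowest-address run that is large enough wins; an oversized run is
// split and the tail reinserted. If nothing fits, the footprint is grown (by at least 2MB where
// the capacity allows) and the allocation is retried from the new tail run.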
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}

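// Returns the pages starting at ptr to the free list. The page map determines how many
// contiguous pages belong to this allocation; the resulting free run is coalesced with any
// address-adjacent free runs (in both directions) before being reinserted into free_page_runs_.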
size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (ShouldCheckZeroMemory()) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            h->magic_num_ = 0;
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            fpr->magic_num_ = 0;
          }
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

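// Objects above kLargeSizeThreshold bypass runs entirely: they are carved directly out of whole
// pages and tracked only through the page map.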
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  *usable_size = total_bytes;
  *bytes_tl_bulk_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory()) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

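// Common free path. The page map entry for ptr decides the route: large objects free their
// pages directly, while run slots walk back over kPageMapRunPart entries to the run header and
// are handed to FreeFromRun(). Returns the number of bytes freed.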
size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

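// Creates a brand-new run for bracket idx: allocates its backing pages, marks the bitmap bits
// that don't correspond to valid slots, and optionally warms the cache over the slot area,
// either by zeroing it (which can dirty previously madvised pages) or by prefetching.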
RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->SetAllocBitMapBitsForInvalidSlots();
    DCHECK(!new_run->IsThreadLocal());
    DCHECK_EQ(new_run->first_search_vec_idx_, 0U);
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to become a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
  }
  return new_run;
}

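// Picks a run that can serve bracket idx: the lowest-address non-full run if one exists,
// otherwise a freshly allocated one.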
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

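// Shared-run allocation; the caller must hold size_bracket_locks_[idx]. When the current run
// fills up it is retired (tracked in full_runs_ on debug builds) and replaced via RefillRun();
// if even that fails, the dedicated full run is installed so subsequent attempts fail fast.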
inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

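// Variant of AllocFromRun() for callers that hold the mutator lock exclusively, which makes
// the bracket locks unnecessary.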
void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
    *bytes_tl_bulk_allocated = bracket_size;
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

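// Main allocation path for bracketed sizes. Brackets below kNumThreadLocalSizeBrackets are
// served from the calling thread's thread-local run without locking; when that run is full we
// first try to reclaim slots other threads have freed into its thread-local free bit map, and
// only refill under the bracket lock if nothing came back. Larger brackets share
// current_runs_[idx] under size_bracket_locks_[idx]. *bytes_tl_bulk_allocated reports the bytes
// newly reserved for the thread, i.e. all free slots of a freshly installed thread-local run.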
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated,
                             size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since the bitmaps are empty.
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Check that the bitmap idx is back at 0 if it's all free.
          DCHECK_EQ(thread_local_run->first_search_vec_idx_, 0U);
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }
      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      // Account for all the free slots in the new or refreshed thread local run.
      *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size;
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    } else {
      // The slot is already counted. Leave it as is.
      *bytes_tl_bulk_allocated = 0;
    }
    DCHECK(slot_addr != nullptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    if (LIKELY(slot_addr != nullptr)) {
      *bytes_allocated = bracket_size;
      *usable_size = bracket_size;
      *bytes_tl_bulk_allocated = bracket_size;
    }
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

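// Frees one slot back into its run. Slots of a thread-local run are only marked in the
// thread-local free bit map; the owning thread merges them back later. For shared runs the slot
// is freed eagerly: a run that becomes completely empty returns its pages, and a previously
// full run rejoins non_full_runs_.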
size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeader();
    {
      MutexLock lock_mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}

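// Debug helpers: BitMapToStr() renders a bitmap as dash-separated hex words, Dump() prints a
// run's header and all three of its bitmaps.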
std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " first_search_vec_idx=" << first_search_vec_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(vec_idx));
  const uint32_t mask = 1U << vec_off;
  DCHECK_NE(*vec & mask, 0U);
  *vec &= ~mask;
  DCHECK_EQ(*vec & mask, 0U);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(ptr, 0, bracket_size);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

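// Counts free slots as num_slots minus the popcount of the allocation bitmap, first masking out
// the always-set padding bits of the last vector so they are not mistaken for live slots.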
size_t RosAlloc::Run::NumberOfFreeSlots() {
  size_t num_alloc_slots = 0;
  const size_t idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  DCHECK_NE(num_vec, 0U);
  for (size_t v = 0; v < num_vec - 1; v++) {
    num_alloc_slots += POPCOUNT(alloc_bit_map_[v]);
  }
  // Don't count the invalid bits in the last vector.
  uint32_t last_vec_masked = alloc_bit_map_[num_vec - 1] &
      ~GetBitmapLastVectorMask(num_slots, num_vec);
  num_alloc_slots += POPCOUNT(last_vec_masked);
  size_t num_free_slots = num_slots - num_alloc_slots;
  DCHECK_LE(num_alloc_slots, num_slots);
  DCHECK_LE(num_free_slots, num_slots);
  return num_free_slots;
}

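// Moves the slots recorded in the thread-local free bit map back into the allocation bit map,
// i.e. actually frees them. Returns true if any bit changed; *is_all_free_after_out says
// whether the run ended up completely empty. The two variants below do the analogous transfer
// from the bulk free bit map.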
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700931inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700932 DCHECK(IsThreadLocal());
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700933 // Free slots in the alloc bit map based on the thread local free bit map.
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700934 const size_t idx = size_bracket_idx_;
935 const size_t num_of_slots = numOfSlots[idx];
936 const size_t num_vec = RoundUp(num_of_slots, 32) / 32;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700937 bool changed = false;
938 uint32_t* vecp = &alloc_bit_map_[0];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800939 uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700940 bool is_all_free_after = true;
941 for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
942 uint32_t tl_free_vec = *tl_free_vecp;
943 uint32_t vec_before = *vecp;
944 uint32_t vec_after;
945 if (tl_free_vec != 0) {
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700946 first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700947 vec_after = vec_before & ~tl_free_vec;
948 *vecp = vec_after;
949 changed = true;
950 *tl_free_vecp = 0; // clear the thread local free bit map.
951 } else {
952 vec_after = vec_before;
953 }
954 if (vec_after != 0) {
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700955 if (v == num_vec - 1) {
956 // Only not all free if a bit other than the mask bits are set.
957 is_all_free_after =
958 is_all_free_after && GetBitmapLastVectorMask(num_of_slots, num_vec) == vec_after;
959 } else {
960 is_all_free_after = false;
961 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700962 }
963 DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
964 }
965 *is_all_free_after_out = is_all_free_after;
966 // Return true if there was at least a bit set in the thread-local
967 // free bit map and at least a bit in the alloc bit map changed.
968 return changed;
969}
970
971inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700972 DCHECK(!IsThreadLocal());
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700973 // Free slots in the alloc bit map based on the bulk free bit map.
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700974 const size_t num_vec = NumberOfBitmapVectors();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700975 uint32_t* vecp = &alloc_bit_map_[0];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800976 uint32_t* free_vecp = &BulkFreeBitMap()[0];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700977 for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
978 uint32_t free_vec = *free_vecp;
979 if (free_vec != 0) {
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700980 first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700981 *vecp &= ~free_vec;
982 *free_vecp = 0; // clear the bulk free bit map.
983 }
984 DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
985 }
986}
987
988inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700989 DCHECK(IsThreadLocal());
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700990 // Union the thread local bit map with the bulk free bit map.
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700991 size_t num_vec = NumberOfBitmapVectors();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800992 uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
993 uint32_t* from_vecp = &BulkFreeBitMap()[0];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700994 for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
995 uint32_t from_vec = *from_vecp;
996 if (from_vec != 0) {
997 *to_vecp |= from_vec;
Hiroshi Yamauchi70f60042014-02-03 12:31:29 -0800998 *from_vecp = 0; // clear the bulk free bit map.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700999 }
1000 DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
1001 }
1002}
1003
inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK(IsThreadLocal());
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline size_t RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  return MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                  const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  const size_t bracket_size = bracketSizes[idx];
  memset(ptr, 0, bracket_size);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = NumberOfBitmapVectors();
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  const uint32_t mask = 1U << vec_off;
  DCHECK_EQ(*vec & mask, 0U);
  *vec |= mask;
  DCHECK_NE(*vec & mask, 0U);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
  return bracket_size;
}

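// Mask covering the padding bits of the last bitmap vector, i.e. the high-order bits that do
// not correspond to real slots. For example, 10 slots need num_vec = 1 and leave
// remain = 22 padding bits, giving the mask 0xfffffc00.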
inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
  const size_t kBitsPerVec = 32;
  DCHECK_GE(num_vec * kBitsPerVec, num_slots);
  size_t remain = num_vec * kBitsPerVec - num_slots;
  DCHECK_NE(remain, kBitsPerVec);
  if (remain == 0) {
    // All 32 bits of the last vector are valid slots; shifting by 32 below would be undefined.
    return 0;
  }
  return ((1U << remain) - 1) << (kBitsPerVec - remain);
}

inline bool RosAlloc::Run::IsAllFree() {
  const uint8_t idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = NumberOfBitmapVectors();
  DCHECK_NE(num_vec, 0U);
  // Check the last vector after the loop since it uses a special case for the masked bits.
  for (size_t v = 0; v < num_vec - 1; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  // Make sure the last word is equal to the mask, all other bits must be 0.
  return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
}

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

Mathieu Chartier73d1e172014-04-11 17:53:48 -07001088inline void RosAlloc::Run::SetAllocBitMapBitsForInvalidSlots() {
1089 const size_t idx = size_bracket_idx_;
1090 const size_t num_slots = numOfSlots[idx];
1091 const size_t num_vec = RoundUp(num_slots, 32) / 32;
1092 DCHECK_NE(num_vec, 0U);
1093 // Make sure to set the bits at the end of the bitmap so that we don't allocate there since they
1094 // don't represent valid slots.
1095 alloc_bit_map_[num_vec - 1] |= GetBitmapLastVectorMask(num_slots, num_vec);
1096}
1097
1098inline void RosAlloc::Run::ZeroHeader() {
Ian Rogers13735952014-10-08 12:43:28 -07001099 const uint8_t idx = size_bracket_idx_;
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001100 memset(this, 0, headerSizes[idx]);
1101}
1102
1103inline void RosAlloc::Run::ZeroData() {
Ian Rogers13735952014-10-08 12:43:28 -07001104 const uint8_t idx = size_bracket_idx_;
1105 uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001106 memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
1107}
1108
1109inline void RosAlloc::Run::FillAllocBitMap() {
1110 size_t num_vec = NumberOfBitmapVectors();
1111 memset(alloc_bit_map_, 0xFF, sizeof(uint32_t) * num_vec);
1112 first_search_vec_idx_ = num_vec - 1; // No free bits in any of the bitmap words.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001113}
1114
1115void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1116 void* arg) {
1117 size_t idx = size_bracket_idx_;
Ian Rogers13735952014-10-08 12:43:28 -07001118 uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001119 size_t num_slots = numOfSlots[idx];
1120 size_t bracket_size = IndexToBracketSize(idx);
Mathieu Chartierc38c5ea2015-02-04 17:46:29 -08001121 DCHECK_EQ(slot_base + num_slots * bracket_size,
1122 reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001123 size_t num_vec = RoundUp(num_slots, 32) / 32;
1124 size_t slots = 0;
Mathieu Chartierc38c5ea2015-02-04 17:46:29 -08001125 const uint32_t* const tl_free_vecp = IsThreadLocal() ? ThreadLocalFreeBitMap() : nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001126 for (size_t v = 0; v < num_vec; v++, slots += 32) {
Ian Rogers5d057052014-03-12 14:32:27 -07001127 DCHECK_GE(num_slots, slots);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001128 uint32_t vec = alloc_bit_map_[v];
Mathieu Chartierc38c5ea2015-02-04 17:46:29 -08001129 if (tl_free_vecp != nullptr) {
1130 // Clear out the set bits in the thread local free bitmap since these aren't actually
1131 // allocated.
1132 vec &= ~tl_free_vecp[v];
1133 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001134 size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
1135 for (size_t i = 0; i < end; ++i) {
1136 bool is_allocated = ((vec >> i) & 0x1) != 0;
Ian Rogers13735952014-10-08 12:43:28 -07001137 uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001138 if (is_allocated) {
1139 handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
1140 } else {
1141 handler(slot_addr, slot_addr + bracket_size, 0, arg);
1142 }
1143 }
1144 }
1145}
1146
// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change. Enabled for now.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;

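// BulkFree() works in two phases. Phase 1 marks each freed slot in its run's
// bulk free bit map and collects the set of affected runs. Phase 2 then takes
// each run's size bracket lock once and merges the bulk free bit map into the
// alloc bit map (for non-thread-local runs) or into the thread-local free bit
// map (for thread-local runs).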
size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  size_t freed_bytes = 0;
  if ((false)) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      freed_bytes += FreeInternal(self, ptrs[i]);
    }
    return freed_bytes;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, unordered_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  std::unordered_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    DCHECK_LE(base_, ptr);
    DCHECK_LT(ptr, base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = nullptr;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    } else {
      // Read the page map entries with a lock.
      MutexLock mu(self, lock_);
      DCHECK_LT(pm_idx, page_map_size_);
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    }
    DCHECK(run != nullptr);
    DCHECK_EQ(run->magic_num_, kMagicNum);
    // Set the bit in the bulk free bit map.
    freed_bytes += run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
    if (!run->to_be_bulk_freed_) {
      run->to_be_bulk_freed_ = true;
      runs.push_back(run);
    }
#else
    runs.insert(run);
#endif
  }

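  // A run may appear in ptrs many times, but it must be processed exactly once
  // below; the unordered_set (or, on device builds, the to_be_bulk_freed_
  // flag) deduplicates the affected runs.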
  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
  for (Run* run : runs) {
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
    if (run->IsThreadLocal()) {
      DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK(run->IsThreadLocal());
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      auto* non_full_runs = &non_full_runs_[idx];
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          run->ZeroHeader();
          MutexLock lock_mu(self, lock_);
          FreePages(self, run, true);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full) insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
  return freed_bytes;
}

std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_size_;
  FreePageRun* curr_fpr = NULL;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == NULL);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
                 << " (FPR start) fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " is_thread_local=" << run->is_thread_local_
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << pm;
        break;
    }
  }
  return stream.str();
}

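// Returns the number of usable bytes at ptr: the page-rounded size for a large
// object, or the full bracket size for a run slot. Either may exceed the
// originally requested size.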
size_t RosAlloc::UsableSize(const void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapReleased:
      // Fall-through.
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_size_;
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK_LT(pm_idx, capacity_ / kPageSize);
      }
      DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK_EQ(run->magic_num_, kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
          - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default: {
      LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
      break;
    }
  }
  return 0;
}

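// Trim() shrinks the footprint only when the highest-addressed pages form a
// free page run ending exactly at the current footprint; that run is returned
// via ArtRosAllocMoreCore() with a negative increment, and the now-unused tail
// of the page map is zeroed (via madvise where page-aligned).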
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_size_, new_num_of_pages);
    // Zero out the tail of the page map.
    uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
    uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
    if (madvise_size > 0) {
      DCHECK_ALIGNED(madvise_begin, kPageSize);
      DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
      if (!kMadviseZeroes) {
        memset(madvise_begin, 0, madvise_size);
      }
      CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
    }
    if (madvise_begin - zero_begin) {
      memset(zero_begin, 0, madvise_begin - zero_begin);
    }
    page_map_size_ = new_num_of_pages;
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == NULL) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_size_;
  size_t i = 0;
  while (i < pm_end) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        void* start = fpr;
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
        }
        void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK(IsFreePage(j));
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        // The dedicated full run doesn't contain any real allocations, don't visit the slots in
        // there.
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
    }
  }
}

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here. But Trim() is supported.
  if (capacity_ < new_capacity) {
    CHECK_LE(new_capacity, max_capacity_);
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

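// Revocation installs dedicated_full_run_ as the thread's run for each thread
// local size bracket. Since that run is permanently full, the thread's next
// allocation in the bracket misses and takes the slow path, which assigns a
// fresh run.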
size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
  ReaderMutexLock wmu(self, bulk_free_lock_);
  size_t free_bytes = 0U;
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
    CHECK(thread_local_run != nullptr);
    // Invalid means already revoked.
    DCHECK(thread_local_run->IsThreadLocal());
    if (thread_local_run != dedicated_full_run_) {
      // Note the thread local run may not be full here.
      thread->SetRosAllocRun(idx, dedicated_full_run_);
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      // Count the number of free slots left.
      size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
      free_bytes += num_free_slots * bracketSizes[idx];
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->SetIsThreadLocal(false);
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      RevokeRun(self, idx, thread_local_run);
    }
  }
  return free_bytes;
}

void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
  size_bracket_locks_[idx]->AssertHeld(self);
  DCHECK(run != dedicated_full_run_);
  if (run->IsFull()) {
    if (kIsDebugBuild) {
      full_runs_[idx].insert(run);
      DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
    }
  } else if (run->IsAllFree()) {
    run->ZeroHeader();
    MutexLock mu(self, lock_);
    FreePages(self, run, true);
  } else {
    non_full_runs_[idx].insert(run);
    DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run)
                << " into non_full_runs_[" << std::dec << idx << "]";
    }
  }
}

void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
  // Revoke the current runs which share the same idx as thread local runs.
  Thread* self = Thread::Current();
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (current_runs_[idx] != dedicated_full_run_) {
      RevokeRun(self, idx, current_runs_[idx]);
      current_runs_[idx] = dedicated_full_run_;
    }
  }
}

size_t RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when a mutator thread won't allocate such as at
  // the Zygote creation time or during the GC pause.
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  size_t free_bytes = 0U;
  for (Thread* thread : thread_list) {
    free_bytes += RevokeThreadLocalRuns(thread);
  }
  RevokeThreadUnsafeCurrentRuns();
  return free_bytes;
}

void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
    ReaderMutexLock wmu(self, bulk_free_lock_);
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
      DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
    }
  }
}

void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* t : thread_list) {
      AssertThreadLocalRunsAreRevoked(t);
    }
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
      MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
      CHECK_EQ(current_runs_[idx], dedicated_full_run_);
    }
  }
}

void RosAlloc::Initialize() {
  // bracketSizes.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
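  // Given kNumOfSizeBrackets == 34 (implied by the DCHECKs in these loops),
  // this yields brackets of 16, 32, ..., 512 bytes in 16-byte steps, plus a
  // 1 KB and a 2 KB bracket.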
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 1;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK_EQ(i, kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
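  // Worked example for the search below (hand-derived and therefore
  // illustrative, assuming kPageSize == 4096): for the 16-byte bracket with a
  // one-page run, s = 249 is the first candidate that fits. The three bit maps
  // take RoundUp(249, 32) / 8 = 32 bytes each, so the unaligned header is
  // 8 + 3 * 32 = 104 bytes, 112 after rounding up to a multiple of the bracket
  // size, and 112 + 249 * 16 == 4096 exactly.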
  // Compute numOfSlots and slotOffsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps.)
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps.)
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
  // Fill the alloc bitmap so nobody can successfully allocate from it.
  if (kIsDebugBuild) {
    dedicated_full_run_->magic_num_ = kMagicNum;
  }
  // It doesn't matter which size bracket we use since the main goal is to have the allocation
  // fail 100% of the time you attempt to allocate into the dedicated full run.
  dedicated_full_run_->size_bracket_idx_ = 0;
  dedicated_full_run_->FillAllocBitMap();
  dedicated_full_run_->SetIsThreadLocal(true);
}

void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
                                      size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
                                        size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}

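// Verify() must be called with the mutator lock exclusively held (e.g. during
// a GC pause). It walks the entire page map, cross-checking it against
// free_page_runs_ and the per-run invariants, and accounts for redzones around
// each object when running under Valgrind.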
void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively locked at " << __PRETTY_FUNCTION__;
  MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
  ReaderMutexLock wmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock lock_mu(self, lock_);
    size_t pm_end = page_map_size_;
    size_t i = 0;
    size_t valgrind_modifier = running_on_valgrind_ ?
        2 * ::art::gc::space::kDefaultValgrindRedZoneBytes :  // Redzones before and after.
        0;
    while (i < pm_end) {
      uint8_t pm = page_map_[i];
      switch (pm) {
        case kPageMapReleased:
          // Fall-through.
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK(IsAligned<kPageSize>(fpr_size))
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK(IsFreePage(j))
                << "A mismatch between the page map table for kPageMapEmpty "
                << " at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          uint8_t* start = base_ + i * kPageSize;
          if (running_on_valgrind_) {
            start += ::art::gc::space::kDefaultValgrindRedZoneBytes;
          }
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK_GT(obj_size + valgrind_modifier, kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size + valgrind_modifier, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size + valgrind_modifier
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK_EQ(run->magic_num_, kMagicNum);
          size_t idx = run->size_bracket_idx_;
          CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart "
                << " at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          // Fall-through.
        default:
          LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
          break;
      }
    }
  }
  std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : threads) {
    for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
      MutexLock brackets_mu(self, *size_bracket_locks_[i]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
      CHECK(thread_local_run != nullptr);
      CHECK(thread_local_run->IsThreadLocal());
      CHECK(thread_local_run == dedicated_full_run_ ||
            thread_local_run->size_bracket_idx_ == i);
    }
  }
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    MutexLock brackets_mu(self, *size_bracket_locks_[i]);
    Run* current_run = current_runs_[i];
    CHECK(current_run != nullptr);
    if (current_run != dedicated_full_run_) {
      // The dedicated full run is currently marked as thread local.
      CHECK(!current_run->IsThreadLocal());
      CHECK_EQ(current_run->size_bracket_idx_, i);
    }
  }
  // Call Verify() here for the lock order.
  for (auto& run : runs) {
    run->Verify(self, this, running_on_valgrind_);
  }
}

void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind) {
  DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
  const size_t idx = size_bracket_idx_;
  CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  CHECK_GT(num_vec, 0U);
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
  uint32_t last_word_mask = GetBitmapLastVectorMask(num_slots, num_vec);
  // Make sure all the bits at the end of the run are set so that we don't allocate there.
  CHECK_EQ(alloc_bit_map_[num_vec - 1] & last_word_mask, last_word_mask);
  // Ensure that the first bitmap index is valid.
  CHECK_LT(first_search_vec_idx_, num_vec);
  // Check the thread local runs, the current runs, and the run sets.
  if (IsThreadLocal()) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free bitmap is clean.
    CHECK(IsThreadLocalFreeBitmapClean())
        << "A non-thread-local run's thread local free bitmap isn't clean "
        << Dump();
    // Check if it's a current run for the size bucket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bucket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread local nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      auto& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
        // If it's not full, it must be in the non-full run set.
        CHECK(non_full_runs.find(this) != non_full_runs.end())
            << "A non-full run isn't in the non-full run set " << Dump();
      } else {
        // If it's full, it must be in the full run set (debug build only.)
        if (kIsDebugBuild) {
          auto& full_runs = rosalloc->full_runs_[idx];
          CHECK(full_runs.find(this) != full_runs.end())
              << " A full run isn't in the full run set " << Dump();
        }
      }
    }
2096 }
2097 // Check each slot.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002098 size_t slots = 0;
Andreas Gamped7576322014-10-24 22:13:45 -07002099 size_t valgrind_modifier = running_on_valgrind ?
2100 2 * ::art::gc::space::kDefaultValgrindRedZoneBytes :
2101 0U;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002102 for (size_t v = 0; v < num_vec; v++, slots += 32) {
Ian Rogers5d057052014-03-12 14:32:27 -07002103 DCHECK_GE(num_slots, slots) << "Out of bounds";
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002104 uint32_t vec = alloc_bit_map_[v];
2105 uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
2106 size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
2107 for (size_t i = 0; i < end; ++i) {
2108 bool is_allocated = ((vec >> i) & 0x1) != 0;
2109 // If a thread local run, slots may be marked freed in the
2110 // thread local free bitmap.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07002111 bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002112 if (is_allocated && !is_thread_local_freed) {
Ian Rogers13735952014-10-08 12:43:28 -07002113 uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
Andreas Gamped7576322014-10-24 22:13:45 -07002114 if (running_on_valgrind) {
2115 slot_addr += ::art::gc::space::kDefaultValgrindRedZoneBytes;
2116 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002117 mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
2118 size_t obj_size = obj->SizeOf();
Andreas Gamped7576322014-10-24 22:13:45 -07002119 CHECK_LE(obj_size + valgrind_modifier, kLargeSizeThreshold)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002120 << "A run slot contains a large object " << Dump();
Andreas Gamped7576322014-10-24 22:13:45 -07002121 CHECK_EQ(SizeToIndex(obj_size + valgrind_modifier), idx)
Hiroshi Yamauchi4cd662e2014-04-03 16:28:10 -07002122 << PrettyTypeOf(obj) << " "
Andreas Gamped7576322014-10-24 22:13:45 -07002123 << "obj_size=" << obj_size << "(" << obj_size + valgrind_modifier << "), idx=" << idx
2124 << " A run slot contains an object with wrong size " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002125 }
2126 }
2127 }
2128}
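// The slot check above scans the allocation bitmap one 32-bit word at a
// time: word v covers slots [32*v, 32*v + 31], and a slot holds a live
// object only if its alloc bit is set and, for thread local runs, its
// thread local free bit is clear. Below is a minimal standalone sketch of
// that traversal; the function name and parameters are hypothetical and
// not part of RosAlloc.
#include <algorithm>
#include <cstddef>
#include <cstdint>

size_t CountLiveSlots(const uint32_t* alloc_bit_map,
                      const uint32_t* thread_local_free_bit_map,
                      size_t num_slots, bool is_thread_local) {
  size_t live = 0;
  const size_t num_vec = (num_slots + 31) / 32;  // One word per 32 slots.
  for (size_t v = 0, slots = 0; v < num_vec; ++v, slots += 32) {
    const uint32_t vec = alloc_bit_map[v];
    // Only thread local runs carry pending frees in the second bitmap.
    const uint32_t tl_free = is_thread_local ? thread_local_free_bit_map[v] : 0U;
    // The last word may cover fewer than 32 slots.
    const size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      const bool is_allocated = ((vec >> i) & 0x1) != 0;
      const bool is_thread_local_freed = ((tl_free >> i) & 0x1) != 0;
      if (is_allocated && !is_thread_local_freed) {
        ++live;  // This slot would be verified as a live object above.
      }
    }
  }
  return live;
}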
2129
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002130size_t RosAlloc::ReleasePages() {
2131 VLOG(heap) << "RosAlloc::ReleasePages()";
2132 DCHECK(!DoesReleaseAllPages());
2133 Thread* self = Thread::Current();
2134 size_t reclaimed_bytes = 0;
2135 size_t i = 0;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002136 // Check the page map size which might have changed due to grow/shrink.
2137 while (i < page_map_size_) {
2138 // Reading the page map without a lock is racy but the race is benign since it should only
2139 // result in occasionally not releasing pages which we could release.
Ian Rogers13735952014-10-08 12:43:28 -07002140 uint8_t pm = page_map_[i];
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002141 switch (pm) {
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002142 case kPageMapReleased:
2143 // Fall through.
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002144 case kPageMapEmpty: {
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002145 // This is currently the start of a free page run.
2146 // Acquire the lock to prevent other threads racing in and modifying the page map.
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002147 MutexLock mu(self, lock_);
2148 // Check that it's still empty after we acquired the lock since another thread could have
2149 // raced in and placed an allocation here.
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002150 if (IsFreePage(i)) {
2151 // Free page runs can start with a released page if we coalesced a released page free
2152 // page run with an empty page run.
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002153 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002154 // There is a race condition where FreePage can coalesce fpr with the previous
2155 // free page run before we acquire lock_. In that case free_page_runs_.find will not find
2156 // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
2157 // to the next page.
2158 if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
2159 size_t fpr_size = fpr->ByteSize(this);
2160 DCHECK(IsAligned<kPageSize>(fpr_size));
Ian Rogers13735952014-10-08 12:43:28 -07002161 uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002162 reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
2163 size_t pages = fpr_size / kPageSize;
2164 CHECK_GT(pages, 0U) << "Infinite loop probable";
2165 i += pages;
2166 DCHECK_LE(i, page_map_size_);
2167 break;
2168 }
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002169 }
Ian Rogersfc787ec2014-10-09 21:56:44 -07002170 FALLTHROUGH_INTENDED;
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002171 }
2172 case kPageMapLargeObject: // Fall through.
2173 case kPageMapLargeObjectPart: // Fall through.
2174 case kPageMapRun: // Fall through.
2175 case kPageMapRunPart: // Fall through.
2176 ++i;
2177 break; // Skip.
2178 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07002179 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002180 break;
2181 }
2182 }
2183 return reclaimed_bytes;
2184}
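// ReleasePages() above is an optimistic scan: it reads page_map_ without
// holding lock_ (racy, but the worst case is skipping a releasable page
// this round), then re-checks the state under the lock before releasing.
// A standalone sketch of that check/lock/re-check pattern follows;
// PageTable, kEmpty and kReleased are hypothetical stand-ins, not the
// real RosAlloc types.
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

enum : uint8_t { kEmpty = 0, kReleased = 1, kInUse = 2 };

struct PageTable {
  explicit PageTable(size_t num_pages) : states(num_pages) {}
  std::mutex lock;                           // Guards state transitions.
  std::vector<std::atomic<uint8_t>> states;  // One entry per page.
};

// Marks empty pages as released; returns how many pages transitioned.
size_t ReleaseEmptyPages(PageTable* table) {
  size_t released = 0;
  for (size_t i = 0; i < table->states.size(); ++i) {
    // Unlocked read: a stale value only means a page is skipped, which is
    // benign, mirroring the comment in ReleasePages().
    if (table->states[i].load(std::memory_order_relaxed) != kEmpty) {
      continue;
    }
    std::lock_guard<std::mutex> mu(table->lock);
    // Re-check under the lock: an allocation may have raced in since.
    if (table->states[i].load(std::memory_order_relaxed) == kEmpty) {
      table->states[i].store(kReleased, std::memory_order_relaxed);
      ++released;
    }
  }
  return released;
}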
2185
Ian Rogers13735952014-10-08 12:43:28 -07002186size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002187 DCHECK_ALIGNED(start, kPageSize);
2188 DCHECK_ALIGNED(end, kPageSize);
2189 DCHECK_LT(start, end);
2190 if (kIsDebugBuild) {
2191 // In the debug build, the first page of a free page run
2192 // contains a magic number for debugging. Exclude it.
2193 start += kPageSize;
Andreas Gamped7576322014-10-24 22:13:45 -07002194
2195 // Single pages won't be released.
2196 if (start == end) {
2197 return 0;
2198 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002199 }
2200 if (!kMadviseZeroes) {
2201 // TODO: Do this when we resurrect the page instead.
2202 memset(start, 0, end - start);
2203 }
2204 CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
2205 size_t pm_idx = ToPageMapIndex(start);
2206 size_t reclaimed_bytes = 0;
2207 // Calculate reclaimed bytes and update the page map.
2208 const size_t max_idx = pm_idx + (end - start) / kPageSize;
2209 for (; pm_idx < max_idx; ++pm_idx) {
2210 DCHECK(IsFreePage(pm_idx));
2211 if (page_map_[pm_idx] == kPageMapEmpty) {
2212 // Mark the page as released and update how many bytes we released.
2213 reclaimed_bytes += kPageSize;
2214 page_map_[pm_idx] = kPageMapReleased;
2215 }
2216 }
2217 return reclaimed_bytes;
2218}
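// ReleasePageRange() hands memory back with madvise(2): MADV_DONTNEED
// marks the range disposable, so the kernel may reclaim the physical
// pages while the virtual mapping stays valid; on Linux, anonymous pages
// then read back as zeroes on the next fault, which is why the memset is
// skipped when kMadviseZeroes holds. A small standalone demo of that
// call, assuming Linux semantics for a private anonymous mapping:
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>
#include <cstring>

int main() {
  const size_t page_size = sysconf(_SC_PAGESIZE);
  const size_t len = 4 * page_size;
  void* mem = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  memset(mem, 0xAB, len);  // Dirty the pages so there is something to drop.
  // Release the physical pages; the mapping itself remains usable.
  const int rc = madvise(mem, len, MADV_DONTNEED);
  assert(rc == 0);
  // On Linux, a released anonymous page reads back as zero-filled.
  assert(static_cast<unsigned char*>(mem)[0] == 0);
  munmap(mem, len);
  return 0;
}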
2219
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07002220void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
2221 Thread* self = Thread::Current();
2222 size_t largest_continuous_free_pages = 0;
2223 WriterMutexLock wmu(self, bulk_free_lock_);
2224 MutexLock mu(self, lock_);
2225 for (FreePageRun* fpr : free_page_runs_) {
2226 largest_continuous_free_pages = std::max(largest_continuous_free_pages,
2227 fpr->ByteSize(this));
2228 }
2229 if (failed_alloc_bytes > kLargeSizeThreshold) {
2230 // Large allocation.
2231 size_t required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
2232 if (required_bytes > largest_continuous_free_pages) {
2233 os << "; failed due to fragmentation (required continguous free "
2234 << required_bytes << " bytes where largest contiguous free "
2235 << largest_continuous_free_pages << " bytes)";
2236 }
2237 } else {
2238 // Non-large allocation.
2239 size_t required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
2240 if (required_bytes > largest_continuous_free_pages) {
2241 os << "; failed due to fragmentation (required continguous free "
2242 << required_bytes << " bytes for a new buffer where largest contiguous free "
2243 << largest_continuous_free_pages << " bytes)";
2244 }
2245 }
2246}
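// The report above reduces to one comparison: the rounded-up size the
// failed allocation needs versus the largest contiguous free page run.
// A hedged standalone sketch of that check; kPage, kLargeThreshold and
// the helper names are illustrative stand-ins, not ART's constants.
#include <cstddef>

constexpr size_t kPage = 4096;
constexpr size_t kLargeThreshold = 2048;  // Stand-in for kLargeSizeThreshold.

constexpr size_t RoundUpToPage(size_t bytes) {
  return (bytes + kPage - 1) & ~(kPage - 1);  // kPage is a power of two.
}

// True if the failure is explained by fragmentation: total free space may
// suffice, yet no single free run is large enough.
bool FailedDueToFragmentation(size_t failed_alloc_bytes,
                              size_t largest_contiguous_free_bytes) {
  // Large allocations need whole pages directly; small ones need room for
  // a fresh run of their bracket (approximated here as one page).
  const size_t required = failed_alloc_bytes > kLargeThreshold
                              ? RoundUpToPage(failed_alloc_bytes)
                              : kPage;
  return required > largest_contiguous_free_bytes;
}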
2247
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07002248} // namespace allocator
2249} // namespace gc
2250} // namespace art