/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, size_t page_release_size_threshold)
    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold) {
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  memset(current_runs_, 0, sizeof(current_runs_));
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_locks_[i] = new Mutex("a rosalloc size bracket lock",
                                       kRosAllocBracketLock);
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", NULL, RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, &error_msg));
  CHECK(page_map_mem_map_.get() != NULL) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}
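
// Commentary on the constructor: the whole capacity_ starts out as a single
// FreePageRun at base_ in free_page_runs_, and page_map_ holds one byte of
// state per page. Illustrative example: an 8 MB capacity with 4 KB pages
// yields page_map_size_ == 2048 entries, all zero from the anonymous mapping,
// which (assuming kPageMapEmpty is the zero value) matches the
// DCHECK_EQ(page_map_[...], kPageMapEmpty) checks in AllocPages().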

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
}

void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      art_heap_rosalloc_morecore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK_EQ(page_map_[page_map_idx + i], kPageMapEmpty);
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page which isn't madvised away in the debug
      // build for the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}
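
// Commentary on AllocPages(): free_page_runs_ is ordered by address, so the
// loop above is a first-fit, lowest-address search. A run larger than the
// request is split and the tail reinserted; on failure the footprint grows by
// at least 2 MB (capped at capacity_) via art_heap_rosalloc_morecore() and the
// run at the end is retried. Illustrative example: requesting 3 pages from a
// 16-page free run returns its first 3 pages and reinserts a 13-page run
// starting at the original address + 3 * kPageSize.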

size_t RosAlloc::FreePages(Thread* self, void* ptr) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  byte pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  byte pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      pm_part_type = kPageMapEmpty;
      LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, num_pages * kPageSize);
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return num_pages;
}
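
// Commentary on FreePages(): the page map is walked forward over the
// kPageMapRunPart / kPageMapLargeObjectPart entries to recover the extent, so
// callers need not pass a size. The freed range is coalesced with adjacent
// free runs in both address directions before ReleasePages() optionally
// returns the memory to the OS (madvise), as governed by page_release_mode_.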

void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  if (bytes_allocated != NULL) {
    *bytes_allocated = num_pages * kPageSize;
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  if (!DoesReleaseAllPages()) {
    // If it does not release all pages, pages may not be zeroed out.
    memset(r, 0, size);
  }
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory) {
    byte* bytes = reinterpret_cast<byte*>(r);
    for (size_t i = 0; i < size; ++i) {
      DCHECK_EQ(bytes[i], 0);
    }
  }
  return r;
}
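
// Commentary: allocations above kLargeSizeThreshold bypass the runs entirely
// and are page-granular, so *bytes_allocated reports the rounded-up page size
// rather than the requested size. The memset is skipped only when every freed
// page is released back to the OS, since such pages come back zero-filled.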

size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    byte page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return 0;
      case kPageMapLargeObject:
        return FreePages(self, ptr) * kPageSize;
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return 0;
      case kPageMapRun:
      case kPageMapRunPart: {
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK_LT(pi, capacity_ / kPageSize);
        }
        DCHECK_EQ(page_map_[pi], kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return 0;
    }
  }
  DCHECK(run != nullptr);
  const size_t size = IndexToBracketSize(run->size_bracket_idx_);
  FreeFromRun(self, ptr, run);
  return size;
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}
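
// Commentary on the locking scheme: individual frees take bulk_free_lock_ as
// readers and may run concurrently with one another, while BulkFree() takes
// it as a writer to exclude them, keeping a pointer's page map entry stable
// while bulk-free bitmaps are being marked.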

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  Run* new_run;
  size_t num_pages = numOfPages[idx];
  // Get the lowest address non-full run from the binary tree.
  Run* temp = NULL;
  std::set<Run*>* bt = &non_full_runs_[idx];
  std::set<Run*>::iterator found = bt->lower_bound(temp);
  if (found != bt->end()) {
    // If there's one, use it as the current run.
    Run* non_full_run = *found;
    DCHECK(non_full_run != NULL);
    new_run = non_full_run;
    DCHECK_EQ(new_run->is_thread_local_, 0);
    bt->erase(found);
    DCHECK_EQ(non_full_run->is_thread_local_, 0);
  } else {
    // If there's none, allocate a new run and use it as the
    // current run.
    {
      MutexLock mu(self, lock_);
      new_run = reinterpret_cast<Run*>(AllocPages(self, num_pages, kPageMapRun));
    }
    if (new_run == NULL) {
      return NULL;
    }
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->top_slot_idx_ = 0;
    new_run->ClearBitMaps();
    new_run->to_be_bulk_freed_ = false;
  }
  return new_run;
}
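
// Commentary: lower_bound(nullptr) on the address-ordered std::set yields its
// first element, so RefillRun() reuses the lowest-address non-full run before
// paying for new pages, keeping run pages packed toward base_.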

void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    if (UNLIKELY(thread_local_run == NULL)) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      thread_local_run = RefillRun(self, idx);
      if (UNLIKELY(thread_local_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      thread_local_run->is_thread_local_ = 1;
      self->SetRosAllocRun(idx, thread_local_run);
      DCHECK(!thread_local_run->IsFull());
    }

    DCHECK(thread_local_run != NULL);
    DCHECK_NE(thread_local_run->is_thread_local_, 0);
    slot_addr = thread_local_run->AllocSlot();

    if (UNLIKELY(slot_addr == NULL)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Reinstate the bump index mode if it's all free.
          DCHECK_EQ(thread_local_run->top_slot_idx_, numOfSlots[idx]);
          thread_local_run->top_slot_idx_ = 0;
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        self->SetRosAllocRun(idx, nullptr);
        thread_local_run->is_thread_local_ = 0;
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == NULL)) {
          return NULL;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->is_thread_local_ = 1;
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != NULL);
      DCHECK(!thread_local_run->IsFull());
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* current_run = current_runs_[idx];
    if (UNLIKELY(current_run == NULL)) {
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
    }
    DCHECK(current_run != NULL);
    slot_addr = current_run->AllocSlot();
    if (UNLIKELY(slot_addr == NULL)) {
      // The current run got full. Try to refill it.
      DCHECK(current_run->IsFull());
      current_runs_[idx] = NULL;
      if (kIsDebugBuild) {
        // Insert it into full_runs and set the current run to NULL.
        full_runs_[idx].insert(current_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(current_run)
                    << " into full_runs_[" << std::dec << idx << "]";
        }
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(current_run != NULL);
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
      slot_addr = current_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  if (LIKELY(bytes_allocated != NULL)) {
    *bytes_allocated = bracket_size;
  }
  memset(slot_addr, 0, size);
  return slot_addr;
}
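
// Commentary: small brackets (idx <= kMaxThreadLocalSizeBracketIdx) allocate
// from a per-thread run without taking any lock on the fast path; the bracket
// lock is taken only when the thread-local run is missing or full. Frees into
// a thread-local run accumulate in its thread-local free bitmap, and the
// merge step above reclaims them lazily, so the run is replaced only when the
// merge finds nothing to reclaim.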

void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  size_t idx = run->size_bracket_idx_;
  MutexLock mu(self, *size_bracket_locks_[idx]);
  bool run_was_full = false;
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->is_thread_local_ != 0)) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  std::set<Run*>* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = NULL;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    {
      MutexLock mu(self, lock_);
      FreePages(self, run);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      std::set<Run*>::iterator pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " top_slot_idx=" << top_slot_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

void* RosAlloc::Run::AllocSlot() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  DCHECK_LE(top_slot_idx_, num_slots);
  if (LIKELY(top_slot_idx_ < num_slots)) {
    // If it's in bump index mode, grab the top slot and increment the top index.
    size_t slot_idx = top_slot_idx_;
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    top_slot_idx_++;
    size_t vec_idx = slot_idx / 32;
    size_t vec_off = slot_idx % 32;
    uint32_t* vec = &alloc_bit_map_[vec_idx];
    DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    *vec |= 1 << vec_off;
    DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    return slot_addr;
  }
  // Not in bump index mode. Search the alloc bit map for an empty slot.
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slot_idx = 0;
  bool found_slot = false;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t *vecp = &alloc_bit_map_[v];
    uint32_t ffz1 = __builtin_ffs(~*vecp);
    uint32_t ffz;
    // TODO: Use LIKELY or UNLIKELY here?
    if (LIKELY(ffz1 > 0 && (ffz = ffz1 - 1) + v * 32 < num_slots)) {
      // Found an empty slot. Set the bit.
      DCHECK_EQ((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      *vecp |= (1 << ffz);
      DCHECK_NE((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      slot_idx = ffz + v * 32;
      found_slot = true;
      break;
    }
  }
  if (LIKELY(found_slot)) {
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    return slot_addr;
  }
  return NULL;
}
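
// Commentary on the bitmap scan: __builtin_ffs(~vec) returns the 1-based
// index of the lowest zero bit of vec, or 0 if vec is all ones. Worked
// example: vec == 0x0000000f has its four low slots allocated, so
// ~vec == 0xfffffff0, ffs returns 5, and ffz == 4 selects slot v * 32 + 4,
// the first free slot covered by that word.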

inline void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK_EQ(is_thread_local_, 0);
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec &= ~(1 << vec_off);
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK_NE(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the thread local free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      is_all_free_after = false;
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if there was at least a bit set in the thread-local
  // free bit map and at least a bit in the alloc bit map changed.
  return changed;
}
911inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
912 DCHECK_EQ(is_thread_local_, 0);
913 // Free slots in the alloc bit map based on the bulk free bit map.
914 byte idx = size_bracket_idx_;
915 size_t num_slots = numOfSlots[idx];
916 size_t num_vec = RoundUp(num_slots, 32) / 32;
917 uint32_t* vecp = &alloc_bit_map_[0];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800918 uint32_t* free_vecp = &BulkFreeBitMap()[0];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700919 for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
920 uint32_t free_vec = *free_vecp;
921 if (free_vec != 0) {
922 *vecp &= ~free_vec;
923 *free_vecp = 0; // clear the bulk free bit map.
924 }
925 DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
926 }
927}
928
929inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
930 DCHECK_NE(is_thread_local_, 0);
931 // Union the thread local bit map with the bulk free bit map.
932 byte idx = size_bracket_idx_;
933 size_t num_slots = numOfSlots[idx];
934 size_t num_vec = RoundUp(num_slots, 32) / 32;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800935 uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
936 uint32_t* from_vecp = &BulkFreeBitMap()[0];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700937 for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
938 uint32_t from_vec = *from_vecp;
939 if (from_vec != 0) {
940 *to_vecp |= from_vec;
Hiroshi Yamauchi70f60042014-02-03 12:31:29 -0800941 *from_vecp = 0; // clear the bulk free bit map.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700942 }
943 DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
944 }
945}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK_NE(is_thread_local_, 0);
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline void RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                const char* caller_name) {
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec |= 1 << vec_off;
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::IsAllFree() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsFull() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots);
    uint32_t vec = alloc_bit_map_[v];
    uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
        : (1 << (num_slots - slots)) - 1;
    if ((num_slots - slots) >= 32) {
      DCHECK_EQ(mask, static_cast<uint32_t>(-1));
    }
    if (vec != mask) {
      return false;
    }
  }
  return true;
}
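
// Worked example (illustrative): for num_slots == 40, the first bitmap word
// must equal 0xffffffff and the second is compared against
// (1 << 8) - 1 == 0xff, so the unused high 24 bits of the last word are
// ignored when deciding fullness.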

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline void RosAlloc::Run::ClearBitMaps() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  memset(alloc_bit_map_, 0, sizeof(uint32_t) * num_vec * 3);
}
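
// Note: the single memset covers num_vec * 3 words because the alloc, bulk
// free, and thread-local free bitmaps are laid out back to back starting at
// alloc_bit_map_, the layout encoded by bulkFreeBitMapOffsets and
// threadLocalFreeBitMapOffsets.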

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      byte* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change. Disabled for now.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = false;

Mathieu Chartier8585bad2014-04-11 17:53:48 -07001078size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
1079 size_t freed_bytes = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001080 if (false) {
1081 // Used only to test Free() as GC uses only BulkFree().
1082 for (size_t i = 0; i < num_ptrs; ++i) {
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001083 freed_bytes += FreeInternal(self, ptrs[i]);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001084 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001085 return freed_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001086 }
1087
1088 WriterMutexLock wmu(self, bulk_free_lock_);
1089
1090 // First mark slots to free in the bulk free bit map without locking the
1091 // size bracket locks. On host, hash_set is faster than vector + flag.
1092#ifdef HAVE_ANDROID_OS
1093 std::vector<Run*> runs;
1094#else
1095 hash_set<Run*, hash_run, eq_run> runs;
1096#endif
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001097 for (size_t i = 0; i < num_ptrs; i++) {
1098 void* ptr = ptrs[i];
1099 ptrs[i] = NULL;
Ian Rogers5d057052014-03-12 14:32:27 -07001100 DCHECK_LE(base_, ptr);
1101 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001102 size_t pm_idx = RoundDownToPageMapIndex(ptr);
1103 Run* run = NULL;
1104 if (kReadPageMapEntryWithoutLockInBulkFree) {
1105 // Read the page map entries without locking the lock.
1106 byte page_map_entry = page_map_[pm_idx];
1107 if (kTraceRosAlloc) {
1108 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1109 << std::dec << pm_idx
1110 << ", page_map_entry=" << static_cast<int>(page_map_entry);
1111 }
1112 if (LIKELY(page_map_entry == kPageMapRun)) {
1113 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001114 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001115 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1116 size_t pi = pm_idx;
1117 DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
1118 // Find the beginning of the run.
1119 while (page_map_[pi] != kPageMapRun) {
1120 pi--;
Ian Rogers5d057052014-03-12 14:32:27 -07001121 DCHECK_LT(pi, capacity_ / kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001122 }
Ian Rogers5d057052014-03-12 14:32:27 -07001123 DCHECK_EQ(page_map_[pi], kPageMapRun);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001124 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001125 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001126 } else if (page_map_entry == kPageMapLargeObject) {
1127 MutexLock mu(self, lock_);
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001128 freed_bytes += FreePages(self, ptr) * kPageSize;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001129 continue;
1130 } else {
1131 LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
1132 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001133 DCHECK(run != nullptr);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001134 // Set the bit in the bulk free bit map.
1135 run->MarkBulkFreeBitMap(ptr);
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001136 freed_bytes += IndexToBracketSize(run->size_bracket_idx_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001137#ifdef HAVE_ANDROID_OS
1138 if (!run->to_be_bulk_freed_) {
1139 run->to_be_bulk_freed_ = true;
1140 runs.push_back(run);
1141 }
1142#else
1143 runs.insert(run);
1144#endif
1145 } else {
1146 // Read the page map entries with a lock.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001147 bool free_from_run = false;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001148 {
1149 MutexLock mu(self, lock_);
Ian Rogers5d057052014-03-12 14:32:27 -07001150 DCHECK_LT(pm_idx, page_map_size_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001151 byte page_map_entry = page_map_[pm_idx];
1152 if (kTraceRosAlloc) {
1153 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1154 << std::dec << pm_idx
1155 << ", page_map_entry=" << static_cast<int>(page_map_entry);
1156 }
1157 if (LIKELY(page_map_entry == kPageMapRun)) {
1158 free_from_run = true;
1159 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001160 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001161 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1162 free_from_run = true;
1163 size_t pi = pm_idx;
1164 DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
1165 // Find the beginning of the run.
1166 while (page_map_[pi] != kPageMapRun) {
1167 pi--;
Ian Rogers5d057052014-03-12 14:32:27 -07001168 DCHECK_LT(pi, capacity_ / kPageSize);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001169 }
Ian Rogers5d057052014-03-12 14:32:27 -07001170 DCHECK_EQ(page_map_[pi], kPageMapRun);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001171 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001172 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001173 } else if (page_map_entry == kPageMapLargeObject) {
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001174 freed_bytes += FreePages(self, ptr) * kPageSize;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001175 } else {
1176 LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
1177 }
1178 }
1179 if (LIKELY(free_from_run)) {
1180 DCHECK(run != NULL);
1181 // Set the bit in the bulk free bit map.
1182 run->MarkBulkFreeBitMap(ptr);
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001183 freed_bytes += IndexToBracketSize(run->size_bracket_idx_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001184#ifdef HAVE_ANDROID_OS
1185 if (!run->to_be_bulk_freed_) {
1186 run->to_be_bulk_freed_ = true;
1187 runs.push_back(run);
1188 }
1189#else
1190 runs.insert(run);
1191#endif
1192 }
1193 }
1194 }
1195
1196 // Now, iterate over the affected runs and update the alloc bit map
1197 // based on the bulk free bit map (for non-thread-local runs) and
1198 // union the bulk free bit map into the thread-local free bit map
1199 // (for thread-local runs.)
1200#ifdef HAVE_ANDROID_OS
1201 typedef std::vector<Run*>::iterator It;
1202#else
1203 typedef hash_set<Run*, hash_run, eq_run>::iterator It;
1204#endif
1205 for (It it = runs.begin(); it != runs.end(); ++it) {
1206 Run* run = *it;
1207#ifdef HAVE_ANDROID_OS
1208 DCHECK(run->to_be_bulk_freed_);
1209 run->to_be_bulk_freed_ = false;
1210#endif
1211 size_t idx = run->size_bracket_idx_;
1212 MutexLock mu(self, *size_bracket_locks_[idx]);
1213 if (run->is_thread_local_ != 0) {
1214 DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
1215 DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
1216 DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
1217 run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
1218 if (kTraceRosAlloc) {
1219 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
1220 << std::hex << reinterpret_cast<intptr_t>(run);
1221 }
1222 DCHECK_NE(run->is_thread_local_, 0);
1223 // A thread local run will be kept as a thread local even if
1224 // it's become all free.
1225 } else {
1226 bool run_was_full = run->IsFull();
1227 run->MergeBulkFreeBitMapIntoAllocBitMap();
1228 if (kTraceRosAlloc) {
1229 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
1230 << reinterpret_cast<intptr_t>(run);
1231 }
1232 // Check if the run should be moved to non_full_runs_ or
1233 // free_page_runs_.
1234 std::set<Run*>* non_full_runs = &non_full_runs_[idx];
1235 hash_set<Run*, hash_run, eq_run>* full_runs =
1236 kIsDebugBuild ? &full_runs_[idx] : NULL;
1237 if (run->IsAllFree()) {
1238 // It has just become completely free. Free the pages of the
1239 // run.
1240 bool run_was_current = run == current_runs_[idx];
1241 if (run_was_current) {
1242 DCHECK(full_runs->find(run) == full_runs->end());
1243 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1244 // If it was a current run, reuse it.
1245 } else if (run_was_full) {
1246 // If it was full, remove it from the full run set (debug
1247 // only.)
1248 if (kIsDebugBuild) {
1249 hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
1250 DCHECK(pos != full_runs->end());
1251 full_runs->erase(pos);
1252 if (kTraceRosAlloc) {
1253 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1254 << reinterpret_cast<intptr_t>(run)
1255 << " from full_runs_";
1256 }
1257 DCHECK(full_runs->find(run) == full_runs->end());
1258 }
1259 } else {
1260 // If it was in a non full run set, remove it from the set.
1261 DCHECK(full_runs->find(run) == full_runs->end());
1262 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1263 non_full_runs->erase(run);
1264 if (kTraceRosAlloc) {
1265 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1266 << reinterpret_cast<intptr_t>(run)
1267 << " from non_full_runs_";
1268 }
1269 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1270 }
1271 if (!run_was_current) {
1272 MutexLock mu(self, lock_);
1273 FreePages(self, run);
1274 }
1275 } else {
1277 1276 // It is not completely free. If it was full (and thus not yet
1278 1277 // in the non-full run set) and it isn't the current run, insert
1279 1278 // it into the non-full run set.
1279 if (run == current_runs_[idx]) {
1280 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1281 DCHECK(full_runs->find(run) == full_runs->end());
1282 // If it was a current run, keep it.
1283 } else if (run_was_full) {
1284 // If it was full, remove it from the full run set (debug
1285 // only) and insert into the non-full run set.
1286 DCHECK(full_runs->find(run) != full_runs->end());
1287 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1288 if (kIsDebugBuild) {
1289 full_runs->erase(run);
1290 if (kTraceRosAlloc) {
1291 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1292 << reinterpret_cast<intptr_t>(run)
1293 << " from full_runs_";
1294 }
1295 }
1296 non_full_runs->insert(run);
1297 if (kTraceRosAlloc) {
1298 LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
1299 << reinterpret_cast<intptr_t>(run)
1300 << " into non_full_runs_[" << std::dec << idx;
1301 }
1302 } else {
1304 1303 // It was not full, so leave it in the non-full run set.
1304 DCHECK(full_runs->find(run) == full_runs->end());
1305 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1306 }
1307 }
1308 }
1309 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001310 return freed_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001311}
1312
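// Renders the page map as text for debugging, one "[index]=Kind" line
// per page, cross-checked against the free page run set. The caller
// must hold lock_ (asserted below).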
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001313std::string RosAlloc::DumpPageMap() {
1314 std::ostringstream stream;
1315 stream << "RosAlloc PageMap: " << std::endl;
1316 lock_.AssertHeld(Thread::Current());
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001317 size_t end = page_map_size_;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001318 FreePageRun* curr_fpr = NULL;
1319 size_t curr_fpr_size = 0;
1320 size_t remaining_curr_fpr_size = 0;
1321 size_t num_running_empty_pages = 0;
1322 for (size_t i = 0; i < end; ++i) {
1323 byte pm = page_map_[i];
1324 switch (pm) {
1325 case kPageMapEmpty: {
1326 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1327 if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
1328 // Encountered a fresh free page run.
1329 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1330 DCHECK(fpr->IsFree());
1331 DCHECK(curr_fpr == NULL);
1332 DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
1333 curr_fpr = fpr;
1334 curr_fpr_size = fpr->ByteSize(this);
1335 DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
1336 remaining_curr_fpr_size = curr_fpr_size - kPageSize;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001337 stream << "[" << i << "]=Empty (FPR start)"
1338 << " fpr_size=" << curr_fpr_size
1339 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001340 if (remaining_curr_fpr_size == 0) {
1341 // Reset at the end of the current free page run.
1342 curr_fpr = NULL;
1343 curr_fpr_size = 0;
1344 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001345 stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001346 DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
1347 } else {
1348 // Still part of the current free page run.
1349 DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
1350 DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
1351 DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
1352 DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
1353 remaining_curr_fpr_size -= kPageSize;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001354 stream << "[" << i << "]=Empty (FPR part)"
1355 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001356 if (remaining_curr_fpr_size == 0) {
1357 // Reset at the end of the current free page run.
1358 curr_fpr = NULL;
1359 curr_fpr_size = 0;
1360 }
1361 }
1362 num_running_empty_pages++;
1363 break;
1364 }
1365 case kPageMapLargeObject: {
1366 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1367 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001368 stream << "[" << i << "]=Large (start)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001369 break;
1370 }
1371 case kPageMapLargeObjectPart:
1372 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1373 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001374 stream << "[" << i << "]=Large (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001375 break;
1376 case kPageMapRun: {
1377 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1378 num_running_empty_pages = 0;
1379 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1380 size_t idx = run->size_bracket_idx_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001381 stream << "[" << i << "]=Run (start)"
1382 << " idx=" << idx
1383 << " numOfPages=" << numOfPages[idx]
1384 << " thread_local=" << static_cast<int>(run->is_thread_local_)
1385 << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
1386 << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001387 break;
1388 }
1389 case kPageMapRunPart:
1390 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1391 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001392 stream << "[" << i << "]=Run (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001393 break;
1394 default:
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001395 stream << "[" << i << "]=Unrecognizable page map type: " << pm;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001396 break;
1397 }
1398 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001399 return stream.str();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001400}
1401
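// Returns the number of bytes actually backing the allocation at ptr:
// the page-rounded size for a large object, or the bracket size for a
// run slot. E.g., a 33-byte request served from the 48-byte bracket
// reports 48 usable bytes.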
1402size_t RosAlloc::UsableSize(void* ptr) {
Ian Rogers5d057052014-03-12 14:32:27 -07001403 DCHECK_LE(base_, ptr);
1404 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001405 size_t pm_idx = RoundDownToPageMapIndex(ptr);
1406 MutexLock mu(Thread::Current(), lock_);
1407 switch (page_map_[pm_idx]) {
1408 case kPageMapEmpty:
1409 LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
1410 << reinterpret_cast<intptr_t>(ptr);
1411 break;
1412 case kPageMapLargeObject: {
1413 size_t num_pages = 1;
1414 size_t idx = pm_idx + 1;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001415 size_t end = page_map_size_;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001416 while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
1417 num_pages++;
1418 idx++;
1419 }
1420 return num_pages * kPageSize;
1421 }
1422 case kPageMapLargeObjectPart:
1423 LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
1424 << reinterpret_cast<intptr_t>(ptr);
1425 break;
1426 case kPageMapRun:
1427 case kPageMapRunPart: {
1428 // Find the beginning of the run.
1429 while (page_map_[pm_idx] != kPageMapRun) {
1430 pm_idx--;
Ian Rogers5d057052014-03-12 14:32:27 -07001431 DCHECK_LT(pm_idx, capacity_ / kPageSize);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001432 }
Ian Rogers5d057052014-03-12 14:32:27 -07001433 DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001434 Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001435 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001436 size_t idx = run->size_bracket_idx_;
1437 size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
1438 - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
1439 DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
1440 return IndexToBracketSize(idx);
1441 }
1442 default:
1443 LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
1444 break;
1445 }
1446 return 0;
1447}
1448
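// Shrinks the footprint by releasing the free page run at the end of
// the space, if there is one: the run is removed from free_page_runs_,
// the tail of the page map is zeroed and madvised away, and morecore is
// called with a negative increment. Returns true if anything shrank.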
1449bool RosAlloc::Trim() {
1450 MutexLock mu(Thread::Current(), lock_);
1451 FreePageRun* last_free_page_run;
1452 DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
1453 auto it = free_page_runs_.rbegin();
1454 if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
1455 // Remove the last free page run, if any.
1456 DCHECK(last_free_page_run->IsFree());
Ian Rogers5d057052014-03-12 14:32:27 -07001457 DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001458 DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
1459 DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
1460 free_page_runs_.erase(last_free_page_run);
1461 size_t decrement = last_free_page_run->ByteSize(this);
1462 size_t new_footprint = footprint_ - decrement;
1463 DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
1464 size_t new_num_of_pages = new_footprint / kPageSize;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001465 DCHECK_GE(page_map_size_, new_num_of_pages);
1466 // Zero out the tail of the page map.
1467 byte* zero_begin = page_map_ + new_num_of_pages;
1468 byte* madvise_begin = AlignUp(zero_begin, kPageSize);
1469 DCHECK_LE(madvise_begin, page_map_mem_map_->End());
1470 size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
1471 if (madvise_size > 0) {
1472 DCHECK_ALIGNED(madvise_begin, kPageSize);
1473 DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
1474 CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
1475 }
1477 1476 if (madvise_begin > zero_begin) {
1477 memset(zero_begin, 0, madvise_begin - zero_begin);
1478 }
1479 page_map_size_ = new_num_of_pages;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001480 free_page_run_size_map_.resize(new_num_of_pages);
1481 DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
1482 art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
1483 if (kTraceRosAlloc) {
1484 LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
1485 << footprint_ << " to " << new_footprint;
1486 }
1487 DCHECK_LT(new_footprint, footprint_);
1488 DCHECK_LT(new_footprint, capacity_);
1489 footprint_ = new_footprint;
1490 return true;
1491 }
1492 return false;
1493}
1494
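// Walks the entire space and invokes handler once per region: free page
// runs are reported with used_bytes == 0, large objects with their full
// page-rounded size, and runs delegate to Run::InspectAllSlots().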
1495void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1496 void* arg) {
1497 // Note: no need to use this to release pages as we already do so in FreePages().
1498 if (handler == NULL) {
1499 return;
1500 }
1501 MutexLock mu(Thread::Current(), lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001502 size_t pm_end = page_map_size_;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001503 size_t i = 0;
1504 while (i < pm_end) {
1505 byte pm = page_map_[i];
1506 switch (pm) {
1507 case kPageMapEmpty: {
1508 // The start of a free page run.
1509 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1510 DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
1511 size_t fpr_size = fpr->ByteSize(this);
1512 DCHECK(IsAligned<kPageSize>(fpr_size));
1513 void* start = fpr;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001514 if (kIsDebugBuild) {
1515 // In the debug build, the first page of a free page run
1516 // contains a magic number for debugging. Exclude it.
1517 start = reinterpret_cast<byte*>(fpr) + kPageSize;
1518 }
1519 void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001520 handler(start, end, 0, arg);
1521 size_t num_pages = fpr_size / kPageSize;
1522 if (kIsDebugBuild) {
1523 for (size_t j = i + 1; j < i + num_pages; ++j) {
1524 DCHECK_EQ(page_map_[j], kPageMapEmpty);
1525 }
1526 }
1527 i += fpr_size / kPageSize;
1528 DCHECK_LE(i, pm_end);
1529 break;
1530 }
1531 case kPageMapLargeObject: {
1532 // The start of a large object.
1533 size_t num_pages = 1;
1534 size_t idx = i + 1;
1535 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1536 num_pages++;
1537 idx++;
1538 }
1539 void* start = base_ + i * kPageSize;
1540 void* end = base_ + (i + num_pages) * kPageSize;
1541 size_t used_bytes = num_pages * kPageSize;
1542 handler(start, end, used_bytes, arg);
1543 if (kIsDebugBuild) {
1544 for (size_t j = i + 1; j < i + num_pages; ++j) {
1545 DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
1546 }
1547 }
1548 i += num_pages;
1549 DCHECK_LE(i, pm_end);
1550 break;
1551 }
1552 case kPageMapLargeObjectPart:
1553 LOG(FATAL) << "Unreachable - page map type: " << pm;
1554 break;
1555 case kPageMapRun: {
1556 // The start of a run.
1557 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001558 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001559 run->InspectAllSlots(handler, arg);
1560 size_t num_pages = numOfPages[run->size_bracket_idx_];
1561 if (kIsDebugBuild) {
1562 for (size_t j = i + 1; j < i + num_pages; ++j) {
1563 DCHECK_EQ(page_map_[j], kPageMapRunPart);
1564 }
1565 }
1566 i += num_pages;
1567 DCHECK_LE(i, pm_end);
1568 break;
1569 }
1570 case kPageMapRunPart:
1571 LOG(FATAL) << "Unreachable - page map type: " << pm;
1572 break;
1573 default:
1574 LOG(FATAL) << "Unreachable - page map type: " << pm;
1575 break;
1576 }
1577 }
1578}
1579
1580size_t RosAlloc::Footprint() {
1581 MutexLock mu(Thread::Current(), lock_);
1582 return footprint_;
1583}
1584
1585size_t RosAlloc::FootprintLimit() {
1586 MutexLock mu(Thread::Current(), lock_);
1587 return capacity_;
1588}
1589
1590void RosAlloc::SetFootprintLimit(size_t new_capacity) {
1591 MutexLock mu(Thread::Current(), lock_);
1592 DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
1594 1593 // Only growing is supported here; shrinking is done via Trim().
1594 if (capacity_ < new_capacity) {
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001595 CHECK_LE(new_capacity, max_capacity_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001596 capacity_ = new_capacity;
1597 VLOG(heap) << "new capacity=" << capacity_;
1598 }
1599}
1600
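// Detaches every thread-local run owned by the given thread and returns
// it to the shared state: pending thread-local and bulk frees are
// merged into the alloc bit map, and the run is then filed as full,
// non-full, or freed outright if it became completely empty.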
1601void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
1602 Thread* self = Thread::Current();
Hiroshi Yamauchi70f60042014-02-03 12:31:29 -08001603 // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
1604 WriterMutexLock wmu(self, bulk_free_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001605 for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
1606 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001607 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001608 if (thread_local_run != NULL) {
1609 DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
1610 DCHECK_NE(thread_local_run->is_thread_local_, 0);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001611 thread->SetRosAllocRun(idx, nullptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001612 // Note the thread local run may not be full here.
1613 bool dont_care;
1614 thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
1615 thread_local_run->is_thread_local_ = 0;
1616 thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
1617 DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
1618 DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
1619 if (thread_local_run->IsFull()) {
1620 if (kIsDebugBuild) {
1621 full_runs_[idx].insert(thread_local_run);
1622 DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
1623 if (kTraceRosAlloc) {
1624 LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
1625 << reinterpret_cast<intptr_t>(thread_local_run)
1626 << " into full_runs_[" << std::dec << idx << "]";
1627 }
1628 }
1629 } else if (thread_local_run->IsAllFree()) {
1630 MutexLock mu(self, lock_);
1631 FreePages(self, thread_local_run);
1632 } else {
1633 non_full_runs_[idx].insert(thread_local_run);
1634 DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
1635 if (kTraceRosAlloc) {
1636 LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
1637 << reinterpret_cast<intptr_t>(thread_local_run)
1638 << " into non_full_runs_[" << std::dec << idx << "]";
1639 }
1640 }
1641 }
1642 }
1643}
1644
1645void RosAlloc::RevokeAllThreadLocalRuns() {
1647 1646 // This is called when mutator threads won't allocate, such as at
1648 1647 // Zygote creation time or during a GC pause.
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001648 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
1649 MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001650 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1651 for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
1652 Thread* t = *it;
1653 RevokeThreadLocalRuns(t);
1654 }
1655}
1656
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001657void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
1658 if (kIsDebugBuild) {
1659 Thread* self = Thread::Current();
1660 // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
1661 WriterMutexLock wmu(self, bulk_free_lock_);
1662 for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
1663 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001664 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001665 DCHECK(thread_local_run == nullptr);
1666 }
1667 }
1668}
1669
1670void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
1671 if (kIsDebugBuild) {
1672 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
1673 MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
1674 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1675 for (Thread* t : thread_list) {
1676 AssertThreadLocalRunsAreRevoked(t);
1677 }
1678 }
1679}
1680
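// Precomputes the static per-bracket tables (bracketSizes, numOfPages,
// numOfSlots, headerSizes, and the bit map offsets) that the allocator
// consults on every run operation.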
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001681void RosAlloc::Initialize() {
1682 // Check the consistency of the number of size brackets.
1683 DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
1684 // bracketSizes.
1685 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1686 if (i < kNumOfSizeBrackets - 2) {
1687 bracketSizes[i] = 16 * (i + 1);
1688 } else if (i == kNumOfSizeBrackets - 2) {
1689 bracketSizes[i] = 1 * KB;
1690 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001691 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001692 bracketSizes[i] = 2 * KB;
1693 }
1694 if (kTraceRosAlloc) {
1695 LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
1696 }
1697 }
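  // By the arithmetic above, the resulting table is 16, 32, 48, ...,
  // 512 in 16-byte steps (brackets 0-31), then 1 KB (bracket 32) and
  // 2 KB (bracket 33).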
1698 // numOfPages.
1699 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1700 if (i < 4) {
1701 numOfPages[i] = 1;
1702 } else if (i < 8) {
1703 numOfPages[i] = 2;
1704 } else if (i < 16) {
1705 numOfPages[i] = 4;
1706 } else if (i < 32) {
1707 numOfPages[i] = 8;
1708 } else if (i == 32) {
Ian Rogers5d057052014-03-12 14:32:27 -07001709 DCHECK_EQ(i, kNumOfSizeBrackets - 2);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001710 numOfPages[i] = 16;
1711 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001712 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001713 numOfPages[i] = 32;
1714 }
1715 if (kTraceRosAlloc) {
1716 LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
1717 }
1718 }
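  // So run sizes grow with the bracket: one page for brackets 0-3, 2
  // pages for 4-7, 4 for 8-15, 8 for 16-31, 16 pages for the 1 KB
  // bracket, and 32 pages for the 2 KB bracket.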
1720 1719 // Compute numOfSlots, headerSizes, and the bit map offsets.
1720 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1721 size_t bracket_size = bracketSizes[i];
1722 size_t run_size = kPageSize * numOfPages[i];
1723 size_t max_num_of_slots = run_size / bracket_size;
1724 // Compute the actual number of slots by taking the header and
1725 // alignment into account.
1726 size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
1727 DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
1728 size_t header_size = 0;
1729 size_t bulk_free_bit_map_offset = 0;
1730 size_t thread_local_free_bit_map_offset = 0;
1731 size_t num_of_slots = 0;
1732 // Search for the maximum number of slots that allows enough space
1733 // for the header (including the bit maps.)
1734 for (int s = max_num_of_slots; s >= 0; s--) {
1735 size_t tmp_slots_size = bracket_size * s;
1736 size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
1737 size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
1738 size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
1739 size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
1740 size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
1741 size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
1742 // Align up the unaligned header size. bracket_size may not be a power of two.
1743 size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
1744 tmp_unaligned_header_size :
1745 tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
1746 DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
1747 DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
1748 if (tmp_slots_size + tmp_header_size <= run_size) {
1749 // Found the right number of slots, that is, there was enough
1750 // space for the header (including the bit maps.)
1751 num_of_slots = s;
1752 header_size = tmp_header_size;
1753 bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
1754 thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
1755 break;
1756 }
1757 }
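    // E.g., for bracket 0 (16-byte slots, one 4 KB page) this search
    // settles on 249 slots: the three bit maps take 32 bytes each, so
    // the 8 + 3 * 32 = 104-byte header rounds up to 112 bytes, and
    // 112 + 249 * 16 == 4096.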
1758 DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
1759 // Add the padding for the alignment remainder.
1760 header_size += run_size % bracket_size;
Ian Rogers5d057052014-03-12 14:32:27 -07001761 DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001762 numOfSlots[i] = num_of_slots;
1763 headerSizes[i] = header_size;
1764 bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
1765 threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
1766 if (kTraceRosAlloc) {
1767 LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
1768 << ", headerSizes[" << i << "]=" << headerSizes[i]
1769 << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
1770 << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];;
1771 }
1772 }
1773}
1774
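// Handler for InspectAll() that accumulates used bytes into the size_t
// pointed to by arg. A minimal usage sketch, assuming a RosAlloc* named
// rosalloc and that the caller satisfies InspectAll()'s locking
// expectations:
//
//   size_t bytes_used = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_used);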
1775void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
1776 if (used_bytes == 0) {
1777 return;
1778 }
1779 size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
1780 *bytes_allocated += used_bytes;
1781}
1782
1783void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
1784 if (used_bytes == 0) {
1785 return;
1786 }
1787 size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
1788 ++(*objects_allocated);
1789}
1790
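// Exhaustively cross-checks the page map, the free page run set, and
// every run's bit maps and set membership. Requires the mutator lock to
// be exclusively held, so it is only safe during a suspend-all pause.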
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001791void RosAlloc::Verify() {
1792 Thread* self = Thread::Current();
1793 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
1794 << "The mutator locks isn't exclusively locked at RosAlloc::Verify()";
1795 MutexLock mu(self, *Locks::thread_list_lock_);
1796 WriterMutexLock wmu(self, bulk_free_lock_);
1797 std::vector<Run*> runs;
1798 {
1799 MutexLock mu(self, lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001800 size_t pm_end = page_map_size_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001801 size_t i = 0;
1802 while (i < pm_end) {
1803 byte pm = page_map_[i];
1804 switch (pm) {
1805 case kPageMapEmpty: {
1806 // The start of a free page run.
1807 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001808 DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001809 CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
1810 << "An empty page must belong to the free page run set";
1811 size_t fpr_size = fpr->ByteSize(this);
1812 CHECK(IsAligned<kPageSize>(fpr_size))
1813 << "A free page run size isn't page-aligned : " << fpr_size;
1814 size_t num_pages = fpr_size / kPageSize;
1815 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1816 << "A free page run size must be > 0 : " << fpr_size;
1817 for (size_t j = i + 1; j < i + num_pages; ++j) {
1818 CHECK_EQ(page_map_[j], kPageMapEmpty)
1819 << "A mismatch between the page map table for kPageMapEmpty "
1820 << " at page index " << j
1821 << " and the free page run size : page index range : "
1822 << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
1823 }
1824 i += num_pages;
1825 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1826 << std::endl << DumpPageMap();
1827 break;
1828 }
1829 case kPageMapLargeObject: {
1830 // The start of a large object.
1831 size_t num_pages = 1;
1832 size_t idx = i + 1;
1833 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1834 num_pages++;
1835 idx++;
1836 }
1837 void* start = base_ + i * kPageSize;
1838 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
1839 size_t obj_size = obj->SizeOf();
Ian Rogers5d057052014-03-12 14:32:27 -07001840 CHECK_GT(obj_size, kLargeSizeThreshold)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001841 << "A rosalloc large object size must be > " << kLargeSizeThreshold;
1842 CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
1843 << "A rosalloc large object size " << obj_size
1844 << " does not match the page map table " << (num_pages * kPageSize)
1845 << std::endl << DumpPageMap();
1846 i += num_pages;
1847 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1848 << std::endl << DumpPageMap();
1849 break;
1850 }
1851 case kPageMapLargeObjectPart:
1852 LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
1853 break;
1854 case kPageMapRun: {
1855 // The start of a run.
1856 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001857 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001858 size_t idx = run->size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07001859 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001860 size_t num_pages = numOfPages[idx];
1861 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1862 << "Run size must be > 0 : " << num_pages;
1863 for (size_t j = i + 1; j < i + num_pages; ++j) {
1864 CHECK_EQ(page_map_[j], kPageMapRunPart)
1865 << "A mismatch between the page map table for kPageMapRunPart "
1866 << " at page index " << j
1867 << " and the run size : page index range " << i << " to " << (i + num_pages)
1868 << std::endl << DumpPageMap();
1869 }
1870 runs.push_back(run);
1871 i += num_pages;
1872 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1873 << std::endl << DumpPageMap();
1874 break;
1875 }
1876 case kPageMapRunPart:
1877 LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
1878 break;
1879 default:
1880 LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
1881 break;
1882 }
1883 }
1884 }
1885
1886 // Call Verify() here for the lock order.
1887 for (auto& run : runs) {
1888 run->Verify(self, this);
1889 }
1890}
1891
1892void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
Ian Rogers5d057052014-03-12 14:32:27 -07001893 DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001894 size_t idx = size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07001895 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001896 byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
1897 size_t num_slots = numOfSlots[idx];
1898 size_t bracket_size = IndexToBracketSize(idx);
1899 CHECK_EQ(slot_base + num_slots * bracket_size,
1900 reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
1901 << "Mismatch in the end address of the run " << Dump();
1902 // Check that the bulk free bitmap is clean. It's only used during BulkFree().
1903 CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
1904 // Check the bump index mode, if it's on.
1905 if (top_slot_idx_ < num_slots) {
1906 // If the bump index mode is on (top_slot_idx_ < num_slots), then
1907 // all of the slots after the top index must be free.
1908 for (size_t i = top_slot_idx_; i < num_slots; ++i) {
1909 size_t vec_idx = i / 32;
1910 size_t vec_off = i % 32;
1911 uint32_t vec = alloc_bit_map_[vec_idx];
1912 CHECK_EQ((vec & (1 << vec_off)), static_cast<uint32_t>(0))
1913 << "A slot >= top_slot_idx_ isn't free " << Dump();
1914 }
1915 } else {
1916 CHECK_EQ(top_slot_idx_, num_slots)
1917 << "If the bump index mode is off, the top index == the number of slots "
1918 << Dump();
1919 }
1920 // Check the thread local runs, the current runs, and the run sets.
1921 if (is_thread_local_) {
1922 // If it's a thread local run, then it must be pointed to by an owner thread.
1923 bool owner_found = false;
1924 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1925 for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
1926 Thread* thread = *it;
1927 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1928 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001929 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001930 if (thread_local_run == this) {
1931 CHECK(!owner_found)
1932 << "A thread local run has more than one owner thread " << Dump();
1933 CHECK_EQ(i, idx)
1934 << "A mismatching size bracket index in a thread local run " << Dump();
1935 owner_found = true;
1936 }
1937 }
1938 }
1939 CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
1940 } else {
1941 // If it's not thread local, check that the thread local free bitmap is clean.
1942 CHECK(IsThreadLocalFreeBitmapClean())
1943 << "A non-thread-local run's thread local free bitmap isn't clean "
1944 << Dump();
1946 1945 // Check if it's the current run for its size bracket.
1946 bool is_current_run = false;
1947 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1948 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
1949 Run* current_run = rosalloc->current_runs_[i];
1950 if (idx == i) {
1951 if (this == current_run) {
1952 is_current_run = true;
1953 }
1954 } else {
1956 1955 // If the size bracket index does not match, then it must not
1957 1956 // be a current run.
1957 CHECK_NE(this, current_run)
1958 << "A current run points to a run with a wrong size bracket index " << Dump();
1959 }
1960 }
1962 1961 // If it's neither a thread-local run nor the current run, then
1963 1962 // it must be in a run set.
1963 if (!is_current_run) {
1964 MutexLock mu(self, rosalloc->lock_);
1965 std::set<Run*>& non_full_runs = rosalloc->non_full_runs_[idx];
1966 // If it's all free, it must be a free page run rather than a run.
1967 CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
1968 if (!IsFull()) {
1970 1969 // If it's not full, it must be in the non-full run set.
1970 CHECK(non_full_runs.find(this) != non_full_runs.end())
1971 << "A non-full run isn't in the non-full run set " << Dump();
1972 } else {
1974 1973 // If it's full, it must be in the full run set (debug build only.)
1974 if (kIsDebugBuild) {
1975 hash_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
1976 CHECK(full_runs.find(this) != full_runs.end())
1977 << " A full run isn't in the full run set " << Dump();
1978 }
1979 }
1980 }
1981 }
1982 // Check each slot.
1983 size_t num_vec = RoundUp(num_slots, 32) / 32;
1984 size_t slots = 0;
1985 for (size_t v = 0; v < num_vec; v++, slots += 32) {
Ian Rogers5d057052014-03-12 14:32:27 -07001986 DCHECK_GE(num_slots, slots) << "Out of bounds";
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001987 uint32_t vec = alloc_bit_map_[v];
1988 uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
1989 size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
1990 for (size_t i = 0; i < end; ++i) {
1991 bool is_allocated = ((vec >> i) & 0x1) != 0;
1992 // If a thread local run, slots may be marked freed in the
1993 // thread local free bitmap.
1994 bool is_thread_local_freed = is_thread_local_ && ((thread_local_free_vec >> i) & 0x1) != 0;
1995 if (is_allocated && !is_thread_local_freed) {
1996 byte* slot_addr = slot_base + (slots + i) * bracket_size;
1997 mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
1998 size_t obj_size = obj->SizeOf();
1999 CHECK_LE(obj_size, kLargeSizeThreshold)
2000 << "A run slot contains a large object " << Dump();
2001 CHECK_EQ(SizeToIndex(obj_size), idx)
Hiroshi Yamauchi4cd662e2014-04-03 16:28:10 -07002002 << PrettyTypeOf(obj) << " "
2003 << "obj_size=" << obj_size << ", idx=" << idx << " "
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002004 << "A run slot contains an object with wrong size " << Dump();
2005 }
2006 }
2007 }
2008}
2009
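// Scans the page map and madvises every free page run back to the
// kernel, returning the number of bytes reclaimed. Used when the page
// release mode defers releases instead of doing them in FreePages().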
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002010size_t RosAlloc::ReleasePages() {
2011 VLOG(heap) << "RosAlloc::ReleasePages()";
2012 DCHECK(!DoesReleaseAllPages());
2013 Thread* self = Thread::Current();
2014 size_t reclaimed_bytes = 0;
2015 size_t i = 0;
2016 while (true) {
2017 MutexLock mu(self, lock_);
2019 2018 // Check the page map size, which might have changed due to grow/shrink.
2019 size_t pm_end = page_map_size_;
2020 if (i >= pm_end) {
2021 // Reached the end.
2022 break;
2023 }
2024 byte pm = page_map_[i];
2025 switch (pm) {
2026 case kPageMapEmpty: {
2027 // The start of a free page run. Release pages.
2028 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
2029 DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
2030 size_t fpr_size = fpr->ByteSize(this);
2031 DCHECK(IsAligned<kPageSize>(fpr_size));
2032 byte* start = reinterpret_cast<byte*>(fpr);
2033 if (kIsDebugBuild) {
2034 // In the debug build, the first page of a free page run
2035 // contains a magic number for debugging. Exclude it.
2036 start = reinterpret_cast<byte*>(fpr) + kPageSize;
2037 }
2038 byte* end = reinterpret_cast<byte*>(fpr) + fpr_size;
2039 CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
2040 reclaimed_bytes += fpr_size;
2041 size_t num_pages = fpr_size / kPageSize;
2042 if (kIsDebugBuild) {
2043 for (size_t j = i + 1; j < i + num_pages; ++j) {
2044 DCHECK_EQ(page_map_[j], kPageMapEmpty);
2045 }
2046 }
2047 i += num_pages;
2048 DCHECK_LE(i, pm_end);
2049 break;
2050 }
2051 case kPageMapLargeObject: // Fall through.
2052 case kPageMapLargeObjectPart: // Fall through.
2053 case kPageMapRun: // Fall through.
2054 case kPageMapRunPart: // Fall through.
2055 ++i;
2056 break; // Skip.
2057 default:
2058 LOG(FATAL) << "Unreachable - page map type: " << pm;
2059 break;
2060 }
2061 }
2062 return reclaimed_bytes;
2063}
2064
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07002065} // namespace allocator
2066} // namespace gc
2067} // namespace art