/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;

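// Construct a rosalloc space over [base, base + capacity). The entire
// range initially forms one free page run in free_page_runs_; pages are
// carved out of it on demand and coalesced back on free. The page map
// and the free-page-run size map track state at page granularity.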
RosAlloc::RosAlloc(void* base, size_t capacity)
    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
      capacity_(capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock) {
  DCHECK(RoundUp(capacity, kPageSize) == capacity);
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_;
  memset(current_runs_, 0, sizeof(current_runs_));
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_locks_[i] = new Mutex("a rosalloc size bracket lock",
                                       kRosAllocBracketLock);
  }
  size_t num_of_pages = capacity_ / kPageSize;
  page_map_.resize(num_of_pages);
  free_page_run_size_map_.resize(num_of_pages);

  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  free_pages->ReleasePages(this);
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

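// Allocate num_pages contiguous pages with a first-fit search over
// free_page_runs_ (lowest address run that is large enough, splitting
// off any remainder). If no run is large enough, grow the footprint
// toward capacity_ via art_heap_rosalloc_morecore() and retry against
// the (possibly extended) last free page run.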
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_.size(), new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_.resize(new_num_of_pages);
      free_page_run_size_map_.resize(new_num_of_pages);
      art_heap_rosalloc_morecore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK(last_free_page_run->End(this) == base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK(*free_page_runs_.rbegin() == new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(page_map_[page_map_idx + i] == kPageMapEmpty);
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page, which isn't madvised away in the debug
      // build because of the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}

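// Return the pages backing ptr to free_page_runs_, coalescing with
// adjacent free page runs in both the higher and lower address
// directions so that free space stays in maximal runs.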
void RosAlloc::FreePages(Thread* self, void* ptr) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK(pm_idx < page_map_.size());
  byte pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  byte pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      pm_part_type = kPageMapEmpty;
      LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_.size();
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, num_pages * kPageSize);
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_.size() : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_.size() : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_.size() : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  fpr->ReleasePages(this);
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
}

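// Large objects (> kLargeSizeThreshold) bypass runs entirely and are
// allocated as whole pages under the global lock.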
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK(size > kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (bytes_allocated != NULL) {
    *bytes_allocated = num_pages * kPageSize;
  }
  if (kTraceRosAlloc) {
    if (r != NULL) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
                << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    } else {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
  }
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory && r != NULL) {
    byte* bytes = reinterpret_cast<byte*>(r);
    for (size_t i = 0; i < size; ++i) {
      DCHECK_EQ(bytes[i], 0);
    }
  }
  return r;
}

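// Free ptr based on its page map entry: large objects release their
// pages directly, while run slots are freed via FreeFromRun() after
// walking back to the first page of the enclosing run.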
void RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  bool free_from_run = false;
  Run* run = NULL;
  {
    MutexLock mu(self, lock_);
    DCHECK(pm_idx < page_map_.size());
    byte page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapLargeObject:
        FreePages(self, ptr);
        return;
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapRun:
      case kPageMapRunPart: {
        free_from_run = true;
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK(pi < capacity_ / kPageSize);
        }
        DCHECK(page_map_[pi] == kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
        break;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
    }
  }
  if (LIKELY(free_from_run)) {
    DCHECK(run != NULL);
    FreeFromRun(self, ptr, run);
  }
}

void RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  FreeInternal(self, ptr);
}

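// Obtain a usable run for bracket index idx: prefer the lowest-address
// non-full run from the bracket's set, otherwise allocate and
// initialize a brand-new run from fresh pages.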
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  Run* new_run;
  size_t num_pages = numOfPages[idx];
  // Get the lowest address non-full run from the binary tree.
  Run* temp = NULL;
  std::set<Run*>* bt = &non_full_runs_[idx];
  std::set<Run*>::iterator found = bt->lower_bound(temp);
  if (found != bt->end()) {
    // If there's one, use it as the current run.
    Run* non_full_run = *found;
    DCHECK(non_full_run != NULL);
    new_run = non_full_run;
    DCHECK_EQ(new_run->is_thread_local_, 0);
    bt->erase(found);
    DCHECK_EQ(non_full_run->is_thread_local_, 0);
  } else {
    // If there's none, allocate a new run and use it as the
    // current run.
    {
      MutexLock mu(self, lock_);
      new_run = reinterpret_cast<Run*>(AllocPages(self, num_pages, kPageMapRun));
    }
    if (new_run == NULL) {
      return NULL;
    }
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->top_slot_idx_ = 0;
    new_run->ClearBitMaps();
    new_run->to_be_bulk_freed_ = false;
  }
  return new_run;
}

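// Allocate a slot of the bracket size that fits `size`. Small brackets
// (idx <= kMaxThreadLocalSizeBracketIdx) are served from the calling
// thread's thread-local run, with no lock taken on the fast path;
// larger brackets share current_runs_[idx] under the per-bracket lock.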
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK(size <= kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK(size <= bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->rosalloc_runs_[idx]);
    if (UNLIKELY(thread_local_run == NULL)) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      thread_local_run = RefillRun(self, idx);
      if (UNLIKELY(thread_local_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      thread_local_run->is_thread_local_ = 1;
      self->rosalloc_runs_[idx] = thread_local_run;
      DCHECK(!thread_local_run->IsFull());
    }

    DCHECK(thread_local_run != NULL);
    DCHECK_NE(thread_local_run->is_thread_local_, 0);
    slot_addr = thread_local_run->AllocSlot();

    if (UNLIKELY(slot_addr == NULL)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Reinstate the bump index mode if it's all free.
          DCHECK_EQ(thread_local_run->top_slot_idx_, numOfSlots[idx]);
          thread_local_run->top_slot_idx_ = 0;
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        self->rosalloc_runs_[idx] = NULL;
        thread_local_run->is_thread_local_ = 0;
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == NULL)) {
          return NULL;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->is_thread_local_ = 1;
        self->rosalloc_runs_[idx] = thread_local_run;
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != NULL);
      DCHECK(!thread_local_run->IsFull());
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* current_run = current_runs_[idx];
    if (UNLIKELY(current_run == NULL)) {
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
    }
    DCHECK(current_run != NULL);
    slot_addr = current_run->AllocSlot();
    if (UNLIKELY(slot_addr == NULL)) {
      // The current run got full. Try to refill it.
      DCHECK(current_run->IsFull());
      current_runs_[idx] = NULL;
      if (kIsDebugBuild) {
        // Insert it into full_runs and set the current run to NULL.
        full_runs_[idx].insert(current_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(current_run)
                    << " into full_runs_[" << std::dec << idx << "]";
        }
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(current_run != NULL);
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
      slot_addr = current_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  if (LIKELY(bytes_allocated != NULL)) {
    *bytes_allocated = bracket_size;
  }
  memset(slot_addr, 0, size);
  return slot_addr;
}

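// Free one slot back into its run. Thread-local runs only mark the
// thread-local free bit map (merged back later); shared runs free the
// slot immediately and may move the run between the full set, the
// non-full set, and free_page_runs_.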
void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK(run->magic_num_ == kMagicNum);
  DCHECK(run < ptr && ptr < run->End());
  size_t idx = run->size_bracket_idx_;
  MutexLock mu(self, *size_bracket_locks_[idx]);
  bool run_was_full = false;
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->is_thread_local_ != 0)) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread-local run is kept as thread-local even if it becomes all free.
    return;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  std::set<Run*>* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = NULL;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    {
      MutexLock mu(self, lock_);
      FreePages(self, run);
    }
  } else {
    // It is not completely free. If it was full (i.e., it is neither
    // the current run nor already in the non-full run set), insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      std::set<Run*>::iterator pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

void RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  LOG(INFO) << "Run : " << std::hex << reinterpret_cast<intptr_t>(this)
            << std::dec << ", idx=" << idx << ", bit_map=" << bit_map_str;
}

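// Allocate one slot from this run. While in bump index mode, slots are
// handed out in address order via top_slot_idx_; once the bump index is
// exhausted, fall back to scanning the alloc bit map for a zero bit
// (32 slots per uint32_t word).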
void* RosAlloc::Run::AllocSlot() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  DCHECK_LE(top_slot_idx_, num_slots);
  if (LIKELY(top_slot_idx_ < num_slots)) {
    // If it's in bump index mode, grab the top slot and increment the top index.
    size_t slot_idx = top_slot_idx_;
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    top_slot_idx_++;
    size_t vec_idx = slot_idx / 32;
    size_t vec_off = slot_idx % 32;
    uint32_t* vec = &alloc_bit_map_[vec_idx];
    DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    *vec |= 1 << vec_off;
    DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    return slot_addr;
  }
  // Not in bump index mode. Search the alloc bit map for an empty slot.
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slot_idx = 0;
  bool found_slot = false;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t* vecp = &alloc_bit_map_[v];
    uint32_t ffz1 = __builtin_ffs(~*vecp);
    uint32_t ffz;
    // TODO: Use LIKELY or UNLIKELY here?
    if (LIKELY(ffz1 > 0 && (ffz = ffz1 - 1) + v * 32 < num_slots)) {
      // Found an empty slot. Set the bit.
      DCHECK_EQ((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      *vecp |= (1 << ffz);
      DCHECK_NE((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      slot_idx = ffz + v * 32;
      found_slot = true;
      break;
    }
  }
  if (LIKELY(found_slot)) {
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    return slot_addr;
  }
  return NULL;
}

inline void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK_EQ(is_thread_local_, 0);
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec &= ~(1 << vec_off);
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

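// The helpers below reconcile the per-run bit maps: the alloc bit map
// records live slots, while the thread-local and bulk free bit maps
// buffer frees that are applied in batches, one 32-bit word at a time.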
inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK_NE(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the thread local free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &thread_local_free_bit_map()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      is_all_free_after = false;
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if there was at least a bit set in the thread-local
  // free bit map and at least a bit in the alloc bit map changed.
  return changed;
}

inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK_EQ(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &bulk_free_bit_map()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK_NE(is_thread_local_, 0);
  // Union the thread local bit map with the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* to_vecp = &thread_local_free_bit_map()[0];
  uint32_t* from_vecp = &bulk_free_bit_map()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the from free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK_NE(is_thread_local_, 0);
  MarkFreeBitMapShared(ptr, thread_local_free_bit_map(), "MarkThreadLocalFreeBitMap");
}

inline void RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  MarkFreeBitMapShared(ptr, bulk_free_bit_map(), "MarkBulkFreeBitMap");
}

inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                const char* caller_name) {
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec |= 1 << vec_off;
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::IsAllFree() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsFull() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
        : (1 << (num_slots - slots)) - 1;
    DCHECK(num_slots - slots >= 32 ? mask == static_cast<uint32_t>(-1) : true);
    if (vec != mask) {
      return false;
    }
  }
  return true;
}

inline void RosAlloc::Run::ClearBitMaps() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  memset(alloc_bit_map_, 0, sizeof(uint32_t) * num_vec * 3);
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      byte* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

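// Free a batch of pointers, as the GC does. Slots are first marked in
// each run's bulk free bit map without holding the bracket locks; the
// affected runs are then visited once each to apply the buffered frees
// and to migrate runs between the full/non-full/free sets.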
void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  if (false) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      FreeInternal(self, ptrs[i]);
    }
    return;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, hash_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  hash_set<Run*, hash_run, eq_run> runs;
#endif
  {
    for (size_t i = 0; i < num_ptrs; i++) {
      void* ptr = ptrs[i];
      ptrs[i] = NULL;
      DCHECK(base_ <= ptr && ptr < base_ + footprint_);
      size_t pm_idx = RoundDownToPageMapIndex(ptr);
      bool free_from_run = false;
      Run* run = NULL;
      {
        MutexLock mu(self, lock_);
        DCHECK(pm_idx < page_map_.size());
        byte page_map_entry = page_map_[pm_idx];
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                    << std::dec << pm_idx
                    << ", page_map_entry=" << static_cast<int>(page_map_entry);
        }
        if (LIKELY(page_map_entry == kPageMapRun)) {
          free_from_run = true;
          run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
          free_from_run = true;
          size_t pi = pm_idx;
          DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
          // Find the beginning of the run.
          while (page_map_[pi] != kPageMapRun) {
            pi--;
            DCHECK(pi < capacity_ / kPageSize);
          }
          DCHECK(page_map_[pi] == kPageMapRun);
          run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (page_map_entry == kPageMapLargeObject) {
          FreePages(self, ptr);
        } else {
          LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
        }
      }
      if (LIKELY(free_from_run)) {
        DCHECK(run != NULL);
        // Set the bit in the bulk free bit map.
        run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
        if (!run->to_be_bulk_freed_) {
          run->to_be_bulk_freed_ = true;
          runs.push_back(run);
        }
#else
        runs.insert(run);
#endif
      }
    }
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
#ifdef HAVE_ANDROID_OS
  typedef std::vector<Run*>::iterator It;
#else
  typedef hash_set<Run*, hash_run, eq_run>::iterator It;
#endif
  for (It it = runs.begin(); it != runs.end(); ++it) {
    Run* run = *it;
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (run->is_thread_local_ != 0) {
      DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK_NE(run->is_thread_local_, 0);
      // A thread-local run is kept as thread-local even if it becomes
      // all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      std::set<Run*>* non_full_runs = &non_full_runs_[idx];
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in the non-full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          MutexLock mu(self, lock_);
          FreePages(self, run);
        }
      } else {
        // It is not completely free. If it was full (i.e., it is
        // neither the current run nor already in the non-full run
        // set), insert it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
}

void RosAlloc::DumpPageMap(Thread* self) {
  MutexLock mu(self, lock_);
  size_t end = page_map_.size();
  FreePageRun* curr_fpr = NULL;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == NULL);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          LOG(INFO) << "[" << i << "]=Empty (FPR start)"
                    << " fpr_size=" << curr_fpr_size
                    << " remaining_fpr_size=" << remaining_curr_fpr_size;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
          LOG(INFO) << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr);
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          LOG(INFO) << "[" << i << "]=Empty (FPR part)"
                    << " remaining_fpr_size=" << remaining_curr_fpr_size;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        LOG(INFO) << "[" << i << "]=Large (start)";
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        LOG(INFO) << "[" << i << "]=Large (part)";
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        LOG(INFO) << "[" << i << "]=Run (start)"
                  << " idx=" << idx
                  << " numOfPages=" << numOfPages[idx]
                  << " thread_local=" << static_cast<int>(run->is_thread_local_)
                  << " is_all_free=" << (run->IsAllFree() ? 1 : 0);
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        LOG(INFO) << "[" << i << "]=Run (part)";
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
    }
  }
}

size_t RosAlloc::UsableSize(void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_.size();
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK(pm_idx < capacity_ / kPageSize);
      }
      DCHECK(page_map_[pm_idx] == kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK(run->magic_num_ == kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
          - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      break;
  }
  return 0;
}

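// Shrink the footprint by returning the trailing free page run, if
// any, to the system through a negative morecore increment.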
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_.size(), new_num_of_pages);
    page_map_.resize(new_num_of_pages);
    DCHECK_EQ(page_map_.size(), new_num_of_pages);
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == NULL) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_.size();
  size_t i = 0;
  while (i < pm_end) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        void* start = fpr;
        void* end = reinterpret_cast<byte*>(start) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapEmpty);
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
    }
  }
}

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here; shrinking is done via Trim().
  if (capacity_ < new_capacity) {
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[idx]);
    if (thread_local_run != NULL) {
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      thread->rosalloc_runs_[idx] = NULL;
      // Note the thread local run may not be full here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->is_thread_local_ = 0;
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      if (thread_local_run->IsFull()) {
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
      } else if (thread_local_run->IsAllFree()) {
        MutexLock mu(self, lock_);
        FreePages(self, thread_local_run);
      } else {
        non_full_runs_[idx].insert(thread_local_run);
        DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(thread_local_run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

void RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when a mutator thread won't allocate, such as at
  // Zygote creation time or during a GC pause.
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
    Thread* t = *it;
    RevokeThreadLocalRuns(t);
  }
}

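// Compute the static size-bracket tables. Brackets are 16-byte spaced
// below the last two brackets, which are 1 KB and 2 KB; for each
// bracket, the slot count is the largest value whose slots plus header
// (including both free bit maps) still fit in the run's pages.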
void RosAlloc::Initialize() {
  // Check the consistency of the number of size brackets.
  DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
  // bracketSizes.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 2;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK(i == kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots and slotOffsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps.)
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps.)
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK(header_size + num_of_slots * bracket_size == run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
}

1596
1597void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
1598 if (used_bytes == 0) {
1599 return;
1600 }
1601 size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
1602 *bytes_allocated += used_bytes;
1603}
1604
1605void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
1606 if (used_bytes == 0) {
1607 return;
1608 }
1609 size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
1610 ++(*objects_allocated);
1611}
1612
1613} // namespace allocator
1614} // namespace gc
1615} // namespace art