/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Header page:
//
// With the minimum allocation size (8 bytes), the 4032-byte free bitmap can
// track 4032*8 = 32256 allocations covering 4032*8*8 = 258048 bytes, which is
// the 256KiB chunk minus its header page. (See the static_asserts after the
// constants below.)

#include <assert.h>
#include <stdlib.h>

#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <atomic>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <mutex>

#include "android-base/macros.h"

#include "Allocator.h"
#include "LinkedList.h"
#include "anon_vma_naming.h"

namespace android {

// runtime interfaces used:
// abort
// assert (pulls in fprintf + mmap)
// mmap
// munmap
// prctl

constexpr size_t const_log2(size_t n, size_t p = 0) {
  return (n <= 1) ? p : const_log2(n / 2, p + 1);
}

constexpr unsigned int div_round_up(unsigned int x, unsigned int y) {
  return (x + y - 1) / y;
}

static constexpr size_t kPageSize = 4096;
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets =
    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;

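// Compile-time checks of the sizing derivation above and in the header
// comment (added for illustration; both values follow directly from the
// constants): 14 power-of-two buckets span 8 bytes to 64KiB, and a 256KiB
// chunk has 63 usable pages after its header page.
static_assert(kNumBuckets == 14, "buckets must cover 8B..64KiB in powers of two");
static_assert(kUsablePagesPerChunk == 63, "usable pages = chunk minus header page");
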
std::atomic<int> heap_count;

class Chunk;

class HeapImpl {
 public:
  HeapImpl();
  ~HeapImpl();
  void* operator new(std::size_t count) noexcept;
  void operator delete(void* ptr);

  void* Alloc(size_t size);
  void Free(void* ptr);
  bool Empty();

  void MoveToFullList(Chunk* chunk, int bucket_);
  void MoveToFreeList(Chunk* chunk, int bucket_);

 private:
  DISALLOW_COPY_AND_ASSIGN(HeapImpl);

  LinkedList<Chunk*> free_chunks_[kNumBuckets];
  LinkedList<Chunk*> full_chunks_[kNumBuckets];

  void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
  void* MapAlloc(size_t size);
  void MapFree(void* ptr);
  void* AllocLocked(size_t size);
  void FreeLocked(void* ptr);

  struct MapAllocation {
    void* ptr;
    size_t size;
    MapAllocation* next;
  };
  MapAllocation* map_allocation_list_;
  std::mutex m_;
};

// Integer log 2, rounds down
static inline unsigned int log2(size_t n) {
  return 8 * sizeof(unsigned long long) - __builtin_clzll(n) - 1;
}

static inline unsigned int size_to_bucket(size_t size) {
  // Sizes below the minimum all land in bucket 0 (the 8-byte bucket).
  if (size < kMinBucketAllocationSize) return 0;
  return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}

static inline size_t bucket_to_size(unsigned int bucket) {
  return kMinBucketAllocationSize << bucket;
}

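// Worked examples of the size-class mapping (illustrative, derived from the
// two helpers above): sizes 1..8 land in bucket 0 (8-byte slots), 9..16 in
// bucket 1 (16 bytes), 17..32 in bucket 2 (32 bytes), and 32769..65536 in
// bucket 13 (64KiB, the largest bucket size).
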
static void* MapAligned(size_t size, size_t align) {
  const int prot = PROT_READ | PROT_WRITE;
  const int flags = MAP_ANONYMOUS | MAP_PRIVATE;

  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  // Over-allocate enough to align
  size_t map_size = size + align - kPageSize;
  if (map_size < size) {
    return nullptr;
  }

  void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
  if (ptr == MAP_FAILED) {
    return nullptr;
  }

  size_t aligned_size = map_size;
  void* aligned_ptr = ptr;

  std::align(align, size, aligned_ptr, aligned_size);

  // Trim beginning
  if (aligned_ptr != ptr) {
    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
    munmap(ptr, extra);
    map_size -= extra;
    ptr = aligned_ptr;
  }

  // Trim end
  if (map_size != size) {
    assert(map_size > size);
    assert(ptr != NULL);
    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
  }

  // PR_SET_VMA and PR_SET_VMA_ANON_NAME come from anon_vma_naming.h
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
        "leak_detector_malloc");

  return ptr;
}

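// For example, MapAligned(kChunkSize, kChunkSize) maps 256KiB + 256KiB - 4KiB
// = 508KiB of address space, then unmaps the unaligned head and the tail,
// leaving exactly one naturally aligned 256KiB region.
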
class Chunk {
 public:
  static void* operator new(std::size_t count) noexcept;
  static void operator delete(void* ptr);
  Chunk(HeapImpl* heap, int bucket);
  ~Chunk() {}

  void* Alloc();
  void Free(void* ptr);
  void Purge();
  bool Empty();

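  // A chunk is a kChunkSize-aligned mapping whose first page holds this
  // header, so pointers handed out from a chunk always have a nonzero offset
  // within their kChunkSize window, while the MapAligned(size, kChunkSize)
  // mappings used by MapAlloc start exactly on a kChunkSize boundary. That
  // is what lets is_chunk() classify a pointer with a single mask.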
  static Chunk* ptr_to_chunk(void* ptr) {
    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
  }
  static bool is_chunk(void* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
  }

  unsigned int free_count() { return free_count_; }
  HeapImpl* heap() { return heap_; }
  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count

 private:
  DISALLOW_COPY_AND_ASSIGN(Chunk);
  HeapImpl* heap_;
  unsigned int bucket_;
  unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
  unsigned int max_allocations_;    // maximum number of allocations in the chunk
  unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
  unsigned int free_count_;         // number of available allocations
  unsigned int frees_since_purge_;  // number of calls to Free since last Purge

  // bitmap of pages that have been dirtied
  uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

  // bitmap of free allocations.
  uint32_t free_bitmap_[kUsableChunkSize / kMinBucketAllocationSize / 32];

  char data_[0];

  unsigned int ptr_to_n(void* ptr) {
    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
    return offset / allocation_size_;
  }
  void* n_to_ptr(unsigned int n) { return data_ + n * allocation_size_; }
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");

// Override new operator on chunk to use mmap to allocate kChunkSize
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(Chunk));
  void* mem = MapAligned(kChunkSize, kChunkSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  return mem;
}

// Override delete operator on chunk to unmap the kChunkSize allocation
void Chunk::operator delete(void* ptr) {
  assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
  munmap(ptr, kChunkSize);
}

Chunk::Chunk(HeapImpl* heap, int bucket)
    : node_(this),
      heap_(heap),
      bucket_(bucket),
      allocation_size_(bucket_to_size(bucket)),
      max_allocations_(kUsableChunkSize / allocation_size_),
      first_free_bitmap_(0),
      free_count_(max_allocations_),
      frees_since_purge_(0) {
  memset(dirty_pages_, 0, sizeof(dirty_pages_));
  memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}

bool Chunk::Empty() {
  return free_count_ == max_allocations_;
}

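// Find the first free slot with a two-level scan: skip fully allocated 32-bit
// words of free_bitmap_ starting at first_free_bitmap_, then take the lowest
// set bit with __builtin_ffs. The slot's page is recorded in dirty_pages_,
// which tracks pages that a Purge implementation could return to the kernel.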
void* Chunk::Alloc() {
  assert(free_count_ > 0);

  unsigned int i = first_free_bitmap_;
  while (free_bitmap_[i] == 0) i++;
  assert(i < arraysize(free_bitmap_));
  unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
  assert(free_bitmap_[i] & (1U << bit));
  free_bitmap_[i] &= ~(1U << bit);
  unsigned int n = i * 32 + bit;
  assert(n < max_allocations_);

  unsigned int page = n * allocation_size_ / kPageSize;
  assert(page / 32 < arraysize(dirty_pages_));
  dirty_pages_[page / 32] |= 1U << (page % 32);

  free_count_--;
  if (free_count_ == 0) {
    heap_->MoveToFullList(this, bucket_);
  }

  return n_to_ptr(n);
}

void Chunk::Free(void* ptr) {
  assert(is_chunk(ptr));
  assert(ptr_to_chunk(ptr) == this);

  unsigned int n = ptr_to_n(ptr);
  unsigned int i = n / 32;
  unsigned int bit = n % 32;

  assert(i < arraysize(free_bitmap_));
  assert(!(free_bitmap_[i] & (1U << bit)));
  free_bitmap_[i] |= 1U << bit;
  free_count_++;

  if (i < first_free_bitmap_) {
    first_free_bitmap_ = i;
  }

  if (free_count_ == 1) {
    heap_->MoveToFreeList(this, bucket_);
  } else {
    // TODO(ccross): move down free list if necessary
  }

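  // Trigger a purge once roughly 16 pages' worth of allocations have been
  // freed since the last one.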
  if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
    Purge();
  }
}

void Chunk::Purge() {
  frees_since_purge_ = 0;

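  // Purging is left unimplemented: dirty_pages_ records which pages could be
  // released (for example with madvise(MADV_DONTNEED)), but only the sketch
  // below was ever written.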
  // unsigned int allocsPerPage = kPageSize / allocation_size_;
}

// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
  assert(count == sizeof(HeapImpl));
  void* mem = MapAligned(kPageSize, kPageSize);
  if (!mem) {
    abort();  // throw std::bad_alloc;
  }

  heap_count++;
  return mem;
}

void HeapImpl::operator delete(void* ptr) {
  munmap(ptr, kPageSize);
}

HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}

bool HeapImpl::Empty() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
      if (!it->data()->Empty()) {
        return false;
      }
    }
  }

  return true;
}

HeapImpl::~HeapImpl() {
  for (unsigned int i = 0; i < kNumBuckets; i++) {
    while (!free_chunks_[i].empty()) {
      Chunk* chunk = free_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
    while (!full_chunks_[i].empty()) {
      Chunk* chunk = full_chunks_[i].next()->data();
      chunk->node_.remove();
      delete chunk;
    }
  }
}

void* HeapImpl::Alloc(size_t size) {
  std::lock_guard<std::mutex> lk(m_);
  return AllocLocked(size);
}

void* HeapImpl::AllocLocked(size_t size) {
  if (size > kMaxBucketAllocationSize) {
    return MapAlloc(size);
  }
  int bucket = size_to_bucket(size);
  if (free_chunks_[bucket].empty()) {
    Chunk* chunk = new Chunk(this, bucket);
    free_chunks_[bucket].insert(chunk->node_);
  }
  return free_chunks_[bucket].next()->data()->Alloc();
}

void HeapImpl::Free(void* ptr) {
  std::lock_guard<std::mutex> lk(m_);
  FreeLocked(ptr);
}

void HeapImpl::FreeLocked(void* ptr) {
  if (!Chunk::is_chunk(ptr)) {
    HeapImpl::MapFree(ptr);
  } else {
    Chunk* chunk = Chunk::ptr_to_chunk(ptr);
    assert(chunk->heap() == this);
    chunk->Free(ptr);
  }
}

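// Allocations larger than kMaxBucketAllocationSize get a dedicated mapping,
// tracked in a singly linked list whose nodes are themselves small bucket
// allocations. The mapping is kChunkSize-aligned, so is_chunk() returns false
// for the pointers it hands out.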
void* HeapImpl::MapAlloc(size_t size) {
  size = (size + kPageSize - 1) & ~(kPageSize - 1);

  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
  void* ptr = MapAligned(size, kChunkSize);
  if (!ptr) {
    FreeLocked(allocation);
    abort();  // throw std::bad_alloc;
  }
  allocation->ptr = ptr;
  allocation->size = size;
  allocation->next = map_allocation_list_;
  map_allocation_list_ = allocation;

  return ptr;
}

void HeapImpl::MapFree(void* ptr) {
  MapAllocation** allocation = &map_allocation_list_;
  while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;

  assert(*allocation != nullptr);

  // Save the link before freeing the node: FreeLocked returns the node's
  // memory to a chunk, so its fields must not be read afterwards.
  MapAllocation* next = (*allocation)->next;

  munmap((*allocation)->ptr, (*allocation)->size);
  FreeLocked(*allocation);

  *allocation = next;
}

void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &free_chunks_[bucket]);
}

void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
  MoveToList(chunk, &full_chunks_[bucket]);
}

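// Keeping each list sorted by ascending free count means Alloc, which always
// takes the head of the free list, draws from the most-full chunk that still
// has space, packing live allocations into as few chunks as possible.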
void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
  // Remove from old list
  chunk->node_.remove();

  LinkedList<Chunk*>* node = head;
  // Insert into new list, sorted by lowest free count
  while (node->next() != head && node->data() != nullptr &&
         node->data()->free_count() < chunk->free_count())
    node = node->next();

  node->insert(chunk->node_);
}

Heap::Heap() {
  // HeapImpl overloads operator new so that it mmaps itself instead of being
  // allocated by the global allocator.
  // Can't use a shared_ptr to store the result because shared_ptr needs to
  // allocate, and Allocator<T> is still being constructed.
  impl_ = new HeapImpl();
  owns_impl_ = true;
}

Heap::~Heap() {
  if (owns_impl_) {
    delete impl_;
  }
}

void* Heap::allocate(size_t size) {
  return impl_->Alloc(size);
}

void Heap::deallocate(void* ptr) {
  impl_->Free(ptr);
}

void Heap::deallocate(HeapImpl* impl, void* ptr) {
  impl->Free(ptr);
}

bool Heap::empty() {
  return impl_->Empty();
}

}  // namespace android