//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A fast memory allocator that does not support free() or realloc().
// All allocations are forever.
//===----------------------------------------------------------------------===//
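
// Typical use: grab a chunk and keep it for the lifetime of the process.
// Illustrative sketch only (`Node` is a hypothetical caller-side type):
//   Node *n = (Node *)PersistentAlloc(sizeof(Node));
//   // No free()/realloc() exists for n; the memory persists until exit.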

#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
#define SANITIZER_PERSISTENT_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

class PersistentAllocator {
 public:
  void *alloc(uptr size);

 private:
  void *tryAlloc(uptr size);
  StaticSpinMutex mtx;  // Protects mapping of new superblocks.
  atomic_uintptr_t region_pos;  // Bump pointer into the current superblock.
  atomic_uintptr_t region_end;  // End of the current superblock.
};

inline void *PersistentAllocator::tryAlloc(uptr size) {
  // Optimistic lock-free allocation: try to bump the region pointer.
  for (;;) {
    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
    uptr end = atomic_load(&region_end, memory_order_acquire);
    if (cmp == 0 || cmp + size > end) return nullptr;
    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                     memory_order_acquire))
      return (void *)cmp;
  }
}

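// Design note: atomic_compare_exchange_weak may fail spuriously, and on
// failure it reloads the current value of region_pos into cmp, so the loop
// above simply retries with fresh state rather than bailing out.
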
inline void *PersistentAllocator::alloc(uptr size) {
  // First, try to allocate optimistically.
  void *s = tryAlloc(size);
  if (s) return s;
  // If that failed, take the lock, retry, and map a new superblock if needed.
  SpinMutexLock l(&mtx);
  for (;;) {
    s = tryAlloc(size);
    if (s) return s;
    // Zero region_pos so that concurrent lock-free tryAlloc calls bail out
    // quickly while the new superblock is being mapped.
    atomic_store(&region_pos, 0, memory_order_relaxed);
    uptr allocsz = 64 * 1024;
    if (allocsz < size) allocsz = size;
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}
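
// Once a new superblock is mapped, whatever was left of the previous one is
// simply abandoned; nothing is ever unmapped, which matches the "all
// allocations are forever" contract stated above.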

extern PersistentAllocator thePersistentAllocator;
inline void *PersistentAlloc(uptr sz) {
  return thePersistentAllocator.alloc(sz);
}

} // namespace __sanitizer

#endif // SANITIZER_PERSISTENT_ALLOCATOR_H