//===-- lsan_allocator.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.

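// Out-of-line metadata attached by the allocator to every heap chunk. The
// bitfields pack into a single 64-bit word; |allocated| spans the whole first
// byte so that it can be flipped with one atomic 8-bit store (see
// RegisterAllocation/RegisterDeallocation below).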
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

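// The usual sanitizer allocator stack: a size-class-based primary allocator
// serving from a fixed address range, fronted by per-thread caches, with an
// mmap-based secondary allocator for chunks too large for the primary.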
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

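// Global allocator state and its per-thread cache. THREADLOCAL puts the cache
// in static TLS, so its address range can be handed to the leak scanner (see
// GetAllocatorCacheRange below).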
static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

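// Returns the metadata block the allocator keeps alongside the chunk
// containing |p|.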
static ChunkMetadata *Metadata(void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

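// Records the tag, stack trace and requested size for a new allocation. The
// allocated flag is set last, via a relaxed atomic store to the first byte of
// the metadata, so that a concurrent leak scan never sees a chunk marked
// allocated while its metadata is still being filled in.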
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

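// Atomically clears the allocated flag; the remaining metadata goes stale and
// is overwritten the next time the chunk is handed out.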
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

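// Zero-sized requests are bumped to one byte so that every allocation yields
// a distinct, trackable chunk. Oversized requests fail with a warning rather
// than aborting the process.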
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, cleared);
  RegisterAllocation(stack, p, size);
  return p;
}

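// The chunk is unregistered before being returned to the allocator, so the
// scanner can never observe a freed chunk that still looks allocated.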
void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

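// On an oversized request the old chunk is freed and 0 is returned, matching
// the failure path in Allocate().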
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

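// Reports the address range of this thread's allocator cache, so that the
// common LSan code can exclude it from scanning and internal free-list
// pointers do not keep dead chunks reachable.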
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

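// Returns the size originally requested by the user, not the (possibly
// larger) size of the size class the chunk was carved from.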
uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

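// The leak check runs with the world stopped; the allocator is locked
// beforehand so that its data structures are consistent while being scanned.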
void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

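// Reports where the allocator's own globals live, so that the scanner can
// skip them when processing global regions.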
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

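// Resolves an arbitrary (possibly interior) pointer to the begin address of
// the live chunk it points into, or 0 if there is no such chunk.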
uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (m->allocated && addr < chunk + m->requested_size)
    return chunk;
  return 0;
}

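// Metadata is stored out of line, so the user-visible pointer coincides with
// the chunk's begin address.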
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

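// LsanMetadata is the common module's opaque handle to ChunkMetadata; the
// accessors below simply forward to the fields defined above.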
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

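// Applies |callback| to every chunk in both the primary and the secondary
// allocator; the allocator is expected to be locked by the caller.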
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

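// Tags the chunk containing |p| as kIgnored so it is excluded from leak
// reports; fails if |p| does not point into a live chunk.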
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan