//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

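// memset() is declared by hand here: sanitizer runtime code avoids pulling in
// libc headers, so the prototype is spelled out with sanitizer-internal types.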
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

struct ChunkMetadata {
  u8 allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

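// Primary allocator selection: MIPS64 and AArch64 typically run with a much
// smaller usable virtual address space (e.g. 39/40-bit VMAs), where a fixed
// multi-terabyte region cannot be reserved, so the 32-bit-style size-class
// allocator is used there; other 64-bit targets get SizeClassAllocator64 at
// a fixed address.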
#if defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
    PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
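// Every thread owns an AllocatorCache. On thread exit the cache's chunks are
// returned to the global allocator (AllocatorThreadFinish); the common LSan
// module can also locate the cache via GetAllocatorCacheRange() below.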

void InitializeAllocator() {
  allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

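// Fill in the chunk's metadata first, then publish it by flipping the
// `allocated` byte (the first byte of ChunkMetadata, hence "must be first")
// with a relaxed atomic store, so the flag is never observed set while the
// remaining fields are only partially written.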
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

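// Allocate() backs malloc/new in this runtime. Zero-sized requests are bumped
// to one byte so every allocation is a distinct, registrable chunk; requests
// above kMaxAllowedMallocSize fail with a warning and return nullptr.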
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return nullptr;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

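// The old chunk's metadata is retired up front; an over-limit request frees
// the chunk and returns nullptr rather than leaving it registered.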
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return nullptr;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

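// Note: this reports the originally requested size, not the (possibly larger)
// size rounded up to the allocator's size class.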
uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

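// If p points anywhere into a live chunk (LSan treats interior pointers as
// valid references when scanning), return the chunk's start address;
// otherwise return 0.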
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

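// Backs the __lsan_ignore_object() annotation: if p points into a live chunk,
// tag it kIgnored so leak reports skip it.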
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}
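
// The remaining statistics are not tracked by LSan's allocator; these
// entry points return fixed placeholder values.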
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
}  // extern "C"
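
// Usage sketch (hypothetical client code, not part of this file): these entry
// points are declared in <sanitizer/allocator_interface.h> and can be queried
// from a program running under LSan, e.g.:
//
//   #include <sanitizer/allocator_interface.h>
//   #include <stdio.h>
//   int main() {
//     char *p = new char[128];
//     printf("owned: %d size: %zu heap: %zu\n",
//            __sanitizer_get_ownership(p),      // 1: p came from this allocator
//            __sanitizer_get_allocated_size(p), // 128: the requested size
//            __sanitizer_get_heap_size());      // bytes mapped by the allocator
//     delete[] p;
//   }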