Kostya Serebryany | 8b0a7ce | 2012-12-10 13:52:55 +0000 | [diff] [blame] | 1 | //===-- asan_allocator2.cc ------------------------------------------------===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file is a part of AddressSanitizer, an address sanity checker. |
| 11 | // |
| 12 | // Implementation of ASan's memory allocator, 2-nd version. |
| 13 | // This variant uses the allocator from sanitizer_common, i.e. the one shared |
| 14 | // with ThreadSanitizer and MemorySanitizer. |
| 15 | // |
Kostya Serebryany | 8b0a7ce | 2012-12-10 13:52:55 +0000 | [diff] [blame] | 16 | //===----------------------------------------------------------------------===// |
| 17 | #include "asan_allocator.h" |
Kostya Serebryany | 8b0a7ce | 2012-12-10 13:52:55 +0000 | [diff] [blame] | 18 | |
Kostya Serebryany | bc9940e | 2012-12-14 12:15:09 +0000 | [diff] [blame] | 19 | #include "asan_mapping.h" |
Alexey Samsonov | 7e84349 | 2013-03-28 15:42:43 +0000 | [diff] [blame] | 20 | #include "asan_poisoning.h" |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 21 | #include "asan_report.h" |
Kostya Serebryany | 84a996f | 2012-12-11 14:41:31 +0000 | [diff] [blame] | 22 | #include "asan_thread.h" |
Kostya Serebryany | 8b0a7ce | 2012-12-10 13:52:55 +0000 | [diff] [blame] | 23 | #include "sanitizer_common/sanitizer_allocator.h" |
Sergey Matveev | ed20ebe | 2013-05-06 11:27:58 +0000 | [diff] [blame] | 24 | #include "sanitizer_common/sanitizer_flags.h" |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 25 | #include "sanitizer_common/sanitizer_internal_defs.h" |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 26 | #include "sanitizer_common/sanitizer_list.h" |
Kostya Serebryany | 9e3bd38 | 2012-12-26 06:30:02 +0000 | [diff] [blame] | 27 | #include "sanitizer_common/sanitizer_stackdepot.h" |
Dmitry Vyukov | 9fc0df8 | 2013-01-11 08:07:43 +0000 | [diff] [blame] | 28 | #include "sanitizer_common/sanitizer_quarantine.h" |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 29 | #include "lsan/lsan_common.h" |
Kostya Serebryany | 8b0a7ce | 2012-12-10 13:52:55 +0000 | [diff] [blame] | 30 | |
| 31 | namespace __asan { |
| 32 | |
// Callbacks invoked by the underlying sanitizer allocator whenever it
// maps or unmaps memory on ASan's behalf; they keep the shadow and the
// per-thread mmap statistics in sync with the primary mapping.
struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    // Freshly mapped allocator memory is not yet handed out to the user,
    // so poison it as heap-left-redzone.
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since asan's mapping is compacting, the shadow chunk may be
    // not page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};
| 57 | |
// Allocator configuration.
// 64-bit: SizeClassAllocator64 over a fixed reserved VA range (PowerPC64
// uses a lower/smaller range than other 64-bit targets).
// 32-bit: SizeClassAllocator32 covering the whole 4G address space via a
// flat byte map with 1M (1 << kRegionSizeLog) regions.
#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace =  0xa0000000000ULL;
const uptr kAllocatorSize  =  0x20000000000ULL;  // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize>,
  AsanMapUnmapCallback> PrimaryAllocator;
#endif

// Per-thread cache over the primary; the secondary handles allocations the
// primary cannot (they go through mmap directly).
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
| 84 | |
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
// Instead, the per-thread AllocatorCache lives inside the thread's malloc
// storage; this returns a typed view of that raw buffer.
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  // The raw buffer must be large enough to host the cache.
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}
| 93 | |
// The single global allocator instance backing all ASan heap allocations.
static Allocator allocator;

// Requests larger than this are rejected (3G on 32-bit, 8G on 64-bit).
static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

// Capacity of the per-thread quarantine cache before chunks are pushed to
// the global quarantine (256K on 32-bit, 1M on 64-bit).
static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 101 | |
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
// These values are stored in ChunkHeader::chunk_state and are flipped
// atomically through casts to atomic_uint8_t; value 1 is intentionally
// unused.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
| 111 | |
Kostya Serebryany | c35314a | 2012-12-26 10:41:24 +0000 | [diff] [blame] | 112 | // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. |
| 113 | // We use adaptive redzones: for larger allocation larger redzones are used. |
| 114 | static u32 RZLog2Size(u32 rz_log) { |
| 115 | CHECK_LT(rz_log, 8); |
| 116 | return 16 << rz_log; |
| 117 | } |
| 118 | |
// Inverse of RZLog2Size: maps a redzone size (a power of two in
// [16, 2048]) back to its 3-bit log encoding.
static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  // Round-trip through the forward mapping to validate the encoding.
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
| 127 | |
| 128 | static uptr ComputeRZLog(uptr user_requested_size) { |
| 129 | u32 rz_log = |
| 130 | user_requested_size <= 64 - 16 ? 0 : |
| 131 | user_requested_size <= 128 - 32 ? 1 : |
| 132 | user_requested_size <= 512 - 64 ? 2 : |
| 133 | user_requested_size <= 4096 - 128 ? 3 : |
| 134 | user_requested_size <= (1 << 14) - 256 ? 4 : |
| 135 | user_requested_size <= (1 << 15) - 512 ? 5 : |
| 136 | user_requested_size <= (1 << 16) - 1024 ? 6 : 7; |
| 137 | return Max(rz_log, RZSize2Log(flags()->redzone)); |
| 138 | } |
| 139 | |
Kostya Serebryany | 84a996f | 2012-12-11 14:41:31 +0000 | [diff] [blame] | 140 | // The memory chunk allocated from the underlying allocator looks like this: |
| 141 | // L L L L L L H H U U U U U U R R |
| 142 | // L -- left redzone words (0 or more bytes) |
Kostya Serebryany | b34cf49 | 2012-12-20 14:35:06 +0000 | [diff] [blame] | 143 | // H -- ChunkHeader (16 bytes), which is also a part of the left redzone. |
Kostya Serebryany | 84a996f | 2012-12-11 14:41:31 +0000 | [diff] [blame] | 144 | // U -- user memory. |
| 145 | // R -- right redzone (0 or more bytes) |
| 146 | // ChunkBase consists of ChunkHeader and other bytes that overlap with user |
| 147 | // memory. |
| 148 | |
// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
// (The magic is cleared in QuarantineCallback::Recycle before the block is
// returned to the underlying allocator.)
static const uptr kAllocBegMagic = 0xCC6E96B9;
Kostya Serebryany | b34cf49 | 2012-12-20 14:35:06 +0000 | [diff] [blame] | 158 | |
// 16-byte header stored at the end of the left redzone, immediately before
// the user memory (see layout diagrams above).
struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first. CHUNK_{AVAILABLE,ALLOCATED,
                              // QUARANTINE}; flipped atomically via casts
                              // to atomic_uint8_t.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;  // User pointer was pushed past the redzone
                              // by an alignment request.
  u32 alloc_type        : 2;
  u32 rz_log            : 3;  // Redzone size == RZLog2Size(rz_log).
  u32 lsan_tag          : 2;  // LeakSanitizer chunk tag.
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;       // Stack depot id of the allocation stack
                              // (0 when the depot is not used).
};
| 176 | |
struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;  // Stack depot id of the deallocation stack.
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
// The layout math throughout this file relies on these exact sizes.
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
Kostya Serebryany | 84a996f | 2012-12-11 14:41:31 +0000 | [diff] [blame] | 186 | |
| 187 | struct AsanChunk: ChunkBase { |
| 188 | uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } |
Kostya Serebryany | 111a071 | 2012-12-26 04:52:07 +0000 | [diff] [blame] | 189 | uptr UsedSize() { |
| 190 | if (user_requested_size != SizeClassMap::kMaxSize) |
| 191 | return user_requested_size; |
| 192 | return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg())); |
| 193 | } |
| 194 | void *AllocBeg() { |
| 195 | if (from_memalign) |
Kostya Serebryany | 9e3bd38 | 2012-12-26 06:30:02 +0000 | [diff] [blame] | 196 | return allocator.GetBlockBegin(reinterpret_cast<void *>(this)); |
Kostya Serebryany | c35314a | 2012-12-26 10:41:24 +0000 | [diff] [blame] | 197 | return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log)); |
Kostya Serebryany | 111a071 | 2012-12-26 04:52:07 +0000 | [diff] [blame] | 198 | } |
Timur Iskhodzhanov | 7ce8de1 | 2013-05-20 08:20:17 +0000 | [diff] [blame] | 199 | // If we don't use stack depot, we store the alloc/free stack traces |
| 200 | // in the chunk itself. |
Kostya Serebryany | 0a504ec | 2012-12-17 06:31:53 +0000 | [diff] [blame] | 201 | u32 *AllocStackBeg() { |
Kostya Serebryany | c35314a | 2012-12-26 10:41:24 +0000 | [diff] [blame] | 202 | return (u32*)(Beg() - RZLog2Size(rz_log)); |
Kostya Serebryany | 1503e9b | 2012-12-14 13:16:19 +0000 | [diff] [blame] | 203 | } |
| 204 | uptr AllocStackSize() { |
Kostya Serebryany | c35314a | 2012-12-26 10:41:24 +0000 | [diff] [blame] | 205 | CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize); |
| 206 | return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32); |
Kostya Serebryany | 1503e9b | 2012-12-14 13:16:19 +0000 | [diff] [blame] | 207 | } |
Kostya Serebryany | 0a504ec | 2012-12-17 06:31:53 +0000 | [diff] [blame] | 208 | u32 *FreeStackBeg() { |
| 209 | return (u32*)(Beg() + kChunkHeader2Size); |
| 210 | } |
| 211 | uptr FreeStackSize() { |
Kostya Serebryany | 709a33e | 2012-12-26 12:20:35 +0000 | [diff] [blame] | 212 | if (user_requested_size < kChunkHeader2Size) return 0; |
| 213 | uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY); |
Kostya Serebryany | 0a504ec | 2012-12-17 06:31:53 +0000 | [diff] [blame] | 214 | return (available - kChunkHeader2Size) / sizeof(u32); |
| 215 | } |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 216 | bool AddrIsInside(uptr addr) { |
| 217 | return (addr >= Beg()) && (addr < Beg() + UsedSize()); |
| 218 | } |
Kostya Serebryany | 84a996f | 2012-12-11 14:41:31 +0000 | [diff] [blame] | 219 | }; |
| 220 | |
// AsanChunkView: thin, read-only accessors over the underlying AsanChunk.
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 226 | |
// Reconstructs a StackTrace from its stack depot id; the id must be valid
// (non-zero) and the stored trace must fit into StackTrace::trace.
static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}
| 235 | |
Kostya Serebryany | 84a996f | 2012-12-11 14:41:31 +0000 | [diff] [blame] | 236 | void AsanChunkView::GetAllocStack(StackTrace *stack) { |
Kostya Serebryany | 9e3bd38 | 2012-12-26 06:30:02 +0000 | [diff] [blame] | 237 | if (flags()->use_stack_depot) |
| 238 | GetStackTraceFromId(chunk_->alloc_context_id, stack); |
| 239 | else |
| 240 | StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(), |
| 241 | chunk_->AllocStackSize()); |
Kostya Serebryany | 84a996f | 2012-12-11 14:41:31 +0000 | [diff] [blame] | 242 | } |
| 243 | |
| 244 | void AsanChunkView::GetFreeStack(StackTrace *stack) { |
Kostya Serebryany | 9e3bd38 | 2012-12-26 06:30:02 +0000 | [diff] [blame] | 245 | if (flags()->use_stack_depot) |
| 246 | GetStackTraceFromId(chunk_->free_context_id, stack); |
| 247 | else |
| 248 | StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(), |
| 249 | chunk_->FreeStackSize()); |
Kostya Serebryany | 84a996f | 2012-12-11 14:41:31 +0000 | [diff] [blame] | 250 | } |
| 251 | |
struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
// Global quarantine of freed chunks, plus fallback caches and their mutex
// used by Allocate/deallocation paths when GetCurrentThread() is null.
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
| 259 | |
// Returns the per-thread quarantine cache stored inside the thread's malloc
// storage (mirrors GetAllocatorCache above).
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  // The raw buffer must be large enough to host the cache.
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
| 265 | |
// Callbacks the quarantine uses to recycle evicted chunks and to manage its
// own bookkeeping memory, all through the given allocator cache.
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  // Returns a quarantined chunk to the underlying allocator.
  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    // Transition QUARANTINE -> AVAILABLE before releasing the block.
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    // Re-poison the user region as heap-left-redzone while it sits in the
    // allocator's free lists.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  // Memory for the quarantine's internal batches.
  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};
| 307 | |
// One-time initialization of the global allocator and the quarantine
// (sized from the quarantine_size flag).
void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
Kostya Serebryany | 84a996f | 2012-12-11 14:41:31 +0000 | [diff] [blame] | 312 | |
// Allocates "size" bytes with at least the requested alignment and returns
// the user pointer. The chunk carries a left redzone holding the
// ChunkHeader (and, for the secondary allocator, an explicit right
// redzone), records the allocation stack/tid, unpoisons the user region,
// and publishes the chunk by atomically setting CHUNK_ALLOCATED last.
// "can_fill" enables filling user memory with malloc_fill_byte.
// Returns AllocatorReturnNull() if the request exceeds
// kMaxAllowedMallocSize.
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (!asan_inited)
    __asan_init();
  Flags &fl = *flags();
  CHECK(stack);
  // Never allocate with alignment below the shadow granularity.
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  // The user region must be able to hold Header2 (it overlaps user memory).
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  // Reserve slack so the user pointer can be pushed up to the alignment.
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return AllocatorReturnNull();
  }

  // Use the thread's allocator cache if there is a current thread;
  // otherwise fall back to the global cache under fallback_mutex.
  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  // Push the user pointer up to the requested alignment if needed.
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    // Header is not at the block start: leave the magic + back-pointer so
    // the chunk can be found from the block start (see diagram above).
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    // Large chunk: real size and header address go into the secondary
    // allocator's metadata; the header stores the kMaxSize sentinel.
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  // Record the allocation stack, either in the depot or in the redzone.
  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}
| 435 | |
Alexey Samsonov | 049c919 | 2013-06-06 08:25:31 +0000 | [diff] [blame] | 436 | static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) { |
| 437 | if (chunk_state == CHUNK_QUARANTINE) |
| 438 | ReportDoubleFree((uptr)ptr, stack); |
| 439 | else |
| 440 | ReportFreeNotMalloced((uptr)ptr, stack); |
| 441 | } |
| 442 | |
Timur Iskhodzhanov | 41d69f4 | 2013-05-20 13:05:58 +0000 | [diff] [blame] | 443 | static void AtomicallySetQuarantineFlag(AsanChunk *m, |
| 444 | void *ptr, StackTrace *stack) { |
Alexey Samsonov | 7dd282c | 2013-03-22 07:40:34 +0000 | [diff] [blame] | 445 | u8 old_chunk_state = CHUNK_ALLOCATED; |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 446 | // Flip the chunk_state atomically to avoid race on double-free. |
Timur Iskhodzhanov | a05af3d | 2013-03-22 18:16:57 +0000 | [diff] [blame] | 447 | if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state, |
Alexey Samsonov | 049c919 | 2013-06-06 08:25:31 +0000 | [diff] [blame] | 448 | CHUNK_QUARANTINE, memory_order_acquire)) |
| 449 | ReportInvalidFree(ptr, old_chunk_state, stack); |
Alexey Samsonov | 7dd282c | 2013-03-22 07:40:34 +0000 | [diff] [blame] | 450 | CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); |
Timur Iskhodzhanov | 41d69f4 | 2013-05-20 13:05:58 +0000 | [diff] [blame] | 451 | } |
| 452 | |
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
// Records free metadata (thread id, free stack), poisons the user region,
// updates stats, and pushes the chunk into the quarantine.
static void QuarantineChunk(AsanChunk *m, void *ptr,
                            StackTrace *stack, AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  // Flag malloc/delete-style mismatches if the user asked for it.
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  // Record the free stack either in the stack depot or compressed in the
  // chunk itself, mirroring how the allocation stack was stored.
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    // Fast path: use this thread's quarantine and allocator caches.
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    // No current AsanThread: fall back to the global caches, serialized
    // by fallback_mutex.
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}
| 496 | |
Timur Iskhodzhanov | 41d69f4 | 2013-05-20 13:05:58 +0000 | [diff] [blame] | 497 | static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) { |
| 498 | uptr p = reinterpret_cast<uptr>(ptr); |
| 499 | if (p == 0) return; |
| 500 | |
| 501 | uptr chunk_beg = p - kChunkHeaderSize; |
| 502 | AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); |
Alexey Samsonov | 87bd39a | 2013-06-04 12:19:31 +0000 | [diff] [blame] | 503 | ASAN_FREE_HOOK(ptr); |
Timur Iskhodzhanov | 41d69f4 | 2013-05-20 13:05:58 +0000 | [diff] [blame] | 504 | // Must mark the chunk as quarantined before any changes to its metadata. |
| 505 | AtomicallySetQuarantineFlag(m, ptr, stack); |
| 506 | QuarantineChunk(m, ptr, stack, alloc_type); |
| 507 | } |
| 508 | |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 509 | static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) { |
| 510 | CHECK(old_ptr && new_size); |
| 511 | uptr p = reinterpret_cast<uptr>(old_ptr); |
| 512 | uptr chunk_beg = p - kChunkHeaderSize; |
| 513 | AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); |
| 514 | |
Alexey Samsonov | c25e62b | 2013-03-20 10:11:24 +0000 | [diff] [blame] | 515 | AsanStats &thread_stats = GetCurrentThreadStats(); |
Kostya Serebryany | e11c5c5 | 2012-12-21 12:26:31 +0000 | [diff] [blame] | 516 | thread_stats.reallocs++; |
| 517 | thread_stats.realloced += new_size; |
| 518 | |
Kostya Serebryany | 2a3619e | 2013-04-04 11:17:14 +0000 | [diff] [blame] | 519 | void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 520 | if (new_ptr) { |
Alexey Samsonov | 049c919 | 2013-06-06 08:25:31 +0000 | [diff] [blame] | 521 | u8 chunk_state = m->chunk_state; |
| 522 | if (chunk_state != CHUNK_ALLOCATED) |
| 523 | ReportInvalidFree(old_ptr, chunk_state, stack); |
Kostya Serebryany | f155fcc | 2013-02-26 12:59:06 +0000 | [diff] [blame] | 524 | CHECK_NE(REAL(memcpy), (void*)0); |
Alexey Samsonov | 87bd39a | 2013-06-04 12:19:31 +0000 | [diff] [blame] | 525 | uptr memcpy_size = Min(new_size, m->UsedSize()); |
| 526 | // If realloc() races with free(), we may start copying freed memory. |
| 527 | // However, we will report racy double-free later anyway. |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 528 | REAL(memcpy)(new_ptr, old_ptr, memcpy_size); |
Alexey Samsonov | 87bd39a | 2013-06-04 12:19:31 +0000 | [diff] [blame] | 529 | Deallocate(old_ptr, stack, FROM_MALLOC); |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 530 | } |
| 531 | return new_ptr; |
| 532 | } |
| 533 | |
Sergey Matveev | ba2169a | 2013-05-31 11:13:45 +0000 | [diff] [blame] | 534 | // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). |
| 535 | static AsanChunk *GetAsanChunk(void *alloc_beg) { |
Kostya Serebryany | a93c02c | 2012-12-17 14:57:25 +0000 | [diff] [blame] | 536 | if (!alloc_beg) return 0; |
Sergey Matveev | ba2169a | 2013-05-31 11:13:45 +0000 | [diff] [blame] | 537 | if (!allocator.FromPrimary(alloc_beg)) { |
| 538 | uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); |
Kostya Serebryany | c35314a | 2012-12-26 10:41:24 +0000 | [diff] [blame] | 539 | AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); |
| 540 | return m; |
| 541 | } |
Kostya Serebryany | 5d43b5a | 2013-06-10 10:46:27 +0000 | [diff] [blame] | 542 | uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); |
| 543 | if (alloc_magic[0] == kAllocBegMagic) |
| 544 | return reinterpret_cast<AsanChunk *>(alloc_magic[1]); |
| 545 | return reinterpret_cast<AsanChunk *>(alloc_beg); |
Kostya Serebryany | a93c02c | 2012-12-17 14:57:25 +0000 | [diff] [blame] | 546 | } |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 547 | |
Sergey Matveev | ba2169a | 2013-05-31 11:13:45 +0000 | [diff] [blame] | 548 | static AsanChunk *GetAsanChunkByAddr(uptr p) { |
| 549 | void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); |
| 550 | return GetAsanChunk(alloc_beg); |
| 551 | } |
| 552 | |
| 553 | // Allocator must be locked when this function is called. |
| 554 | static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { |
| 555 | void *alloc_beg = |
| 556 | allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p)); |
| 557 | return GetAsanChunk(alloc_beg); |
| 558 | } |
| 559 | |
Kostya Serebryany | a93c02c | 2012-12-17 14:57:25 +0000 | [diff] [blame] | 560 | static uptr AllocationSize(uptr p) { |
| 561 | AsanChunk *m = GetAsanChunkByAddr(p); |
| 562 | if (!m) return 0; |
| 563 | if (m->chunk_state != CHUNK_ALLOCATED) return 0; |
| 564 | if (m->Beg() != p) return 0; |
| 565 | return m->UsedSize(); |
| 566 | } |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 567 | |
Kostya Serebryany | 2592d76 | 2012-12-19 08:32:50 +0000 | [diff] [blame] | 568 | // We have an address between two chunks, and we want to report just one. |
| 569 | AsanChunk *ChooseChunk(uptr addr, |
| 570 | AsanChunk *left_chunk, AsanChunk *right_chunk) { |
Kostya Serebryany | c35314a | 2012-12-26 10:41:24 +0000 | [diff] [blame] | 571 | // Prefer an allocated chunk over freed chunk and freed chunk |
| 572 | // over available chunk. |
| 573 | if (left_chunk->chunk_state != right_chunk->chunk_state) { |
| 574 | if (left_chunk->chunk_state == CHUNK_ALLOCATED) |
| 575 | return left_chunk; |
| 576 | if (right_chunk->chunk_state == CHUNK_ALLOCATED) |
| 577 | return right_chunk; |
| 578 | if (left_chunk->chunk_state == CHUNK_QUARANTINE) |
| 579 | return left_chunk; |
| 580 | if (right_chunk->chunk_state == CHUNK_QUARANTINE) |
| 581 | return right_chunk; |
| 582 | } |
| 583 | // Same chunk_state: choose based on offset. |
Evgeniy Stepanov | 589dcda | 2013-02-05 14:32:03 +0000 | [diff] [blame] | 584 | sptr l_offset = 0, r_offset = 0; |
Kostya Serebryany | 2592d76 | 2012-12-19 08:32:50 +0000 | [diff] [blame] | 585 | CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); |
| 586 | CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); |
| 587 | if (l_offset < r_offset) |
| 588 | return left_chunk; |
| 589 | return right_chunk; |
| 590 | } |
| 591 | |
// Finds the chunk to report for an arbitrary heap address, disambiguating
// addresses that fall between two adjacent chunks.
AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    // Scan at most one page leftwards for a different chunk.
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue; // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}
| 611 | |
| 612 | void AsanThreadLocalMallocStorage::CommitBack() { |
Dmitry Vyukov | 9fc0df8 | 2013-01-11 08:07:43 +0000 | [diff] [blame] | 613 | AllocatorCache *ac = GetAllocatorCache(this); |
| 614 | quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac)); |
Kostya Serebryany | 376bab8 | 2012-12-20 08:53:41 +0000 | [diff] [blame] | 615 | allocator.SwallowCache(GetAllocatorCache(this)); |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 616 | } |
| 617 | |
Kostya Serebryany | 4b48f45 | 2012-12-27 14:09:19 +0000 | [diff] [blame] | 618 | void PrintInternalAllocatorStats() { |
| 619 | allocator.PrintStats(); |
| 620 | } |
| 621 | |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 622 | SANITIZER_INTERFACE_ATTRIBUTE |
Kostya Serebryany | fe6d916 | 2012-12-21 08:53:59 +0000 | [diff] [blame] | 623 | void *asan_memalign(uptr alignment, uptr size, StackTrace *stack, |
| 624 | AllocType alloc_type) { |
Kostya Serebryany | 2a3619e | 2013-04-04 11:17:14 +0000 | [diff] [blame] | 625 | return Allocate(size, alignment, stack, alloc_type, true); |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 626 | } |
| 627 | |
| 628 | SANITIZER_INTERFACE_ATTRIBUTE |
Kostya Serebryany | fe6d916 | 2012-12-21 08:53:59 +0000 | [diff] [blame] | 629 | void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) { |
| 630 | Deallocate(ptr, stack, alloc_type); |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 631 | } |
| 632 | |
| 633 | SANITIZER_INTERFACE_ATTRIBUTE |
| 634 | void *asan_malloc(uptr size, StackTrace *stack) { |
Kostya Serebryany | 2a3619e | 2013-04-04 11:17:14 +0000 | [diff] [blame] | 635 | return Allocate(size, 8, stack, FROM_MALLOC, true); |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 636 | } |
| 637 | |
// calloc() entry point: overflow-checked nmemb*size allocation, zeroed.
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  // Refuse requests whose byte count nmemb*size would overflow uptr.
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  // can_fill=false: the block is zeroed below instead of pattern-filled.
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}
| 648 | |
| 649 | void *asan_realloc(void *p, uptr size, StackTrace *stack) { |
Alexey Samsonov | d916993 | 2013-01-29 07:51:34 +0000 | [diff] [blame] | 650 | if (p == 0) |
Kostya Serebryany | 2a3619e | 2013-04-04 11:17:14 +0000 | [diff] [blame] | 651 | return Allocate(size, 8, stack, FROM_MALLOC, true); |
Kostya Serebryany | 1503e9b | 2012-12-14 13:16:19 +0000 | [diff] [blame] | 652 | if (size == 0) { |
Kostya Serebryany | fe6d916 | 2012-12-21 08:53:59 +0000 | [diff] [blame] | 653 | Deallocate(p, stack, FROM_MALLOC); |
Kostya Serebryany | 1503e9b | 2012-12-14 13:16:19 +0000 | [diff] [blame] | 654 | return 0; |
| 655 | } |
Kostya Serebryany | d4d2594 | 2012-12-17 09:06:25 +0000 | [diff] [blame] | 656 | return Reallocate(p, size, stack); |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 657 | } |
| 658 | |
| 659 | void *asan_valloc(uptr size, StackTrace *stack) { |
Kostya Serebryany | 2a3619e | 2013-04-04 11:17:14 +0000 | [diff] [blame] | 660 | return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true); |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 661 | } |
| 662 | |
| 663 | void *asan_pvalloc(uptr size, StackTrace *stack) { |
Kostya Serebryany | 1503e9b | 2012-12-14 13:16:19 +0000 | [diff] [blame] | 664 | uptr PageSize = GetPageSizeCached(); |
| 665 | size = RoundUpTo(size, PageSize); |
| 666 | if (size == 0) { |
| 667 | // pvalloc(0) should allocate one page. |
| 668 | size = PageSize; |
| 669 | } |
Kostya Serebryany | 2a3619e | 2013-04-04 11:17:14 +0000 | [diff] [blame] | 670 | return Allocate(size, PageSize, stack, FROM_MALLOC, true); |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 671 | } |
| 672 | |
// posix_memalign(3) entry point. Always returns 0 (success):
// NOTE(review): a failed Allocate yields ptr == 0, IsAligned(0, x) holds, so
// *memptr is set to null and 0 is still returned instead of ENOMEM; invalid
// alignments are not rejected with EINVAL here either -- confirm the
// interceptor layer validates these per POSIX.
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
| 680 | |
Kostya Serebryany | 72efa94 | 2013-06-10 13:28:33 +0000 | [diff] [blame] | 681 | SANITIZER_INTERFACE_ATTRIBUTE |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 682 | uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) { |
Kostya Serebryany | a93c02c | 2012-12-17 14:57:25 +0000 | [diff] [blame] | 683 | CHECK(stack); |
| 684 | if (ptr == 0) return 0; |
| 685 | uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr)); |
| 686 | if (flags()->check_malloc_usable_size && (usable_size == 0)) |
| 687 | ReportMallocUsableSizeNotOwned((uptr)ptr, stack); |
| 688 | return usable_size; |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 689 | } |
| 690 | |
| 691 | uptr asan_mz_size(const void *ptr) { |
Alexander Potapenko | 6a11cc1 | 2013-02-07 11:40:03 +0000 | [diff] [blame] | 692 | return AllocationSize(reinterpret_cast<uptr>(ptr)); |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 693 | } |
| 694 | |
void asan_mz_force_lock() {
  // Lock order: allocator first, then fallback_mutex.
  // asan_mz_force_unlock releases in the reverse order; keep them in sync
  // to avoid deadlock.
  allocator.ForceLock();
  fallback_mutex.Lock();
}
| 699 | |
void asan_mz_force_unlock() {
  // Release in the reverse order of asan_mz_force_lock.
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}
| 704 | |
Kostya Serebryany | 8b0a7ce | 2012-12-10 13:52:55 +0000 | [diff] [blame] | 705 | } // namespace __asan |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 706 | |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 707 | // --- Implementation of LSan-specific functions --- {{{1 |
| 708 | namespace __lsan { |
// Locks the ASan allocator for LSan's stop-the-world leak scan.
void LockAllocator() {
  __asan::allocator.ForceLock();
}
| 712 | |
// Releases the lock taken by LockAllocator.
void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}
| 716 | |
| 717 | void GetAllocatorGlobalRange(uptr *begin, uptr *end) { |
| 718 | *begin = (uptr)&__asan::allocator; |
| 719 | *end = *begin + sizeof(__asan::allocator); |
| 720 | } |
| 721 | |
Sergey Matveev | ac78d00 | 2013-06-24 08:34:50 +0000 | [diff] [blame] | 722 | uptr PointsIntoChunk(void* p) { |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 723 | uptr addr = reinterpret_cast<uptr>(p); |
Sergey Matveev | ba2169a | 2013-05-31 11:13:45 +0000 | [diff] [blame] | 724 | __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr); |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 725 | if (!m) return 0; |
| 726 | uptr chunk = m->Beg(); |
| 727 | if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) |
Sergey Matveev | ac78d00 | 2013-06-24 08:34:50 +0000 | [diff] [blame] | 728 | return chunk; |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 729 | return 0; |
| 730 | } |
| 731 | |
Sergey Matveev | ac78d00 | 2013-06-24 08:34:50 +0000 | [diff] [blame] | 732 | uptr GetUserBegin(uptr chunk) { |
Sergey Matveev | ba2169a | 2013-05-31 11:13:45 +0000 | [diff] [blame] | 733 | __asan::AsanChunk *m = |
Sergey Matveev | ac78d00 | 2013-06-24 08:34:50 +0000 | [diff] [blame] | 734 | __asan::GetAsanChunkByAddrFastLocked(chunk); |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 735 | CHECK(m); |
Sergey Matveev | ac78d00 | 2013-06-24 08:34:50 +0000 | [diff] [blame] | 736 | return m->Beg(); |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 737 | } |
| 738 | |
Sergey Matveev | ac78d00 | 2013-06-24 08:34:50 +0000 | [diff] [blame] | 739 | LsanMetadata::LsanMetadata(uptr chunk) { |
| 740 | metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize); |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 741 | } |
| 742 | |
| 743 | bool LsanMetadata::allocated() const { |
| 744 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); |
| 745 | return m->chunk_state == __asan::CHUNK_ALLOCATED; |
| 746 | } |
| 747 | |
| 748 | ChunkTag LsanMetadata::tag() const { |
| 749 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); |
| 750 | return static_cast<ChunkTag>(m->lsan_tag); |
| 751 | } |
| 752 | |
| 753 | void LsanMetadata::set_tag(ChunkTag value) { |
| 754 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); |
| 755 | m->lsan_tag = value; |
| 756 | } |
| 757 | |
| 758 | uptr LsanMetadata::requested_size() const { |
| 759 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); |
| 760 | return m->UsedSize(); |
| 761 | } |
| 762 | |
| 763 | u32 LsanMetadata::stack_trace_id() const { |
| 764 | __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); |
| 765 | return m->alloc_context_id; |
| 766 | } |
| 767 | |
Sergey Matveev | ac78d00 | 2013-06-24 08:34:50 +0000 | [diff] [blame] | 768 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { |
| 769 | __asan::allocator.ForEachChunk(callback, arg); |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 770 | } |
Sergey Matveev | cd571e0 | 2013-06-06 14:17:56 +0000 | [diff] [blame] | 771 | |
| 772 | IgnoreObjectResult IgnoreObjectLocked(const void *p) { |
| 773 | uptr addr = reinterpret_cast<uptr>(p); |
| 774 | __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr); |
| 775 | if (!m) return kIgnoreObjectInvalid; |
| 776 | if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { |
Sergey Matveev | b3b46da | 2013-06-11 15:26:20 +0000 | [diff] [blame] | 777 | if (m->lsan_tag == kIgnored) |
Sergey Matveev | cd571e0 | 2013-06-06 14:17:56 +0000 | [diff] [blame] | 778 | return kIgnoreObjectAlreadyIgnored; |
Sergey Matveev | b3b46da | 2013-06-11 15:26:20 +0000 | [diff] [blame] | 779 | m->lsan_tag = __lsan::kIgnored; |
Sergey Matveev | cd571e0 | 2013-06-06 14:17:56 +0000 | [diff] [blame] | 780 | return kIgnoreObjectSuccess; |
| 781 | } else { |
| 782 | return kIgnoreObjectInvalid; |
| 783 | } |
| 784 | } |
Sergey Matveev | 79367ad | 2013-05-21 13:46:41 +0000 | [diff] [blame] | 785 | } // namespace __lsan |
| 786 | |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 787 | // ---------------------- Interface ---------------- {{{1 |
| 788 | using namespace __asan; // NOLINT |
| 789 | |
| 790 | // ASan allocator doesn't reserve extra bytes, so normally we would |
Kostya Serebryany | 376bab8 | 2012-12-20 08:53:41 +0000 | [diff] [blame] | 791 | // just return "size". We don't want to expose our redzone sizes, etc here. |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 792 | uptr __asan_get_estimated_allocated_size(uptr size) { |
Kostya Serebryany | 376bab8 | 2012-12-20 08:53:41 +0000 | [diff] [blame] | 793 | return size; |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 794 | } |
| 795 | |
| 796 | bool __asan_get_ownership(const void *p) { |
Alexey Samsonov | 220ba2f | 2013-01-17 13:25:17 +0000 | [diff] [blame] | 797 | uptr ptr = reinterpret_cast<uptr>(p); |
Alexey Samsonov | d916993 | 2013-01-29 07:51:34 +0000 | [diff] [blame] | 798 | return (AllocationSize(ptr) > 0); |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 799 | } |
| 800 | |
| 801 | uptr __asan_get_allocated_size(const void *p) { |
Kostya Serebryany | 376bab8 | 2012-12-20 08:53:41 +0000 | [diff] [blame] | 802 | if (p == 0) return 0; |
Alexey Samsonov | 220ba2f | 2013-01-17 13:25:17 +0000 | [diff] [blame] | 803 | uptr ptr = reinterpret_cast<uptr>(p); |
| 804 | uptr allocated_size = AllocationSize(ptr); |
Kostya Serebryany | 376bab8 | 2012-12-20 08:53:41 +0000 | [diff] [blame] | 805 | // Die if p is not malloced or if it is already freed. |
Alexey Samsonov | d916993 | 2013-01-29 07:51:34 +0000 | [diff] [blame] | 806 | if (allocated_size == 0) { |
Kostya Serebryany | 376bab8 | 2012-12-20 08:53:41 +0000 | [diff] [blame] | 807 | GET_STACK_TRACE_FATAL_HERE; |
Alexey Samsonov | 220ba2f | 2013-01-17 13:25:17 +0000 | [diff] [blame] | 808 | ReportAsanGetAllocatedSizeNotOwned(ptr, &stack); |
Kostya Serebryany | 376bab8 | 2012-12-20 08:53:41 +0000 | [diff] [blame] | 809 | } |
| 810 | return allocated_size; |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 811 | } |
| 812 | |
| 813 | #if !SANITIZER_SUPPORTS_WEAK_HOOKS |
| 814 | // Provide default (no-op) implementation of malloc hooks. |
| 815 | extern "C" { |
Timur Iskhodzhanov | 3c80c6c | 2013-08-13 11:42:45 +0000 | [diff] [blame] | 816 | SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 817 | void __asan_malloc_hook(void *ptr, uptr size) { |
| 818 | (void)ptr; |
| 819 | (void)size; |
| 820 | } |
Timur Iskhodzhanov | 3c80c6c | 2013-08-13 11:42:45 +0000 | [diff] [blame] | 821 | SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE |
Kostya Serebryany | 321e125 | 2012-12-11 09:02:36 +0000 | [diff] [blame] | 822 | void __asan_free_hook(void *ptr) { |
| 823 | (void)ptr; |
| 824 | } |
| 825 | } // extern "C" |
| 826 | #endif |