//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
17#include "asan_allocator.h"
Kostya Serebryany8b0a7ce2012-12-10 13:52:55 +000018
Kostya Serebryanybc9940e2012-12-14 12:15:09 +000019#include "asan_mapping.h"
Alexey Samsonov7e843492013-03-28 15:42:43 +000020#include "asan_poisoning.h"
Kostya Serebryanyd4d25942012-12-17 09:06:25 +000021#include "asan_report.h"
Kostya Serebryany84a996f2012-12-11 14:41:31 +000022#include "asan_thread.h"
Kostya Serebryany8b0a7ce2012-12-10 13:52:55 +000023#include "sanitizer_common/sanitizer_allocator.h"
Sergey Matveeved20ebe2013-05-06 11:27:58 +000024#include "sanitizer_common/sanitizer_flags.h"
Kostya Serebryany321e1252012-12-11 09:02:36 +000025#include "sanitizer_common/sanitizer_internal_defs.h"
Kostya Serebryanyd4d25942012-12-17 09:06:25 +000026#include "sanitizer_common/sanitizer_list.h"
Kostya Serebryany9e3bd382012-12-26 06:30:02 +000027#include "sanitizer_common/sanitizer_stackdepot.h"
Dmitry Vyukov9fc0df82013-01-11 08:07:43 +000028#include "sanitizer_common/sanitizer_quarantine.h"
Sergey Matveev79367ad2013-05-21 13:46:41 +000029#include "lsan/lsan_common.h"
Kostya Serebryany8b0a7ce2012-12-10 13:52:55 +000030
31namespace __asan {
32
struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since ASan's mapping is compacting, the shadow chunk may not be
    // page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize  = 0x20000000000ULL;  // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  = 0x40000000000ULL;  // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
    SizeClassMap, kRegionSizeLog,
    FlatByteMap<kFlatByteMapSize>,
    AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
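// A chunk starts out CHUNK_AVAILABLE, becomes CHUNK_ALLOCATED in Allocate(),
// moves to CHUNK_QUARANTINE in Deallocate(), and becomes CHUNK_AVAILABLE
// again once QuarantineCallback::Recycle() hands it back to the underlying
// allocator.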
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

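// Pick the smallest redzone such that the whole chunk (user size plus
// redzone) still fits within the next threshold (64, 128, 512, 4096, 16K,
// 32K or 64K bytes), but never less than the user-specified flags()->redzone.
// For example, a 100-byte request gets rz_log == 2, i.e. a 64-byte redzone.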
static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // If we don't use stack depot, we store the alloc/free stack traces
  // in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

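// Freed chunks are not returned to the underlying allocator right away.
// They are kept poisoned in a quarantine (bounded by flags()->quarantine_size)
// so that use-after-free accesses keep hitting poisoned shadow for as long as
// possible; QuarantineCallback::Recycle() below performs the actual release.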
struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
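// The fallback cache and quarantine are protected by fallback_mutex and are
// used by threads that do not yet have an AsanThread object (e.g. very early
// during process or thread startup).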
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of the deallocated chunk, confusing the GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (!asan_inited)
    __asan_init();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return AllocatorReturnNull();
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
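  // Compute the layout inside the freshly allocated block: the user region
  // starts right after the left redzone (rounded up to 'alignment' if
  // needed), and the 16-byte ChunkHeader occupies the tail of the left
  // redzone, immediately before the user region.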
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
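  // The shadow byte of the last (partial) granule holds the number of its
  // addressable bytes, i.e. size % SHADOW_GRANULARITY.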
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
  if (chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else
    ReportFreeNotMalloced((uptr)ptr, stack);
}

static void AtomicallySetQuarantineFlag(AsanChunk *m,
                                        void *ptr, StackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire))
    ReportInvalidFree(ptr, old_chunk_state, stack);
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr,
                            StackTrace *stack, AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bit this resides in the user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
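// The AsanChunk header is located in one of three ways: via the secondary
// allocator's metadata (meta[1] stores chunk_beg), via the kAllocBegMagic
// pair written at the start of the block, or directly at the start of the
// block when the header was placed there.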
static AsanChunk *GetAsanChunk(void *alloc_beg) {
  if (!alloc_beg) return 0;
  if (!allocator.FromPrimary(alloc_beg)) {
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (alloc_magic[0] == kAllocBegMagic)
    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
  return reinterpret_cast<AsanChunk *>(alloc_beg);
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk and a freed chunk
  // over an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator, there is no need to
  // clear it, as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
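// These hooks let LeakSanitizer lock the allocator, enumerate its chunks and
// read or update the per-chunk lsan_tag stored in the ChunkHeader.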
namespace __lsan {
void LockAllocator() {
  __asan::allocator.ForceLock();
}

void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif