//===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned by REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to a quarantine (fifo list).
// After quarantine, a chunk is returned to the free lists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
//
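// For illustration (a sketch, assuming REDZONE == 128; the real value is
// taken from FLAG_redzone at runtime), malloc(10) produces:
//
//   [header + compressed alloc stack]  128 bytes  left redzone, poisoned
//   [user memory]                       10 bytes  addressable, returned ptr
//   [partial right redzone]            118 bytes  poisoned
//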
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"

#ifdef _WIN32
#include <intrin.h>
#endif

namespace __asan {

#define REDZONE FLAG_redzone
static const size_t kMinAllocSize = REDZONE * 2;
static const uint64_t kMaxAvailableRam = 128ULL << 30;  // 128G
static const size_t kMaxThreadLocalQuarantine = 1 << 20;  // 1M

static const size_t kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
static const size_t kMaxSizeForThreadLocalFreeList =
    (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const size_t kMallocSizeClassStepLog = 26;
static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

static const size_t kMaxAllowedMallocSize =
    (__WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;

static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
  return (a & (alignment - 1)) == 0;
}

static inline size_t Log2(size_t x) {
  CHECK(IsPowerOfTwo(x));
#if defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#elif defined(_WIN32)
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#else
  return __builtin_ctzl(x);
#endif
}

static inline size_t RoundUpToPowerOfTwo(size_t size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  unsigned long up;  // NOLINT
#if defined(_WIN64)
  _BitScanReverse64(&up, size);
#elif defined(_WIN32)
  _BitScanReverse(&up, size);
#else
  up = __WORDSIZE - 1 - __builtin_clzl(size);
#endif
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}

static inline size_t SizeClassToSize(uint8_t size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  } else {
    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
  }
}

static inline uint8_t SizeToSizeClass(size_t size) {
  uint8_t res = 0;
  if (size <= kMallocSizeClassStep) {
    size_t rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
        + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}
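
// A worked example of the mapping above (the numbers follow from
// kMallocSizeClassStepLog == 26, i.e. a 64M step):
//   SizeToSizeClass(100)       == 7    (RoundUpToPowerOfTwo(100) == 128 == 1 << 7)
//   SizeClassToSize(7)         == 128
//   SizeToSizeClass(200 << 20) == 30   (ceil(200M / 64M) == 4; 4 + 26 == 30)
//   SizeClassToSize(30)        == (30 - 26) * 64M == 256M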

// Given REDZONE bytes, we need to mark the first "size" bytes as addressable
// and the remaining REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}
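
// E.g. with REDZONE == 128, SHADOW_GRANULARITY == 8 and size == 10 (a sketch;
// the exact values are configuration-dependent), the shadow for [mem, mem+10)
// stays addressable while [mem+10, mem+128) reads as heap-right-redzone
// poison, with the granule containing mem+10 marked partially addressable.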

static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
  CHECK(IsAligned(size, kPageSize));
  uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
  PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
  if (FLAG_debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case 'next' contains the address
// of the AsanChunk).
//
// The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE = 0x573B,
  CHUNK_ALLOCATED = 0x3204,
  CHUNK_QUARANTINE = 0x1978,
  CHUNK_MEMALIGN = 0xDC68,
};

struct ChunkBase {
  uint16_t chunk_state;
  uint8_t size_class;
  uint32_t offset;  // User-visible memory starts at this+offset (beg()).
  int32_t alloc_tid;
  int32_t free_tid;
  size_t used_size;  // Size requested by the user.
  AsanChunk *next;

  uintptr_t beg() { return (uintptr_t)this + offset; }
  size_t Size() { return SizeClassToSize(size_class); }
  uint8_t SizeClass() { return size_class; }
};

struct AsanChunk: public ChunkBase {
  uint32_t *compressed_alloc_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
  }
  uint32_t *compressed_free_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + REDZONE);
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  size_t compressed_alloc_stack_size() {
    return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
  }
  size_t compressed_free_stack_size() {
    return (REDZONE) / sizeof(uint32_t);
  }

  bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
      *offset = addr - beg();
      return true;
    }
    return false;
  }

  bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr < beg()) {
      *offset = beg() - addr;
      return true;
    }
    return false;
  }

  bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr + access_size >= beg() + used_size) {
      if (addr <= beg() + used_size)
        *offset = 0;
      else
        *offset = addr - (beg() + used_size);
      return true;
    }
    return false;
  }

  void DescribeAddress(uintptr_t addr, size_t access_size) {
    size_t offset;
    Printf("%p is located ", addr);
    if (AddrIsInside(addr, access_size, &offset)) {
      Printf("%zu bytes inside of", offset);
    } else if (AddrIsAtLeft(addr, access_size, &offset)) {
      Printf("%zu bytes to the left of", offset);
    } else if (AddrIsAtRight(addr, access_size, &offset)) {
      Printf("%zu bytes to the right of", offset);
    } else {
      Printf(" somewhere around (this is an AddressSanitizer bug!)");
    }
    Printf(" %zu-byte region [%p,%p)\n",
           used_size, beg(), beg() + used_size);
  }
};

static AsanChunk *PtrToChunk(uintptr_t ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = m->next;
  }
  return m;
}
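
// E.g. for memalign(4096, 10) the real AsanChunk header may start well before
// the aligned user address 'addr'; Allocate() then writes a stub header at
// addr - REDZONE with chunk_state == CHUNK_MEMALIGN and 'next' pointing to
// the real chunk, so PtrToChunk(addr) recovers it with one extra dereference.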

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
    CHECK(first_);
  }
  CHECK(last_);
  CHECK(!last_->next);
  size_ += q->size();
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  CHECK(n->next == NULL);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = n;
    last_ = n;
  } else {
    CHECK(!first_);
    last_ = first_ = n;
  }
  size_ += n->Size();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ was evicted from the cache
// long ago. Not sure whether we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == NULL)
    last_ = NULL;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  if (last_) {
    CHECK(!last_->next);
  }
  return res;
}

// All pages we ever allocated.
struct PageGroup {
  uintptr_t beg;
  uintptr_t end;
  size_t size_of_chunk;
  uintptr_t last_chunk;
  bool InRange(uintptr_t addr) {
    return addr >= beg && addr < end;
  }
};

class MallocInfo {
 public:
  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
    AsanChunk *m = NULL;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (size_t i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(FLAG_quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > FLAG_quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }

  size_t AllocationSize(uintptr_t ptr) {
    if (!ptr) return 0;
    ScopedLock lock(&mu_);

    // First, check that this is our memory.
    PageGroup *g = FindPageGroupUnlocked(ptr);
    if (!g) return 0;
    AsanChunk *m = PtrToChunk(ptr);
    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  void PrintStatus() {
    ScopedLock lock(&mu_);
    size_t malloced = 0;

    Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      size_t t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%zu:%zu ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uintptr_t addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
    int n = n_page_groups_;
    // If the page groups are not sorted yet, sort them.
    if (n_sorted_page_groups_ < n) {
      SortArray((uintptr_t*)page_groups_, n);
      n_sorted_page_groups_ = n;
    }
    // Binary search over the page groups.
    int beg = 0, end = n;
    while (beg < end) {
      int med = (beg + end) / 2;
      uintptr_t g = (uintptr_t)page_groups_[med];
      if (addr > g) {
        // 'g' points to the end of the group, so 'addr'
        // may not belong to page_groups_[med] or any previous group.
        beg = med + 1;
      } else {
        // 'addr' may belong to page_groups_[med] or a previous group.
        end = med;
      }
    }
    if (beg >= n)
      return NULL;
    PageGroup *g = page_groups_[beg];
    CHECK(g);
    if (g->InRange(addr))
      return g;
    return NULL;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uintptr_t addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    size_t l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  AsanChunk *FindChunkByAddr(uintptr_t addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uintptr_t offset_from_beg = addr - g->beg;
    uintptr_t this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    size_t offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m->AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }
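
  // E.g. for a PageGroup with beg == 0x10000 and size_of_chunk == 0x200,
  // addr == 0x10530 maps to this_chunk_addr == 0x10400 (the third chunk);
  // the offset checks above then decide whether to blame that chunk or one
  // of its neighbors.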

  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    size_t size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }

  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(uint8_t size_class) {
    size_t size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    size_t mmap_size = Max(size, kMinMmapSize);
    size_t n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at the right and poison it.
      mmap_size += kPageSize;
    }
    CHECK(n_chunks > 0);
    uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = NULL;
    for (size_t i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uintptr_t)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
    int page_group_idx = AtomicInc(&n_page_groups_) - 1;
    CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
    page_groups_[page_group_idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  int n_page_groups_;  // atomic
  int n_sorted_page_groups_;
};

static MallocInfo malloc_info(LINKER_INITIALIZED);

void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}

static void Describe(uintptr_t addr, size_t access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  AsanStackTrace alloc_stack;
  AsanStackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                                  m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  if (m->free_tid >= 0) {
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    Printf("freed by thread T%d here:\n", free_thread->tid());
    AsanStackTrace free_stack;
    AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                    m->compressed_free_stack_size());
    free_stack.PrintStack();
    Printf("previously allocated by thread T%d here:\n",
           alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    free_thread->Announce();
    alloc_thread->Announce();
  } else {
    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    alloc_thread->Announce();
  }
}

static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  size_t rounded_size = RoundUpTo(size, REDZONE);
  size_t needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
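  // E.g. (a sketch, assuming REDZONE == 128): malloc(10) yields
  // rounded_size == 128 and needed_size == 256, while memalign(512, 10)
  // adds the alignment, yielding needed_size == 768.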
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }

  uint8_t size_class = SizeToSizeClass(needed_size);
  size_t size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (FLAG_v >= 3) {
    Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics.
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = NULL;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // Get directly from the global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // Get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = NULL;
  CHECK(m->Size() == size_to_allocate);
  uintptr_t addr = (uintptr_t)m + REDZONE;
  CHECK(addr == (uintptr_t)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->next = m;
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  m->offset = addr - (uintptr_t)m;
  CHECK(m->beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = AsanThread::kInvalidTid;
  AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                                m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= FLAG_max_malloc_fill_size) {
    REAL(memset)((void*)addr, 0, rounded_size);
  }
  return (uint8_t*)addr;
}

static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (FLAG_debug) {
    CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uintptr_t)ptr);

  // Flip the state atomically to avoid a race on double-free.
  uint16_t old_chunk_state = AtomicExchange(&m->chunk_state, CHUNK_QUARANTINE);

  if (old_chunk_state == CHUNK_QUARANTINE) {
    Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
    stack->PrintStack();
    Describe((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  } else if (old_chunk_state != CHUNK_ALLOCATED) {
    Report("ERROR: AddressSanitizer attempting free on address which was not"
           " malloc()-ed: %p\n", ptr);
    stack->PrintStack();
    ShowStatsAndAbort();
  }
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  CHECK(m->free_tid == AsanThread::kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
  size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    CHECK(!m->next);
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    CHECK(!m->next);
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}

static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
                           AsanStackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  size_t old_size = m->used_size;
  size_t memcpy_size = Min(new_size, old_size);
  uint8_t *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != NULL);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

}  // namespace __asan

// Malloc hooks declaration.
// ASAN_NEW_HOOK(ptr, size) is called immediately after
// allocation of "size" bytes, which returned "ptr".
// ASAN_DELETE_HOOK(ptr) is called immediately before
// deallocation of "ptr".
// If ASAN_NEW_HOOK or ASAN_DELETE_HOOK is defined, the user
// program must provide an implementation of this hook.
// If the macro is undefined, the hook is a no-op.
#ifdef ASAN_NEW_HOOK
extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
#else
static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
#endif

#ifdef ASAN_DELETE_HOOK
extern "C" void ASAN_DELETE_HOOK(void *ptr);
#else
static inline void ASAN_DELETE_HOOK(void *ptr) { }
#endif
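
// For example, a program built with -DASAN_NEW_HOOK=MyNewHook would be
// expected to supply something like the following (a sketch; MyNewHook is a
// hypothetical user-chosen name):
//   extern "C" void MyNewHook(void *ptr, size_t size) {
//     /* record or log the allocation of "size" bytes at "ptr" */
//   }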

namespace __asan {

void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(alignment, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void asan_free(void *ptr, AsanStackTrace *stack) {
  ASAN_DELETE_HOOK(ptr);
  Deallocate((uint8_t*)ptr, stack);
}

void *asan_malloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, nmemb * size, stack);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  ASAN_NEW_HOOK(ptr, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
  if (p == NULL) {
    void *ptr = (void*)Allocate(0, size, stack);
    ASAN_NEW_HOOK(ptr, size);
    return ptr;
  } else if (size == 0) {
    ASAN_DELETE_HOOK(p);
    Deallocate((uint8_t*)p, stack);
    return NULL;
  }
  return Reallocate((uint8_t*)p, size, stack);
}

void *asan_valloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
  size = RoundUpTo(size, kPageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = kPageSize;
  }
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
                        AsanStackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uintptr_t)ptr, alignment));
  ASAN_NEW_HOOK(ptr, size);
  *memptr = ptr;
  return 0;
}

size_t asan_malloc_usable_size(void *ptr, AsanStackTrace *stack) {
  CHECK(stack);
  if (ptr == NULL) return 0;
  size_t usable_size = malloc_info.AllocationSize((uintptr_t)ptr);
  if (usable_size == 0) {
    Report("ERROR: AddressSanitizer attempting to call malloc_usable_size() "
           "for pointer which is not owned: %p\n", ptr);
    stack->PrintStack();
    Describe((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  }
  return usable_size;
}

size_t asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uintptr_t)ptr);
}

void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
  Describe(addr, access_size);
}

void asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}

// ---------------------- Fake stack -------------------- {{{1
FakeStack::FakeStack() {
  CHECK(REAL(memset) != NULL);
  REAL(memset)(this, 0, sizeof(*this));
}

bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
  uintptr_t mem = allocated_size_classes_[size_class];
  uintptr_t size = ClassMmapSize(size_class);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;
}

uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
  }
  return 0;
}

// We may want to compute this during compilation.
inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
  size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
  size_t log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  if (!(alloc_size > (1UL << (log-1)))) {
    Printf("alloc_size %zu log %zu\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log-1)));
  size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}
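
// E.g. ComputeSizeClass(100) rounds the request up to 128 == 1 << 7, so the
// result is 7 - kMinStackFrameSizeLog (clamped to class 0 for requests
// smaller than the minimal frame); ClassSize(res) is then guaranteed to fit
// the request.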

void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}

FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}

void FakeStack::Init(size_t stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}

void FakeStack::Cleanup() {
  alive_ = false;
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    uintptr_t mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      AsanUnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}

size_t FakeStack::ClassMmapSize(size_t size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}

void FakeStack::AllocateOneSizeClass(size_t size_class) {
  CHECK(ClassMmapSize(size_class) >= kPageSize);
  uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
  //        asanThreadRegistry().GetCurrent()->tid(),
  //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //        ClassMmapSize(size_class));
  size_t i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}

uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  size_t size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uintptr_t ptr = (uintptr_t)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}

void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  size_t size = fake_frame->size_minus_one + 1;
  size_t size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uintptr_t ptr = (uintptr_t)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}

void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
  CHECK(fake_frame->descr != 0);
  CHECK(fake_frame->size_minus_one == size - 1);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

size_t __asan_stack_malloc(size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
  return ptr;
}

void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, size, real_stack);
  }
}
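
// Conceptually, compiler-instrumented code uses this pair roughly as follows
// (a sketch of the intent, not the exact codegen):
//   void foo() {
//     char frame[kFrameSize];  // the real stack frame
//     size_t fake = __asan_stack_malloc(kFrameSize, (size_t)frame);
//     // ... locals live in the fake frame, surrounded by poisoned redzones.
//     __asan_stack_free(fake, kFrameSize, (size_t)frame);
//   }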

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
size_t __asan_get_estimated_allocated_size(size_t size) {
  if (size == 0) return 1;
  return Min(size, kMaxAllowedMallocSize);
}

bool __asan_get_ownership(const void *p) {
  return malloc_info.AllocationSize((uintptr_t)p) > 0;
}

size_t __asan_get_allocated_size(const void *p) {
  if (p == NULL) return 0;
  size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    Report("ERROR: AddressSanitizer attempting to call "
           "__asan_get_allocated_size() for pointer which is "
           "not owned: %p\n", p);
    PRINT_CURRENT_STACK();
    Describe((uintptr_t)p, 1);
    ShowStatsAndAbort();
  }
  return allocated_size;
}