//===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned by REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1 bytes).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to the quarantine (a FIFO list).
// After quarantine, a chunk is returned to the freelists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
//
//===----------------------------------------------------------------------===//
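//
// For illustration (a sketch, not normative; assuming REDZONE == 128 and a
// 100-byte allocation), a chunk is laid out roughly like this:
//
//   | left redzone (128 bytes) | user memory (100) | right redzone (28) |
//   ^-- AsanChunk header + compressed malloc stack trace
//                              ^-- beg(), the pointer returned to the user
//
// The 28-byte right redzone pads the chunk so that its end is REDZONE-aligned.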

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"

#include <sys/mman.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

namespace __asan {

#define REDZONE FLAG_redzone
static const size_t kMinAllocSize = REDZONE * 2;
static const size_t kMinMmapSize = 4UL << 20;  // 4M
static const uint64_t kMaxAvailableRam = 128ULL << 30;  // 128G
static const size_t kMaxThreadLocalQuarantine = 1 << 20;  // 1M
static const size_t kMaxSizeForThreadLocalFreeList = 1 << 17;

// Sizes below kMallocSizeClassStep get power-of-two size classes.
// All other size classes are multiples of kMallocSizeClassStep.
static const size_t kMallocSizeClassStepLog = 26;
static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

#if __WORDSIZE == 32
static const size_t kMaxAllowedMallocSize = 3UL << 30;  // 3G
#else
static const size_t kMaxAllowedMallocSize = 8UL << 30;  // 8G
#endif

static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
  return (a & (alignment - 1)) == 0;
}

static inline size_t Log2(size_t x) {
  CHECK(IsPowerOfTwo(x));
  return __builtin_ctzl(x);
}

static inline size_t RoundUpToPowerOfTwo(size_t size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;
  size_t up = __WORDSIZE - __builtin_clzl(size);
  CHECK(size < (1ULL << up));
  CHECK(size > (1ULL << (up - 1)));
  return 1UL << up;
}

static inline size_t SizeClassToSize(uint8_t size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  } else {
    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
  }
}

static inline uint8_t SizeToSizeClass(size_t size) {
  uint8_t res = 0;
  if (size <= kMallocSizeClassStep) {
    size_t rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
        + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}
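
// Worked example (illustrative only): with kMallocSizeClassStepLog == 26,
// sizes up to 64M get power-of-two size classes, e.g. SizeToSizeClass(100)
// rounds 100 up to 128 and returns Log2(128) == 7, and SizeClassToSize(7)
// is 128. A 200M request maps to class (200M + 64M - 1) / 64M + 26 == 30,
// and SizeClassToSize(30) == (30 - 26) * 64M == 256M.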

// Given REDZONE bytes, we need to mark the first 'size' bytes as addressable
// and the remaining REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}
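
// Illustrative sketch (assuming SHADOW_GRANULARITY == 8 and REDZONE == 32):
// poisoning with size == 20 yields shadow bytes [0, 0, 4, magic]: the first
// 16 bytes are fully addressable, the next shadow byte records that only 4
// of its 8 bytes are addressable, and the last 8 bytes carry
// kAsanHeapRightRedzoneMagic.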

static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
  CHECK(IsAligned(size, kPageSize));
  uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
  PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
  if (FLAG_debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case 'next' contains the address
// of the AsanChunk).
//
// The magic numbers for the enum values are chosen randomly.
enum {
  CHUNK_AVAILABLE = 0x573B,
  CHUNK_ALLOCATED = 0x3204,
  CHUNK_QUARANTINE = 0x1978,
  CHUNK_MEMALIGN = 0xDC68,
};
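
// The resulting lifecycle is a simple cycle:
//
//   CHUNK_AVAILABLE --Allocate--> CHUNK_ALLOCATED --Deallocate-->
//   CHUNK_QUARANTINE --QuarantinePop--> CHUNK_AVAILABLE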
142
143struct ChunkBase {
144 uint16_t chunk_state;
145 uint8_t size_class;
146 uint32_t offset; // User-visible memory starts at this+offset (beg()).
147 int32_t alloc_tid;
148 int32_t free_tid;
149 size_t used_size; // Size requested by the user.
150 AsanChunk *next;
151
152 uintptr_t beg() { return (uintptr_t)this + offset; }
153 size_t Size() { return SizeClassToSize(size_class); }
154 uint8_t SizeClass() { return size_class; }
155};
156
157struct AsanChunk: public ChunkBase {
158 uint32_t *compressed_alloc_stack() {
159 CHECK(REDZONE >= sizeof(ChunkBase));
160 return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
161 }
162 uint32_t *compressed_free_stack() {
163 CHECK(REDZONE >= sizeof(ChunkBase));
164 return (uint32_t*)((uintptr_t)this + REDZONE);
165 }
166
167 // The left redzone after the ChunkBase is given to the alloc stack trace.
168 size_t compressed_alloc_stack_size() {
169 return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
170 }
171 size_t compressed_free_stack_size() {
172 return (REDZONE) / sizeof(uint32_t);
173 }
174
175 bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
176 if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
177 *offset = addr - beg();
178 return true;
179 }
180 return false;
181 }
182
183 bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
184 if (addr < beg()) {
185 *offset = beg() - addr;
186 return true;
187 }
188 return false;
189 }
190
191 bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
192 if (addr + access_size >= beg() + used_size) {
193 if (addr <= beg() + used_size)
194 *offset = 0;
195 else
196 *offset = addr - (beg() + used_size);
197 return true;
198 }
199 return false;
200 }
201
202 void DescribeAddress(uintptr_t addr, size_t access_size) {
203 size_t offset;
204 Printf("%p is located ", addr);
205 if (AddrIsInside(addr, access_size, &offset)) {
206 Printf("%ld bytes inside of", offset);
207 } else if (AddrIsAtLeft(addr, access_size, &offset)) {
208 Printf("%ld bytes to the left of", offset);
209 } else if (AddrIsAtRight(addr, access_size, &offset)) {
210 Printf("%ld bytes to the right of", offset);
211 } else {
212 Printf(" somewhere around (this is AddressSanitizer bug!)");
213 }
214 Printf(" %lu-byte region [%p,%p)\n",
215 used_size, beg(), beg() + used_size);
216 }
217};
218
219static AsanChunk *PtrToChunk(uintptr_t ptr) {
220 AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
221 if (m->chunk_state == CHUNK_MEMALIGN) {
222 m = m->next;
223 }
224 return m;
225}

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
  }
  size_ += q->size();
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  CHECK(n->next == NULL);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = n;
    last_ = n;
  } else {
    CHECK(!first_);
    last_ = first_ = n;
  }
  size_ += n->Size();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ was evicted from the cache
// a long time ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == NULL)
    last_ = NULL;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  if (last_) {
    CHECK(!last_->next);
  }
  return res;
}

// All pages we ever allocated.
struct PageGroup {
  uintptr_t beg;
  uintptr_t end;
  size_t size_of_chunk;
  uintptr_t last_chunk;
  bool InRange(uintptr_t addr) {
    return addr >= beg && addr < end;
  }
};

class MallocInfo {
 public:
  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
    AsanChunk *m = NULL;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (size_t i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(FLAG_quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > FLAG_quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }

  // TODO(glider): AllocationSize() may become very slow if the size of
  // page_groups_ grows. This can be fixed by increasing kMinMmapSize,
  // but a better solution is to speed up the search somehow.
  size_t AllocationSize(uintptr_t ptr) {
    ScopedLock lock(&mu_);

    // First, check if this is our memory.
    PageGroup *g = FindPageGroupUnlocked(ptr);
    if (!g) return 0;
    AsanChunk *m = PtrToChunk(ptr);
    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  void PrintStatus() {
    ScopedLock lock(&mu_);
    size_t malloced = 0;

    Printf(" MallocInfo: in quarantine: %ld malloced: %ld; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      size_t t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%ld:%ld ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uintptr_t addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }
 private:
  PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
    for (int i = 0; i < n_page_groups_; i++) {
      PageGroup *g = page_groups_[i];
      if (g->InRange(addr)) {
        return g;
      }
    }
    return NULL;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uintptr_t addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from the quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    size_t l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  AsanChunk *FindChunkByAddr(uintptr_t addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uintptr_t offset_from_beg = addr - g->beg;
    uintptr_t this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    size_t offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m->AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }

  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    size_t size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }

  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(uint8_t size_class) {
    size_t size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    size_t mmap_size = Max(size, kMinMmapSize);
    size_t n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at the right and poison it.
      mmap_size += kPageSize;
    }
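    // Illustrative numbers (a sketch, not normative): for size == 32 this
    // maps kMinMmapSize == 4M and yields 4M/32 - 1 == 131071 usable chunks;
    // the sacrificed last slot stays poisoned and later holds the PageGroup
    // descriptor. For size == 8M it maps 8M plus one extra poisoned page,
    // and the descriptor lands in that page.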
    CHECK(n_chunks > 0);
    uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = NULL;
    for (size_t i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uintptr_t)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
    int page_group_idx = AtomicInc(&n_page_groups_) - 1;
    CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
    page_groups_[page_group_idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  int n_page_groups_;  // atomic
};

static MallocInfo malloc_info(LINKER_INITIALIZED);

void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}

static void Describe(uintptr_t addr, size_t access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  AsanStackTrace alloc_stack;
  AsanStackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                                  m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  if (m->free_tid >= 0) {
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    Printf("freed by thread T%d here:\n", free_thread->tid());
    AsanStackTrace free_stack;
    AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                    m->compressed_free_stack_size());
    free_stack.PrintStack();
    Printf("previously allocated by thread T%d here:\n",
           alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    free_thread->Announce();
    alloc_thread->Announce();
  } else {
    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    alloc_thread->Announce();
  }
}

static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  size_t rounded_size = RoundUpTo(size, REDZONE);
  size_t needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
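  // Worked example (illustrative; assuming REDZONE == 128): a 100-byte,
  // default-aligned request gives rounded_size == 128 and needed_size == 256:
  // one REDZONE for the left redzone plus the REDZONE-rounded body, whose
  // 28 bytes of padding double as the right redzone.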
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", size);
    return 0;
  }

  uint8_t size_class = SizeToSizeClass(needed_size);
  size_t size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (FLAG_v >= 2) {
    Printf("Allocate align: %ld size: %ld class: %d real: %ld\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics.
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = NULL;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // Get directly from the global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // Get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = NULL;
  CHECK(m->Size() == size_to_allocate);
  uintptr_t addr = (uintptr_t)m + REDZONE;
  CHECK(addr == (uintptr_t)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->next = m;
  }
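  // Rough picture for the memalign case (illustrative): addr was rounded up
  // inside the chunk, and the stub header p at addr - REDZONE lets
  // PtrToChunk(addr) follow p->next back to the real header m:
  //
  //   [ m: AsanChunk header | ... | p: CHUNK_MEMALIGN stub | user memory ]
  //                                  p->next == m            ^-- addr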
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  m->offset = addr - (uintptr_t)m;
  CHECK(m->beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = AsanThread::kInvalidTid;
  AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                                m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= FLAG_max_malloc_fill_size) {
    real_memset((void*)addr, 0, rounded_size);
  }
  return (uint8_t*)addr;
}

static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (FLAG_debug) {
    CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uintptr_t)ptr);
  if (m->chunk_state == CHUNK_QUARANTINE) {
    Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
    stack->PrintStack();
    m->DescribeAddress((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  } else if (m->chunk_state != CHUNK_ALLOCATED) {
    Report("ERROR: AddressSanitizer attempting free on address which was not"
           " malloc()-ed: %p\n", ptr);
    stack->PrintStack();
    ShowStatsAndAbort();
  }
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  CHECK(m->free_tid == AsanThread::kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
  size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  m->chunk_state = CHUNK_QUARANTINE;
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    CHECK(!m->next);
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    CHECK(!m->next);
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}
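
// Note on the flow above: frees first land in the per-thread quarantine_ and
// are flushed into the global quarantine once they exceed
// kMaxThreadLocalQuarantine; the global quarantine is in turn trimmed back to
// FLAG_quarantine_size via QuarantinePop, at which point a chunk finally
// becomes CHUNK_AVAILABLE again.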

static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
                           AsanStackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  size_t old_size = m->used_size;
  size_t memcpy_size = Min(new_size, old_size);
  uint8_t *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    real_memcpy(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

}  // namespace __asan

// Malloc hooks declaration.
// ASAN_NEW_HOOK(ptr, size) is called immediately after
// allocation of "size" bytes, which returned "ptr".
// ASAN_DELETE_HOOK(ptr) is called immediately before
// deallocation of "ptr".
// If ASAN_NEW_HOOK or ASAN_DELETE_HOOK is defined, the user
// program must provide an implementation of that hook.
// If the macro is undefined, the hook is a no-op.
#ifdef ASAN_NEW_HOOK
extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
#else
static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
#endif

#ifdef ASAN_DELETE_HOOK
extern "C" void ASAN_DELETE_HOOK(void *ptr);
#else
static inline void ASAN_DELETE_HOOK(void *ptr) { }
#endif
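
// For instance (a hypothetical user-side sketch, not part of this file), a
// program built with -DASAN_NEW_HOOK=MyNewHook could provide:
//
//   extern "C" void MyNewHook(void *ptr, size_t size) {
//     fprintf(stderr, "allocated %zu bytes at %p\n", size, ptr);
//   }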

namespace __asan {

void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(alignment, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void asan_free(void *ptr, AsanStackTrace *stack) {
  ASAN_DELETE_HOOK(ptr);
  Deallocate((uint8_t*)ptr, stack);
}

void *asan_malloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, nmemb * size, stack);
  if (ptr)
    real_memset(ptr, 0, nmemb * size);
  ASAN_NEW_HOOK(ptr, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
  if (p == NULL) {
    void *ptr = (void*)Allocate(0, size, stack);
    ASAN_NEW_HOOK(ptr, size);
    return ptr;
  } else if (size == 0) {
    ASAN_DELETE_HOOK(p);
    Deallocate((uint8_t*)p, stack);
    return NULL;
  }
  return Reallocate((uint8_t*)p, size, stack);
}

void *asan_valloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
  size = RoundUpTo(size, kPageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = kPageSize;
  }
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
                        AsanStackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uintptr_t)ptr, alignment));
  ASAN_NEW_HOOK(ptr, size);
  *memptr = ptr;
  return 0;
}

size_t __asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uintptr_t)ptr);
}

void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
  Describe(addr, access_size);
}

void __asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void __asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}

// ---------------------- Fake stack -------------------- {{{1
FakeStack::FakeStack() {
  CHECK(real_memset);
  real_memset(this, 0, sizeof(*this));
}

bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
  uintptr_t mem = allocated_size_classes_[size_class];
  uintptr_t size = ClassMmapSize(size_class);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;
}

uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
  }
  return 0;
}

// We may want to compute this during compilation.
inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
  size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
  size_t log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  if (!(alloc_size > (1UL << (log-1)))) {
    Printf("alloc_size %ld log %ld\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log-1)));
  size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}
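
// E.g. (illustrative; assuming kMinStackFrameSizeLog == 6, i.e. 64-byte
// minimum frames): a 100-byte frame rounds up to 128, log == 7, and it falls
// into size class 7 - 6 == 1, for which ClassSize(1) >= 128 must hold.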

void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}

FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}

void FakeStack::Init(size_t stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}

void FakeStack::Cleanup() {
  alive_ = false;
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    uintptr_t mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      AsanUnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}

size_t FakeStack::ClassMmapSize(size_t size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}

void FakeStack::AllocateOneSizeClass(size_t size_class) {
  CHECK(ClassMmapSize(size_class) >= kPageSize);
  uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%ld]: %p-%p mmap %ld\n",
  //        asanThreadRegistry().GetCurrent()->tid(),
  //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //        ClassMmapSize(size_class));
  size_t i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}

uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  size_t size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uintptr_t ptr = (uintptr_t)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}

void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  size_t size = fake_frame->size_minus_one + 1;
  size_t size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uintptr_t ptr = (uintptr_t)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}

void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  // Note: this was an assignment ('=') in the original, which made the CHECK
  // vacuous and clobbered the magic; it should be a comparison.
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
  CHECK(fake_frame->descr != 0);
  CHECK(fake_frame->size_minus_one == size - 1);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

size_t __asan_stack_malloc(size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %ld %p\n", ptr, size, real_stack);
  return ptr;
}

void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, size, real_stack);
  }
}

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
size_t __asan_get_estimated_allocated_size(size_t size) {
  if (size == 0) return 1;
  return Min(size, kMaxAllowedMallocSize);
}

bool __asan_get_ownership(const void *p) {
  return (p == NULL) ||
         (malloc_info.AllocationSize((uintptr_t)p) > 0);
}

size_t __asan_get_allocated_size(const void *p) {
  if (p == NULL) return 0;
  size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    Printf("__asan_get_allocated_size failed, ptr=%p is not owned\n", p);
    PRINT_CURRENT_STACK();
    ShowStatsAndAbort();
  }
  return allocated_size;
}