//===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned by REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1 bytes).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to the quarantine (a FIFO list).
// After quarantine, a chunk is returned to the freelists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
//
//===----------------------------------------------------------------------===//
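//
// Illustrative sketch of the layout described above (not part of the
// allocator itself; sizes are examples only). For an 8-byte malloc with
// REDZONE == 128, the chunk returned by Allocate() looks roughly like:
//
//   [AsanChunk header + compressed alloc stack]  <- left redzone, REDZONE bytes
//   [user memory, 8 bytes][poisoned tail]        <- body, rounded up to REDZONE
//   [poisoned right redzone up to the chunk end]
//
//   uintptr_t chunk_beg = ...;                 // start of the AsanChunk
//   uintptr_t user_beg  = chunk_beg + REDZONE; // what malloc() returns
//   // Bytes [user_beg + 8, chunk_end) remain poisoned.
//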

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"

#include <sys/mman.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <algorithm>

namespace __asan {

#define REDZONE FLAG_redzone
static const size_t kMinAllocSize = REDZONE * 2;
static const size_t kMinMmapSize = 4UL << 20;  // 4M
static const uint64_t kMaxAvailableRam = 128ULL << 30;  // 128G
static const size_t kMaxThreadLocalQuarantine = 1 << 20;  // 1M
static const size_t kMaxSizeForThreadLocalFreeList = 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const size_t kMallocSizeClassStepLog = 26;
static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

#if __WORDSIZE == 32
static const size_t kMaxAllowedMallocSize = 3UL << 30;  // 3G
#else
static const size_t kMaxAllowedMallocSize = 8UL << 30;  // 8G
#endif

static void OutOfMemoryMessage(const char *mem_type, size_t size) {
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  Report("ERROR: AddressSanitizer failed to allocate "
         "0x%lx (%lu) bytes (%s) in T%d\n",
         size, size, mem_type, t->tid());
}

static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
  return (a & (alignment - 1)) == 0;
}

static inline bool IsPowerOfTwo(size_t x) {
  return (x & (x - 1)) == 0;
}

static inline size_t Log2(size_t x) {
  CHECK(IsPowerOfTwo(x));
  return __builtin_ctzl(x);
}

static inline size_t RoundUpTo(size_t size, size_t boundary) {
  CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

static inline size_t RoundUpToPowerOfTwo(size_t size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;
  size_t up = __WORDSIZE - __builtin_clzl(size);
  CHECK(size < (1ULL << up));
  CHECK(size > (1ULL << (up - 1)));
  return 1UL << up;
}

static inline size_t SizeClassToSize(uint8_t size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  } else {
    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
  }
}

static inline uint8_t SizeToSizeClass(size_t size) {
  uint8_t res = 0;
  if (size <= kMallocSizeClassStep) {
    size_t rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
          + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}

static void PoisonShadow(uintptr_t mem, size_t size, uint8_t poison) {
  CHECK(IsAligned(mem, SHADOW_GRANULARITY));
  CHECK(IsAligned(mem + size, SHADOW_GRANULARITY));
  uintptr_t shadow_beg = MemToShadow(mem);
  uintptr_t shadow_end = MemToShadow(mem + size);
  real_memset((void*)shadow_beg, poison, shadow_end - shadow_beg);
}

// Given REDZONE bytes, we need to mark the first 'size' bytes
// as addressable and the remaining REDZONE-size bytes as unaddressable.
static void PoisonMemoryPartialRightRedzone(uintptr_t mem, size_t size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  uint8_t *shadow = (uint8_t*)MemToShadow(mem);
  PoisonShadowPartialRightRedzone(shadow, size,
                                  REDZONE, SHADOW_GRANULARITY,
                                  kAsanHeapRightRedzoneMagic);
}

static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
  CHECK(IsAligned(size, kPageSize));
  uint8_t *res = (uint8_t*)asan_mmap(0, size,
                                     PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE | MAP_ANON, -1, 0);
  if (res == (uint8_t*)-1) {
    OutOfMemoryMessage(__FUNCTION__, size);
    PRINT_CURRENT_STACK();
    ASAN_DIE;
  }
  PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
  if (FLAG_debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case 'next' contains the address
// of the AsanChunk).
//
// The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE = 0x573B,
  CHUNK_ALLOCATED = 0x3204,
  CHUNK_QUARANTINE = 0x1978,
  CHUNK_MEMALIGN = 0xDC68,
};
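
// Lifecycle sketch, as implemented by the functions below:
//   GetNewChunks()              : fresh mmap'ed chunk -> CHUNK_AVAILABLE
//   Allocate()                  : CHUNK_AVAILABLE     -> CHUNK_ALLOCATED
//   Deallocate()                : CHUNK_ALLOCATED     -> CHUNK_QUARANTINE
//   MallocInfo::QuarantinePop() : CHUNK_QUARANTINE    -> CHUNK_AVAILABLE
//                                 (and back onto a free list)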

struct ChunkBase {
  uint16_t chunk_state;
  uint8_t size_class;
  uint32_t offset;  // User-visible memory starts at this+offset (beg()).
  int32_t alloc_tid;
  int32_t free_tid;
  size_t used_size;  // Size requested by the user.
  AsanChunk *next;

  uintptr_t beg() { return (uintptr_t)this + offset; }
  size_t Size() { return SizeClassToSize(size_class); }
  uint8_t SizeClass() { return size_class; }
};

struct AsanChunk: public ChunkBase {
  uint32_t *compressed_alloc_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
  }
  uint32_t *compressed_free_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + REDZONE);
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  size_t compressed_alloc_stack_size() {
    return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
  }
  size_t compressed_free_stack_size() {
    return (REDZONE) / sizeof(uint32_t);
  }

  bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
      *offset = addr - beg();
      return true;
    }
    return false;
  }

  bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr < beg()) {
      *offset = beg() - addr;
      return true;
    }
    return false;
  }

  bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr + access_size >= beg() + used_size) {
      if (addr <= beg() + used_size)
        *offset = 0;
      else
        *offset = addr - (beg() + used_size);
      return true;
    }
    return false;
  }

  void DescribeAddress(uintptr_t addr, size_t access_size) {
    size_t offset;
    Printf("%p is located ", addr);
    if (AddrIsInside(addr, access_size, &offset)) {
      Printf("%ld bytes inside of", offset);
    } else if (AddrIsAtLeft(addr, access_size, &offset)) {
      Printf("%ld bytes to the left of", offset);
    } else if (AddrIsAtRight(addr, access_size, &offset)) {
      Printf("%ld bytes to the right of", offset);
    } else {
      Printf(" somewhere around (this is an AddressSanitizer bug!)");
    }
    Printf(" %lu-byte region [%p,%p)\n",
           used_size, beg(), beg() + used_size);
  }
};
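
// Sketch of where the compressed stack traces live for an ordinary
// (non-memalign) chunk, derived from the accessors above:
//
//   [ChunkBase][compressed alloc stack ....][user memory ............]
//   ^this      ^this + sizeof(ChunkBase)    ^this + REDZONE == beg()
//
// compressed_free_stack() points at beg(): once the chunk is freed the user
// body is dead, so it is reused to store the compressed free stack trace.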

static AsanChunk *PtrToChunk(uintptr_t ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = m->next;
  }
  return m;
}


void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
  }
  size_ += q->size();
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  CHECK(n->next == NULL);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = n;
    last_ = n;
  } else {
    CHECK(!first_);
    last_ = first_ = n;
  }
  size_ += n->Size();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ was evicted from the cache
// long ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == NULL)
    last_ = NULL;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  if (last_) {
    CHECK(!last_->next);
  }
  return res;
}

// All pages we ever allocated.
struct PageGroup {
  uintptr_t beg;
  uintptr_t end;
  size_t size_of_chunk;
  uintptr_t last_chunk;
  bool InRange(uintptr_t addr) {
    return addr >= beg && addr < end;
  }
};
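
// Sketch of how FindChunkByAddr() below uses a PageGroup (numbers are made up
// for illustration): if g->beg == 0x100000, g->size_of_chunk == 0x1000 and
// addr == 0x102468, then
//   offset_from_beg = addr - g->beg;                          // 0x2468
//   chunk_addr = g->beg
//       + (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;  // 0x102000
// Chunks in a page group are laid out back-to-back with one fixed size, so
// integer division recovers the chunk that covers (or neighbors) the address.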

class MallocInfo {
 public:

  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
    AsanChunk *m = NULL;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (size_t i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(FLAG_quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > FLAG_quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }

  // TODO(glider): AllocationSize() may become very slow if the size of
  // page_groups_ grows. This can be fixed by increasing kMinMmapSize,
  // but a better solution is to speed up the search somehow.
  size_t AllocationSize(uintptr_t ptr) {
    ScopedLock lock(&mu_);

    // First, check if this is our memory.
    PageGroup *g = FindPageGroupUnlocked(ptr);
    if (!g) return 0;
    AsanChunk *m = PtrToChunk(ptr);
    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  void PrintStatus() {
    ScopedLock lock(&mu_);
    size_t malloced = 0;

    Printf(" MallocInfo: in quarantine: %ld malloced: %ld; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      size_t t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%ld:%ld ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uintptr_t addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
    for (int i = 0; i < n_page_groups_; i++) {
      PageGroup *g = page_groups_[i];
      if (g->InRange(addr)) {
        return g;
      }
    }
    return NULL;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uintptr_t addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    uintptr_t l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  AsanChunk *FindChunkByAddr(uintptr_t addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uintptr_t offset_from_beg = addr - g->beg;
    uintptr_t this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    uintptr_t offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m->AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }

  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    size_t size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    if (FLAG_stats) {
      AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
      thread_stats.real_frees++;
      thread_stats.really_freed += m->used_size;
      thread_stats.really_freed_redzones += m->Size() - m->used_size;
      thread_stats.really_freed_by_size[m->SizeClass()]++;
    }
  }

  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(uint8_t size_class) {
    size_t size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    size_t mmap_size = std::max(size, kMinMmapSize);
    size_t n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at right and poison it.
      mmap_size += kPageSize;
    }
    CHECK(n_chunks > 0);
    uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);
    if (FLAG_stats) {
      AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
      thread_stats.mmaps++;
      thread_stats.mmaped += mmap_size;
      thread_stats.mmaped_by_size[size_class] += n_chunks;
    }
    AsanChunk *res = NULL;
    for (size_t i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uintptr_t)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
    int page_group_idx = AtomicInc(&n_page_groups_) - 1;
    CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
    page_groups_[page_group_idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  int n_page_groups_;  // atomic
};
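
// Sizing note (derived from the constants above): page_groups_ holds
// kMaxAvailableRam / kMinMmapSize == (128ULL << 30) / (4UL << 20) == 32768
// pointers, i.e. one slot per minimal 4M mmap, covering up to 128G of heap.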

static MallocInfo malloc_info(LINKER_INITIALIZED);

void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}

static void Describe(uintptr_t addr, size_t access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  AsanStackTrace alloc_stack;
  AsanStackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                                  m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  if (m->free_tid >= 0) {
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    Printf("freed by thread T%d here:\n", free_thread->tid());
    AsanStackTrace free_stack;
    AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                    m->compressed_free_stack_size());
    free_stack.PrintStack();
    Printf("previously allocated by thread T%d here:\n",
           alloc_thread->tid());

    alloc_stack.PrintStack();
    t->summary()->Announce();
    free_thread->Announce();
    alloc_thread->Announce();
  } else {
    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    alloc_thread->Announce();
  }
}

static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  size_t rounded_size = RoundUpTo(size, REDZONE);
  size_t needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", size);
    return 0;
  }

  uint8_t size_class = SizeToSizeClass(needed_size);
  size_t size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (FLAG_v >= 2) {
    Printf("Allocate align: %ld size: %ld class: %d real: %ld\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  if (FLAG_stats) {
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += size_to_allocate - size;
    thread_stats.malloced_by_size[size_class]++;
  }

  AsanChunk *m = NULL;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // Get directly from the global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    if (FLAG_stats) {
      thread_stats.malloc_large++;
    }
  } else {
    // Get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      // n_new_chunks = std::min((size_t)32, n_new_chunks);
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      if (FLAG_stats) {
        thread_stats.malloc_small_slow++;
      }
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = NULL;
  CHECK(m->Size() == size_to_allocate);
  uintptr_t addr = (uintptr_t)m + REDZONE;
  CHECK(addr == (uintptr_t)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->next = m;
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  m->offset = addr - (uintptr_t)m;
  CHECK(m->beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = AsanThread::kInvalidTid;
  AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                                m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonMemoryPartialRightRedzone(addr + rounded_size - REDZONE,
                                    size & (REDZONE - 1));
  }
  if (size <= FLAG_max_malloc_fill_size) {
    real_memset((void*)addr, 0, rounded_size);
  }
  return (uint8_t*)addr;
}
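
// Sketch of the memalign case handled above (illustrative numbers): with
// REDZONE == 128 and alignment == 4096, 'addr' may start at chunk + 128 and
// is rounded up to the next 4096 boundary. The REDZONE bytes immediately
// before the returned address are then stamped CHUNK_MEMALIGN and their
// 'next' field points back at the real AsanChunk, which is how PtrToChunk()
// finds the owning chunk again on free().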

static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (FLAG_debug) {
    CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uintptr_t)ptr);
  if (m->chunk_state == CHUNK_QUARANTINE) {
    Printf("attempting double-free on %p:\n", ptr);
    stack->PrintStack();
    m->DescribeAddress((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  } else if (m->chunk_state != CHUNK_ALLOCATED) {
    Printf("attempting free on address which was not malloc()-ed: %p\n", ptr);
    stack->PrintStack();
    ShowStatsAndAbort();
  }
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  CHECK(m->free_tid == AsanThread::kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
  size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);

  if (FLAG_stats) {
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->used_size;
    thread_stats.freed_by_size[m->SizeClass()]++;
  }

  m->chunk_state = CHUNK_QUARANTINE;
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    CHECK(!m->next);
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    CHECK(!m->next);
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}
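
// Quarantine flow implied by the code above (sketch): each thread batches
// freed chunks in its local FIFO until it holds more than
// kMaxThreadLocalQuarantine (1 << 20) bytes; the batch is then pushed into
// the global quarantine under a single lock, and the global list is trimmed
// back to FLAG_quarantine_size by QuarantinePop(), which finally recycles
// chunks onto the free lists.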

static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
                           AsanStackTrace *stack) {
  CHECK(old_ptr && new_size);
  if (FLAG_stats) {
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;
  }
  AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  size_t old_size = m->used_size;
  size_t memcpy_size = std::min(new_size, old_size);
  uint8_t *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    real_memcpy(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

}  // namespace __asan

// Malloc hook declarations.
// ASAN_NEW_HOOK(ptr, size) is called immediately after
// allocation of "size" bytes, which returned "ptr".
// ASAN_DELETE_HOOK(ptr) is called immediately before
// deallocation of "ptr".
// If ASAN_NEW_HOOK or ASAN_DELETE_HOOK is defined, the user
// program must provide an implementation of that hook.
// If a macro is undefined, the corresponding hook is a no-op.
#ifdef ASAN_NEW_HOOK
extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
#else
static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
#endif

#ifdef ASAN_DELETE_HOOK
extern "C" void ASAN_DELETE_HOOK(void *ptr);
#else
static inline void ASAN_DELETE_HOOK(void *ptr) { }
#endif
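
// Example (hypothetical user code, not part of this file): if the ASan
// runtime were built with ASAN_NEW_HOOK defined to MyNewHook and
// ASAN_DELETE_HOOK defined to MyDeleteHook, the user program could log every
// heap event by providing:
//
//   extern "C" void MyNewHook(void *ptr, size_t size) {
//     fprintf(stderr, "new %p (%zu bytes)\n", ptr, size);
//   }
//   extern "C" void MyDeleteHook(void *ptr) {
//     fprintf(stderr, "delete %p\n", ptr);
//   }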

namespace __asan {

void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(alignment, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void asan_free(void *ptr, AsanStackTrace *stack) {
  ASAN_DELETE_HOOK(ptr);
  Deallocate((uint8_t*)ptr, stack);
}

void *asan_malloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, nmemb * size, stack);
  if (ptr)
    real_memset(ptr, 0, nmemb * size);
  ASAN_NEW_HOOK(ptr, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
  if (p == NULL) {
    void *ptr = (void*)Allocate(0, size, stack);
    ASAN_NEW_HOOK(ptr, size);
    return ptr;
  } else if (size == 0) {
    ASAN_DELETE_HOOK(p);
    Deallocate((uint8_t*)p, stack);
    return NULL;
  }
  return Reallocate((uint8_t*)p, size, stack);
}

void *asan_valloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
  size = RoundUpTo(size, kPageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = kPageSize;
  }
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
                        AsanStackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uintptr_t)ptr, alignment));
  ASAN_NEW_HOOK(ptr, size);
  *memptr = ptr;
  return 0;
}

size_t __asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uintptr_t)ptr);
}

void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
  Describe(addr, access_size);
}

void __asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void __asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}

// ---------------------- Fake stack -------------------- {{{1
FakeStack::FakeStack() {
  CHECK(real_memset);
  real_memset(this, 0, sizeof(*this));
}

bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
  uintptr_t mem = allocated_size_classes_[size_class];
  uintptr_t size = ClassMmapSize(size_class);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;
}

uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
  if (!alive_) return 0;
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
  }
  return 0;
}

// We may want to compute this during compilation.
inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
  size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
  size_t log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  if (!(alloc_size > (1UL << (log-1)))) {
    Printf("alloc_size %ld log %ld\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log-1)));
  size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}
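
// Worked example (hypothetical constants, for illustration only): if
// kMinStackFrameSizeLog were 6 (64-byte minimal frames), then
//   ComputeSizeClass(40)  -> rounded to 64,  log == 6, class 0
//   ComputeSizeClass(200) -> rounded to 256, log == 8, class 2
// i.e. each fake-stack size class holds frames of one power-of-two size.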

void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}

FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}

void FakeStack::Init(size_t stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}

void FakeStack::Cleanup() {
  alive_ = false;
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    uintptr_t mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      int munmap_res = munmap((void*)mem, ClassMmapSize(i));
      CHECK(munmap_res == 0);
    }
  }
}

size_t FakeStack::ClassMmapSize(size_t size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}

void FakeStack::AllocateOneSizeClass(size_t size_class) {
  CHECK(ClassMmapSize(size_class) >= kPageSize);
  uintptr_t new_mem = (uintptr_t)asan_mmap(0, ClassMmapSize(size_class),
                                           PROT_READ | PROT_WRITE,
                                           MAP_PRIVATE | MAP_ANON, -1, 0);
  CHECK(new_mem != (uintptr_t)-1);
  // Printf("T%d new_mem[%ld]: %p-%p mmap %ld\n",
  //        asanThreadRegistry().GetCurrent()->tid(),
  //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //        ClassMmapSize(size_class));
  size_t i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}

uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
  CHECK(alive_);
  CHECK(size <= kMaxStackMallocSize && size > 1);
  size_t size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uintptr_t ptr = (uintptr_t)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}

void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  size_t size = fake_frame->size_minus_one + 1;
  size_t size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uintptr_t ptr = (uintptr_t)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}

void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
  CHECK(fake_frame->descr != 0);
  CHECK(fake_frame->size_minus_one == size - 1);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

size_t __asan_stack_malloc(size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %ld %p\n", ptr, size, real_stack);
  return ptr;
}

void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, size, real_stack);
  }
}
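
// Rough sketch of how compiler-instrumented code is expected to use the two
// entry points above (illustrative pseudo-code, not emitted by this file;
// kFrameSize stands for whatever frame size the compiler chose):
//
//   void foo() {
//     size_t real_stack = (size_t)&real_stack;  // some address on the stack
//     size_t frame = __asan_stack_malloc(kFrameSize, real_stack);
//     // ... locals live inside [frame, frame + kFrameSize) ...
//     __asan_stack_free(frame, kFrameSize, real_stack);
//   }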

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
size_t __asan_get_estimated_allocated_size(size_t size) {
  if (size == 0) return 1;
  return std::min(size, kMaxAllowedMallocSize);
}

bool __asan_get_ownership(const void *p) {
  return (p == NULL) ||
         (malloc_info.AllocationSize((uintptr_t)p) > 0);
}

size_t __asan_get_allocated_size(const void *p) {
  if (p == NULL) return 0;
  size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    Printf("__asan_get_allocated_size failed, ptr=%p is not owned\n", p);
    PRINT_CURRENT_STACK();
    ShowStatsAndAbort();
  }
  return allocated_size;
}