blob: 38477c0116398d370a445b962a793701d4830c40 [file] [log] [blame]
//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//
14
#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_list.h"
Kostya Serebryany019b76f2011-11-30 01:07:02 +000021
// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
#ifndef ASAN_ALLOCATOR_VERSION
#define ASAN_ALLOCATOR_VERSION 2
#endif  // ASAN_ALLOCATOR_VERSION
Kostya Serebryany14282a92012-12-10 13:52:55 +000029
Kostya Serebryany019b76f2011-11-30 01:07:02 +000030namespace __asan {
31
// Tags a heap block with the API family that allocated it; passed through
// asan_memalign()/asan_free() (declared below).
enum AllocType {
  FROM_MALLOC = 1,  // Memory block came from malloc, calloc, realloc, etc.
  FROM_NEW    = 2,  // Memory block came from operator new.
  FROM_NEW_BR = 3   // Memory block came from operator new [ ].
};
37
Kostya Serebryany8d032042012-05-31 14:35:53 +000038static const uptr kNumberOfSizeClasses = 255;
Kostya Serebryany9d1eee92011-11-30 17:33:13 +000039struct AsanChunk;
Kostya Serebryany019b76f2011-11-30 01:07:02 +000040
Kostya Serebryany61761f12013-01-28 08:05:47 +000041void InitializeAllocator();
42
Alexey Samsonov86614652012-09-18 07:38:10 +000043class AsanChunkView {
44 public:
45 explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
46 bool IsValid() { return chunk_ != 0; }
47 uptr Beg(); // first byte of user memory.
48 uptr End(); // last byte of user memory.
49 uptr UsedSize(); // size requested by the user.
50 uptr AllocTid();
51 uptr FreeTid();
52 void GetAllocStack(StackTrace *stack);
53 void GetFreeStack(StackTrace *stack);
Evgeniy Stepanov1bc72982013-02-05 14:32:03 +000054 bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
Kostya Serebryany5e2a7ac2012-12-11 09:02:36 +000055 if (addr >= Beg() && (addr + access_size) <= End()) {
56 *offset = addr - Beg();
57 return true;
58 }
59 return false;
60 }
Evgeniy Stepanov1bc72982013-02-05 14:32:03 +000061 bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) {
Alexander Potapenko602a09f2012-12-12 12:32:57 +000062 (void)access_size;
Kostya Serebryany5e2a7ac2012-12-11 09:02:36 +000063 if (addr < Beg()) {
64 *offset = Beg() - addr;
65 return true;
66 }
67 return false;
68 }
Evgeniy Stepanov1bc72982013-02-05 14:32:03 +000069 bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) {
Evgeniy Stepanov0b805cc2013-02-08 12:59:42 +000070 if (addr + access_size > End()) {
Evgeniy Stepanov1bc72982013-02-05 14:32:03 +000071 *offset = addr - End();
Kostya Serebryany5e2a7ac2012-12-11 09:02:36 +000072 return true;
73 }
74 return false;
75 }
76
Alexey Samsonov86614652012-09-18 07:38:10 +000077 private:
78 AsanChunk *const chunk_;
79};
80
81AsanChunkView FindHeapChunkByAddress(uptr address);
82
Kostya Serebryany41ffe3d2012-12-17 07:54:29 +000083// List of AsanChunks with total size.
84class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
Kostya Serebryany019b76f2011-11-30 01:07:02 +000085 public:
86 explicit AsanChunkFifoList(LinkerInitialized) { }
87 AsanChunkFifoList() { clear(); }
88 void Push(AsanChunk *n);
89 void PushList(AsanChunkFifoList *q);
90 AsanChunk *Pop();
Kostya Serebryany8d032042012-05-31 14:35:53 +000091 uptr size() { return size_; }
Kostya Serebryany019b76f2011-11-30 01:07:02 +000092 void clear() {
Kostya Serebryany41ffe3d2012-12-17 07:54:29 +000093 IntrusiveList<AsanChunk>::clear();
Kostya Serebryany019b76f2011-11-30 01:07:02 +000094 size_ = 0;
95 }
96 private:
Kostya Serebryany8d032042012-05-31 14:35:53 +000097 uptr size_;
Kostya Serebryany019b76f2011-11-30 01:07:02 +000098};
99
100struct AsanThreadLocalMallocStorage {
101 explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
Dmitry Vyukovdb0cf872013-01-11 08:07:43 +0000102#if ASAN_ALLOCATOR_VERSION == 1
103 : quarantine_(x)
104#endif
105 { }
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000106 AsanThreadLocalMallocStorage() {
Alexey Samsonove7254782012-02-08 13:45:31 +0000107 CHECK(REAL(memset));
108 REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000109 }
110
Kostya Serebryany4a42cf62012-12-27 14:09:19 +0000111#if ASAN_ALLOCATOR_VERSION == 1
Dmitry Vyukovdb0cf872013-01-11 08:07:43 +0000112 AsanChunkFifoList quarantine_;
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000113 AsanChunk *free_lists_[kNumberOfSizeClasses];
Kostya Serebryany4a42cf62012-12-27 14:09:19 +0000114#else
Dmitry Vyukovdb0cf872013-01-11 08:07:43 +0000115 uptr quarantine_cache[16];
Dmitry Vyukov0d46b2b2013-01-15 10:45:18 +0000116 uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
Kostya Serebryanyec339f72012-12-17 13:43:47 +0000117#endif
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000118 void CommitBack();
119};
120
121// Fake stack frame contains local variables of one function.
122// This struct should fit into a stack redzone (32 bytes).
123struct FakeFrame {
Kostya Serebryany8d032042012-05-31 14:35:53 +0000124 uptr magic; // Modified by the instrumented code.
125 uptr descr; // Modified by the instrumented code.
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000126 FakeFrame *next;
Kostya Serebryany1d35d152012-05-31 15:02:07 +0000127 u64 real_stack : 48;
128 u64 size_minus_one : 16;
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000129};
130
131struct FakeFrameFifo {
132 public:
133 void FifoPush(FakeFrame *node);
134 FakeFrame *FifoPop();
135 private:
136 FakeFrame *first_, *last_;
137};
138
139class FakeFrameLifo {
140 public:
141 void LifoPush(FakeFrame *node) {
142 node->next = top_;
143 top_ = node;
144 }
145 void LifoPop() {
146 CHECK(top_);
147 top_ = top_->next;
148 }
149 FakeFrame *top() { return top_; }
150 private:
151 FakeFrame *top_;
152};
153
154// For each thread we create a fake stack and place stack objects on this fake
155// stack instead of the real stack. The fake stack is not really a stack but
156// a fast malloc-like allocator so that when a function exits the fake stack
157// is not poped but remains there for quite some time until gets used again.
158// So, we poison the objects on the fake stack when function returns.
159// It helps us find use-after-return bugs.
160// We can not rely on __asan_stack_free being called on every function exit,
161// so we maintain a lifo list of all current fake frames and update it on every
162// call to __asan_stack_malloc.
163class FakeStack {
164 public:
165 FakeStack();
166 explicit FakeStack(LinkerInitialized) {}
Kostya Serebryany8d032042012-05-31 14:35:53 +0000167 void Init(uptr stack_size);
Kostya Serebryany72fde372011-12-09 01:49:31 +0000168 void StopUsingFakeStack() { alive_ = false; }
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000169 void Cleanup();
Kostya Serebryany8d032042012-05-31 14:35:53 +0000170 uptr AllocateStack(uptr size, uptr real_stack);
171 static void OnFree(uptr ptr, uptr size, uptr real_stack);
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000172 // Return the bottom of the maped region.
Kostya Serebryany8d032042012-05-31 14:35:53 +0000173 uptr AddrIsInFakeStack(uptr addr);
Alexander Potapenko0be25d52012-02-21 08:45:41 +0000174 bool StackSize() { return stack_size_; }
Alexey Samsonovc3a81192012-08-30 14:22:21 +0000175
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000176 private:
Kostya Serebryany8d032042012-05-31 14:35:53 +0000177 static const uptr kMinStackFrameSizeLog = 9; // Min frame is 512B.
178 static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
179 static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
180 static const uptr kNumberOfSizeClasses =
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000181 kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
182
Kostya Serebryany8d032042012-05-31 14:35:53 +0000183 bool AddrIsInSizeClass(uptr addr, uptr size_class);
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000184
185 // Each size class should be large enough to hold all frames.
Kostya Serebryany8d032042012-05-31 14:35:53 +0000186 uptr ClassMmapSize(uptr size_class);
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000187
Kostya Serebryany8d032042012-05-31 14:35:53 +0000188 uptr ClassSize(uptr size_class) {
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000189 return 1UL << (size_class + kMinStackFrameSizeLog);
190 }
191
192 void DeallocateFrame(FakeFrame *fake_frame);
193
Kostya Serebryany8d032042012-05-31 14:35:53 +0000194 uptr ComputeSizeClass(uptr alloc_size);
195 void AllocateOneSizeClass(uptr size_class);
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000196
Kostya Serebryany8d032042012-05-31 14:35:53 +0000197 uptr stack_size_;
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000198 bool alive_;
199
Kostya Serebryany8d032042012-05-31 14:35:53 +0000200 uptr allocated_size_classes_[kNumberOfSizeClasses];
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000201 FakeFrameFifo size_classes_[kNumberOfSizeClasses];
202 FakeFrameLifo call_stack_;
203};
204
Kostya Serebryany3674c6b2012-12-21 08:53:59 +0000205void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
206 AllocType alloc_type);
207void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000208
Kostya Serebryany6b0d7752012-08-28 11:54:30 +0000209void *asan_malloc(uptr size, StackTrace *stack);
210void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
211void *asan_realloc(void *p, uptr size, StackTrace *stack);
212void *asan_valloc(uptr size, StackTrace *stack);
213void *asan_pvalloc(uptr size, StackTrace *stack);
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000214
Kostya Serebryany8d032042012-05-31 14:35:53 +0000215int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
Kostya Serebryany6b0d7752012-08-28 11:54:30 +0000216 StackTrace *stack);
217uptr asan_malloc_usable_size(void *ptr, StackTrace *stack);
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000218
Kostya Serebryany8d032042012-05-31 14:35:53 +0000219uptr asan_mz_size(const void *ptr);
Alexey Samsonov209c5142012-01-17 06:39:10 +0000220void asan_mz_force_lock();
221void asan_mz_force_unlock();
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000222
Kostya Serebryany4a42cf62012-12-27 14:09:19 +0000223void PrintInternalAllocatorStats();
224
Kostya Serebryany019b76f2011-11-30 01:07:02 +0000225} // namespace __asan
226#endif // ASAN_ALLOCATOR_H