//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//

#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_list.h"

// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
#define ASAN_ALLOCATOR_VERSION 1
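
// Illustrative sketch (not in the original header): translation units that
// diverge between the two allocators dispatch on this macro, and a guard like
// the one below would catch an unexpected value at compile time.
#if ASAN_ALLOCATOR_VERSION != 1 && ASAN_ALLOCATOR_VERSION != 2
# error "ASAN_ALLOCATOR_VERSION should be 1 or 2"
#endif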

namespace __asan {

static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;

class AsanChunkView {
 public:
  explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
  bool IsValid() { return chunk_ != 0; }
  uptr Beg();       // first byte of user memory.
  uptr End();       // last byte of user memory.
  uptr UsedSize();  // size requested by the user.
  uptr AllocTid();
  uptr FreeTid();
  void GetAllocStack(StackTrace *stack);
  void GetFreeStack(StackTrace *stack);
  bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
    if (addr >= Beg() && (addr + access_size) <= End()) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }
  bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
    (void)access_size;
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }
  bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
    if (addr + access_size >= End()) {
      if (addr <= End())
        *offset = 0;
      else
        *offset = addr - End();
      return true;
    }
    return false;
  }

 private:
  AsanChunk *const chunk_;
};

AsanChunkView FindHeapChunkByAddress(uptr address);
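
// Illustrative sketch (not part of the ASan runtime interface): how an error
// reporting path might use AsanChunkView to classify a bad address relative
// to the nearest heap chunk. The function name and the wording of the
// returned strings are assumptions made only for this example.
static inline const char *ClassifyHeapAddressExample(uptr addr,
                                                     uptr access_size) {
  AsanChunkView chunk = FindHeapChunkByAddress(addr);
  if (!chunk.IsValid())
    return "not a recognized heap address";
  uptr offset = 0;
  if (chunk.AddrIsInside(addr, access_size, &offset))
    return "inside user memory (e.g. heap-use-after-free)";
  if (chunk.AddrIsAtLeft(addr, access_size, &offset))
    return "to the left of the chunk (heap-buffer-underflow)";
  if (chunk.AddrIsAtRight(addr, access_size, &offset))
    return "to the right of the chunk (heap-buffer-overflow)";
  return "near a heap chunk, but not adjacent to it";
}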

// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 public:
  explicit AsanChunkFifoList(LinkerInitialized) { }
  AsanChunkFifoList() { clear(); }
  void Push(AsanChunk *n);
  void PushList(AsanChunkFifoList *q);
  AsanChunk *Pop();
  uptr size() { return size_; }
  void clear() {
    IntrusiveList<AsanChunk>::clear();
    size_ = 0;
  }
 private:
  uptr size_;
};
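
// A minimal sketch of the quarantine pattern this list supports (an
// assumption for illustration; the real policy lives in asan_allocator.cc):
// freed chunks are pushed at the tail, and the oldest chunks are popped once
// the total quarantined size exceeds a limit. The limit value and the
// function name here are made up.
static inline void QuarantinePushExample(AsanChunkFifoList *quarantine,
                                         AsanChunk *freed_chunk) {
  const uptr kExampleQuarantineLimit = 1UL << 20;  // 1M, illustration only.
  quarantine->Push(freed_chunk);
  while (quarantine->size() > kExampleQuarantineLimit) {
    AsanChunk *oldest = quarantine->Pop();
    // The real allocator would unpoison and recycle 'oldest' here.
    (void)oldest;
  }
}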

struct AsanThreadLocalMallocStorage {
  explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
      : quarantine_(x) { }
  AsanThreadLocalMallocStorage() {
    CHECK(REAL(memset));
    REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
  }

  AsanChunkFifoList quarantine_;
  AsanChunk *free_lists_[kNumberOfSizeClasses];
#if ASAN_ALLOCATOR_VERSION == 2
  uptr allocator2_cache[1024];  // Opaque.
#endif
  void CommitBack();
};

// A fake stack frame contains the local variables of one function.
// This struct should fit into a stack redzone (32 bytes).
struct FakeFrame {
  uptr magic;  // Modified by the instrumented code.
  uptr descr;  // Modified by the instrumented code.
  FakeFrame *next;
  u64 real_stack : 48;
  u64 size_minus_one : 16;
};
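
// A small sketch of a compile-time check (not present in the original
// source) that FakeFrame indeed fits into a 32-byte redzone: on a 64-bit
// target the fields above add up to exactly 4 * 8 = 32 bytes.
typedef char FakeFrameFitsInRedzoneCheck[sizeof(FakeFrame) <= 32 ? 1 : -1];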

struct FakeFrameFifo {
 public:
  void FifoPush(FakeFrame *node);
  FakeFrame *FifoPop();
 private:
  FakeFrame *first_, *last_;
};

class FakeFrameLifo {
 public:
  void LifoPush(FakeFrame *node) {
    node->next = top_;
    top_ = node;
  }
  void LifoPop() {
    CHECK(top_);
    top_ = top_->next;
  }
  FakeFrame *top() { return top_; }
 private:
  FakeFrame *top_;
};
// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator, so when a function exits its fake frame
// is not popped but remains there for quite some time until it gets reused.
// Instead, we poison the objects on the fake stack when the function returns,
// which helps us find use-after-return bugs.
// We cannot rely on __asan_stack_free being called on every function exit,
// so we maintain a LIFO list of all current fake frames and update it on every
// call to __asan_stack_malloc. (See the usage sketch after the FakeStack
// class below.)
class FakeStack {
 public:
  FakeStack();
  explicit FakeStack(LinkerInitialized) {}
  void Init(uptr stack_size);
  void StopUsingFakeStack() { alive_ = false; }
  void Cleanup();
  uptr AllocateStack(uptr size, uptr real_stack);
  static void OnFree(uptr ptr, uptr size, uptr real_stack);
  // Return the bottom of the mapped region.
  uptr AddrIsInFakeStack(uptr addr);
  uptr StackSize() { return stack_size_; }

 private:
  static const uptr kMinStackFrameSizeLog = 9;  // Min frame is 512B.
  static const uptr kMaxStackFrameSizeLog = 16;  // Max stack frame is 64K.
  static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
  static const uptr kNumberOfSizeClasses =
      kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;

  bool AddrIsInSizeClass(uptr addr, uptr size_class);

  // Each size class should be large enough to hold all frames.
  uptr ClassMmapSize(uptr size_class);

  uptr ClassSize(uptr size_class) {
    return 1UL << (size_class + kMinStackFrameSizeLog);
  }

  void DeallocateFrame(FakeFrame *fake_frame);

  uptr ComputeSizeClass(uptr alloc_size);
  void AllocateOneSizeClass(uptr size_class);

  uptr stack_size_;
  bool alive_;

  uptr allocated_size_classes_[kNumberOfSizeClasses];
  FakeFrameFifo size_classes_[kNumberOfSizeClasses];
  FakeFrameLifo call_stack_;
};
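
// Illustrative sketch (an assumption, not the actual instrumentation): the
// lifetime of one fake frame as driven by the use-after-return machinery
// described above. On "function entry" a frame is taken from the fake stack;
// on "function exit" it is handed back, which poisons it rather than reusing
// the memory immediately. The function name is hypothetical.
static inline void FakeFrameLifetimeExample(FakeStack *fake_stack,
                                            uptr frame_size, uptr real_stack) {
  // Entry: allocate a fake frame instead of using the real stack.
  uptr fake_frame = fake_stack->AllocateStack(frame_size, real_stack);
  // ... the function body would keep its locals in
  // [fake_frame, fake_frame + frame_size) ...
  // Exit: poison the frame; it stays around so that later accesses through
  // dangling pointers are reported as use-after-return.
  FakeStack::OnFree(fake_frame, frame_size, real_stack);
}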

void *asan_memalign(uptr alignment, uptr size, StackTrace *stack);
void asan_free(void *ptr, StackTrace *stack);

void *asan_malloc(uptr size, StackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
void *asan_realloc(void *p, uptr size, StackTrace *stack);
void *asan_valloc(uptr size, StackTrace *stack);
void *asan_pvalloc(uptr size, StackTrace *stack);

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack);

uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();
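
// A minimal usage sketch (not the real interceptor code, which lives in the
// asan_malloc_* files): interceptors capture the caller's stack trace and
// forward to the functions above, which record it in the chunk metadata.
// The wrapper names are hypothetical, and 'stack' is assumed to be already
// filled by the caller's unwinder.
static inline void *ExampleMallocPath(uptr size, StackTrace *stack) {
  return asan_malloc(size, stack);  // Records 'stack' as the allocation site.
}
static inline void ExampleFreePath(void *ptr, StackTrace *stack) {
  asan_free(ptr, stack);  // Records 'stack' as the deallocation site.
}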
Kostya Serebryany1e172b42011-11-30 01:07:02 +0000209
Kostya Serebryany2679f192012-12-10 14:19:15 +0000210// Log2 and RoundUpToPowerOfTwo should be inlined for performance.
Kostya Serebryany956ad472012-12-11 07:27:59 +0000211#if defined(_WIN32) && !defined(__clang__)
Timur Iskhodzhanovd923f2b2012-12-11 12:23:00 +0000212extern "C" {
Timur Iskhodzhanov8416e212012-12-11 12:24:41 +0000213unsigned char _BitScanForward(unsigned long *index, unsigned long mask); // NOLINT
214unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); // NOLINT
Timur Iskhodzhanov4e773522012-12-11 12:03:06 +0000215#if defined(_WIN64)
Timur Iskhodzhanov8416e212012-12-11 12:24:41 +0000216unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask); // NOLINT
217unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); // NOLINT
Timur Iskhodzhanov4e773522012-12-11 12:03:06 +0000218#endif
Timur Iskhodzhanovd923f2b2012-12-11 12:23:00 +0000219}
Kostya Serebryany956ad472012-12-11 07:27:59 +0000220#endif
Kostya Serebryany2679f192012-12-10 14:19:15 +0000221
222static inline uptr Log2(uptr x) {
223 CHECK(IsPowerOfTwo(x));
224#if !defined(_WIN32) || defined(__clang__)
225 return __builtin_ctzl(x);
226#elif defined(_WIN64)
227 unsigned long ret; // NOLINT
228 _BitScanForward64(&ret, x);
229 return ret;
230#else
231 unsigned long ret; // NOLINT
232 _BitScanForward(&ret, x);
233 return ret;
234#endif
235}
236
237static inline uptr RoundUpToPowerOfTwo(uptr size) {
238 CHECK(size);
239 if (IsPowerOfTwo(size)) return size;
240
241 unsigned long up; // NOLINT
242#if !defined(_WIN32) || defined(__clang__)
243 up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
244#elif defined(_WIN64)
245 _BitScanReverse64(&up, size);
246#else
247 _BitScanReverse(&up, size);
248#endif
249 CHECK(size < (1ULL << (up + 1)));
250 CHECK(size > (1ULL << up));
251 return 1UL << (up + 1);
252}
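
// Behavior sketch for the two helpers above (illustration only; this check
// function is not part of the header): Log2 requires a power of two, while
// RoundUpToPowerOfTwo maps any non-zero size up to the next power of two.
static inline void PowerOfTwoHelpersExample() {
  CHECK(Log2(1) == 0);
  CHECK(Log2(4096) == 12);
  CHECK(RoundUpToPowerOfTwo(1) == 1);        // Already a power of two.
  CHECK(RoundUpToPowerOfTwo(9) == 16);       // Rounded up to the next one.
  CHECK(RoundUpToPowerOfTwo(4096) == 4096);  // Unchanged.
}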

}  // namespace __asan
#endif  // ASAN_ALLOCATOR_H