//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//

#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_list.h"

// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive, so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
#define ASAN_ALLOCATOR_VERSION 1
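// For example (a build-system sketch; it assumes the definition above is made
// overridable, e.g. guarded by #ifndef), a build could pick the v2 allocator via
//   CXXFLAGS += -DASAN_ALLOCATOR_VERSION=2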

namespace __asan {

static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;

class AsanChunkView {
 public:
  explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
  bool IsValid() { return chunk_ != 0; }
  uptr Beg();       // first byte of user memory.
  uptr End();       // last byte of user memory.
  uptr UsedSize();  // size requested by the user.
  uptr AllocTid();
  uptr FreeTid();
  void GetAllocStack(StackTrace *stack);
  void GetFreeStack(StackTrace *stack);
  bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
    if (addr >= Beg() && (addr + access_size) <= End()) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }
  bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
    (void)access_size;
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }
  bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
    if (addr + access_size >= End()) {
      if (addr <= End())
        *offset = 0;
      else
        *offset = addr - End();
      return true;
    }
    return false;
  }

 private:
  AsanChunk *const chunk_;
};

AsanChunkView FindHeapChunkByAddress(uptr address);

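// Example use when describing an error address (a sketch, not the actual
// error-reporting code):
//   uptr offset = 0;
//   AsanChunkView chunk = FindHeapChunkByAddress(bad_addr);
//   if (chunk.IsValid() && chunk.AddrIsInside(bad_addr, access_size, &offset)) {
//     // bad_addr is 'offset' bytes inside a chunk of chunk.UsedSize() bytes;
//     // chunk.GetAllocStack()/GetFreeStack() tell where it was (de)allocated.
//   }
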
// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 public:
  explicit AsanChunkFifoList(LinkerInitialized) { }
  AsanChunkFifoList() { clear(); }
  void Push(AsanChunk *n);
  void PushList(AsanChunkFifoList *q);
  AsanChunk *Pop();
  uptr size() { return size_; }
  void clear() {
    IntrusiveList<AsanChunk>::clear();
    size_ = 0;
  }
 private:
  uptr size_;
};

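// Quarantine sketch (illustrative only): freed chunks are pushed onto such a
// list and handed back to the allocator once the accumulated size gets too big.
//   AsanChunkFifoList &q = ...;            // e.g. the per-thread quarantine below
//   q.Push(chunk);                         // on free()
//   while (q.size() > kMaxQuarantineSize)  // kMaxQuarantineSize is hypothetical
//     RecycleChunk(q.Pop());               // RecycleChunk is a hypothetical helper
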
struct AsanThreadLocalMallocStorage {
  explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
      : quarantine_(x) { }
  AsanThreadLocalMallocStorage() {
    CHECK(REAL(memset));
    REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
  }

  AsanChunkFifoList quarantine_;
  AsanChunk *free_lists_[kNumberOfSizeClasses];
  void CommitBack();
};

// Fake stack frame contains local variables of one function.
// This struct should fit into a stack redzone (32 bytes).
struct FakeFrame {
  uptr magic;  // Modified by the instrumented code.
  uptr descr;  // Modified by the instrumented code.
  FakeFrame *next;
  u64 real_stack     : 48;
  u64 size_minus_one : 16;
};
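// On a 64-bit target this adds up to exactly 32 bytes: magic (8) + descr (8) +
// next (8) + the packed real_stack/size_minus_one bit-fields (48 + 16 bits = 8).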

struct FakeFrameFifo {
 public:
  void FifoPush(FakeFrame *node);
  FakeFrame *FifoPop();
 private:
  FakeFrame *first_, *last_;
};

class FakeFrameLifo {
 public:
  void LifoPush(FakeFrame *node) {
    node->next = top_;
    top_ = node;
  }
  void LifoPop() {
    CHECK(top_);
    top_ = top_->next;
  }
  FakeFrame *top() { return top_; }
 private:
  FakeFrame *top_;
};

// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator, so when a function exits its fake frame
// is not popped but remains there for quite some time until it gets reused.
// So, we poison the objects on the fake stack when the function returns.
// This helps us find use-after-return bugs.
// We cannot rely on __asan_stack_free being called on every function exit,
// so we maintain a LIFO list of all current fake frames and update it on every
// call to __asan_stack_malloc.
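// Conceptually, the instrumented code for a function looks like this
// (a sketch only; the real code is emitted by the compiler instrumentation):
//   void foo() {
//     uptr fake_frame = __asan_stack_malloc(frame_size, real_stack);
//     ... address-taken locals live inside the fake frame ...
//     __asan_stack_free(fake_frame, frame_size, real_stack);  // may be skipped
//   }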
class FakeStack {
 public:
  FakeStack();
  explicit FakeStack(LinkerInitialized) {}
  void Init(uptr stack_size);
  void StopUsingFakeStack() { alive_ = false; }
  void Cleanup();
  uptr AllocateStack(uptr size, uptr real_stack);
  static void OnFree(uptr ptr, uptr size, uptr real_stack);
  // Return the bottom of the mapped region.
  uptr AddrIsInFakeStack(uptr addr);
  uptr StackSize() { return stack_size_; }

 private:
  static const uptr kMinStackFrameSizeLog = 9;  // Min frame is 512B.
  static const uptr kMaxStackFrameSizeLog = 16;  // Max stack frame is 64K.
  static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
  static const uptr kNumberOfSizeClasses =
      kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;

  bool AddrIsInSizeClass(uptr addr, uptr size_class);

  // Each size class should be large enough to hold all frames.
  uptr ClassMmapSize(uptr size_class);

  uptr ClassSize(uptr size_class) {
    return 1UL << (size_class + kMinStackFrameSizeLog);
  }
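  // E.g. ClassSize(0) == 512, ClassSize(1) == 1024, ...,
  // ClassSize(kNumberOfSizeClasses - 1) == (1 << kMaxStackFrameSizeLog) == 64K.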

  void DeallocateFrame(FakeFrame *fake_frame);

  uptr ComputeSizeClass(uptr alloc_size);
  void AllocateOneSizeClass(uptr size_class);

  uptr stack_size_;
  bool alive_;

  uptr allocated_size_classes_[kNumberOfSizeClasses];
  FakeFrameFifo size_classes_[kNumberOfSizeClasses];
  FakeFrameLifo call_stack_;
};

void *asan_memalign(uptr alignment, uptr size, StackTrace *stack);
void asan_free(void *ptr, StackTrace *stack);

void *asan_malloc(uptr size, StackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
void *asan_realloc(void *p, uptr size, StackTrace *stack);
void *asan_valloc(uptr size, StackTrace *stack);
void *asan_pvalloc(uptr size, StackTrace *stack);

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack);
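
// These functions are called from the malloc interceptors, roughly like this
// (a sketch; the real interceptors and the stack-capturing macro live
// elsewhere in ASan):
//   INTERCEPTOR(void*, malloc, uptr size) {
//     GET_STACK_TRACE_MALLOC;  // hypothetical name for the capture macro
//     return asan_malloc(size, &stack);
//   }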

uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();

// Log2 and RoundUpToPowerOfTwo should be inlined for performance.
#if defined(_WIN32) && !defined(__clang__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif

static inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
#if !defined(_WIN32) || defined(__clang__)
  return __builtin_ctzl(x);
#elif defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#else
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#endif
}
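// For example, Log2(1) == 0 and Log2(4096) == 12 (the argument must be a power
// of two, which the CHECK above enforces).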

static inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  unsigned long up;  // NOLINT
#if !defined(_WIN32) || defined(__clang__)
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
#elif defined(_WIN64)
  _BitScanReverse64(&up, size);
#else
  _BitScanReverse(&up, size);
#endif
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}
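// For example, RoundUpToPowerOfTwo(1) == 1, RoundUpToPowerOfTwo(100) == 128 and
// RoundUpToPowerOfTwo(4096) == 4096.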


}  // namespace __asan
#endif  // ASAN_ALLOCATOR_H