//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//

#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_list.h"

// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
#ifndef ASAN_ALLOCATOR_VERSION
# if ASAN_LINUX && !ASAN_ANDROID
#  define ASAN_ALLOCATOR_VERSION 2
# else
#  define ASAN_ALLOCATOR_VERSION 1
# endif
#endif  // ASAN_ALLOCATOR_VERSION
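
// For example, a build that wants to force the old allocator could define the
// macro on the compiler command line (hypothetical invocation, shown only to
// illustrate that the #ifndef above makes the default overridable):
//   clang++ -DASAN_ALLOCATOR_VERSION=1 ... asan_allocator.cc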

namespace __asan {

enum AllocType {
  FROM_MALLOC = 1,  // Memory block came from malloc, calloc, realloc, etc.
  FROM_NEW = 2,     // Memory block came from operator new.
  FROM_NEW_BR = 3   // Memory block came from operator new [ ]
};

static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;

void InitializeAllocator();

class AsanChunkView {
 public:
  explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
  bool IsValid() { return chunk_ != 0; }
  uptr Beg();       // first byte of user memory.
  uptr End();       // last byte of user memory.
  uptr UsedSize();  // size requested by the user.
  uptr AllocTid();
  uptr FreeTid();
  void GetAllocStack(StackTrace *stack);
  void GetFreeStack(StackTrace *stack);
  bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
    if (addr >= Beg() && (addr + access_size) <= End()) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }
  bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
    (void)access_size;
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }
  bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
    if (addr + access_size >= End()) {
      if (addr <= End())
        *offset = 0;
      else
        *offset = addr - End();
      return true;
    }
    return false;
  }

 private:
  AsanChunk *const chunk_;
};

AsanChunkView FindHeapChunkByAddress(uptr address);
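
// A minimal sketch of how these queries may be combined when classifying an
// address against a heap chunk (illustrative only; the real reporting logic
// lives elsewhere and may differ):
//   uptr offset = 0;
//   AsanChunkView chunk = FindHeapChunkByAddress(addr);
//   if (!chunk.IsValid()) return;
//   if (chunk.AddrIsInside(addr, access_size, &offset)) {
//     // addr hits user memory; offset is the distance from Beg().
//   } else if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
//     // addr is offset bytes to the left of the chunk.
//   } else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
//     // addr is at or beyond End(); offset bytes to the right.
//   }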

// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 public:
  explicit AsanChunkFifoList(LinkerInitialized) { }
  AsanChunkFifoList() { clear(); }
  void Push(AsanChunk *n);
  void PushList(AsanChunkFifoList *q);
  AsanChunk *Pop();
  uptr size() { return size_; }
  void clear() {
    IntrusiveList<AsanChunk>::clear();
    size_ = 0;
  }
 private:
  uptr size_;
};
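
// Note (assumption inferred from the interface above, not stated in this
// header): freed chunks are Push()-ed onto this FIFO to form a quarantine,
// and once size() exceeds the quarantine limit the oldest chunks are
// Pop()-ed and actually recycled by the allocator.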

struct AsanThreadLocalMallocStorage {
  explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
#if ASAN_ALLOCATOR_VERSION == 1
      : quarantine_(x)
#endif
      { }
  AsanThreadLocalMallocStorage() {
    CHECK(REAL(memset));
    REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
  }

#if ASAN_ALLOCATOR_VERSION == 1
  AsanChunkFifoList quarantine_;
  AsanChunk *free_lists_[kNumberOfSizeClasses];
#else
  uptr quarantine_cache[16];
  uptr allocator2_cache[96 * (512 * 8 + 16)];  // Opaque.
#endif
  void CommitBack();
};

// Fake stack frame contains local variables of one function.
// This struct should fit into a stack redzone (32 bytes).
struct FakeFrame {
  uptr magic;  // Modified by the instrumented code.
  uptr descr;  // Modified by the instrumented code.
  FakeFrame *next;
  u64 real_stack     : 48;
  u64 size_minus_one : 16;
};
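
// Size check for the comment above, assuming a 64-bit target: magic (8) +
// descr (8) + next (8) + one u64 holding the real_stack/size_minus_one
// bitfields (8) = 32 bytes, i.e. exactly one stack redzone.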

struct FakeFrameFifo {
 public:
  void FifoPush(FakeFrame *node);
  FakeFrame *FifoPop();
 private:
  FakeFrame *first_, *last_;
};

class FakeFrameLifo {
 public:
  void LifoPush(FakeFrame *node) {
    node->next = top_;
    top_ = node;
  }
  void LifoPop() {
    CHECK(top_);
    top_ = top_->next;
  }
  FakeFrame *top() { return top_; }
 private:
  FakeFrame *top_;
};

// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator, so when a function exits its fake frame is
// not popped but remains there for quite some time until it gets reused.
// We poison the objects on the fake stack when the function returns, which
// helps us find use-after-return bugs.
// We cannot rely on __asan_stack_free being called on every function exit,
// so we maintain a LIFO list of all current fake frames and update it on
// every call to __asan_stack_malloc.
class FakeStack {
 public:
  FakeStack();
  explicit FakeStack(LinkerInitialized) {}
  void Init(uptr stack_size);
  void StopUsingFakeStack() { alive_ = false; }
  void Cleanup();
  uptr AllocateStack(uptr size, uptr real_stack);
  static void OnFree(uptr ptr, uptr size, uptr real_stack);
  // Return the bottom of the mapped region.
  uptr AddrIsInFakeStack(uptr addr);
  uptr StackSize() { return stack_size_; }

 private:
  static const uptr kMinStackFrameSizeLog = 9;  // Min frame is 512B.
  static const uptr kMaxStackFrameSizeLog = 16;  // Max stack frame is 64K.
  static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
  static const uptr kNumberOfSizeClasses =
     kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
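  // With the constants above: 16 - 9 + 1 = 8 size classes, where class i
  // holds frames of 2^(9 + i) bytes (512B for class 0, 64K for class 7).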

  bool AddrIsInSizeClass(uptr addr, uptr size_class);

  // Each size class should be large enough to hold all frames.
  uptr ClassMmapSize(uptr size_class);

  uptr ClassSize(uptr size_class) {
    return 1UL << (size_class + kMinStackFrameSizeLog);
  }

  void DeallocateFrame(FakeFrame *fake_frame);

  uptr ComputeSizeClass(uptr alloc_size);
  void AllocateOneSizeClass(uptr size_class);

  uptr stack_size_;
  bool alive_;

  uptr allocated_size_classes_[kNumberOfSizeClasses];
  FakeFrameFifo size_classes_[kNumberOfSizeClasses];
  FakeFrameLifo call_stack_;
};
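
// Rough sketch of how compiler-instrumented code is expected to use the fake
// stack (illustrative pseudocode only; the exact calling sequence is defined
// by the instrumentation pass, not by this header, and kFrameSize is a
// made-up placeholder):
//   uptr real_stack = <current stack pointer>;
//   uptr frame = __asan_stack_malloc(kFrameSize, real_stack);
//   // ... locals live inside 'frame', surrounded by poisoned redzones ...
//   __asan_stack_free(frame, kFrameSize, real_stack);  // Poisons the frame.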

void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type);
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);

void *asan_malloc(uptr size, StackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
void *asan_realloc(void *p, uptr size, StackTrace *stack);
void *asan_valloc(uptr size, StackTrace *stack);
void *asan_pvalloc(uptr size, StackTrace *stack);

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack);

uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();

void PrintInternalAllocatorStats();

// Log2 and RoundUpToPowerOfTwo should be inlined for performance.
#if defined(_WIN32) && !defined(__clang__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif

static inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
#if !defined(_WIN32) || defined(__clang__)
  return __builtin_ctzl(x);
#elif defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#else
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#endif
}
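
// For example, Log2(1) == 0, Log2(16) == 4 and Log2(1UL << 20) == 20; a value
// that is not a power of two trips the CHECK above.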

static inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  unsigned long up;  // NOLINT
#if !defined(_WIN32) || defined(__clang__)
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
#elif defined(_WIN64)
  _BitScanReverse64(&up, size);
#else
  _BitScanReverse(&up, size);
#endif
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}
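
// For example, RoundUpToPowerOfTwo(17) finds up == 4 (the index of the
// highest set bit) and returns 1 << 5 == 32, while RoundUpToPowerOfTwo(32)
// returns 32 unchanged via the IsPowerOfTwo early exit.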


}  // namespace __asan
#endif  // ASAN_ALLOCATOR_H