//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//

#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_list.h"

// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
#ifndef ASAN_ALLOCATOR_VERSION
# if (ASAN_LINUX && !ASAN_ANDROID) || ASAN_MAC || ASAN_WINDOWS
#  define ASAN_ALLOCATOR_VERSION 2
# else
#  define ASAN_ALLOCATOR_VERSION 1
# endif
#endif  // ASAN_ALLOCATOR_VERSION
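
// Illustrative note (added for exposition, not part of the original header):
// code that must differ between the two allocators branches on this macro,
// and a build can force a particular version by predefining it, e.g.
//
//   #if ASAN_ALLOCATOR_VERSION == 2
//   // allocator2-specific code
//   #else
//   // allocator1-specific code
//   #endif
//
// or on the compiler command line: -DASAN_ALLOCATOR_VERSION=1. The per-platform
// default above applies only when the macro is left undefined.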

namespace __asan {

enum AllocType {
  FROM_MALLOC = 1,  // Memory block came from malloc, calloc, realloc, etc.
  FROM_NEW = 2,     // Memory block came from operator new.
  FROM_NEW_BR = 3   // Memory block came from operator new[].
};

static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;

void InitializeAllocator();

class AsanChunkView {
 public:
  explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
  bool IsValid() { return chunk_ != 0; }
  uptr Beg();       // first byte of user memory.
  uptr End();       // last byte of user memory.
  uptr UsedSize();  // size requested by the user.
  uptr AllocTid();
  uptr FreeTid();
  void GetAllocStack(StackTrace *stack);
  void GetFreeStack(StackTrace *stack);
  bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
    if (addr >= Beg() && (addr + access_size) <= End()) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }
  bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) {
    (void)access_size;
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }
  bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) {
    if (addr + access_size > End()) {
      *offset = addr - End();
      return true;
    }
    return false;
  }

 private:
  AsanChunk *const chunk_;
};

AsanChunkView FindHeapChunkByAddress(uptr address);
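
// Usage sketch (illustrative only; the helper below is hypothetical, though
// ASan's own report code does something similar): error reporting can look up
// the chunk covering a bad address and classify the access with the
// AsanChunkView predicates above.
//
//   void ClassifyHeapAccess(uptr addr, uptr access_size) {
//     AsanChunkView chunk = FindHeapChunkByAddress(addr);
//     if (!chunk.IsValid()) return;  // Not a (known) heap address.
//     sptr offset = 0;
//     if (chunk.AddrIsInside(addr, access_size, &offset)) {
//       // Access lands inside the user region (e.g. heap-use-after-free).
//     } else if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
//       // Access underflows the chunk by |offset| bytes.
//     } else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
//       // Access overflows the chunk; |offset| bytes past the end.
//     }
//   }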

// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
 public:
  explicit AsanChunkFifoList(LinkerInitialized) { }
  AsanChunkFifoList() { clear(); }
  void Push(AsanChunk *n);
  void PushList(AsanChunkFifoList *q);
  AsanChunk *Pop();
  uptr size() { return size_; }
  void clear() {
    IntrusiveList<AsanChunk>::clear();
    size_ = 0;
  }
 private:
  uptr size_;
};
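
// Usage sketch (illustrative; the helper and byte budget are hypothetical, not
// part of this interface): the FIFO acts as a delayed-reuse quarantine. Freed
// chunks are pushed at the back and only recycled from the front once the
// accumulated size exceeds a budget.
//
//   void QuarantineChunk(AsanChunkFifoList *q, AsanChunk *m, uptr max_bytes) {
//     q->Push(m);                       // size() grows by the chunk's size.
//     while (q->size() > max_bytes) {
//       AsanChunk *oldest = q->Pop();   // FIFO order: oldest chunk first.
//       // ... actually return |oldest| to the underlying allocator here ...
//     }
//   }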

struct AsanThreadLocalMallocStorage {
  explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
#if ASAN_ALLOCATOR_VERSION == 1
      : quarantine_(x)
#endif
      { }
  AsanThreadLocalMallocStorage() {
    CHECK(REAL(memset));
    REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
  }

#if ASAN_ALLOCATOR_VERSION == 1
  AsanChunkFifoList quarantine_;
  AsanChunk *free_lists_[kNumberOfSizeClasses];
#else
  uptr quarantine_cache[16];
  uptr allocator2_cache[96 * (512 * 8 + 16)];  // Opaque.
#endif
  void CommitBack();
};
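
// Note (illustrative; the call site shown is a sketch, not a definition from
// this file): this storage is kept per thread, and CommitBack() is the hook
// that flushes the thread-local quarantine and allocator caches back to global
// state when the thread goes away, so cached chunks are not lost.
//
//   // Somewhere in thread teardown (hypothetical accessor name):
//   //   thread->malloc_storage().CommitBack();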

// Fake stack frame contains local variables of one function.
// This struct should fit into a stack redzone (32 bytes).
struct FakeFrame {
  uptr magic;  // Modified by the instrumented code.
  uptr descr;  // Modified by the instrumented code.
  FakeFrame *next;
  u64 real_stack     : 48;
  u64 size_minus_one : 16;
};
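
// Size check (illustrative arithmetic, not a check present in this file): on a
// 64-bit target the layout above is 8 (magic) + 8 (descr) + 8 (next) +
// 8 (the two bit-fields pack into a single u64) = 32 bytes, i.e. exactly one
// stack redzone, matching the comment before the struct.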

struct FakeFrameFifo {
 public:
  void FifoPush(FakeFrame *node);
  FakeFrame *FifoPop();
 private:
  FakeFrame *first_, *last_;
};

class FakeFrameLifo {
 public:
  void LifoPush(FakeFrame *node) {
    node->next = top_;
    top_ = node;
  }
  void LifoPop() {
    CHECK(top_);
    top_ = top_->next;
  }
  FakeFrame *top() { return top_; }
 private:
  FakeFrame *top_;
};

// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator, so when a function exits the fake stack
// is not popped but remains there for quite some time, until it gets reused.
// So, we poison the objects on the fake stack when the function returns.
// This helps us find use-after-return bugs.
// We cannot rely on __asan_stack_free being called on every function exit,
// so we maintain a LIFO list of all current fake frames and update it on every
// call to __asan_stack_malloc.
class FakeStack {
 public:
  FakeStack();
  explicit FakeStack(LinkerInitialized) {}
  void Init(uptr stack_size);
  void StopUsingFakeStack() { alive_ = false; }
  void Cleanup();
  uptr AllocateStack(uptr size, uptr real_stack);
  static void OnFree(uptr ptr, uptr size, uptr real_stack);
  // Return the bottom of the mapped region.
  uptr AddrIsInFakeStack(uptr addr);
  uptr StackSize() { return stack_size_; }  // Return as uptr; bool would truncate the size.

 private:
  static const uptr kMinStackFrameSizeLog = 9;  // Min frame is 512B.
  static const uptr kMaxStackFrameSizeLog = 16;  // Max stack frame is 64K.
  static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
  static const uptr kNumberOfSizeClasses =
       kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;

  bool AddrIsInSizeClass(uptr addr, uptr size_class);

  // Each size class should be large enough to hold all frames.
  uptr ClassMmapSize(uptr size_class);

  uptr ClassSize(uptr size_class) {
    return 1UL << (size_class + kMinStackFrameSizeLog);
  }

  void DeallocateFrame(FakeFrame *fake_frame);

  uptr ComputeSizeClass(uptr alloc_size);
  void AllocateOneSizeClass(uptr size_class);

  uptr stack_size_;
  bool alive_;

  uptr allocated_size_classes_[kNumberOfSizeClasses];
  FakeFrameFifo size_classes_[kNumberOfSizeClasses];
  FakeFrameLifo call_stack_;
};

void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type);
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);

void *asan_malloc(uptr size, StackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
void *asan_realloc(void *p, uptr size, StackTrace *stack);
void *asan_valloc(uptr size, StackTrace *stack);
void *asan_pvalloc(uptr size, StackTrace *stack);

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack);
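
// Usage sketch (illustrative and simplified; the interceptor and stack-capture
// macros shown are assumed to come from other ASan sources, not from this
// header): the public malloc/free and operator new/delete interceptors capture
// a StackTrace at the call site and forward to the functions above, passing
// the matching AllocType so that malloc/free vs. new/delete mismatches can be
// reported.
//
//   INTERCEPTOR(void*, malloc, uptr size) {
//     GET_STACK_TRACE_MALLOC;             // Defines a local |stack|.
//     return asan_malloc(size, &stack);
//   }
//
//   void operator delete(void *ptr) {
//     GET_STACK_TRACE_FREE;
//     asan_free(ptr, &stack, FROM_NEW);
//   }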

uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();

void PrintInternalAllocatorStats();

}  // namespace __asan
#endif  // ASAN_ALLOCATOR_H