//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
# else
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
# endif

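// In this configuration the per-thread cache argument is unused: every
// internal allocation goes straight to libc.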
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  (void)cache;
  return LIBC_MALLOC(size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  LIBC_FREE(ptr);
}

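// No InternalAllocator instance exists in this mode; internal memory must be
// managed through InternalAlloc()/InternalFree().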
InternalAllocator *internal_allocator() {
  return 0;
}

#else  // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)

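// Storage for the internal allocator is reserved statically and the object is
// constructed lazily in place, so it can be used before C++ dynamic
// initializers have run.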
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

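// Returns the singleton, lazily initializing it on first use with
// double-checked locking: the fast path is one acquire load; the slow path
// re-checks under internal_alloc_init_mu before calling Init().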
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(/* may_return_null */ false);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

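// When the caller has no per-thread cache, fall back to a single shared
// cache serialized by a spin mutex.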
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
                                          false);
  }
  return internal_allocator()->Allocate(cache, size, 8, false);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)

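// Each block returned by InternalAlloc() is preceded by a u64 magic value;
// InternalFree() checks and clears it, catching frees of pointers that were
// not obtained from InternalAlloc(). Typical usage inside the run-time:
//   void *p = InternalAlloc(size);
//   ...
//   InternalFree(p);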
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (size + sizeof(u64) < size)
    return 0;  // Adding the header would overflow uptr.
  void *p = RawInternalAlloc(size + sizeof(u64), cache);
  if (p == 0)
    return 0;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (addr == 0)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;  // Clear the magic so a double free trips the CHECK.
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

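// A simple bump allocator: carves 8-byte-aligned chunks out of pages obtained
// with MmapOrDie() and never unmaps or reuses them.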
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    // Map a fresh chunk of at least one page; any unused tail of the
    // previous chunk is abandoned.
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

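// Returns true iff n * size overflows uptr, i.e. calloc(n, size) cannot
// represent the total size. The division avoids triggering the overflow
// itself: e.g. with a 32-bit uptr, size = 0x10000 and n = 0x10001 gives
// max / size = 0xFFFF < n, so the product would wrap.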
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}

void NORETURN ReportAllocatorCannotReturnNull() {
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

}  // namespace __sanitizer