//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
# else
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  (void)cache;
  return LIBC_MALLOC(size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  LIBC_FREE(ptr);
}

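// Note: in this configuration there is no real internal allocator instance;
// internal_allocator() below returns null, and the Raw* wrappers above route
// every internal allocation straight to libc.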
44InternalAllocator *internal_allocator() {
45 return 0;
46}
47
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -080048#else // SANITIZER_GO
Alexey Samsonov1f3c2fe2013-05-29 09:15:39 +000049
Alexey Samsonov845abaf2013-05-29 10:41:53 +000050static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
Alexey Samsonov1f3c2fe2013-05-29 09:15:39 +000051static atomic_uint8_t internal_allocator_initialized;
52static StaticSpinMutex internal_alloc_init_mu;
53
54static InternalAllocatorCache internal_allocator_cache;
55static StaticSpinMutex internal_allocator_cache_mu;
56
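// Returns the singleton internal allocator, lazily constructing it in the
// static placeholder buffer above. Initialization uses double-checked locking:
// the fast path is an acquire load of internal_allocator_initialized; the slow
// path re-checks under internal_alloc_init_mu before calling Init() and
// publishing the instance with a release store.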
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(/* may_return_null */ false);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

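// If the caller does not supply a per-thread cache, fall back to the single
// shared internal_allocator_cache, serialized by a spin mutex.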
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
                                          false);
  }
  return internal_allocator()->Allocate(cache, size, 8, false);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO

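// Every block handed out by InternalAlloc() is prefixed with a u64 header
// holding kBlockMagic. InternalFree() verifies and then clears the magic, so
// freeing a foreign pointer or freeing the same block twice trips the CHECK.
// A minimal usage sketch (hypothetical caller, not part of this file):
//   void *p = InternalAlloc(128, nullptr);  // payload starts past the header
//   ...
//   InternalFree(p, nullptr);               // checks the header, then frees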
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache);
  if (!p)
    return nullptr;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

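// Simple bump allocator: requests are served from the tail of the current
// chunk, and a fresh chunk of at least one page is obtained with MmapOrDie()
// when the current one runs out. There is no deallocation path in this file.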
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

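// Returns true if n * size would overflow uptr, i.e. when calloc(n, size)
// cannot represent the total allocation size. The division avoids performing
// the potentially overflowing multiplication itself.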
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}

void NORETURN ReportAllocatorCannotReturnNull() {
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

}  // namespace __sanitizer